-rw-r--r-- .mailmap | 3
-rw-r--r-- Documentation/ABI/stable/sysfs-devices | 2
-rw-r--r-- Documentation/PCI/pci.txt | 1
-rw-r--r-- Documentation/arm/CCN.txt | 16
-rw-r--r-- Documentation/cpu-freq/cpufreq-stats.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/mmc/sdhci-st.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/qca8k.txt | 89
-rw-r--r-- Documentation/devicetree/bindings/net/ethernet.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/net/mediatek-net.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/net/smsc911x.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/serial/8250.txt | 19
-rw-r--r-- Documentation/filesystems/overlayfs.txt | 8
-rw-r--r-- Documentation/i2c/slave-interface | 5
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst | 21
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 8
-rw-r--r-- Documentation/networking/ipvlan.txt | 7
-rw-r--r-- Documentation/rapidio/mport_cdev.txt | 4
-rw-r--r-- MAINTAINERS | 59
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/Kconfig | 11
-rw-r--r-- arch/alpha/include/asm/uaccess.h | 19
-rw-r--r-- arch/arc/include/asm/uaccess.h | 11
-rw-r--r-- arch/arm/boot/dts/am335x-baltos.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/am335x-igep0033.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/am335x-phycore-som.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/armada-388-clearfog.dts | 8
-rw-r--r-- arch/arm/boot/dts/bcm2835-rpi.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/bcm283x.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/exynos5410-odroidxu.dts | 3
-rw-r--r-- arch/arm/boot/dts/imx6qdl.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6sx-sabreauto.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-sdb.dts | 2
-rw-r--r-- arch/arm/boot/dts/kirkwood-ib62x0.dts | 2
-rw-r--r-- arch/arm/boot/dts/kirkwood-openrd.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/logicpd-som-lv.dtsi | 11
-rw-r--r-- arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/omap3-overo-base.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-overo-tobi-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/rk3066a.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3xxx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/stih407-family.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/stih410.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/sun5i-a13.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/tegra114-dalmore.dts | 2
-rw-r--r-- arch/arm/boot/dts/tegra114-roth.dts | 2
-rw-r--r-- arch/arm/boot/dts/tegra114-tn7.dts | 2
-rw-r--r-- arch/arm/boot/dts/tegra124-jetson-tk1.dts | 4
-rw-r--r-- arch/arm/common/locomo.c | 5
-rw-r--r-- arch/arm/common/sa1111.c | 32
-rw-r--r-- arch/arm/configs/keystone_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 2
-rw-r--r-- arch/arm/crypto/aes-ce-glue.c | 2
-rw-r--r-- arch/arm/include/asm/pgtable-2level-hwdef.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable-3level-hwdef.h | 1
-rw-r--r-- arch/arm/kernel/hyp-stub.S | 13
-rw-r--r-- arch/arm/kvm/arm.c | 2
-rw-r--r-- arch/arm/kvm/mmu.c | 4
-rw-r--r-- arch/arm/mach-exynos/suspend.c | 6
-rw-r--r-- arch/arm/mach-imx/mach-imx6ul.c | 1
-rw-r--r-- arch/arm/mach-imx/pm-imx6.c | 4
-rw-r--r-- arch/arm/mach-omap2/cm33xx.c | 6
-rw-r--r-- arch/arm/mach-omap2/cminst44xx.c | 6
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 8
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.h | 4
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 12
-rw-r--r-- arch/arm/mach-pxa/lubbock.c | 14
-rw-r--r-- arch/arm/mach-sa1100/clock.c | 5
-rw-r--r-- arch/arm/mach-sa1100/generic.c | 4
-rw-r--r-- arch/arm/mach-sa1100/generic.h | 2
-rw-r--r-- arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c | 62
-rw-r--r-- arch/arm/mm/mmu.c | 2
-rw-r--r-- arch/arm/mm/proc-v7.S | 1
-rw-r--r-- arch/arm/xen/enlighten.c | 7
-rw-r--r-- arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/apm/apm-storm.dtsi | 8
l--------- arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts | 4
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm2837.dtsi | 2
l--------- arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi | 1
l--------- arch/arm64/boot/dts/broadcom/bcm283x.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/broadcom/ns2.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/cavium/thunder-88xx.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos7.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 8
-rw-r--r-- arch/arm64/crypto/aes-glue.c | 2
-rw-r--r-- arch/arm64/include/asm/percpu.h | 8
-rw-r--r-- arch/arm64/include/asm/spinlock.h | 10
-rw-r--r-- arch/arm64/mm/proc.S | 9
-rw-r--r-- arch/avr32/include/asm/uaccess.h | 11
-rw-r--r-- arch/avr32/kernel/avr32_ksyms.c | 2
-rw-r--r-- arch/avr32/lib/copy_user.S | 8
-rw-r--r-- arch/blackfin/include/asm/uaccess.h | 9
-rw-r--r-- arch/cris/include/asm/uaccess.h | 71
-rw-r--r-- arch/frv/include/asm/uaccess.h | 12
-rw-r--r-- arch/hexagon/include/asm/uaccess.h | 3
-rw-r--r-- arch/ia64/include/asm/uaccess.h | 33
-rw-r--r-- arch/m32r/include/asm/uaccess.h | 2
-rw-r--r-- arch/metag/include/asm/uaccess.h | 3
-rw-r--r-- arch/microblaze/include/asm/uaccess.h | 11
-rw-r--r-- arch/mips/include/asm/uaccess.h | 3
-rw-r--r-- arch/mn10300/include/asm/uaccess.h | 1
-rw-r--r-- arch/mn10300/lib/usercopy.c | 4
-rw-r--r-- arch/nios2/include/asm/uaccess.h | 13
-rw-r--r-- arch/openrisc/include/asm/uaccess.h | 35
-rw-r--r-- arch/parisc/Kconfig | 1
-rw-r--r-- arch/parisc/configs/c8000_defconfig | 1
-rw-r--r-- arch/parisc/configs/generic-64bit_defconfig | 1
-rw-r--r-- arch/parisc/include/asm/uaccess.h | 27
-rw-r--r-- arch/powerpc/include/asm/cpu_has_feature.h | 2
-rw-r--r-- arch/powerpc/include/asm/uaccess.h | 38
-rw-r--r-- arch/powerpc/kernel/idle_book3s.S | 10
-rw-r--r-- arch/powerpc/lib/checksum_32.S | 7
-rw-r--r-- arch/powerpc/mm/slb_low.S | 7
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 44
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c | 12
-rw-r--r-- arch/s390/Kconfig | 1
-rw-r--r-- arch/s390/configs/default_defconfig | 1
-rw-r--r-- arch/s390/configs/gcov_defconfig | 1
-rw-r--r-- arch/s390/configs/performance_defconfig | 1
-rw-r--r-- arch/s390/defconfig | 1
-rw-r--r-- arch/s390/include/asm/uaccess.h | 27
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 10
-rw-r--r-- arch/s390/kvm/vsie.c | 2
-rw-r--r-- arch/score/include/asm/uaccess.h | 46
-rw-r--r-- arch/sh/include/asm/uaccess.h | 5
-rw-r--r-- arch/sh/include/asm/uaccess_64.h | 1
-rw-r--r-- arch/sparc/include/asm/uaccess_32.h | 13
-rw-r--r-- arch/sparc/include/asm/uaccess_64.h | 7
-rw-r--r-- arch/tile/Kconfig | 1
-rw-r--r-- arch/tile/include/asm/uaccess.h | 22
-rw-r--r-- arch/um/kernel/skas/syscall.c | 10
-rw-r--r-- arch/x86/Kconfig | 1
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 134
-rw-r--r-- arch/x86/configs/tiny.config | 2
-rw-r--r-- arch/x86/events/amd/core.c | 4
-rw-r--r-- arch/x86/events/amd/uncore.c | 22
-rw-r--r-- arch/x86/events/intel/bts.c | 123
-rw-r--r-- arch/x86/events/intel/core.c | 15
-rw-r--r-- arch/x86/events/intel/cqm.c | 9
-rw-r--r-- arch/x86/events/intel/ds.c | 19
-rw-r--r-- arch/x86/events/intel/pt.c | 18
-rw-r--r-- arch/x86/include/asm/uaccess.h | 79
-rw-r--r-- arch/x86/kernel/apic/apic.c | 4
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 12
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd.c | 13
-rw-r--r-- arch/x86/kernel/kvmclock.c | 1
-rw-r--r-- arch/x86/kernel/paravirt.c | 4
-rw-r--r-- arch/x86/kvm/ioapic.c | 8
-rw-r--r-- arch/x86/kvm/pmu_amd.c | 4
-rw-r--r-- arch/x86/kvm/x86.c | 8
-rw-r--r-- arch/x86/mm/pat.c | 17
-rw-r--r-- arch/x86/pci/fixup.c | 20
-rw-r--r-- arch/x86/um/ptrace_32.c | 3
-rw-r--r-- arch/x86/um/ptrace_64.c | 4
-rw-r--r-- crypto/blkcipher.c | 3
-rw-r--r-- crypto/cryptd.c | 12
-rw-r--r-- crypto/echainiv.c | 115
-rw-r--r-- drivers/acpi/nfit/mce.c | 2
-rw-r--r-- drivers/acpi/scan.c | 6
-rw-r--r-- drivers/ata/libahci.c | 2
-rw-r--r-- drivers/ata/pata_ninja32.c | 2
-rw-r--r-- drivers/atm/eni.c | 5
-rw-r--r-- drivers/atm/he.c | 10
-rw-r--r-- drivers/atm/iphase.c | 19
-rw-r--r-- drivers/atm/nicstar.c | 15
-rw-r--r-- drivers/atm/zatm.c | 16
-rw-r--r-- drivers/base/power/runtime.c | 14
-rw-r--r-- drivers/base/regmap/regcache-rbtree.c | 38
-rw-r--r-- drivers/base/regmap/regcache.c | 5
-rw-r--r-- drivers/base/regmap/regmap.c | 2
-rw-r--r-- drivers/bcma/driver_chipcommon.c | 32
-rw-r--r-- drivers/bcma/main.c | 6
-rw-r--r-- drivers/bluetooth/Kconfig | 23
-rw-r--r-- drivers/bluetooth/Makefile | 2
-rw-r--r-- drivers/bluetooth/bcm203x.c | 4
-rw-r--r-- drivers/bluetooth/btqca.c | 8
-rw-r--r-- drivers/bluetooth/btqcomsmd.c | 182
-rw-r--r-- drivers/bluetooth/btrtl.c | 107
-rw-r--r-- drivers/bluetooth/btusb.c | 13
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 2
-rw-r--r-- drivers/bluetooth/hci_intel.c | 6
-rw-r--r-- drivers/bluetooth/hci_ldisc.c | 6
-rw-r--r-- drivers/bluetooth/hci_mrvl.c | 387
-rw-r--r-- drivers/bluetooth/hci_qca.c | 2
-rw-r--r-- drivers/bluetooth/hci_uart.h | 9
-rw-r--r-- drivers/bus/arm-cci.c | 2
-rw-r--r-- drivers/bus/arm-ccn.c | 112
-rw-r--r-- drivers/bus/vexpress-config.c | 1
-rw-r--r-- drivers/char/hw_random/Kconfig | 2
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 3
-rw-r--r-- drivers/char/virtio_console.c | 23
-rw-r--r-- drivers/clk/renesas/r8a7795-cpg-mssr.c | 9
-rw-r--r-- drivers/clk/rockchip/clk-rk3399.c | 11
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 16
-rw-r--r-- drivers/clk/sunxi-ng/ccu_common.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nk.c | 6
-rw-r--r-- drivers/clk/sunxi/clk-a10-pll2.c | 4
-rw-r--r-- drivers/clk/sunxi/clk-sun8i-mbus.c | 2
-rw-r--r-- drivers/clk/tegra/clk-tegra114.c | 4
-rw-r--r-- drivers/clocksource/timer-atmel-pit.c | 1
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 2
-rw-r--r-- drivers/crypto/caam/caamalg.c | 77
-rw-r--r-- drivers/crypto/chelsio/chcr_core.c | 10
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 4
-rw-r--r-- drivers/crypto/vmx/aes_xts.c | 2
-rw-r--r-- drivers/dax/dax.c | 2
-rw-r--r-- drivers/dma/at_xdmac.c | 4
-rw-r--r-- drivers/dma/fsl_raid.c | 1
-rw-r--r-- drivers/dma/img-mdc-dma.c | 4
-rw-r--r-- drivers/dma/pxa_dma.c | 11
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 19
-rw-r--r-- drivers/firmware/arm_scpi.c | 5
-rw-r--r-- drivers/firmware/dmi-id.c | 8
-rw-r--r-- drivers/firmware/efi/efi.c | 7
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 169
-rw-r--r-- drivers/firmware/efi/libstub/fdt.c | 54
-rw-r--r-- drivers/firmware/efi/libstub/random.c | 12
-rw-r--r-- drivers/gpio/Kconfig | 1
-rw-r--r-- drivers/gpio/gpio-mcp23s08.c | 2
-rw-r--r-- drivers/gpio/gpio-sa1100.c | 2
-rw-r--r-- drivers/gpio/gpiolib-of.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 12
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 10
-rw-r--r-- drivers/gpu/drm/drm_ioc32.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 6
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 29
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 35
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.c | 26
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 9
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 27
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 14
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c | 26
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c | 4
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 21
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 9
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 11
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c | 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c | 6
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.h | 9
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c | 18
-rw-r--r-- drivers/gpu/drm/vc4/vc4_irq.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate_shaders.c | 10
-rw-r--r-- drivers/i2c/busses/i2c-bcm-kona.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-cadence.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-rk3x.c | 14
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 2
-rw-r--r-- drivers/i2c/muxes/i2c-demux-pinctrl.c | 15
-rw-r--r-- drivers/iio/accel/Kconfig | 5
-rw-r--r-- drivers/iio/accel/bma220_spi.c | 2
-rw-r--r-- drivers/iio/accel/bmc150-accel-core.c | 11
-rw-r--r-- drivers/iio/accel/kxsd9.c | 2
-rw-r--r-- drivers/iio/adc/Kconfig | 1
-rw-r--r-- drivers/iio/adc/ad799x.c | 1
-rw-r--r-- drivers/iio/adc/at91_adc.c | 4
-rw-r--r-- drivers/iio/adc/rockchip_saradc.c | 30
-rw-r--r-- drivers/iio/adc/ti-ads1015.c | 3
-rw-r--r-- drivers/iio/adc/ti_am335x_adc.c | 16
-rw-r--r-- drivers/iio/chemical/atlas-ph-sensor.c | 2
-rw-r--r-- drivers/iio/common/hid-sensors/hid-sensor-attributes.c | 4
-rw-r--r-- drivers/iio/dac/stx104.c | 38
-rw-r--r-- drivers/iio/humidity/Kconfig | 2
-rw-r--r-- drivers/iio/humidity/am2315.c | 2
-rw-r--r-- drivers/iio/humidity/hdc100x.c | 27
-rw-r--r-- drivers/iio/industrialio-buffer.c | 27
-rw-r--r-- drivers/iio/industrialio-core.c | 5
-rw-r--r-- drivers/iio/light/Kconfig | 3
-rw-r--r-- drivers/iio/pressure/bmp280-core.c | 8
-rw-r--r-- drivers/iio/proximity/as3935.c | 2
-rw-r--r-- drivers/infiniband/core/multicast.c | 13
-rw-r--r-- drivers/infiniband/hw/cxgb4/Kconfig | 1
-rw-r--r-- drivers/infiniband/hw/cxgb4/Makefile | 1
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 294
-rw-r--r-- drivers/infiniband/hw/cxgb4/device.c | 9
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 10
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 92
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.h | 1
-rw-r--r-- drivers/infiniband/hw/hfi1/debugfs.c | 132
-rw-r--r-- drivers/infiniband/hw/hfi1/hfi.h | 4
-rw-r--r-- drivers/infiniband/hw/hfi1/init.c | 3
-rw-r--r-- drivers/infiniband/hw/hfi1/mad.c | 12
-rw-r--r-- drivers/infiniband/hw/hfi1/pio_copy.c | 12
-rw-r--r-- drivers/infiniband/hw/hfi1/user_sdma.c | 5
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_hw.c | 1
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_main.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 26
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 23
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 3
-rw-r--r-- drivers/infiniband/hw/mlx4/mcg.c | 14
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 37
-rw-r--r-- drivers/infiniband/hw/mlx5/cq.c | 22
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 17
-rw-r--r-- drivers/infiniband/hw/mlx5/mem.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 13
-rw-r--r-- drivers/infiniband/sw/rdmavt/mr.c | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe.c | 25
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_comp.c | 13
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 55
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.h | 5
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_recv.c | 2
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_req.c | 57
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_resp.c | 11
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 16
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 9
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 2
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 23
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.h | 2
-rw-r--r-- drivers/irqchip/irq-atmel-aic.c | 5
-rw-r--r-- drivers/irqchip/irq-atmel-aic5.c | 5
-rw-r--r-- drivers/mailbox/Kconfig | 1
-rw-r--r-- drivers/mailbox/bcm-pdc-mailbox.c | 11
-rw-r--r-- drivers/md/bitmap.c | 47
-rw-r--r-- drivers/md/dm-bufio.c | 2
-rw-r--r-- drivers/md/dm-crypt.c | 9
-rw-r--r-- drivers/md/dm-log-writes.c | 10
-rw-r--r-- drivers/md/md-cluster.c | 12
-rw-r--r-- drivers/md/md.c | 40
-rw-r--r-- drivers/md/raid10.c | 13
-rw-r--r-- drivers/md/raid5-cache.c | 46
-rw-r--r-- drivers/md/raid5.c | 74
-rw-r--r-- drivers/media/cec-edid.c | 5
-rw-r--r-- drivers/media/pci/cx23885/cx23885-417.c | 1
-rw-r--r-- drivers/media/pci/saa7134/saa7134-dvb.c | 1
-rw-r--r-- drivers/media/pci/saa7134/saa7134-empress.c | 1
-rw-r--r-- drivers/media/platform/Kconfig | 2
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h | 1
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c | 42
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c | 6
-rw-r--r-- drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h | 1
-rw-r--r-- drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c | 16
-rw-r--r-- drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c | 16
-rw-r--r-- drivers/media/platform/rcar-fcp.c | 8
-rw-r--r-- drivers/memory/omap-gpmc.c | 21
-rw-r--r-- drivers/misc/Kconfig | 10
-rw-r--r-- drivers/misc/Makefile | 1
-rw-r--r-- drivers/misc/bh1780gli.c | 259
-rw-r--r-- drivers/misc/lkdtm_rodata.c | 2
-rw-r--r-- drivers/misc/lkdtm_usercopy.c | 25
-rw-r--r-- drivers/misc/mei/hw-me.c | 10
-rw-r--r-- drivers/misc/mei/pci-me.c | 4
-rw-r--r-- drivers/mmc/host/omap.c | 18
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 16
-rw-r--r-- drivers/mmc/host/sdhci-st.c | 15
-rw-r--r-- drivers/net/Kconfig | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 7
-rw-r--r-- drivers/net/can/flexcan.c | 13
-rw-r--r-- drivers/net/can/ifi_canfd/ifi_canfd.c | 11
-rw-r--r-- drivers/net/dsa/Kconfig | 9
-rw-r--r-- drivers/net/dsa/Makefile | 1
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 37
-rw-r--r-- drivers/net/dsa/b53/b53_priv.h | 1
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 4
-rw-r--r-- drivers/net/dsa/bcm_sf2.h | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 457
-rw-r--r-- drivers/net/dsa/mv88e6xxx/mv88e6xxx.h | 63
-rw-r--r-- drivers/net/dsa/qca8k.c | 1040
-rw-r--r-- drivers/net/dsa/qca8k.h | 185
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/amd/7990.c | 6
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c | 65
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 24
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 7
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 98
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 9
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 18
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 10
-rw-r--r-- drivers/net/ethernet/atheros/alx/hw.c | 14
-rw-r--r-- drivers/net/ethernet/atheros/alx/hw.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 239
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 116
-rw-r--r-- drivers/net/ethernet/broadcom/b44.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 79
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 139
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 22
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 187
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 1251
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 90
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 11
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 23
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic.h | 3
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 11
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 34
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 94
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/Makefile | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 80
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 135
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 721
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 48
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 972
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 483
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h | 57
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h | 294
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 227
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 57
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 15
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 9
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/Makefile | 4
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c | 149
-rw-r--r-- drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h | 160
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 71
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 75
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 45
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 40
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 7
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 357
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 97
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.h | 8
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_mac.h | 2
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 344
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 16
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 59
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 59
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 59
-rw-r--r-- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 132
-rw-r--r-- drivers/net/ethernet/i825xx/82596.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 44
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 87
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 87
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 72
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 187
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 26
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 3
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 58
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 206
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 34
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 10
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 9
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 51
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.h | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 881
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 146
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 81
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_clock.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 127
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 40
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/srq.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 345
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 143
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 38
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 810
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 39
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 660
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 109
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 67
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 308
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 242
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 416
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 217
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 26
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.c | 135
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 570
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 269
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 32
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 4
-rw-r--r-- drivers/net/ethernet/natsemi/ns83820.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/Makefile | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 233
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf.h | 212
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c | 1811
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c | 171
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 47
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 147
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 51
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 13
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_offload.c | 291
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 30
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.c | 6898
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_debug.h | 54
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1056
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 894
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_selftest.c | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 45
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 11
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 5
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 2
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/falcon_boards.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/mcdi.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/ptp.c | 14
-rw-r--r-- drivers/net/ethernet/sfc/sriov.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/sriov.h | 2
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 4
-rw-r--r-- drivers/net/ethernet/sis/sis900.h | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 222
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 40
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 4
-rw-r--r-- drivers/net/ethernet/via/via-velocity.c | 21
-rw-r--r-- drivers/net/ethernet/xilinx/Kconfig | 4
-rw-r--r-- drivers/net/hamradio/6pack.c | 12
-rw-r--r-- drivers/net/hamradio/bpqether.c | 2
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 14
-rw-r--r-- drivers/net/hyperv/netvsc.c | 89
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 188
-rw-r--r-- drivers/net/ieee802154/fakelb.c | 14
-rw-r--r-- drivers/net/ipvlan/ipvlan.h | 6
-rw-r--r-- drivers/net/ipvlan/ipvlan_core.c | 94
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 87
-rw-r--r-- drivers/net/macsec.c | 1
-rw-r--r-- drivers/net/phy/Kconfig | 13
-rw-r--r-- drivers/net/phy/mdio-xgene.c | 6
-rw-r--r-- drivers/net/phy/microchip.c | 2
-rw-r--r-- drivers/net/phy/mscc.c | 229
-rw-r--r-- drivers/net/usb/kaweth.c | 15
-rw-r--r-- drivers/net/usb/r8152.c | 281
-rw-r--r-- drivers/net/vrf.c | 294
-rw-r--r-- drivers/net/vxlan.c | 42
-rw-r--r-- drivers/net/wireless/ath/ath10k/ahb.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath10k/bmi.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 125
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 79
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 11
-rw-r--r-- drivers/net/wireless/ath/ath10k/htc.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 204
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_tx.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 76
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 70
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 78
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.h | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/swap.c | 26
-rw-r--r-- drivers/net/wireless/ath/ath10k/swap.h | 11
-rw-r--r-- drivers/net/wireless/ath/ath10k/testmode.c | 27
-rw-r--r-- drivers/net/wireless/ath/ath10k/thermal.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-ops.h | 33
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 59
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 186
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 24
-rw-r--r-- drivers/net/wireless/ath/ath5k/debug.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath6kl/hif.c | 11
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar5008_phy.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/gpio.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 20
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 104
-rw-r--r-- drivers/net/wireless/ath/carl9170/usb.c | 6
-rw-r--r-- drivers/net/wireless/ath/dfs_pattern_detector.c | 2
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 151
-rw-r--r-- drivers/net/wireless/ath/wil6210/debugfs.c | 53
-rw-r--r-- drivers/net/wireless/ath/wil6210/fw.h | 14
-rw-r--r-- drivers/net/wireless/ath/wil6210/fw_inc.c | 92
-rw-r--r-- drivers/net/wireless/ath/wil6210/interrupt.c | 15
-rw-r--r-- drivers/net/wireless/ath/wil6210/main.c | 63
-rw-r--r-- drivers/net/wireless/ath/wil6210/netdev.c | 34
-rw-r--r-- drivers/net/wireless/ath/wil6210/p2p.c | 46
-rw-r--r-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 9
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 9
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil6210.h | 11
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.c | 12
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.h | 932
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 32
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 14
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 83
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 15
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 19
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 31
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 19
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 342
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 57
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 41
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 45
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 13
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_ioctl.c | 20
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11h.c | 27
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n.h | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_aggr.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 78
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 143
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 26
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/debugfs.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 73
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/init.c | 22
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/join.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 270
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 182
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.h | 13
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 28
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.c | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 61
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 134
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_event.c | 144
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_event.c | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.c | 6
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.h | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/util.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/dma.c | 2
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/dma.h | 10
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/eeprom.c | 12
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/init.c | 10
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mac.c | 38
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/main.c | 1
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mcu.c | 20
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/mt7601u.h | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/phy.c | 44
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/regs.h | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/tx.c | 19
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/util.h | 77
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 22
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c | 5
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 7
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 328
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 60
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/regd.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 2
-rw-r--r-- drivers/net/wireless/ti/wl18xx/acx.c | 29
-rw-r--r-- drivers/net/wireless/ti/wl18xx/acx.h | 13
-rw-r--r-- drivers/net/wireless/ti/wl18xx/event.c | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 1
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore.h | 3
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore_i.h | 1
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 7
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 2
-rw-r--r-- drivers/net/xen-netback/common.h | 8
-rw-r--r-- drivers/net/xen-netback/hash.c | 68
-rw-r--r-- drivers/net/xen-netback/interface.c | 38
-rw-r--r-- drivers/net/xen-netback/netback.c | 18
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 83
-rw-r--r-- drivers/net/xen-netfront.c | 15
-rw-r--r-- drivers/nvdimm/bus.c | 6
-rw-r--r-- drivers/nvme/host/Kconfig | 4
-rw-r--r-- drivers/nvme/host/fabrics.c | 23
-rw-r--r-- drivers/nvme/host/fabrics.h | 2
-rw-r--r-- drivers/nvme/host/pci.c | 9
-rw-r--r-- drivers/nvme/host/rdma.c | 188
-rw-r--r-- drivers/nvme/target/Kconfig | 2
-rw-r--r-- drivers/nvme/target/loop.c | 4
-rw-r--r-- drivers/nvme/target/rdma.c | 7
-rw-r--r-- drivers/pci/quirks.c | 7
-rw-r--r-- drivers/pci/remove.c | 3
-rw-r--r-- drivers/pcmcia/ds.c | 12
-rw-r--r-- drivers/pcmcia/pxa2xx_base.c | 9
-rw-r--r-- drivers/pcmcia/pxa2xx_base.h | 2
-rw-r--r-- drivers/pcmcia/sa1111_badge4.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_generic.c | 22
-rw-r--r-- drivers/pcmcia/sa1111_jornada720.c | 25
-rw-r--r-- drivers/pcmcia/sa1111_lubbock.c | 32
-rw-r--r-- drivers/pcmcia/sa1111_neponset.c | 26
-rw-r--r-- drivers/pcmcia/sa11xx_base.c | 8
-rw-r--r-- drivers/pcmcia/soc_common.c | 2
-rw-r--r-- drivers/perf/arm_pmu.c | 3
-rw-r--r-- drivers/phy/phy-brcm-sata.c | 2
-rw-r--r-- drivers/phy/phy-sun4i-usb.c | 68
-rw-r--r-- drivers/phy/phy-sun9i-usb.c | 4
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 5
-rw-r--r-- drivers/pinctrl/pinctrl-pistachio.c | 12
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c | 4
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c | 4
-rw-r--r-- drivers/rapidio/devices/tsi721.c | 2
-rw-r--r-- drivers/rapidio/rio_cm.c | 19
-rw-r--r-- drivers/regulator/max14577-regulator.c | 4
-rw-r--r-- drivers/regulator/max77693-regulator.c | 4
-rw-r--r-- drivers/regulator/qcom_smd-regulator.c | 30
-rw-r--r-- drivers/s390/net/qeth_core.h | 1
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 32
-rw-r--r-- drivers/s390/net/qeth_l2_main.c | 6
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 29
-rw-r--r-- drivers/s390/net/qeth_l3_sys.c | 5
-rw-r--r-- drivers/scsi/constants.c | 5
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 3
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 4
-rw-r--r-- drivers/scsi/scsi_transport_sas.c | 16
-rw-r--r-- drivers/scsi/ses.c | 2
-rw-r--r-- drivers/scsi/wd719x.c | 2
-rw-r--r-- drivers/spi/spi-img-spfi.c | 2
-rw-r--r-- drivers/spi/spi-mt65xx.c | 1
-rw-r--r-- drivers/spi/spi-pxa2xx-pci.c | 1
-rw-r--r-- drivers/spi/spi-qup.c | 1
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 3
-rw-r--r-- drivers/spi/spi.c | 10
-rw-r--r-- drivers/staging/comedi/drivers/adv_pci1760.c | 1
-rw-r--r-- drivers/staging/comedi/drivers/comedi_test.c | 46
-rw-r--r-- drivers/staging/comedi/drivers/daqboard2000.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/dt2811.c | 4
-rw-r--r-- drivers/staging/comedi/drivers/ni_mio_common.c | 12
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 2
-rw-r--r-- drivers/staging/lustre/lustre/llite/namei.c | 43
-rw-r--r-- drivers/staging/media/cec/TODO | 1
-rw-r--r-- drivers/staging/media/cec/cec-adap.c | 26
-rw-r--r-- drivers/staging/media/cec/cec-api.c | 12
-rw-r--r-- drivers/staging/media/cec/cec-core.c | 27
-rw-r--r-- drivers/staging/media/pulse8-cec/pulse8-cec.c | 10
-rw-r--r-- drivers/staging/wilc1000/host_interface.c | 3
-rw-r--r-- drivers/staging/wilc1000/linux_wlan.c | 2
-rw-r--r-- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 4
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_cm.c | 234
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_main.c | 3
-rw-r--r-- drivers/thermal/rcar_thermal.c | 1
-rw-r--r-- drivers/thunderbolt/nhi.c | 6
-rw-r--r-- drivers/thunderbolt/switch.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250.h | 35
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 9
-rw-r--r-- drivers/tty/serial/8250/8250_fintek.c | 4
-rw-r--r-- drivers/tty/serial/8250/8250_mid.c | 3
-rw-r--r-- drivers/tty/serial/8250/8250_omap.c | 31
-rw-r--r-- drivers/tty/serial/8250/8250_pci.c | 139
-rw-r--r-- drivers/tty/serial/8250/8250_port.c | 7
-rw-r--r-- drivers/tty/serial/8250/Kconfig | 1
-rw-r--r-- drivers/usb/chipidea/udc.c | 16
-rw-r--r-- drivers/usb/core/config.c | 30
-rw-r--r-- drivers/usb/core/devio.c | 16
-rw-r--r-- drivers/usb/dwc2/core.h | 1
-rw-r--r-- drivers/usb/dwc2/platform.c | 22
-rw-r--r-- drivers/usb/dwc3/core.c | 1
-rw-r--r-- drivers/usb/dwc3/debug.h | 2
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 13
-rw-r--r-- drivers/usb/dwc3/gadget.c | 6
-rw-r--r-- drivers/usb/gadget/function/f_eem.c | 12
-rw-r--r-- drivers/usb/gadget/function/f_rndis.c | 3
-rw-r--r-- drivers/usb/gadget/function/u_serial.c | 7
-rw-r--r-- drivers/usb/gadget/udc/core.c | 2
-rw-r--r-- drivers/usb/gadget/udc/fsl_qe_udc.c | 7
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 2
-rw-r--r-- drivers/usb/host/xhci-ring.c | 6
-rw-r--r-- drivers/usb/musb/Kconfig | 2
-rw-r--r-- drivers/usb/musb/musb_virthub.c | 7
-rw-r--r-- drivers/usb/phy/phy-generic.c | 8
-rw-r--r-- drivers/usb/renesas_usbhs/mod.c | 11
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 2
-rw-r--r-- drivers/usb/serial/mos7720.c | 2
-rw-r--r-- drivers/usb/serial/mos7840.c | 4
-rw-r--r-- drivers/usb/serial/option.c | 9
-rw-r--r-- drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r-- drivers/virtio/virtio_ring.c | 2
-rw-r--r-- fs/afs/rxrpc.c | 105
-rw-r--r-- fs/aio.c | 7
-rw-r--r-- fs/autofs4/expire.c | 55
-rw-r--r-- fs/binfmt_elf.c | 2
-rw-r--r-- fs/btrfs/ctree.h | 1
-rw-r--r-- fs/btrfs/extent-tree.c | 33
-rw-r--r-- fs/btrfs/relocation.c | 8
-rw-r--r-- fs/btrfs/send.c | 8
-rw-r--r-- fs/btrfs/tree-log.c | 1
-rw-r--r-- fs/ceph/dir.c | 2
-rw-r--r-- fs/cifs/cifsfs.c | 29
-rw-r--r-- fs/cifs/cifsproto.h | 2
-rw-r--r-- fs/cifs/connect.c | 31
-rw-r--r-- fs/crypto/policy.c | 41
-rw-r--r-- fs/devpts/inode.c | 3
-rw-r--r-- fs/ext4/ioctl.c | 2
-rw-r--r-- fs/f2fs/file.c | 9
-rw-r--r-- fs/fuse/file.c | 7
-rw-r--r-- fs/ioctl.c | 6
-rw-r--r-- fs/iomap.c | 5
-rw-r--r-- fs/kernfs/file.c | 28
-rw-r--r-- fs/nfs/blocklayout/blocklayout.c | 2
-rw-r--r-- fs/nfs/blocklayout/blocklayout.h | 3
-rw-r--r-- fs/nfs/blocklayout/extent_tree.c | 10
-rw-r--r-- fs/nfs/callback.c | 1
-rw-r--r-- fs/nfs/callback_proc.c | 8
-rw-r--r-- fs/nfs/client.c | 10
-rw-r--r-- fs/nfs/file.c | 5
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 45
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.h | 2
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayoutdev.c | 23
-rw-r--r-- fs/nfs/internal.h | 5
-rw-r--r-- fs/nfs/nfs42proc.c | 34
-rw-r--r-- fs/nfs/nfs4client.c | 5
-rw-r--r-- fs/nfs/nfs4proc.c | 110
-rw-r--r-- fs/nfs/nfs4session.c | 53
-rw-r--r-- fs/nfs/nfs4session.h | 7
-rw-r--r-- fs/nfs/pnfs.c | 44
-rw-r--r-- fs/nfs/super.c | 19
-rw-r--r-- fs/notify/fanotify/fanotify.c | 13
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 36
-rw-r--r-- fs/notify/group.c | 19
-rw-r--r-- fs/notify/notification.c | 23
-rw-r--r-- fs/ocfs2/alloc.c | 56
-rw-r--r-- fs/ocfs2/cluster/tcp_internal.h | 5
-rw-r--r-- fs/ocfs2/dlm/dlmconvert.c | 12
-rw-r--r-- fs/ocfs2/file.c | 34
-rw-r--r-- fs/ocfs2/suballoc.c | 14
-rw-r--r-- fs/overlayfs/copy_up.c | 2
-rw-r--r-- fs/overlayfs/dir.c | 58
-rw-r--r-- fs/overlayfs/inode.c | 108
-rw-r--r-- fs/overlayfs/overlayfs.h | 17
-rw-r--r-- fs/overlayfs/readdir.c | 63
-rw-r--r-- fs/overlayfs/super.c | 93
-rw-r--r-- fs/proc/base.c | 7
-rw-r--r-- fs/proc/kcore.c | 31
-rw-r--r-- fs/proc/task_mmu.c | 2
-rw-r--r-- fs/ramfs/file-mmu.c | 9
-rw-r--r-- fs/sysfs/file.c | 8
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_btree.c | 14
-rw-r--r-- fs/xfs/libxfs/xfs_defer.c | 17
-rw-r--r-- fs/xfs/libxfs/xfs_defer.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_format.h | 6
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 3
-rw-r--r-- fs/xfs/xfs_buf.c | 2
-rw-r--r-- fs/xfs/xfs_super.c | 9
-rw-r--r-- fs/xfs/xfs_trace.h | 2
-rw-r--r-- include/asm-generic/uaccess.h | 21
-rw-r--r-- include/linux/acpi.h | 2
-rw-r--r-- include/linux/bitfield.h | 93
-rw-r--r-- include/linux/bpf.h | 4
-rw-r--r-- include/linux/bpf_verifier.h | 90
-rw-r--r-- include/linux/bug.h | 3
-rw-r--r-- include/linux/cec-funcs.h | 78
-rw-r--r-- include/linux/cec.h | 5
-rw-r--r-- include/linux/compiler-gcc.h | 2
-rw-r--r-- include/linux/compiler.h | 7
-rw-r--r-- include/linux/cpuhotplug.h | 2
-rw-r--r-- include/linux/efi.h | 27
-rw-r--r-- include/linux/fence.h | 2
-rw-r--r-- include/linux/filter.h | 64
-rw-r--r-- include/linux/fs.h | 1
-rw-r--r-- include/linux/fscrypto.h | 5
-rw-r--r-- include/linux/fsnotify_backend.h | 6
-rw-r--r-- include/linux/hyperv.h | 84
-rw-r--r-- include/linux/if_link.h | 1
-rw-r--r-- include/linux/if_vlan.h | 33
-rw-r--r-- include/linux/iio/sw_trigger.h | 2
-rw-r--r-- include/linux/inet_diag.h | 4
-rw-r--r-- include/linux/iomap.h | 8
-rw-r--r-- include/linux/irq.h | 10
-rw-r--r-- include/linux/ktime.h | 5
-rw-r--r-- include/linux/mempolicy.h | 4
-rw-r--r-- include/linux/mfd/da8xx-cfgchip.h | 153
-rw-r--r-- include/linux/mfd/ti_am335x_tscadc.h | 8
-rw-r--r-- include/linux/mlx4/cmd.h | 3
-rw-r--r-- include/linux/mlx4/device.h | 3
-rw-r--r-- include/linux/mlx4/qp.h | 2
-rw-r--r-- include/linux/mlx5/driver.h | 4
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r-- include/linux/mm.h | 1
-rw-r--r-- include/linux/mmc/sdio_ids.h | 1
-rw-r--r-- include/linux/mmzone.h | 16
-rw-r--r-- include/linux/netdevice.h | 21
-rw-r--r-- include/linux/netfilter.h | 2
-rw-r--r-- include/linux/nvme.h | 2
-rw-r--r-- include/linux/pagemap.h | 38
-rw-r--r-- include/linux/pci.h | 9
-rw-r--r-- include/linux/phy.h | 3
-rw-r--r-- include/linux/ptp_clock_kernel.h | 5
-rw-r--r-- include/linux/qed/common_hsi.h | 3
-rw-r--r-- include/linux/qed/qed_if.h | 4
-rw-r--r-- include/linux/rhashtable.h | 491
-rw-r--r-- include/linux/serial_8250.h | 1
-rw-r--r-- include/linux/skbuff.h | 17
-rw-r--r-- include/linux/tcp.h | 21
-rw-r--r-- include/linux/thread_info.h | 7
-rw-r--r-- include/linux/uio.h | 2
-rw-r--r-- include/linux/win_minmax.h | 37
-rw-r--r-- include/media/cec.h | 2
-rw-r--r-- include/net/af_rxrpc.h | 16
-rw-r--r-- include/net/af_unix.h | 2
-rw-r--r-- include/net/bluetooth/bluetooth.h | 4
-rw-r--r-- include/net/bluetooth/hci.h | 7
-rw-r--r-- include/net/bluetooth/hci_core.h | 11
-rw-r--r-- include/net/bluetooth/hci_mon.h | 4
-rw-r--r-- include/net/bluetooth/mgmt.h | 24
-rw-r--r-- include/net/cfg80211.h | 45
-rw-r--r-- include/net/dsa.h | 3
-rw-r--r-- include/net/dst_metadata.h | 52
-rw-r--r-- include/net/flow.h | 3
-rw-r--r-- include/net/ieee80211_radiotap.h | 21
-rw-r--r-- include/net/inet_connection_sock.h | 4
-rw-r--r-- include/net/ip6_route.h | 3
-rw-r--r-- include/net/ip6_tunnel.h | 1
-rw-r--r-- include/net/ip_fib.h | 3
-rw-r--r-- include/net/ip_tunnels.h | 21
-rw-r--r-- include/net/l3mdev.h | 153
-rw-r--r-- include/net/mac80211.h | 33
-rw-r--r-- include/net/netfilter/nf_conntrack_synproxy.h | 14
-rw-r--r-- include/net/netfilter/nft_meta.h | 4
-rw-r--r-- include/net/netfilter/nft_reject.h | 4
-rw-r--r-- include/net/netns/xfrm.h | 12
-rw-r--r-- include/net/pkt_cls.h | 16
-rw-r--r-- include/net/route.h | 10
-rw-r--r-- include/net/sch_generic.h | 72
-rw-r--r-- include/net/sctp/sctp.h | 10
-rw-r--r-- include/net/sctp/sm.h | 94
-rw-r--r-- include/net/sctp/structs.h | 5
-rw-r--r-- include/net/sock.h | 10
-rw-r--r-- include/net/tc_act/tc_ife.h | 2
-rw-r--r-- include/net/tc_act/tc_skbmod.h | 30
-rw-r--r-- include/net/tc_act/tc_tunnel_key.h | 30
-rw-r--r-- include/net/tc_act/tc_vlan.h | 25
-rw-r--r-- include/net/tcp.h | 55
-rw-r--r-- include/net/vxlan.h | 18
-rw-r--r-- include/net/xfrm.h | 6
-rw-r--r-- include/rxrpc/packet.h | 16
-rw-r--r-- include/scsi/scsi_transport_sas.h | 5
-rw-r--r-- include/trace/events/rxrpc.h | 570
-rw-r--r-- include/uapi/linux/bpf.h | 28
-rw-r--r-- include/uapi/linux/if_link.h | 29
-rw-r--r-- include/uapi/linux/if_tunnel.h | 1
-rw-r--r-- include/uapi/linux/inet_diag.h | 14
-rw-r--r-- include/uapi/linux/openvswitch.h | 17
-rw-r--r-- include/uapi/linux/pkt_cls.h | 22
-rw-r--r-- include/uapi/linux/pkt_sched.h | 4
-rw-r--r-- include/uapi/linux/tc_act/tc_ife.h | 3
-rw-r--r-- include/uapi/linux/tc_act/tc_skbmod.h | 39
-rw-r--r-- include/uapi/linux/tc_act/tc_tunnel_key.h | 41
-rw-r--r-- include/uapi/linux/tc_act/tc_vlan.h | 1
-rw-r--r-- include/uapi/linux/tcp.h | 3
-rw-r--r-- include/uapi/linux/xfrm.h | 2
-rw-r--r-- kernel/audit_watch.c | 8
-rw-r--r-- kernel/bpf/core.c | 2
-rw-r--r-- kernel/bpf/helpers.c | 55
-rw-r--r-- kernel/bpf/stackmap.c | 5
-rw-r--r-- kernel/bpf/verifier.c | 497
-rw-r--r-- kernel/cgroup.c | 6
-rw-r--r-- kernel/configs/tiny.config | 8
-rw-r--r-- kernel/cpuset.c | 15
-rw-r--r-- kernel/events/core.c | 47
-rw-r--r-- kernel/events/ring_buffer.c | 15
-rw-r--r-- kernel/exit.c | 7
-rw-r--r-- kernel/fork.c | 37
-rw-r--r-- kernel/kexec_file.c | 3
-rw-r--r-- kernel/memremap.c | 9
-rw-r--r-- kernel/power/qos.c | 11
-rw-r--r-- kernel/printk/nmi.c | 38
-rw-r--r-- kernel/sched/core.c | 22
-rw-r--r-- kernel/seccomp.c | 12
-rw-r--r-- kernel/time/tick-sched.c | 3
-rw-r--r-- kernel/trace/bpf_trace.c | 87
-rw-r--r-- lib/Kconfig.debug | 18
-rw-r--r-- lib/Makefile | 3
-rw-r--r-- lib/iov_iter.c | 24
-rw-r--r-- lib/rhashtable.c | 258
-rw-r--r-- lib/test_bpf.c | 1
-rw-r--r-- lib/test_hash.c | 26
-rw-r--r-- lib/usercopy.c | 9
-rw-r--r-- lib/win_minmax.c | 98
-rw-r--r-- mm/debug.c | 6
-rw-r--r-- mm/huge_memory.c | 4
-rw-r--r-- mm/khugepaged.c | 25
-rw-r--r-- mm/memcontrol.c | 31
-rw-r--r-- mm/memory_hotplug.c | 4
-rw-r--r-- mm/mempolicy.c | 17
-rw-r--r-- mm/page_alloc.c | 55
-rw-r--r-- mm/page_io.c | 3
-rw-r--r-- mm/swapfile.c | 1
-rw-r--r-- mm/usercopy.c | 64
-rw-r--r-- mm/vmscan.c | 22
-rw-r--r-- net/6lowpan/ndisc.c | 2
-rw-r--r-- net/appletalk/ddp.c | 2
-rw-r--r-- net/atm/lec.c | 12
-rw-r--r-- net/batman-adv/bat_v_elp.c | 2
-rw-r--r-- net/batman-adv/routing.c | 28
-rw-r--r-- net/bluetooth/af_bluetooth.c | 15
-rw-r--r-- net/bluetooth/hci_core.c | 1
-rw-r--r-- net/bluetooth/hci_request.c | 49
-rw-r--r-- net/bluetooth/hci_request.h | 5
-rw-r--r-- net/bluetooth/hci_sock.c | 396
-rw-r--r-- net/bluetooth/leds.c | 27
-rw-r--r-- net/bluetooth/leds.h | 10
-rw-r--r-- net/bluetooth/mgmt.c | 349
-rw-r--r-- net/bluetooth/mgmt_util.c | 66
-rw-r--r-- net/bluetooth/smp.c | 5
-rw-r--r-- net/bridge/br_input.c | 7
-rw-r--r-- net/bridge/br_multicast.c | 2
-rw-r--r-- net/bridge/br_stp_if.c | 43
-rw-r--r-- net/bridge/netfilter/ebtables.c | 2
-rw-r--r-- net/bridge/netfilter/nft_meta_bridge.c | 1
-rw-r--r-- net/core/dev.c | 19
-rw-r--r-- net/core/filter.c | 344
-rw-r--r-- net/core/flow_dissector.c | 6
-rw-r--r-- net/core/rtnetlink.c | 195
-rw-r--r-- net/core/skbuff.c | 85
-rw-r--r-- net/core/sock.c | 5
-rw-r--r-- net/dsa/Kconfig | 3
-rw-r--r-- net/dsa/Makefile | 1
-rw-r--r-- net/dsa/dsa.c | 11
-rw-r--r-- net/dsa/dsa2.c | 12
-rw-r--r-- net/dsa/dsa_priv.h | 2
-rw-r--r-- net/dsa/slave.c | 35
-rw-r--r-- net/dsa/tag_qca.c | 138
-rw-r--r-- net/ipv4/Kconfig | 18
-rw-r--r-- net/ipv4/Makefile | 3
-rw-r--r-- net/ipv4/af_inet.c | 14
-rw-r--r-- net/ipv4/devinet.c | 11
-rw-r--r-- net/ipv4/fib_frontend.c | 3
-rw-r--r-- net/ipv4/fib_rules.c | 3
-rw-r--r-- net/ipv4/fib_semantics.c | 8
-rw-r--r-- net/ipv4/fib_trie.c | 10
-rw-r--r-- net/ipv4/gre_offload.c | 6
-rw-r--r-- net/ipv4/inet_diag.c | 49
-rw-r--r-- net/ipv4/ip_gre.c | 23
-rw-r--r-- net/ipv4/ip_input.c | 5
-rw-r--r-- net/ipv4/ip_output.c | 13
-rw-r--r-- net/ipv4/ip_sockglue.c | 7
-rw-r--r-- net/ipv4/ip_tunnel.c | 76
-rw-r--r-- net/ipv4/ip_tunnel_core.c | 2
-rw-r--r-- net/ipv4/ip_vti.c | 15
-rw-r--r-- net/ipv4/ipip.c | 35
-rw-r--r-- net/ipv4/ipmr.c | 7
-rw-r--r-- net/ipv4/netfilter/nft_chain_route_ipv4.c | 11
-rw-r--r-- net/ipv4/netfilter/nft_reject_ipv4.c | 1
-rw-r--r-- net/ipv4/raw.c | 6
-rw-r--r-- net/ipv4/route.c | 34
-rw-r--r-- net/ipv4/tcp.c | 61
-rw-r--r-- net/ipv4/tcp_bbr.c | 896
-rw-r--r-- net/ipv4/tcp_cdg.c | 12
-rw-r--r-- net/ipv4/tcp_cong.c | 2
-rw-r--r-- net/ipv4/tcp_fastopen.c | 2
-rw-r--r-- net/ipv4/tcp_input.c | 495
-rw-r--r-- net/ipv4/tcp_ipv4.c | 2
-rw-r--r-- net/ipv4/tcp_minisocks.c | 6
-rw-r--r-- net/ipv4/tcp_offload.c | 13
-rw-r--r-- net/ipv4/tcp_output.c | 106
-rw-r--r-- net/ipv4/tcp_rate.c | 186
-rw-r--r-- net/ipv4/tcp_timer.c | 1
-rw-r--r-- net/ipv4/tcp_yeah.c | 2
-rw-r--r-- net/ipv4/udp.c | 6
-rw-r--r-- net/ipv4/udp_diag.c | 14
-rw-r--r-- net/ipv4/udp_offload.c | 6
-rw-r--r-- net/ipv4/xfrm4_policy.c | 4
-rw-r--r-- net/ipv6/addrconf.c | 18
-rw-r--r-- net/ipv6/fib6_rules.c | 3
-rw-r--r-- net/ipv6/ip6_fib.c | 6
-rw-r--r-- net/ipv6/ip6_gre.c | 2
-rw-r--r-- net/ipv6/ip6_offload.c | 5
-rw-r--r-- net/ipv6/ip6_output.c | 19
-rw-r--r-- net/ipv6/ip6_tunnel.c | 176
-rw-r--r-- net/ipv6/ip6_vti.c | 19
-rw-r--r-- net/ipv6/ip6mr.c | 7
-rw-r--r-- net/ipv6/ndisc.c | 11
-rw-r--r-- net/ipv6/netfilter/nft_chain_route_ipv6.c | 10
-rw-r--r-- net/ipv6/netfilter/nft_reject_ipv6.c | 1
-rw-r--r-- net/ipv6/output_core.c | 7
-rw-r--r-- net/ipv6/ping.c | 9
-rw-r--r-- net/ipv6/raw.c | 7
-rw-r--r-- net/ipv6/route.c | 48
-rw-r--r-- net/ipv6/tcp_ipv6.c | 8
-rw-r--r-- net/ipv6/xfrm6_input.c | 15
-rw-r--r-- net/ipv6/xfrm6_policy.c | 4
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r-- net/irda/af_irda.c | 5
-rw-r--r-- net/kcm/kcmsock.c | 3
-rw-r--r-- net/l2tp/l2tp_core.c | 3
-rw-r--r-- net/l2tp/l2tp_core.h | 2
-rw-r--r-- net/l2tp/l2tp_eth.c | 6
-rw-r--r-- net/l2tp/l2tp_ppp.c | 4
-rw-r--r-- net/l3mdev/l3mdev.c | 105
-rw-r--r-- net/llc/af_llc.c | 4
-rw-r--r-- net/mac80211/agg-rx.c | 19
-rw-r--r-- net/mac80211/agg-tx.c | 3
-rw-r--r-- net/mac80211/cfg.c | 35
-rw-r--r-- net/mac80211/debugfs.c | 152
-rw-r--r-- net/mac80211/debugfs_netdev.c | 37
-rw-r--r-- net/mac80211/debugfs_sta.c | 56
-rw-r--r-- net/mac80211/driver-ops.c | 2
-rw-r--r-- net/mac80211/driver-ops.h | 18
-rw-r--r-- net/mac80211/ieee80211_i.h | 13
-rw-r--r-- net/mac80211/iface.c | 21
-rw-r--r-- net/mac80211/main.c | 3
-rw-r--r-- net/mac80211/mesh_hwmp.c | 3
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 2
-rw-r--r-- net/mac80211/mlme.c | 12
-rw-r--r-- net/mac80211/pm.c | 3
-rw-r--r-- net/mac80211/rx.c | 76
-rw-r--r-- net/mac80211/scan.c | 2
-rw-r--r-- net/mac80211/sta_info.c | 61
-rw-r--r-- net/mac80211/sta_info.h | 22
-rw-r--r-- net/mac80211/status.c | 15
-rw-r--r-- net/mac80211/tdls.c | 7
-rw-r--r-- net/mac80211/tx.c | 94
-rw-r--r-- net/mac80211/util.c | 3
-rw-r--r-- net/mac802154/iface.c | 1
-rw-r--r-- net/mac802154/rx.c | 9
-rw-r--r-- net/netfilter/core.c | 51
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 6
-rw-r--r-- net/netfilter/nf_nat_core.c | 5
-rw-r--r-- net/netfilter/nf_tables_trace.c | 2
-rw-r--r-- net/netfilter/nfnetlink_acct.c | 6
-rw-r--r-- net/netfilter/nfnetlink_cttimeout.c | 49
-rw-r--r-- net/netfilter/nft_meta.c | 17
-rw-r--r-- net/netfilter/nft_reject.c | 16
-rw-r--r-- net/netfilter/nft_reject_inet.c | 7
-rw-r--r-- net/netfilter/xt_sctp.c | 2
-rw-r--r-- net/netlink/diag.c | 1
-rw-r--r-- net/openvswitch/actions.c | 46
-rw-r--r-- net/openvswitch/datapath.c | 25
-rw-r--r-- net/openvswitch/flow.c | 107
-rw-r--r-- net/openvswitch/flow.h | 12
-rw-r--r-- net/openvswitch/flow_netlink.c | 316
-rw-r--r-- net/openvswitch/flow_netlink.h | 3
-rw-r--r-- net/openvswitch/flow_table.c | 25
-rw-r--r-- net/openvswitch/vport.c | 7
-rw-r--r-- net/rxrpc/Kconfig | 14
-rw-r--r-- net/rxrpc/af_rxrpc.c | 126
-rw-r--r-- net/rxrpc/ar-internal.h | 607
-rw-r--r-- net/rxrpc/call_accept.c | 699
-rw-r--r-- net/rxrpc/call_event.c | 1404
-rw-r--r-- net/rxrpc/call_object.c | 781
-rw-r--r-- net/rxrpc/conn_client.c | 114
-rw-r--r-- net/rxrpc/conn_event.c | 150
-rw-r--r-- net/rxrpc/conn_object.c | 91
-rw-r--r-- net/rxrpc/conn_service.c | 123
-rw-r--r-- net/rxrpc/input.c | 1391
-rw-r--r-- net/rxrpc/insecure.c | 26
-rw-r--r-- net/rxrpc/local_event.c | 19
-rw-r--r-- net/rxrpc/local_object.c | 52
-rw-r--r-- net/rxrpc/misc.c | 177
-rw-r--r-- net/rxrpc/output.c | 396
-rw-r--r-- net/rxrpc/peer_event.c | 90
-rw-r--r-- net/rxrpc/peer_object.c | 184
-rw-r--r-- net/rxrpc/proc.c | 42
-rw-r--r-- net/rxrpc/recvmsg.c | 823
-rw-r--r-- net/rxrpc/rxkad.c | 202
-rw-r--r-- net/rxrpc/security.c | 12
-rw-r--r-- net/rxrpc/sendmsg.c | 222
-rw-r--r-- net/rxrpc/skbuff.c | 194
-rw-r--r-- net/rxrpc/sysctl.c | 12
-rw-r--r-- net/rxrpc/utils.c | 2
-rw-r--r-- net/sched/Kconfig | 27
-rw-r--r-- net/sched/Makefile | 3
-rw-r--r-- net/sched/act_api.c | 36
-rw-r--r-- net/sched/act_bpf.c | 5
-rw-r--r-- net/sched/act_csum.c | 36
-rw-r--r-- net/sched/act_gact.c | 3
-rw-r--r-- net/sched/act_ife.c | 26
-rw-r--r-- net/sched/act_meta_skbtcindex.c | 79
-rw-r--r-- net/sched/act_mirred.c | 11
-rw-r--r-- net/sched/act_police.c | 12
-rw-r--r-- net/sched/act_skbmod.c | 301
-rw-r--r-- net/sched/act_tunnel_key.c | 342
-rw-r--r-- net/sched/act_vlan.c | 29
-rw-r--r-- net/sched/cls_api.c | 18
-rw-r--r-- net/sched/cls_bpf.c | 126
-rw-r--r-- net/sched/cls_flow.c | 27
-rw-r--r-- net/sched/cls_flower.c | 124
-rw-r--r-- net/sched/cls_fw.c | 10
-rw-r--r-- net/sched/cls_route.c | 12
-rw-r--r-- net/sched/cls_tcindex.c | 12
-rw-r--r-- net/sched/cls_u32.c | 30
-rw-r--r-- net/sched/sch_api.c | 41
-rw-r--r-- net/sched/sch_codel.c | 4
-rw-r--r-- net/sched/sch_fifo.c | 4
-rw-r--r-- net/sched/sch_fq.c | 71
-rw-r--r-- net/sched/sch_generic.c | 28
-rw-r--r-- net/sched/sch_htb.c | 24
-rw-r--r-- net/sched/sch_netem.c | 20
-rw-r--r-- net/sched/sch_pie.c | 4
-rw-r--r-- net/sctp/associola.c | 2
-rw-r--r-- net/sctp/auth.c | 2
-rw-r--r-- net/sctp/chunk.c | 26
-rw-r--r-- net/sctp/input.c | 35
-rw-r--r-- net/sctp/inqueue.c | 2
-rw-r--r-- net/sctp/output.c | 73
-rw-r--r-- net/sctp/outqueue.c | 99
-rw-r--r-- net/sctp/sctp_diag.c | 20
-rw-r--r-- net/sctp/sm_make_chunk.c | 28
-rw-r--r-- net/sctp/sm_sideeffect.c | 25
-rw-r--r-- net/sctp/sm_statefuns.c | 6
-rw-r--r-- net/sctp/socket.c | 8
-rw-r--r-- net/sctp/transport.c | 4
-rw-r--r-- net/sctp/ulpevent.c | 4
-rw-r--r-- net/sctp/ulpqueue.c | 3
-rw-r--r-- net/sunrpc/auth_gss/svcauth_gss.c | 5
-rw-r--r-- net/sunrpc/clnt.c | 4
-rw-r--r-- net/sunrpc/xprtrdma/verbs.c | 45
-rw-r--r-- net/sunrpc/xprtrdma/xprt_rdma.h | 1
-rw-r--r-- net/sunrpc/xprtsock.c | 2
-rw-r--r-- net/tipc/name_distr.c | 8
-rw-r--r-- net/tipc/udp_media.c | 3
-rw-r--r-- net/unix/af_unix.c | 111
-rw-r--r-- net/wireless/core.c | 2
-rw-r--r-- net/wireless/core.h | 6
-rw-r--r-- net/wireless/ibss.c | 11
-rw-r--r-- net/wireless/mlme.c | 2
-rw-r--r-- net/wireless/nl80211.c | 87
-rw-r--r-- net/wireless/scan.c | 58
-rw-r--r-- net/wireless/sme.c | 3
-rw-r--r-- net/wireless/sysfs.c | 2
-rw-r--r-- net/wireless/util.c | 13
-rw-r--r-- net/wireless/wext-compat.c | 9
-rw-r--r-- net/wireless/wext-core.c | 25
-rw-r--r-- net/wireless/wext-sme.c | 5
-rw-r--r-- net/x25/af_x25.c | 4
-rw-r--r-- net/xfrm/xfrm_algo.c | 2
-rw-r--r-- net/xfrm/xfrm_input.c | 14
-rw-r--r-- net/xfrm/xfrm_policy.c | 149
-rw-r--r-- net/xfrm/xfrm_replay.c | 6
-rw-r--r-- net/xfrm/xfrm_state.c | 126
-rw-r--r-- net/xfrm/xfrm_sysctl.c | 4
-rw-r--r-- net/xfrm/xfrm_user.c | 22
-rw-r--r-- samples/bpf/tcbpf2_kern.c | 190
-rwxr-xr-x samples/bpf/test_ipip.sh | 178
-rwxr-xr-x samples/bpf/test_tunnel_bpf.sh | 56
-rw-r--r-- samples/bpf/test_verifier.c | 533
-rwxr-xr-x scripts/checkpatch.pl | 9
-rwxr-xr-x scripts/faddr2line | 177
-rwxr-xr-x scripts/package/builddeb | 4
-rwxr-xr-x scripts/tags.sh | 1
-rw-r--r-- security/Kconfig | 11
-rw-r--r-- sound/core/rawmidi.c | 4
-rw-r--r-- sound/core/timer.c | 34
-rw-r--r-- sound/firewire/fireworks/fireworks.h | 1
-rw-r--r-- sound/firewire/fireworks/fireworks_hwdep.c | 71
-rw-r--r-- sound/firewire/fireworks/fireworks_proc.c | 4
-rw-r--r-- sound/firewire/fireworks/fireworks_transaction.c | 5
-rw-r--r-- sound/firewire/tascam/tascam-hwdep.c | 33
-rw-r--r-- sound/pci/hda/patch_realtek.c | 15
-rw-r--r-- sound/usb/quirks.c | 1
-rw-r--r-- tools/iio/iio_generic_buffer.c | 4
-rw-r--r-- tools/lguest/lguest.c | 6
1352 files changed, 49627 insertions, 17803 deletions
diff --git a/.mailmap b/.mailmap
index 2a91c14c80bf..de22daefd9da 100644
--- a/.mailmap
+++ b/.mailmap
@@ -88,6 +88,7 @@ Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
@@ -158,6 +159,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
Takashi YOSHII <takashi.yoshii.zj@renesas.com>
Yusuke Goda <goda.yusuke@renesas.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
diff --git a/Documentation/ABI/stable/sysfs-devices b/Documentation/ABI/stable/sysfs-devices
index 43f78b88da28..df449d79b563 100644
--- a/Documentation/ABI/stable/sysfs-devices
+++ b/Documentation/ABI/stable/sysfs-devices
@@ -1,7 +1,7 @@
# Note: This documents additional properties of any device beyond what
# is documented in Documentation/sysfs-rules.txt
-What: /sys/devices/*/of_path
+What: /sys/devices/*/of_node
Date: February 2015
Contact: Device Tree mailing list <devicetree@vger.kernel.org>
Description:
diff --git a/Documentation/PCI/pci.txt b/Documentation/PCI/pci.txt
index 123881f62219..77f49dc5be23 100644
--- a/Documentation/PCI/pci.txt
+++ b/Documentation/PCI/pci.txt
@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
The ID table is an array of struct pci_device_id entries ending with an
all-zero entry. Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
Each entry consists of:
diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt
index ffca443a19b4..15cdb7bc57c3 100644
--- a/Documentation/arm/CCN.txt
+++ b/Documentation/arm/CCN.txt
@@ -18,13 +18,17 @@ and config2 fields of the perf_event_attr structure. The "events"
directory provides configuration templates for all documented
events, that can be used with perf tool. For example "xp_valid_flit"
is an equivalent of "type=0x8,event=0x4". Other parameters must be
-explicitly specified. For events originating from device, "node"
-defines its index. All crosspoint events require "xp" (index),
-"port" (device port number) and "vc" (virtual channel ID) and
-"dir" (direction). Watchpoints (special "event" value 0xfe) also
-require comparator values ("cmp_l" and "cmp_h") and "mask", being
-index of the comparator mask.
+explicitly specified.
+For events originating from a device, "node" defines its index.
+
+Crosspoint PMU events require "xp" (index), "bus" (bus number)
+and "vc" (virtual channel ID).
+
+Crosspoint watchpoint-based events (special "event" value 0xfe)
+require "xp" and "vc" as as above plus "port" (device port index),
+"dir" (transmit/receive direction), comparator values ("cmp_l"
+and "cmp_h") and "mask", being index of the comparator mask.
Masks are defined separately from the event description
(due to limited number of the config values) in the "cmp_mask"
directory, with first 8 configurable by user and additional
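
For illustration, the scheme above maps onto a perf invocation such as
the following (a sketch only: the "ccn" PMU instance name and the chosen
index values are assumptions for the example, not part of this patch):

	# count valid flits crossing XP 1, bus 0, virtual channel 1
	perf stat -a -e ccn/xp_valid_flit,xp=1,bus=0,vc=1/ sleep 1
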
diff --git a/Documentation/cpu-freq/cpufreq-stats.txt b/Documentation/cpu-freq/cpufreq-stats.txt
index fc647492e940..8d9773f23550 100644
--- a/Documentation/cpu-freq/cpufreq-stats.txt
+++ b/Documentation/cpu-freq/cpufreq-stats.txt
@@ -103,7 +103,7 @@ Config Main Menu
Power management options (ACPI, APM) --->
CPU Frequency scaling --->
[*] CPU Frequency scaling
- <*> CPU frequency translation statistics
+ [*] CPU frequency translation statistics
[*] CPU frequency translation statistics details
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index bf99e2f24788..205593f56fe7 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -16,6 +16,11 @@ Required properties:
- vref-supply: The regulator supply ADC reference voltage.
- #io-channel-cells: Should be 1, see ../iio-bindings.txt
+Optional properties:
+- resets: Must contain an entry for each entry in reset-names if this
+  option is supported. See ../reset/reset.txt for details.
+- reset-names: Must include the name "saradc-apb".
+
Example:
saradc: saradc@2006c000 {
compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
#io-channel-cells = <1>;
vref-supply = <&vcc18>;
};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-st.txt b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
index 88faa91125bf..3cd4c43a3260 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-st.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-st.txt
@@ -10,7 +10,7 @@ Required properties:
subsystem (mmcss) inside the FlashSS (available in STiH407 SoC
family).
-- clock-names: Should be "mmc".
+- clock-names: Should be "mmc" and "icn". (NB: The latter is not compulsory)
See: Documentation/devicetree/bindings/resource-names.txt
- clocks: Phandle to the clock.
See: Documentation/devicetree/bindings/clock/clock-bindings.txt
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
new file mode 100644
index 000000000000..9c67ee4890d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -0,0 +1,89 @@
+* Qualcomm Atheros QCA8xxx switch family
+
+Required properties:
+
+- compatible: should be "qca,qca8337"
+- #size-cells: must be 0
+- #address-cells: must be 1
+
+Subnodes:
+
+The integrated switch subnode should be specified according to the binding
+described in dsa/dsa.txt. As the QCA8K switches do not have an N:N mapping of
+ports and PHY IDs, each subnode describing a port needs to have a valid phandle
+referencing the internal PHY connected to it. The CPU port of this switch is
+always port 0.
+
+Example:
+
+
+ &mdio0 {
+ phy_port1: phy@0 {
+ reg = <0>;
+ };
+
+ phy_port2: phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: phy@2 {
+ reg = <2>;
+ };
+
+ phy_port4: phy@3 {
+ reg = <3>;
+ };
+
+ phy_port5: phy@4 {
+ reg = <4>;
+ };
+
+ switch0@0 {
+ compatible = "qca,qca8337";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&gmac1>;
+ phy-mode = "rgmii";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy_port1>;
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy_port2>;
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy_port3>;
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan4";
+ phy-handle = <&phy_port4>;
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "wan";
+ phy-handle = <&phy_port5>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 5d88f37480b6..e1d76812419c 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -11,8 +11,8 @@ The following properties are common to the Ethernet controllers:
the maximum frame size (there's contradiction in ePAPR).
- phy-mode: string, operation mode of the PHY interface; supported values are
"mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
- "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
- standard property;
+ "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a
+ de-facto standard property;
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
- phy-handle: phandle, specifies a reference to a node representing a PHY
device; this property is described in ePAPR and so preferred;
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index 32eaaca04d9b..f09525772369 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -24,14 +24,17 @@ Required properties:
Optional properties:
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
-
+- mediatek,hwlro: indicates that the hardware supports LRO (large receive offload)
* Ethernet MAC node
Required properties:
- compatible: Should be "mediatek,eth-mac"
- reg: The number of the MAC
-- phy-handle: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory. The
+	phy-mode "trgmii" must be provided when reg is equal to 0
+	and the MAC uses a fixed link to connect to an internal
+	switch such as the MT7530.
Example:
@@ -51,6 +54,7 @@ eth: ethernet@1b100000 {
reset-names = "eth";
mediatek,ethsys = <&ethsys>;
mediatek,pctl = <&syscfg_pctl_a>;
+ mediatek,hwlro;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index 3fed3c124411..16c3a9501f5d 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -3,9 +3,11 @@
Required properties:
- compatible : Should be "smsc,lan<model>", "smsc,lan9115"
- reg : Address and length of the io space for SMSC LAN
-- interrupts : Should contain SMSC LAN interrupt line
-- interrupt-parent : Should be the phandle for the interrupt controller
- that services interrupts for this device
+- interrupts : one or two interrupt specifiers
+ - The first interrupt is the SMSC LAN interrupt line
+ - The second interrupt (if present) is the PME (power
+ management event) interrupt that is able to wake up the host
+ system with a 50ms pulse on network activity
- phy-mode : See ethernet.txt file in the same directory
Optional properties:
@@ -21,6 +23,10 @@ Optional properties:
external PHY
- smsc,save-mac-address : Indicates that mac address needs to be saved
before resetting the controller
+- reset-gpios : a GPIO line connected to the RESET (active low) signal
+ of the device. On many systems this is wired high so the device goes
+  out of reset at power-on, but if it is under program control, this
+  optional GPIO can be used to take the device out of reset.
Examples:
@@ -29,7 +35,8 @@ lan9220@f4000000 {
reg = <0xf4000000 0x2000000>;
phy-mode = "mii";
interrupt-parent = <&gpio1>;
- interrupts = <31>;
+ interrupts = <31>, <32>;
+ reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
reg-io-width = <4>;
smsc,irq-push-pull;
};
diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt
index f5561ac7e17e..936ab5b87324 100644
--- a/Documentation/devicetree/bindings/serial/8250.txt
+++ b/Documentation/devicetree/bindings/serial/8250.txt
@@ -42,9 +42,6 @@ Optional properties:
- auto-flow-control: one way to enable automatic flow control support. The
driver is allowed to detect support for the capability even without this
property.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
- line respectively. It will use specified GPIO instead of the peripheral
- function pin for the UART feature. If unsure, don't specify this property.
Note:
* fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
interrupts = <10>;
reg-shift = <2>;
};
-
-Example for OMAP UART using GPIO-based modem control signals:
-
- uart4: serial@49042000 {
- compatible = "ti,omap3-uart";
- reg = <0x49042000 0x400>;
- interrupts = <80>;
- ti,hwmods = "uart4";
- clock-frequency = <48000000>;
- cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
- rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
- dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
- dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
- dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
- rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
- };
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index d6259c786316..bcbf9710e4af 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
moves it over to the old name. The new file may be on a different
filesystem, so both st_dev and st_ino of the file may change.
-Any open files referring to this inode will access the old data and
-metadata. Similarly any file locks obtained before copy_up will not
-apply to the copied up file.
+Any open files referring to this inode will access the old data.
-On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
-fsetxattr(2) will fail with EROFS.
+Any file locks (and leases) obtained before copy_up will not apply
+to the copied up file.
If a file with multiple hard links is copied up, then this will
"break" the link. Changes will not be propagated to other names
diff --git a/Documentation/i2c/slave-interface b/Documentation/i2c/slave-interface
index 80807adb8ded..7e2a228f21bc 100644
--- a/Documentation/i2c/slave-interface
+++ b/Documentation/i2c/slave-interface
@@ -145,6 +145,11 @@ If you want to add slave support to the bus driver:
* Catch the slave interrupts and send appropriate i2c_slave_events to the backend.
+Note that most hardware supports being master _and_ slave on the same bus. So,
+if you extend a bus driver, please make sure that the driver supports that as
+well. In almost all cases, slave support does not need to disable the master
+functionality.
+
Check the i2c-rcar driver as an example.
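
As a sketch of the backend side referred to above (the callback signature
is the one the slave interface defines; the body is illustrative only):

	#include <linux/i2c.h>

	static int my_slave_cb(struct i2c_client *client,
			       enum i2c_slave_event event, u8 *val)
	{
		switch (event) {
		case I2C_SLAVE_WRITE_RECEIVED:
			/* *val holds the byte just sent by the master */
			break;
		case I2C_SLAVE_READ_REQUESTED:
			*val = 0;	/* first byte to return to the master */
			break;
		default:
			break;
		}
		return 0;
	}
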
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index 04ee90099676..201d4839931c 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -144,7 +144,7 @@ logical address types are already defined will return with error ``EBUSY``.
- ``flags``
- - Flags. No flags are defined yet, so set this to 0.
+ - Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
- .. row 7
@@ -201,6 +201,25 @@ logical address types are already defined will return with error ``EBUSY``.
give the CEC framework more information about the device type, even
though the framework won't use it directly in the CEC message.
+.. _cec-log-addrs-flags:
+
+.. flat-table:: Flags for struct cec_log_addrs
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 3 1 4
+
+
+ - .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
+
+ - ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
+
+ - 1
+
+      - By default, if no logical address of the requested type can be claimed,
+        the adapter will go back to the unconfigured state. If this flag is set,
+        it will fall back to the Unregistered logical address instead. Note that
+        if the Unregistered logical address was explicitly requested, this flag
+        has no effect.
+
.. _cec-versions:
.. flat-table:: CEC Versions
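
How userspace might opt in to the fallback described above, as a minimal
sketch (CEC_ADAP_S_LOG_ADDRS and struct cec_log_addrs come from the CEC
uapi; the remaining fields and all error handling are elided):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/cec.h>

	static int claim_with_fallback(int fd)
	{
		struct cec_log_addrs laddrs;

		memset(&laddrs, 0, sizeof(laddrs));
		/* fall back to Unregistered if the requested type cannot be claimed */
		laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
		/* ... num_log_addrs, log_addr_type[], osd_name, etc. ... */
		return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
	}
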
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 7a6d6d00ce19..2e1e73928396 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -64,7 +64,8 @@ it is guaranteed that the state did change in between the two events.
- ``phys_addr``
- - The current physical address.
+ - The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
+ valid physical address is set.
- .. row 2
@@ -72,7 +73,10 @@ it is guaranteed that the state did change in between the two events.
- ``log_addr_mask``
- - The current set of claimed logical addresses.
+ - The current set of claimed logical addresses. This is 0 if no logical
+ addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
+ If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
+ has the unregistered logical address. In that case all other bits are 0.
diff --git a/Documentation/networking/ipvlan.txt b/Documentation/networking/ipvlan.txt
index 14422f8fcdc4..24196cef7c91 100644
--- a/Documentation/networking/ipvlan.txt
+++ b/Documentation/networking/ipvlan.txt
@@ -22,7 +22,7 @@ The driver can be built into the kernel (CONFIG_IPVLAN=y) or as a module
There are no module parameters for this driver and it can be configured
using IProute2/ip utility.
- ip link add link <master-dev> <slave-dev> type ipvlan mode { l2 | L3 }
+ ip link add link <master-dev> <slave-dev> type ipvlan mode { l2 | l3 | l3s }
e.g. ip link add link ipvl0 eth0 type ipvlan mode l2
@@ -48,6 +48,11 @@ master device for the L2 processing and routing from that instance will be
used before packets are queued on the outbound device. In this mode the slaves
will not receive nor can send multicast / broadcast traffic.
+4.3 L3S mode:
+ This is very similar to the L3 mode except that iptables (conn-tracking)
+works in this mode, hence it is L3-symmetric (L3s). This has slightly lower
+performance, but that shouldn't matter since you choose this mode over plain
+L3 mode precisely to make conn-tracking work.
5. What to choose (macvlan vs. ipvlan)?
These two devices are very similar in many regards and the specific use
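
The new mode plugs into the command form shown earlier; for example
(device names assumed):

	ip link add link eth0 ipvl1 type ipvlan mode l3s
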
diff --git a/Documentation/rapidio/mport_cdev.txt b/Documentation/rapidio/mport_cdev.txt
index 6e491a662461..a53f786ee2e9 100644
--- a/Documentation/rapidio/mport_cdev.txt
+++ b/Documentation/rapidio/mport_cdev.txt
@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
III. Module parameters
+- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
+  This parameter sets the maximum completion wait time for SYNC mode DMA
+  transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
+
- 'dbg_level' - This parameter allows control of the amount of debug information
  generated by this device driver. This parameter is formed by a set of
  bit masks that correspond to the specific functional blocks.
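
As a usage sketch for the parameters above (the module name here is an
assumption for the example; the parameter names are those documented):

	modprobe rio_mport_cdev dma_timeout=5000 dbg_level=0
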
diff --git a/MAINTAINERS b/MAINTAINERS
index 9a1783547baf..efd69c76b069 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -807,6 +807,7 @@ M: Laura Abbott <labbott@redhat.com>
M: Sumit Semwal <sumit.semwal@linaro.org>
L: devel@driverdev.osuosl.org
S: Supported
+F: Documentation/devicetree/bindings/staging/ion/
F: drivers/staging/android/ion
F: drivers/staging/android/uapi/ion.h
F: drivers/staging/android/uapi/ion_test.h
@@ -1632,7 +1633,8 @@ N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
-M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+R: Javier Martinez Canillas <javier@osg.samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
@@ -1652,7 +1654,6 @@ F: drivers/*/*s3c64xx*
F: drivers/*/*s5pv210*
F: drivers/memory/samsung/*
F: drivers/soc/samsung/*
-F: drivers/spi/spi-s3c*
F: Documentation/arm/Samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/sram/samsung-sram.txt
@@ -1840,6 +1841,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/UNIPHIER ARCHITECTURE
M: Masahiro Yamada <yamada.masahiro@socionext.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git
S: Maintained
F: arch/arm/boot/dts/uniphier*
F: arch/arm/include/asm/hardware/cache-uniphier.h
@@ -2493,7 +2495,7 @@ F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <j.vosburgh@gmail.com>
M: Veaceslav Falico <vfalico@gmail.com>
-M: Andy Gospodarek <gospo@cumulusnetworks.com>
+M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
S: Supported
@@ -2508,7 +2510,7 @@ S: Supported
F: kernel/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
-M: Gary Zambrano <zambrano@broadcom.com>
+M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
@@ -3256,7 +3258,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
-M: Vladimir Davydov <vdavydov@virtuozzo.com>
+M: Vladimir Davydov <vdavydov.dev@gmail.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -3277,7 +3279,7 @@ S: Maintained
F: drivers/net/wan/cosa*
CPMAC ETHERNET DRIVER
-M: Florian Fainelli <florian@openwrt.org>
+M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/cpmac.c
@@ -6110,7 +6112,7 @@ S: Supported
F: drivers/cpufreq/intel_pstate.c
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M: Maik Broemme <mbroemme@plusserver.de>
+M: Maik Broemme <mbroemme@libmpq.org>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: Documentation/fb/intelfb.txt
@@ -7473,7 +7475,8 @@ F: Documentation/devicetree/bindings/sound/max9860.txt
F: sound/soc/codecs/max9860.*
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-pm@vger.kernel.org
S: Supported
F: drivers/power/max14577_charger.c
@@ -7489,7 +7492,8 @@ F: include/dt-bindings/*/*max77802.h
MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
M: Chanwoo Choi <cw00.choi@samsung.com>
-M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: drivers/*/max14577*.c
@@ -8166,6 +8170,15 @@ S: Maintained
W: https://fedorahosted.org/dropwatch/
F: net/core/drop_monitor.c
+NETWORKING [DSA]
+M: Andrew Lunn <andrew@lunn.ch>
+M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M: Florian Fainelli <f.fainelli@gmail.com>
+S: Maintained
+F: net/dsa/
+F: include/net/dsa.h
+F: drivers/net/dsa/
+
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
@@ -9255,7 +9268,7 @@ F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <tomasz.figa@gmail.com>
-M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -10195,7 +10208,7 @@ S: Maintained
F: drivers/platform/x86/samsung-laptop.c
SAMSUNG AUDIO (ASoC) DRIVERS
-M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sangbeom Kim <sbkim73@samsung.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -10210,7 +10223,8 @@ F: drivers/video/fbdev/s3c-fb.c
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
M: Sangbeom Kim <sbkim73@samsung.com>
-M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: linux-kernel@vger.kernel.org
L: linux-samsung-soc@vger.kernel.org
S: Supported
@@ -10269,6 +10283,17 @@ S: Supported
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
F: drivers/clk/samsung/
+SAMSUNG SPI DRIVERS
+M: Kukjin Kim <kgene@kernel.org>
+M: Krzysztof Kozlowski <krzk@kernel.org>
+M: Andi Shyti <andi.shyti@samsung.com>
+L: linux-spi@vger.kernel.org
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/spi/spi-samsung.txt
+F: drivers/spi/spi-s3c*
+F: include/linux/platform_data/spi-s3c64xx.h
+
SAMSUNG SXGBE DRIVERS
M: Byungho An <bh74.an@samsung.com>
M: Girish K S <ks.giri@samsung.com>
@@ -11248,12 +11273,8 @@ S: Odd Fixes
F: drivers/staging/vt665?/
STAGING - WILC1000 WIFI DRIVER
-M: Johnny Kim <johnny.kim@atmel.com>
-M: Austin Shin <austin.shin@atmel.com>
-M: Chris Park <chris.park@atmel.com>
-M: Tony Cho <tony.cho@atmel.com>
-M: Glen Lee <glen.lee@atmel.com>
-M: Leo Kim <leo.kim@atmel.com>
+M: Aditya Shankar <aditya.shankar@microchip.com>
+M: Ganesh Krishna <ganesh.krishna@microchip.com>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/staging/wilc1000/
@@ -12574,7 +12595,7 @@ F: include/linux/if_*vlan.h
F: net/8021q/
VLYNQ BUS
-M: Florian Fainelli <florian@openwrt.org>
+M: Florian Fainelli <f.fainelli@gmail.com>
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Maintained
F: drivers/vlynq/vlynq.c
diff --git a/Makefile b/Makefile
index 67f42d57e6e7..74e22c2f408b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 8
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index e9c9334507dd..fd6e9712af81 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -336,17 +336,6 @@ config HAVE_ARCH_SECCOMP_FILTER
results in the system call being skipped immediately.
- seccomp syscall wired up
- For best performance, an arch should use seccomp_phase1 and
- seccomp_phase2 directly. It should call seccomp_phase1 for all
- syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
- need to be called from a ptrace-safe context. It must then
- call seccomp_phase2 if seccomp_phase1 returns anything other
- than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.
-
- As an additional optimization, an arch may provide seccomp_data
- directly to seccomp_phase1; this avoids multiple calls
- to the syscall_xyz helpers for every syscall.
-
config SECCOMP_FILTER
def_bool y
depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index c419b43c461d..466e42e96bfa 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
return __cu_len;
}
-extern inline long
-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
-{
- if (__access_ok((unsigned long)validate, len, get_fs()))
- len = __copy_tofrom_user_nocheck(to, from, len);
- return len;
-}
-
#define __copy_to_user(to, from, n) \
({ \
__chk_user_ptr(to); \
@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
-
extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
- return __copy_tofrom_user((__force void *)to, from, n, to);
+ if (likely(__access_ok((unsigned long)to, n, get_fs())))
+ n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
+ return n;
}
extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
- return __copy_tofrom_user(to, (__force void *)from, n, from);
+ if (likely(__access_ok((unsigned long)from, n, get_fs())))
+ n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
+ else
+ memset(to, 0, n);
+ return n;
}
extern void __do_clear_user(void);
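
The behavioural change above is subtle: on a failed access check,
copy_from_user() now zeroes the destination instead of leaving it
untouched, so callers can no longer observe stale memory. A minimal
userspace sketch of the same contract (names are illustrative, not
kernel API):

	#include <string.h>

	/* If the source range fails validation, clear the destination so
	 * the caller never operates on uninitialised memory; the return
	 * value is the number of bytes NOT copied, as with copy_from_user().
	 */
	static long copy_checked(void *to, const void *from, long n, int ok)
	{
		if (ok) {
			memcpy(to, from, n);
			return 0;
		}
		memset(to, 0, n);
		return n;
	}
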
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index a78d5670884f..41faf17cd28d 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -83,7 +83,10 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
- "3: mov %0, %3\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
@@ -101,7 +104,11 @@
"2: ;nop\n" \
" .section .fixup, \"ax\"\n" \
" .align 4\n" \
- "3: mov %0, %3\n" \
+ "3: # return -EFAULT\n" \
+ " mov %0, %3\n" \
+ " # zero out dst ptr\n" \
+ " mov %1, 0\n" \
+ " mov %R1, 0\n" \
" j 2b\n" \
" .previous\n" \
" .section __ex_table, \"a\"\n" \
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index c8609d8d2c55..b689172632ef 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -226,7 +226,7 @@
#address-cells = <1>;
#size-cells = <1>;
- elm_id = <&elm>;
+ ti,elm-id = <&elm>;
};
};
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index df63484ef9b3..e7d9ca1305fa 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -161,7 +161,7 @@
#address-cells = <1>;
#size-cells = <1>;
- elm_id = <&elm>;
+ ti,elm-id = <&elm>;
/* MTD partition table */
partition@0 {
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
index 86f773165d5c..1263c9d4cba3 100644
--- a/arch/arm/boot/dts/am335x-phycore-som.dtsi
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -197,7 +197,7 @@
gpmc,wr-access-ns = <30>;
gpmc,wr-data-mux-bus-ns = <0>;
- elm_id = <&elm>;
+ ti,elm-id = <&elm>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts
index 2e0556af6e5e..d3e6bd805006 100644
--- a/arch/arm/boot/dts/armada-388-clearfog.dts
+++ b/arch/arm/boot/dts/armada-388-clearfog.dts
@@ -390,12 +390,12 @@
port@0 {
reg = <0>;
- label = "lan1";
+ label = "lan5";
};
port@1 {
reg = <1>;
- label = "lan2";
+ label = "lan4";
};
port@2 {
@@ -405,12 +405,12 @@
port@3 {
reg = <3>;
- label = "lan4";
+ label = "lan2";
};
port@4 {
reg = <4>;
- label = "lan5";
+ label = "lan1";
};
port@5 {
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index caf2707680c1..e9b47b2bbc33 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -2,6 +2,7 @@
/ {
memory {
+ device_type = "memory";
reg = <0 0x10000000>;
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index b98252232d20..445624a1a1de 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -2,7 +2,6 @@
#include <dt-bindings/clock/bcm2835.h>
#include <dt-bindings/clock/bcm2835-aux.h>
#include <dt-bindings/gpio/gpio.h>
-#include "skeleton.dtsi"
/* This include file covers the common peripherals and configuration between
* bcm2835 and bcm2836 implementations, leaving the CPU configuration to
@@ -13,6 +12,8 @@
compatible = "brcm,bcm2835";
model = "BCM2835";
interrupt-parent = <&intc>;
+ #address-cells = <1>;
+ #size-cells = <1>;
chosen {
bootargs = "earlyprintk console=ttyAMA0";
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
index d9499310a301..f6d135245a4b 100644
--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
+++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
@@ -447,14 +447,11 @@
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <0 4>;
samsung,dw-mshc-ddr-timing = <0 2>;
- samsung,dw-mshc-hs400-timing = <0 2>;
- samsung,read-strobe-delay = <90>;
pinctrl-names = "default";
pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>;
bus-width = <8>;
cap-mmc-highspeed;
mmc-hs200-1_8v;
- mmc-hs400-1_8v;
vmmc-supply = <&ldo20_reg>;
vqmmc-supply = <&ldo11_reg>;
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index b620ac884cfd..b13b0b2db881 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -243,7 +243,7 @@
clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
<&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
<&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
- <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>,
+ <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_DUMMY>,
<&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
clock-names = "core", "rxtx0",
"rxtx1", "rxtx2",
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index 96ea936eeeb0..240a2864d044 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -64,7 +64,7 @@
cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
no-1-8-v;
keep-power-in-suspend;
- enable-sdio-wakup;
+ wakeup-source;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 95ee268ed510..2f33c463cbce 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -131,7 +131,7 @@
ti,y-min = /bits/ 16 <0>;
ti,y-max = /bits/ 16 <0>;
ti,pressure-max = /bits/ 16 <0>;
- ti,x-plat-ohms = /bits/ 16 <400>;
+ ti,x-plate-ohms = /bits/ 16 <400>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index ef84d8699a76..5bf62897014c 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -113,7 +113,7 @@
partition@e0000 {
label = "u-boot environment";
- reg = <0xe0000 0x100000>;
+ reg = <0xe0000 0x20000>;
};
partition@100000 {
diff --git a/arch/arm/boot/dts/kirkwood-openrd.dtsi b/arch/arm/boot/dts/kirkwood-openrd.dtsi
index e4ecab112601..7175511a92da 100644
--- a/arch/arm/boot/dts/kirkwood-openrd.dtsi
+++ b/arch/arm/boot/dts/kirkwood-openrd.dtsi
@@ -116,6 +116,10 @@
};
};
+&pciec {
+ status = "okay";
+};
+
&pcie0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index 365f39ff58bb..0ff1c2de95bf 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -35,10 +35,15 @@
ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */
nand@0,0 {
- linux,mtd-name = "micron,mt29f4g16abbda3w";
+ compatible = "ti,omap2-nand";
reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+ interrupt-parent = <&gpmc>;
+ interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */
+ <1 IRQ_TYPE_NONE>; /* termcount */
+ linux,mtd-name = "micron,mt29f4g16abbda3w";
nand-bus-width = <16>;
ti,nand-ecc-opt = "bch8";
+ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
gpmc,sync-clk-ps = <0>;
gpmc,cs-on-ns = <0>;
gpmc,cs-rd-off-ns = <44>;
@@ -54,10 +59,6 @@
gpmc,wr-access-ns = <40>;
gpmc,wr-data-mux-bus-ns = <0>;
gpmc,device-width = <2>;
-
- gpmc,page-burst-access-ns = <5>;
- gpmc,cycle2cycle-delay-ns = <50>;
-
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 5e9a13c0eaf7..1c2c74655416 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -46,6 +46,7 @@
linux,mtd-name = "micron,mt29f4g16abbda3w";
nand-bus-width = <16>;
ti,nand-ecc-opt = "bch8";
+ rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */
gpmc,sync-clk-ps = <0>;
gpmc,cs-on-ns = <0>;
gpmc,cs-rd-off-ns = <44>;
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index de256fa8da48..3e946cac55f3 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -223,7 +223,9 @@
};
&gpmc {
- ranges = <0 0 0x00000000 0x20000000>;
+ ranges = <0 0 0x30000000 0x1000000>, /* CS0 */
+ <4 0 0x2b000000 0x1000000>, /* CS4 */
+ <5 0 0x2c000000 0x1000000>; /* CS5 */
nand@0,0 {
compatible = "ti,omap2-nand";
diff --git a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
index 7df27926ead2..4f4c6efbd518 100644
--- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi
@@ -55,8 +55,6 @@
#include "omap-gpmc-smsc9221.dtsi"
&gpmc {
- ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */
-
ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio6>;
diff --git a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
index 9e24b6a1d07b..1b304e2f1bd2 100644
--- a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
@@ -27,8 +27,6 @@
#include "omap-gpmc-smsc9221.dtsi"
&gpmc {
- ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */
-
ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio6>;
diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
index 334109e14613..82e98ee3023a 100644
--- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
@@ -15,9 +15,6 @@
#include "omap-gpmc-smsc9221.dtsi"
&gpmc {
- ranges = <4 0 0x2b000000 0x1000000>, /* CS4 */
- <5 0 0x2c000000 0x1000000>; /* CS5 */
-
smsc1: ethernet@gpmc {
reg = <5 0 0xff>;
interrupt-parent = <&gpio6>;
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index c0ba86c3a2ab..0d0dae3a1694 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -197,6 +197,8 @@
clock-names = "saradc", "apb_pclk";
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
#io-channel-cells = <1>;
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index cd33f0170890..91c4b3c7a8d5 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -279,6 +279,8 @@
#io-channel-cells = <1>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index 99bbcc2c9b89..e2cd683b4e4b 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -399,6 +399,8 @@
#io-channel-cells = <1>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index d294e82447a2..8b063ab10c19 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -550,8 +550,9 @@
interrupt-names = "mmcirq";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc0>;
- clock-names = "mmc";
- clocks = <&clk_s_c0_flexgen CLK_MMC_0>;
+ clock-names = "mmc", "icn";
+ clocks = <&clk_s_c0_flexgen CLK_MMC_0>,
+ <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
bus-width = <8>;
non-removable;
};
@@ -565,8 +566,9 @@
interrupt-names = "mmcirq";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sd1>;
- clock-names = "mmc";
- clocks = <&clk_s_c0_flexgen CLK_MMC_1>;
+ clock-names = "mmc", "icn";
+ clocks = <&clk_s_c0_flexgen CLK_MMC_1>,
+ <&clk_s_c0_flexgen CLK_RX_ICN_HVA>;
resets = <&softreset STIH407_MMC1_SOFTRESET>;
bus-width = <4>;
};
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 18ed1ad10d32..40318869c733 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -41,7 +41,8 @@
compatible = "st,st-ohci-300x";
reg = <0x9a03c00 0x100>;
interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
reset-names = "power", "softreset";
@@ -57,7 +58,8 @@
interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb0>;
- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
<&softreset STIH407_USB2_PORT0_SOFTRESET>;
reset-names = "power", "softreset";
@@ -71,7 +73,8 @@
compatible = "st,st-ohci-300x";
reg = <0x9a83c00 0x100>;
interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
reset-names = "power", "softreset";
@@ -87,7 +90,8 @@
interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb1>;
- clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
+ clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
+ <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
<&softreset STIH407_USB2_PORT1_SOFTRESET>;
reset-names = "power", "softreset";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index e012890e0cf2..a17ba0243db3 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -84,7 +84,7 @@
trips {
cpu_alert0: cpu_alert0 {
/* milliCelsius */
- temperature = <850000>;
+ temperature = <85000>;
hysteresis = <2000>;
type = "passive";
};
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 1dfc492cc004..1444fbd543e7 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -897,7 +897,7 @@
palmas: tps65913@58 {
compatible = "ti,palmas";
reg = <0x58>;
- interrupts = <0 86 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index 70cf40996c3f..966a7fc044af 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -802,7 +802,7 @@
palmas: pmic@58 {
compatible = "ti,palmas";
reg = <0x58>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index 17dd14545862..a161fa1dfb61 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -63,7 +63,7 @@
palmas: pmic@58 {
compatible = "ti,palmas";
reg = <0x58>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 6403e0de540e..e52b82449a79 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -1382,7 +1382,7 @@
* Pin 41: BR_UART1_TXD
* Pin 44: BR_UART1_RXD
*/
- serial@0,70006000 {
+ serial@70006000 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
@@ -1394,7 +1394,7 @@
* Pin 71: UART2_CTS_L
* Pin 74: UART2_RTS_L
*/
- serial@0,70006040 {
+ serial@70006040 {
compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
status = "okay";
};
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 0e97b4b871f9..6c7b06854fce 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -140,7 +140,7 @@ static struct locomo_dev_info locomo_devices[] = {
static void locomo_handler(struct irq_desc *desc)
{
- struct locomo *lchip = irq_desc_get_chip_data(desc);
+ struct locomo *lchip = irq_desc_get_handler_data(desc);
int req, i;
/* Acknowledge the parent IRQ */
@@ -200,8 +200,7 @@ static void locomo_setup_irq(struct locomo *lchip)
* Install handler for IRQ_LOCOMO_HW.
*/
irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
- irq_set_chip_data(lchip->irq, lchip);
- irq_set_chained_handler(lchip->irq, locomo_handler);
+ irq_set_chained_handler_and_data(lchip->irq, locomo_handler, lchip);
/* Install handlers for IRQ_LOCOMO_* */
for ( ; irq <= lchip->irq_base + 3; irq++) {
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index fb0a0a4dfea4..2e076c492005 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -472,8 +472,8 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
* specifies that S0ReadyInt and S1ReadyInt should be '1'.
*/
sa1111_writel(0, irqbase + SA1111_INTPOL0);
- sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
- SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
+ sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
+ BIT(IRQ_S1_READY_NINT & 31),
irqbase + SA1111_INTPOL1);
/* clear all IRQs */
@@ -754,7 +754,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
if (sachip->irq != NO_IRQ) {
ret = sa1111_setup_irq(sachip, pd->irq_base);
if (ret)
- goto err_unmap;
+ goto err_clk;
}
#ifdef CONFIG_ARCH_SA1100
@@ -799,6 +799,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
return 0;
+ err_clk:
+ clk_disable(sachip->clk);
err_unmap:
iounmap(sachip->base);
err_clk_unprep:
@@ -869,9 +871,9 @@ struct sa1111_save_data {
#ifdef CONFIG_PM
-static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
+static int sa1111_suspend_noirq(struct device *dev)
{
- struct sa1111 *sachip = platform_get_drvdata(dev);
+ struct sa1111 *sachip = dev_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags;
unsigned int val;
@@ -934,9 +936,9 @@ static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
* restored by their respective drivers, and must be called
* via LDM after this function.
*/
-static int sa1111_resume(struct platform_device *dev)
+static int sa1111_resume_noirq(struct device *dev)
{
- struct sa1111 *sachip = platform_get_drvdata(dev);
+ struct sa1111 *sachip = dev_get_drvdata(dev);
struct sa1111_save_data *save;
unsigned long flags, id;
void __iomem *base;
@@ -952,7 +954,7 @@ static int sa1111_resume(struct platform_device *dev)
id = sa1111_readl(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
__sa1111_remove(sachip);
- platform_set_drvdata(dev, NULL);
+ dev_set_drvdata(dev, NULL);
kfree(save);
return 0;
}
@@ -1003,8 +1005,8 @@ static int sa1111_resume(struct platform_device *dev)
}
#else
-#define sa1111_suspend NULL
-#define sa1111_resume NULL
+#define sa1111_suspend_noirq NULL
+#define sa1111_resume_noirq NULL
#endif
static int sa1111_probe(struct platform_device *pdev)
@@ -1017,7 +1019,7 @@ static int sa1111_probe(struct platform_device *pdev)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENXIO;
+ return irq;
return __sa1111_probe(&pdev->dev, mem, irq);
}
@@ -1038,6 +1040,11 @@ static int sa1111_remove(struct platform_device *pdev)
return 0;
}
+static struct dev_pm_ops sa1111_pm_ops = {
+ .suspend_noirq = sa1111_suspend_noirq,
+ .resume_noirq = sa1111_resume_noirq,
+};
+
/*
* Not sure if this should be on the system bus or not yet.
* We really want some way to register a system device at
@@ -1050,10 +1057,9 @@ static int sa1111_remove(struct platform_device *pdev)
static struct platform_driver sa1111_device_driver = {
.probe = sa1111_probe,
.remove = sa1111_remove,
- .suspend = sa1111_suspend,
- .resume = sa1111_resume,
.driver = {
.name = "sa1111",
+ .pm = &sa1111_pm_ops,
},
};
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 71b42e66488a..78cd2f197e01 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -161,6 +161,7 @@ CONFIG_USB_MON=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC3=y
+CONFIG_NOP_USB_XCEIV=y
CONFIG_KEYSTONE_USB_PHY=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2c8665cd9dc5..ea3566fb92e2 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -781,7 +781,7 @@ CONFIG_MXS_DMA=y
CONFIG_DMA_BCM2835=y
CONFIG_DMA_OMAP=y
CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_VDMA=y
+CONFIG_XILINX_DMA=y
CONFIG_DMA_SUN6I=y
CONFIG_STAGING=y
CONFIG_SENSORS_ISL29018=y
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index da3c0428507b..aef022a87c53 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -284,7 +284,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
- if (nbytes) {
+ if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index d0131ee6f6af..3f82e9da7cec 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -47,6 +47,7 @@
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
+#define PMD_SECT_CACHE_MASK (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
/*
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index f8f1cff62065..4cd664abfcd3 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -62,6 +62,7 @@
#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
+#define PMD_SECT_CACHE_MASK (_AT(pmdval_t, 7) << 2)
/*
* + Level 3 descriptor (PTE)
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 0b1e4a93d67e..15d073ae5da2 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -142,6 +142,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
and r7, #0x1f @ Preserve HPMN
mcr p15, 4, r7, c1, c1, 1 @ HDCR
+ @ Make sure NS-SVC is initialised appropriately
+ mrc p15, 0, r7, c1, c0, 0 @ SCTLR
+ orr r7, #(1 << 5) @ CP15 barriers enabled
+ bic r7, #(3 << 7) @ Clear SED/ITD for v8 (RES0 for v7)
+ bic r7, #(3 << 19) @ WXN and UWXN disabled
+ mcr p15, 0, r7, c1, c0, 0 @ SCTLR
+
+ mrc p15, 0, r7, c0, c0, 0 @ MIDR
+ mcr p15, 4, r7, c0, c0, 0 @ VPIDR
+
+ mrc p15, 0, r7, c0, c0, 5 @ MPIDR
+ mcr p15, 4, r7, c0, c0, 5 @ VMPIDR
+
#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
@ make CNTP_* and CNTPCT accessible from PL1
mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 75f130ef6504..c94b90d43772 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -158,8 +158,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
- kvm_free_stage2_pgd(kvm);
-
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 29d0b23af2a9..e9a5c0e0c115 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1714,7 +1714,8 @@ int kvm_mmu_init(void)
kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
- hyp_idmap_start < kern_hyp_va(~0UL)) {
+ hyp_idmap_start < kern_hyp_va(~0UL) &&
+ hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
/*
* The idmap page is intersecting with the VA space,
* it is not safe to continue further.
@@ -1893,6 +1894,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
+ kvm_free_stage2_pgd(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index 3750575c73c5..06332f626565 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -255,6 +255,12 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
return -ENOMEM;
}
+ /*
+ * Clear the OF_POPULATED flag set in of_irq_init so that
+ * later the Exynos PMU platform device won't be skipped.
+ */
+ of_node_clear_flag(node, OF_POPULATED);
+
return 0;
}
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index 5d9bfab279dd..6bb7d9cf1e38 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -64,6 +64,7 @@ static void __init imx6ul_init_machine(void)
if (parent == NULL)
pr_warn("failed to initialize soc device\n");
+ of_platform_default_populate(NULL, NULL, parent);
imx6ul_enet_init();
imx_anatop_init();
imx6ul_pm_init();
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index 58924b3844df..fe708e26d021 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
val &= ~BM_CLPCR_SBYOS;
if (cpu_is_imx6sl())
val |= BM_CLPCR_BYPASS_PMIC_READY;
- if (cpu_is_imx6sl() || cpu_is_imx6sx())
+ if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
else
val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS;
@@ -310,7 +310,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode)
val |= 0x3 << BP_CLPCR_STBY_COUNT;
val |= BM_CLPCR_VSTBY;
val |= BM_CLPCR_SBYOS;
- if (cpu_is_imx6sl())
+ if (cpu_is_imx6sl() || cpu_is_imx6sx())
val |= BM_CLPCR_BYPASS_PMIC_READY;
if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul())
val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
index c073fb57dd13..6f2d0aec0513 100644
--- a/arch/arm/mach-omap2/cm33xx.c
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -220,9 +220,6 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
{
int i = 0;
- if (!clkctrl_offs)
- return 0;
-
omap_test_timeout(_is_module_ready(inst, clkctrl_offs),
MAX_MODULE_READY_TIME, i);
@@ -246,9 +243,6 @@ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
{
int i = 0;
- if (!clkctrl_offs)
- return 0;
-
omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) ==
CLKCTRL_IDLEST_DISABLED),
MAX_MODULE_READY_TIME, i);
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 2c0e07ed6b99..2ab27ade136a 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -278,9 +278,6 @@ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs,
{
int i = 0;
- if (!clkctrl_offs)
- return 0;
-
omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs),
MAX_MODULE_READY_TIME, i);
@@ -304,9 +301,6 @@ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs,
{
int i = 0;
- if (!clkctrl_offs)
- return 0;
-
omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) ==
CLKCTRL_IDLEST_DISABLED),
MAX_MODULE_DISABLE_TIME, i);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 5b709383381c..1052b29697b8 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1053,6 +1053,10 @@ static int _omap4_wait_target_disable(struct omap_hwmod *oh)
if (oh->flags & HWMOD_NO_IDLEST)
return 0;
+ if (!oh->prcm.omap4.clkctrl_offs &&
+ !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+ return 0;
+
return omap_cm_wait_module_idle(oh->clkdm->prcm_partition,
oh->clkdm->cm_inst,
oh->prcm.omap4.clkctrl_offs, 0);
@@ -2971,6 +2975,10 @@ static int _omap4_wait_target_ready(struct omap_hwmod *oh)
if (!_find_mpu_rt_port(oh))
return 0;
+ if (!oh->prcm.omap4.clkctrl_offs &&
+ !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
+ return 0;
+
/* XXX check module SIDLEMODE, hardreset status */
return omap_cm_wait_module_ready(oh->clkdm->prcm_partition,
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 4041bad79a9a..78904017f18c 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -443,8 +443,12 @@ struct omap_hwmod_omap2_prcm {
* HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM
* module-level context loss register associated with them; this
* flag bit should be set in those cases
+ * HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET: Some IP blocks have a valid CLKCTRL
+ * offset of zero; this flag bit should be set in those cases to
+ * distinguish from hwmods that have no clkctrl offset.
*/
#define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT (1 << 0)
+#define HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET (1 << 1)
/**
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
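For reference, the reason this flag exists: a clkctrl_offs of 0 used to be treated as "no CLKCTRL register", but the AM33xx RTC module (see the ipblock data below) really does sit at offset 0. The guards added in omap_hwmod.c above reduce to:

    if (!oh->prcm.omap4.clkctrl_offs &&
        !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET))
            return 0;       /* genuinely no CLKCTRL register to poll */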
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
index 55c5878577f4..e2d84aa7f595 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
@@ -29,6 +29,7 @@
#define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl))
#define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl))
#define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst))
+#define PRCM_FLAGS(oh, flag) ((oh).prcm.omap4.flags = (flag))
/*
* 'l3' class
@@ -1296,6 +1297,7 @@ static void omap_hwmod_am33xx_clkctrl(void)
CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET);
CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET);
CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET);
+ PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET);
CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET);
CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET);
CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET);
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d72ee6185d5e..1cc4a6f3954e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
* display serial interface controller
*/
+static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
+ .rev_offs = 0x0000,
+ .sysc_offs = 0x0010,
+ .syss_offs = 0x0014,
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
+ SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
+ SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+ .sysc_fields = &omap_hwmod_sysc_type1,
+};
+
static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
.name = "dsi",
+ .sysc = &omap3xxx_dsi_sysc,
};
static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index 7245f3359564..d6159f8ef0c2 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -137,6 +137,18 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
// no D+ pullup; lubbock can't connect/disconnect in software
};
+static void lubbock_init_pcmcia(void)
+{
+ struct clk *clk;
+
+ /* Add an alias for the SA1111 PCMCIA clock */
+ clk = clk_get_sys("pxa2xx-pcmcia", NULL);
+ if (!IS_ERR(clk)) {
+ clkdev_create(clk, NULL, "1800");
+ clk_put(clk);
+ }
+}
+
static struct resource sa1111_resources[] = {
[0] = {
.start = 0x10000000,
@@ -467,6 +479,8 @@ static void __init lubbock_init(void)
pxa_set_btuart_info(NULL);
pxa_set_stuart_info(NULL);
+ lubbock_init_pcmcia();
+
clk_add_alias("SA1111_CLK", NULL, "GPIO11_CLK", NULL);
pxa_set_udc_info(&udc_info);
pxa_set_fb_info(NULL, &sharp_lm8v31);
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index cbf53bb9c814..0db46895c82a 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -125,6 +125,8 @@ static unsigned long clk_36864_get_rate(struct clk *clk)
}
static struct clkops clk_36864_ops = {
+ .enable = clk_cpu_enable,
+ .disable = clk_cpu_disable,
.get_rate = clk_36864_get_rate,
};
@@ -140,9 +142,8 @@ static struct clk_lookup sa11xx_clkregs[] = {
CLKDEV_INIT(NULL, "OSTIMER0", &clk_36864),
};
-static int __init sa11xx_clk_init(void)
+int __init sa11xx_clk_init(void)
{
clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
return 0;
}
-core_initcall(sa11xx_clk_init);
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 345e63f4eb71..3e09beddb6e8 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -34,6 +34,7 @@
#include <mach/hardware.h>
#include <mach/irqs.h>
+#include <mach/reset.h>
#include "generic.h"
#include <clocksource/pxa.h>
@@ -95,6 +96,8 @@ static void sa1100_power_off(void)
void sa11x0_restart(enum reboot_mode mode, const char *cmd)
{
+ clear_reset_status(RESET_STATUS_ALL);
+
if (mode == REBOOT_SOFT) {
/* Jump into ROM at address 0 */
soft_restart(0);
@@ -388,6 +391,7 @@ void __init sa1100_init_irq(void)
sa11x0_init_irq_nodt(IRQ_GPIO0_SC, irq_resource.start);
sa1100_init_gpio();
+ sa11xx_clk_init();
}
/*
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 0d92e119b36b..68199b603ff7 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -44,3 +44,5 @@ int sa11x0_pm_init(void);
#else
static inline int sa11x0_pm_init(void) { return 0; }
#endif
+
+int sa11xx_clk_init(void);
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 62437b57813e..73e3adbc1330 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -41,39 +41,26 @@
#define REGULATOR_IRQ_MASK BIT(2) /* IRQ2, active low */
-static void __iomem *irqc;
-
-static const u8 da9063_mask_regs[] = {
- DA9063_REG_IRQ_MASK_A,
- DA9063_REG_IRQ_MASK_B,
- DA9063_REG_IRQ_MASK_C,
- DA9063_REG_IRQ_MASK_D,
-};
-
-/* DA9210 System Control and Event Registers */
+/* start of DA9210 System Control and Event Registers */
#define DA9210_REG_MASK_A 0x54
-#define DA9210_REG_MASK_B 0x55
-
-static const u8 da9210_mask_regs[] = {
- DA9210_REG_MASK_A,
- DA9210_REG_MASK_B,
-};
-
-static void da9xxx_mask_irqs(struct i2c_client *client, const u8 regs[],
- unsigned int nregs)
-{
- unsigned int i;
- dev_info(&client->dev, "Masking %s interrupt sources\n", client->name);
+static void __iomem *irqc;
- for (i = 0; i < nregs; i++) {
- int error = i2c_smbus_write_byte_data(client, regs[i], ~0);
- if (error) {
- dev_err(&client->dev, "i2c error %d\n", error);
- return;
- }
- }
-}
+/* the first byte sets the register pointer; the following bytes are consecutive reg values */
+static u8 da9063_irq_clr[] = { DA9063_REG_IRQ_MASK_A, 0xff, 0xff, 0xff, 0xff };
+static u8 da9210_irq_clr[] = { DA9210_REG_MASK_A, 0xff, 0xff };
+
+static struct i2c_msg da9xxx_msgs[2] = {
+ {
+ .addr = 0x58,
+ .len = ARRAY_SIZE(da9063_irq_clr),
+ .buf = da9063_irq_clr,
+ }, {
+ .addr = 0x68,
+ .len = ARRAY_SIZE(da9210_irq_clr),
+ .buf = da9210_irq_clr,
+ },
+};
static int regulator_quirk_notify(struct notifier_block *nb,
unsigned long action, void *data)
@@ -93,12 +80,15 @@ static int regulator_quirk_notify(struct notifier_block *nb,
client = to_i2c_client(dev);
dev_dbg(dev, "Detected %s\n", client->name);
- if ((client->addr == 0x58 && !strcmp(client->name, "da9063")))
- da9xxx_mask_irqs(client, da9063_mask_regs,
- ARRAY_SIZE(da9063_mask_regs));
- else if (client->addr == 0x68 && !strcmp(client->name, "da9210"))
- da9xxx_mask_irqs(client, da9210_mask_regs,
- ARRAY_SIZE(da9210_mask_regs));
+ if ((client->addr == 0x58 && !strcmp(client->name, "da9063")) ||
+ (client->addr == 0x68 && !strcmp(client->name, "da9210"))) {
+ int ret;
+
+ dev_info(&client->dev, "clearing da9063/da9210 interrupts\n");
+ ret = i2c_transfer(client->adapter, da9xxx_msgs, ARRAY_SIZE(da9xxx_msgs));
+ if (ret != ARRAY_SIZE(da9xxx_msgs))
+ dev_err(&client->dev, "i2c error %d\n", ret);
+ }
mon = ioread32(irqc + IRQC_MONITOR);
if (mon & REGULATOR_IRQ_MASK)
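The rework above batches what used to be one SMBus write per register into a single i2c_transfer() call: each message's buffer begins with the register address and continues with values for the consecutive registers after it. The same pattern in isolation (the addresses, register 0x54, and the adapter/dev variables are stand-ins taken from the diff, not a new API):

    static u8 buf[] = { 0x54, 0xff, 0xff };     /* reg pointer, then values */
    struct i2c_msg msg = {
            .addr  = 0x68,                      /* 7-bit slave address */
            .flags = 0,                         /* plain write */
            .len   = ARRAY_SIZE(buf),
            .buf   = buf,
    };

    /* i2c_transfer() returns the number of messages that completed. */
    if (i2c_transfer(adapter, &msg, 1) != 1)
            dev_err(dev, "i2c write failed\n");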
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6344913f0804..30fe03f95c85 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -137,7 +137,7 @@ void __init init_default_cache_policy(unsigned long pmd)
initial_pmd_value = pmd;
- pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
+ pmd &= PMD_SECT_CACHE_MASK;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
if (cache_policies[i].pmd == pmd) {
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a7123b4e129d..d00d52c9de3e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -16,6 +16,7 @@
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
+#include <asm/memory.h>
#include "proc-macros.S"
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 3d2cef6488ea..f193414d0f6f 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,9 +170,6 @@ static int xen_starting_cpu(unsigned int cpu)
pr_info("Xen: initializing cpu%d\n", cpu);
vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
- /* Direct vCPU id mapping for ARM guests. */
- per_cpu(xen_vcpu_id, cpu) = cpu;
-
info.mfn = virt_to_gfn(vcpup);
info.offset = xen_offset_in_page(vcpup);
@@ -330,6 +327,7 @@ static int __init xen_guest_init(void)
{
struct xen_add_to_physmap xatp;
struct shared_info *shared_info_page = NULL;
+ int cpu;
if (!xen_domain())
return 0;
@@ -380,7 +378,8 @@ static int __init xen_guest_init(void)
return -ENOMEM;
/* Direct vCPU id mapping for ARM guests. */
- per_cpu(xen_vcpu_id, 0) = 0;
+ for_each_possible_cpu(cpu)
+ per_cpu(xen_vcpu_id, cpu) = cpu;
xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 445aa678f914..c2b9bcb0ef61 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -255,10 +255,10 @@
/* Local timer */
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xf01>,
- <1 14 0xf01>,
- <1 11 0xf01>,
- <1 10 0xf01>;
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
};
timer0: timer0@ffc03000 {
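This and the similar timer fixes below all touch the third interrupt specifier cell. In the GIC binding that cell packs a CPU mask into bits [15:8] and the trigger type into bits [3:0], so 0xf01 (CPU mask 0xf, edge rising) becomes 0xf08 (CPU mask 0xf, level low), matching how the architected timer PPIs actually behave; platforms whose GIC takes no CPU mask use the bare type instead (e.g. 4 in the Cavium and LS2080A hunks). The relevant dt-bindings constants:

    /* include/dt-bindings/interrupt-controller/irq.h */
    #define IRQ_TYPE_EDGE_RISING        1
    #define IRQ_TYPE_LEVEL_HIGH         4
    #define IRQ_TYPE_LEVEL_LOW          8

    /* include/dt-bindings/interrupt-controller/arm-gic.h */
    #define GIC_CPU_MASK_RAW(x)         ((x) << 8)
    #define GIC_CPU_MASK_SIMPLE(num)    GIC_CPU_MASK_RAW((1 << (num)) - 1)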
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index e502c24b0ac7..bf6c8d051002 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -102,13 +102,13 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13
- (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+ (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14
- (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+ (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
- (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>,
+ (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10
- (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_EDGE_RISING)>;
+ (GIC_CPU_MASK_RAW(0xff) | IRQ_TYPE_LEVEL_LOW)>;
};
xtal: xtal-clk {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index d5c3435324e8..31ea70a5a3ff 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -110,10 +110,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 0 0xff01>, /* Secure Phys IRQ */
- <1 13 0xff01>, /* Non-secure Phys IRQ */
- <1 14 0xff01>, /* Virt IRQ */
- <1 15 0xff01>; /* Hyp IRQ */
+ interrupts = <1 0 0xff08>, /* Secure Phys IRQ */
+ <1 13 0xff08>, /* Non-secure Phys IRQ */
+ <1 14 0xff08>, /* Virt IRQ */
+ <1 15 0xff08>; /* Hyp IRQ */
clock-frequency = <50000000>;
};
diff --git a/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
new file mode 120000
index 000000000000..3937b77cb310
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm2835-rpi.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm2835-rpi.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
index 6f47dd2bb1db..7841b724e340 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b.dts
@@ -1,7 +1,7 @@
/dts-v1/;
#include "bcm2837.dtsi"
-#include "../../../../arm/boot/dts/bcm2835-rpi.dtsi"
-#include "../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi"
+#include "bcm2835-rpi.dtsi"
+#include "bcm283x-rpi-smsc9514.dtsi"
/ {
compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
diff --git a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
index f2a31d06845d..8216bbb29fe0 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2837.dtsi
@@ -1,4 +1,4 @@
-#include "../../../../arm/boot/dts/bcm283x.dtsi"
+#include "bcm283x.dtsi"
/ {
compatible = "brcm,bcm2836";
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
new file mode 120000
index 000000000000..dca7c057d5a5
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm283x-rpi-smsc9514.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x-rpi-smsc9514.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/bcm283x.dtsi b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
new file mode 120000
index 000000000000..5f54e4cab99b
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm283x.dtsi
@@ -0,0 +1 @@
+../../../../arm/boot/dts/bcm283x.dtsi
\ No newline at end of file
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
index f53b0955bfd3..d4a12fad8afd 100644
--- a/arch/arm64/boot/dts/broadcom/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -88,13 +88,13 @@
timer {
compatible = "arm,armv8-timer";
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
- IRQ_TYPE_EDGE_RISING)>,
+ IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
- IRQ_TYPE_EDGE_RISING)>,
+ IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
- IRQ_TYPE_EDGE_RISING)>,
+ IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
- IRQ_TYPE_EDGE_RISING)>;
+ IRQ_TYPE_LEVEL_LOW)>;
};
pmu {
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index 2eb9b225f0bc..04dc8a8d1539 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -354,10 +354,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xff01>,
- <1 14 0xff01>,
- <1 11 0xff01>,
- <1 10 0xff01>;
+ interrupts = <1 13 4>,
+ <1 14 4>,
+ <1 11 4>,
+ <1 10 4>;
};
pmu {
diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
index ca663dfe5189..162831546e18 100644
--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
@@ -473,10 +473,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xff01>,
- <1 14 0xff01>,
- <1 11 0xff01>,
- <1 10 0xff01>;
+ interrupts = <1 13 0xff08>,
+ <1 14 0xff08>,
+ <1 11 0xff08>,
+ <1 10 0xff08>;
};
pmu_system_controller: system-controller@105c0000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index e669fbd7f9c3..a67e210e2019 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -119,10 +119,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0x1>, /* Physical Secure PPI */
- <1 14 0x1>, /* Physical Non-Secure PPI */
- <1 11 0x1>, /* Virtual PPI */
- <1 10 0x1>; /* Hypervisor PPI */
+ interrupts = <1 13 0xf08>, /* Physical Secure PPI */
+ <1 14 0xf08>, /* Physical Non-Secure PPI */
+ <1 11 0xf08>, /* Virtual PPI */
+ <1 10 0xf08>; /* Hypervisor PPI */
};
pmu {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index 21023a388c29..e3b6034ea5d9 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -191,10 +191,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0x8>, /* Physical Secure PPI, active-low */
- <1 14 0x8>, /* Physical Non-Secure PPI, active-low */
- <1 11 0x8>, /* Virtual PPI, active-low */
- <1 10 0x8>; /* Hypervisor PPI, active-low */
+ interrupts = <1 13 4>, /* Physical Secure PPI, active-low */
+ <1 14 4>, /* Physical Non-Secure PPI, active-low */
+ <1 11 4>, /* Virtual PPI, active-low */
+ <1 10 4>; /* Hypervisor PPI, active-low */
};
pmu {
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index eab1a42fb934..c2a6745f168c 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -122,10 +122,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
odmi: odmi@300000 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index d02a900378e1..4f44d1191bfd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -270,6 +270,8 @@
#io-channel-cells = <1>;
clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
clock-names = "saradc", "apb_pclk";
+ resets = <&cru SRST_SARADC>;
+ reset-names = "saradc-apb";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index c223915f0907..d73bdc8c9115 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -129,10 +129,10 @@
timer {
compatible = "arm,armv8-timer";
- interrupts = <1 13 0xf01>,
- <1 14 0xf01>,
- <1 11 0xf01>,
- <1 10 0xf01>;
+ interrupts = <1 13 4>,
+ <1 14 4>,
+ <1 11 4>,
+ <1 10 4>;
};
soc {
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index e595f22e7e4b..3e2e51fbd2bc 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -65,10 +65,10 @@
timer {
compatible = "arm,armv8-timer";
interrupt-parent = <&gic>;
- interrupts = <1 13 0xf01>,
- <1 14 0xf01>,
- <1 11 0xf01>,
- <1 10 0xf01>;
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
};
amba_apu {
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 5c888049d061..6b2aa0fd6cd0 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -216,7 +216,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
err = blkcipher_walk_done(desc, &walk,
walk.nbytes % AES_BLOCK_SIZE);
}
- if (nbytes) {
+ if (walk.nbytes % AES_BLOCK_SIZE) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 __aligned(8) tail[AES_BLOCK_SIZE];
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 0a456bef8c79..2fee2f59288c 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -199,19 +199,19 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
#define _percpu_read(pcp) \
({ \
typeof(pcp) __retval; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
sizeof(pcp)); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
__retval; \
})
#define _percpu_write(pcp, val) \
do { \
- preempt_disable(); \
+ preempt_disable_notrace(); \
__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
sizeof(pcp)); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
} while(0) \
#define _pcp_protect(operation, pcp, val) \
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index e875a5a551d7..89206b568cd4 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -363,4 +363,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
+/*
+ * Accesses appearing in program order before a spin_lock() operation
+ * can be reordered with accesses inside the critical section, by virtue
+ * of arch_spin_lock being constructed using acquire semantics.
+ *
+ * In cases where this is problematic (e.g. try_to_wake_up), an
+ * smp_mb__before_spinlock() can restore the required ordering.
+ */
+#define smp_mb__before_spinlock() smp_mb()
+
#endif /* __ASM_SPINLOCK_H */
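A usage sketch for the barrier documented above (the lock and flag names are hypothetical): the barrier sits between the earlier store and the acquire, so the store cannot be reordered past it into or beyond the critical section.

    WRITE_ONCE(cond, 1);                /* store made before the lock ... */
    smp_mb__before_spinlock();          /* ... must stay ordered before it */
    spin_lock(&lock);
    if (READ_ONCE(waiter_present))      /* loads here now observe the store */
            do_wakeup();
    spin_unlock(&lock);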
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5bb61de23201..9d37e967fa19 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
msr tcr_el1, x8
msr vbar_el1, x9
+
+ /*
+ * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+ * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+ * exception. Mask them until local_dbg_restore() in cpu_suspend()
+ * resets them.
+ */
+ disable_dbg
msr mdscr_el1, x10
+
msr sctlr_el1, x12
/*
* Restore oslsr_el1 by writing oslar_el1
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
index 68cf638faf48..b1ec1fa06463 100644
--- a/arch/avr32/include/asm/uaccess.h
+++ b/arch/avr32/include/asm/uaccess.h
@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
extern __kernel_size_t copy_to_user(void __user *to, const void *from,
__kernel_size_t n);
-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
+extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
__kernel_size_t n);
static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
{
return __copy_user(to, (const void __force *)from, n);
}
+static inline __kernel_size_t copy_from_user(void *to,
+ const void __user *from,
+ __kernel_size_t n)
+{
+ size_t res = ___copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
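This avr32 change is the first of a long run of identical hardening fixes in this series (blackfin, cris, ia64, metag, microblaze, mips, mn10300, nios2, openrisc, parisc, score and more below): whenever copy_from_user() copies fewer bytes than requested, the uncopied tail of the kernel buffer is zeroed, so callers that ignore the return value can never read stale kernel memory. The common shape, sketched with arch_raw_copy() standing in for each architecture's primitive:

    static inline unsigned long copy_from_user(void *to,
                    const void __user *from, unsigned long n)
    {
            unsigned long res = n;                  /* assume nothing copied */

            if (likely(access_ok(VERIFY_READ, from, n)))
                    res = arch_raw_copy(to, from, n); /* returns bytes NOT copied */
            if (unlikely(res))
                    memset(to + (n - res), 0, res); /* zero the missed tail */
            return res;
    }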
diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
index d93ead02daed..7c6cf14f0985 100644
--- a/arch/avr32/kernel/avr32_ksyms.c
+++ b/arch/avr32/kernel/avr32_ksyms.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
/*
* Userspace access stuff.
*/
-EXPORT_SYMBOL(copy_from_user);
+EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);
diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S
index ea59c04b07de..075373471da1 100644
--- a/arch/avr32/lib/copy_user.S
+++ b/arch/avr32/lib/copy_user.S
@@ -23,13 +23,13 @@
*/
.text
.align 1
- .global copy_from_user
- .type copy_from_user, @function
-copy_from_user:
+ .global ___copy_from_user
+ .type ___copy_from_user, @function
+___copy_from_user:
branch_if_kernel r8, __copy_user
ret_if_privileged r8, r11, r10, r10
rjmp __copy_user
- .size copy_from_user, . - copy_from_user
+ .size ___copy_from_user, . - ___copy_from_user
.global copy_to_user
.type copy_to_user, @function
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 12f5d6851bbc..0a2a70096d8b 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -171,11 +171,12 @@ static inline int bad_user_access_length(void)
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
+ if (likely(access_ok(VERIFY_READ, from, n))) {
memcpy(to, (const void __force *)from, n);
- else
- return n;
- return 0;
+ return 0;
+ }
+ memset(to, 0, n);
+ return n;
}
static inline unsigned long __must_check
diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
index e3530d0f13ee..56c7d5750abd 100644
--- a/arch/cris/include/asm/uaccess.h
+++ b/arch/cris/include/asm/uaccess.h
@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
-static inline unsigned long
-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_user(to, from, n);
- return n;
-}
-
-static inline unsigned long
-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (access_ok(VERIFY_READ, from, n))
- return __copy_user_zeroing(to, from, n);
- return n;
-}
-
-static inline unsigned long
-__generic_clear_user(void __user *to, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- return __do_clear_user(to, n);
- return n;
-}
-
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
else if (n == 24)
__asm_copy_from_user_24(to, from, ret);
else
- ret = __generic_copy_from_user(to, from, n);
+ ret = __copy_user_zeroing(to, from, n);
return ret;
}
@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
else if (n == 24)
__asm_copy_to_user_24(to, from, ret);
else
- ret = __generic_copy_to_user(to, from, n);
+ ret = __copy_user(to, from, n);
return ret;
}
@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
else if (n == 24)
__asm_clear_24(to, ret);
else
- ret = __generic_clear_user(to, n);
+ ret = __do_clear_user(to, n);
return ret;
}
-#define clear_user(to, n) \
- (__builtin_constant_p(n) ? \
- __constant_clear_user(to, n) : \
- __generic_clear_user(to, n))
+static inline size_t clear_user(void __user *to, size_t n)
+{
+ if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+ return n;
+ if (__builtin_constant_p(n))
+ return __constant_clear_user(to, n);
+ else
+ return __do_clear_user(to, n);
+}
-#define copy_from_user(to, from, n) \
- (__builtin_constant_p(n) ? \
- __constant_copy_from_user(to, from, n) : \
- __generic_copy_from_user(to, from, n))
+static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
+{
+ if (unlikely(!access_ok(VERIFY_READ, from, n))) {
+ memset(to, 0, n);
+ return n;
+ }
+ if (__builtin_constant_p(n))
+ return __constant_copy_from_user(to, from, n);
+ else
+ return __copy_user_zeroing(to, from, n);
+}
-#define copy_to_user(to, from, n) \
- (__builtin_constant_p(n) ? \
- __constant_copy_to_user(to, from, n) : \
- __generic_copy_to_user(to, from, n))
+static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
+{
+ if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+ return n;
+ if (__builtin_constant_p(n))
+ return __constant_copy_to_user(to, from, n);
+ else
+ return __copy_user(to, from, n);
+}
/* We let the __ versions of copy_from/to_user inline, because they're often
* used in fast paths and have only a small space overhead.
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 3ac9a59d65d4..87d9e34c5df8 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -263,19 +263,25 @@ do { \
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);
-#define clear_user(dst,count) __memset_user(____force(dst), (count))
+#define __clear_user(dst,count) __memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n))
#else
-#define clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
+#define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0)
#endif
-#define __clear_user clear_user
+static inline unsigned long __must_check
+clear_user(void __user *to, unsigned long n)
+{
+ if (likely(__access_ok(to, n)))
+ n = __clear_user(to, n);
+ return n;
+}
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index f000a382bc7f..f61cfb28e9f2 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -103,7 +103,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
{
long res = __strnlen_user(src, n);
- /* return from strnlen can't be zero -- that would be rubbish. */
+ if (unlikely(!res))
+ return -EFAULT;
if (res > n) {
copy_from_user(dst, src, n);
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 465c70982f40..bfe13196f770 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -241,8 +241,7 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
- if (!__builtin_constant_p(count))
- check_object_size(from, count, true);
+ check_object_size(from, count, true);
return __copy_user(to, (__force void __user *) from, count);
}
@@ -250,8 +249,7 @@ __copy_to_user (void __user *to, const void *from, unsigned long count)
static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
- if (!__builtin_constant_p(count))
- check_object_size(to, count, false);
+ check_object_size(to, count, false);
return __copy_user((__force void __user *) to, from, count);
}
@@ -265,27 +263,22 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
long __cu_len = (n); \
\
if (__access_ok(__cu_to, __cu_len, get_fs())) { \
- if (!__builtin_constant_p(n)) \
- check_object_size(__cu_from, __cu_len, true); \
+ check_object_size(__cu_from, __cu_len, true); \
__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
} \
__cu_len; \
})
-#define copy_from_user(to, from, n) \
-({ \
- void *__cu_to = (to); \
- const void __user *__cu_from = (from); \
- long __cu_len = (n); \
- \
- __chk_user_ptr(__cu_from); \
- if (__access_ok(__cu_from, __cu_len, get_fs())) { \
- if (!__builtin_constant_p(n)) \
- check_object_size(__cu_to, __cu_len, false); \
- __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
- } \
- __cu_len; \
-})
+static inline unsigned long
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ check_object_size(to, n, false);
+ if (likely(__access_ok(from, n, get_fs())))
+ n = __copy_user((__force void __user *) to, from, n);
+ else
+ memset(to, 0, n);
+ return n;
+}
#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
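The __builtin_constant_p() guards removed here (and in the powerpc hunks below) are redundant because check_object_size() already contains the same test; constant sizes are validated at compile time instead. Its definition in include/linux/thread_info.h is essentially (modulo the empty stub when CONFIG_HARDENED_USERCOPY is off):

    static __always_inline void check_object_size(const void *ptr,
                                                  unsigned long n, bool to_user)
    {
            if (!__builtin_constant_p(n))
                    __check_object_size(ptr, n, to_user);
    }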
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index cac7014daef3..6f8982157a75 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -219,7 +219,7 @@ extern int fixup_exception(struct pt_regs *regs);
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err = 0; \
- unsigned long __gu_val; \
+ unsigned long __gu_val = 0; \
might_fault(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 8282cbce7e39..273e61225c27 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
+ if (likely(access_ok(VERIFY_READ, from, n)))
return __copy_user_zeroing(to, from, n);
+ memset(to, 0, n);
return n;
}
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 331b0d35f89c..826676778094 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -227,7 +227,7 @@ extern long __user_bad(void);
#define __get_user(x, ptr) \
({ \
- unsigned long __gu_val; \
+ unsigned long __gu_val = 0; \
/*unsigned long __gu_ptr = (unsigned long)(ptr);*/ \
long __gu_err; \
switch (sizeof(*(ptr))) { \
@@ -373,10 +373,13 @@ extern long __user_bad(void);
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user(to, from, n);
- return n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
#define __copy_to_user(to, from, n) \
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 11b965f98d95..21a2aaba20d5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
+#include <linux/string.h>
#include <asm/asm-eva.h>
/*
@@ -1170,6 +1171,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
__cu_len = __invoke_copy_from_user(__cu_to, \
__cu_from, \
__cu_len); \
+ } else { \
+ memset(__cu_to, 0, __cu_len); \
} \
} \
__cu_len; \
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 20f7bf6de384..d012e877a95a 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -166,6 +166,7 @@ struct __large_struct { unsigned long buf[100]; };
"2:\n" \
" .section .fixup,\"ax\"\n" \
"3:\n\t" \
+ " mov 0,%1\n" \
" mov %3,%0\n" \
" jmp 2b\n" \
" .previous\n" \
diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
index 7826e6c364e7..ce8899e5e171 100644
--- a/arch/mn10300/lib/usercopy.c
+++ b/arch/mn10300/lib/usercopy.c
@@ -9,7 +9,7 @@
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
__copy_user_zeroing(to, from, n);
+ else
+ memset(to, 0, n);
return n;
}
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index caa51ff85a3c..0ab82324c817 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
static inline long copy_from_user(void *to, const void __user *from,
unsigned long n)
{
- if (!access_ok(VERIFY_READ, from, n))
- return n;
- return __copy_from_user(to, from, n);
+ unsigned long res = n;
+ if (access_ok(VERIFY_READ, from, n))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline long copy_to_user(void __user *to, const void *from,
@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
#define __get_user_unknown(val, size, ptr, err) do { \
err = 0; \
- if (copy_from_user(&(val), ptr, size)) { \
+ if (__copy_from_user(&(val), ptr, size)) { \
err = -EFAULT; \
} \
} while (0)
@@ -166,7 +169,7 @@ do { \
({ \
long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
- unsigned long __gu_val; \
+ unsigned long __gu_val = 0; \
__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
(x) = (__force __typeof__(x))__gu_val; \
__gu_err; \
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index a6bd07ca3d6c..5cc6b4f1b795 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
- unsigned long over;
-
- if (access_ok(VERIFY_READ, from, n))
- return __copy_tofrom_user(to, from, n);
- if ((unsigned long)from < TASK_SIZE) {
- over = (unsigned long)from + n - TASK_SIZE;
- return __copy_tofrom_user(to, from, n - over) + over;
- }
- return n;
+ unsigned long res = n;
+
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_tofrom_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
- unsigned long over;
-
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_tofrom_user(to, from, n);
- if ((unsigned long)to < TASK_SIZE) {
- over = (unsigned long)to + n - TASK_SIZE;
- return __copy_tofrom_user(to, from, n - over) + over;
- }
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ n = __copy_tofrom_user(to, from, n);
return n;
}
@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
static inline __must_check unsigned long
clear_user(void *addr, unsigned long size)
{
-
- if (access_ok(VERIFY_WRITE, addr, size))
- return __clear_user(addr, size);
- if ((unsigned long)addr < TASK_SIZE) {
- unsigned long over = (unsigned long)addr + size - TASK_SIZE;
- return __clear_user(addr, size - over) + over;
- }
+ if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ size = __clear_user(addr, size);
return size;
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index cd8778103165..af12c2db9bb8 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -1,6 +1,5 @@
config PARISC
def_bool y
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE
select HAVE_OPROFILE
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 1a8f6f95689e..f6a4c016304b 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_RCU_DELAY=y
CONFIG_DEBUG_BLOCK_EXT_DEVT=y
CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_KEYS=y
# CONFIG_CRYPTO_HW is not set
CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
index 7e0792658952..c564e6e1fa23 100644
--- a/arch/parisc/configs/generic-64bit_defconfig
+++ b/arch/parisc/configs/generic-64bit_defconfig
@@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 0f59fd9ca205..482847865dac 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -10,6 +10,7 @@
#include <asm-generic/uaccess-unaligned.h>
#include <linux/bug.h>
+#include <linux/string.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -208,26 +209,30 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
- __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
- __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
unsigned long n)
{
int sz = __compiletime_object_size(to);
- int ret = -EFAULT;
+ unsigned long ret = n;
- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+ if (likely(sz == -1 || sz >= n))
ret = __copy_from_user(to, from, n);
- else
- copy_from_user_overflow();
+ else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+ if (unlikely(ret))
+ memset(to + (n - ret), 0, ret);
return ret;
}
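The parisc helper above (and the s390 one below) splits the usercopy size check three ways, which is worth spelling out: copy when the object size is unknowable (-1) or sufficient; WARN at runtime when the length is not a compile-time constant; and break the build, via the __compiletime_error() attribute on __bad_copy_user(), when a constant length provably overflows. Condensed:

    int sz = __compiletime_object_size(to);     /* -1 if not knowable */

    if (likely(sz == -1 || sz >= n))
            ret = __copy_from_user(to, from, n);    /* size is fine */
    else if (!__builtin_constant_p(n))
            copy_user_overflow(sz, n);              /* runtime WARN */
    else
            __bad_copy_user();                      /* compile-time error */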
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 2ef55f8968a2..b312b152461b 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -15,7 +15,7 @@ static inline bool early_cpu_has_feature(unsigned long feature)
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>
-#define NUM_CPU_FTR_KEYS 64
+#define NUM_CPU_FTR_KEYS BITS_PER_LONG
extern struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS];
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index c1dc6c14deb8..c266227fdd5b 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -308,40 +308,21 @@ extern unsigned long __copy_tofrom_user(void __user *to,
static inline unsigned long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
- unsigned long over;
-
- if (access_ok(VERIFY_READ, from, n)) {
- if (!__builtin_constant_p(n))
- check_object_size(to, n, false);
+ if (likely(access_ok(VERIFY_READ, from, n))) {
+ check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
- if ((unsigned long)from < TASK_SIZE) {
- over = (unsigned long)from + n - TASK_SIZE;
- if (!__builtin_constant_p(n - over))
- check_object_size(to, n - over, false);
- return __copy_tofrom_user((__force void __user *)to, from,
- n - over) + over;
- }
+ memset(to, 0, n);
return n;
}
static inline unsigned long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
- unsigned long over;
-
if (access_ok(VERIFY_WRITE, to, n)) {
- if (!__builtin_constant_p(n))
- check_object_size(from, n, true);
+ check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force void __user *)from, n);
}
- if ((unsigned long)to < TASK_SIZE) {
- over = (unsigned long)to + n - TASK_SIZE;
- if (!__builtin_constant_p(n))
- check_object_size(from, n - over, true);
- return __copy_tofrom_user(to, (__force void __user *)from,
- n - over) + over;
- }
return n;
}
@@ -383,8 +364,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
return 0;
}
- if (!__builtin_constant_p(n))
- check_object_size(to, n, false);
+ check_object_size(to, n, false);
return __copy_tofrom_user((__force void __user *)to, from, n);
}
@@ -412,8 +392,8 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
if (ret == 0)
return 0;
}
- if (!__builtin_constant_p(n))
- check_object_size(from, n, true);
+
+ check_object_size(from, n, true);
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
@@ -439,10 +419,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
might_fault();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
return __clear_user(addr, size);
- if ((unsigned long)addr < TASK_SIZE) {
- unsigned long over = (unsigned long)addr + size - TASK_SIZE;
- return __clear_user(addr, size - over) + over;
- }
return size;
}
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 2265c6398a17..bd739fed26e3 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -411,7 +411,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
*
* r13 - PACA
* cr3 - gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
*/
_GLOBAL(pnv_wakeup_tb_loss)
ld r1,PACAR1(r13)
@@ -453,7 +453,7 @@ lwarx_loop2:
* At this stage
* cr2 - eq if first thread to wakeup in core
* cr3- gt if waking up with partial/complete hypervisor state loss
- * cr4 - eq if waking up from complete hypervisor state loss.
+ * cr4 - gt or eq if waking up from complete hypervisor state loss.
*/
ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
@@ -481,7 +481,7 @@ first_thread_in_subcore:
* If waking up from sleep, subcore state is not lost. Hence
* skip subcore state restore
*/
- bne cr4,subcore_state_restored
+ blt cr4,subcore_state_restored
/* Restore per-subcore state */
ld r4,_SDR1(r1)
@@ -526,7 +526,7 @@ timebase_resync:
* If waking up from sleep, per core state is not lost, skip to
* clear_lock.
*/
- bne cr4,clear_lock
+ blt cr4,clear_lock
/*
* First thread in the core to wake up and its waking up with
@@ -557,7 +557,7 @@ common_exit:
* If waking up from sleep, hypervisor state is not lost. Hence
* skip hypervisor state restore.
*/
- bne cr4,hypervisor_state_restored
+ blt cr4,hypervisor_state_restored
/* Waking up from winkle */
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index 0a57fe6d49cc..aa8214f30c92 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -127,18 +127,19 @@ _GLOBAL(csum_partial_copy_generic)
stw r7,12(r1)
stw r8,8(r1)
- rlwinm r0,r4,3,0x8
- rlwnm r6,r6,r0,0,31 /* odd destination address: rotate one byte */
- cmplwi cr7,r0,0 /* is destination address even ? */
addic r12,r6,0
addi r6,r4,-4
neg r0,r4
addi r4,r3,-4
andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
+ crset 4*cr7+eq
beq 58f
cmplw 0,r5,r0 /* is this more than total to do? */
blt 63f /* if not much to do */
+ rlwinm r7,r6,3,0x8
+ rlwnm r12,r12,r7,0,31 /* odd destination address: rotate one byte */
+ cmplwi cr7,r7,0 /* is destination address even ? */
andi. r8,r0,3 /* get it word-aligned first */
mtctr r8
beq+ 61f
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index dfdb90cb4403..9f1983404e1a 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
-0:
+0: /*
+ * For userspace addresses, make sure this is region 0.
+ */
+ cmpdi r9, 0
+ bne 8f
+
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 13218263e66e..bc0c91e84ca0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -162,11 +162,12 @@ static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
struct pnv_phb *phb = pe->phb;
+ unsigned int pe_num = pe->pe_number;
WARN_ON(pe->pdev);
memset(pe, 0, sizeof(struct pnv_ioda_pe));
- clear_bit(pe->pe_number, phb->ioda.pe_alloc);
+ clear_bit(pe_num, phb->ioda.pe_alloc);
}
/* The default M64 BAR is shared by all PEs */
@@ -2216,7 +2217,7 @@ static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
pnv_pci_link_table_and_group(phb->hose->node, num,
tbl, &pe->table_group);
- pnv_pci_phb3_tce_invalidate_pe(pe);
+ pnv_pci_ioda2_tce_invalidate_pe(pe);
return 0;
}
@@ -2354,7 +2355,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
if (ret)
pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
else
- pnv_pci_phb3_tce_invalidate_pe(pe);
+ pnv_pci_ioda2_tce_invalidate_pe(pe);
pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
@@ -3402,12 +3403,6 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
struct pnv_phb *phb = pe->phb;
struct pnv_ioda_pe *slave, *tmp;
- /* Release slave PEs in compound PE */
- if (pe->flags & PNV_IODA_PE_MASTER) {
- list_for_each_entry_safe(slave, tmp, &pe->slaves, list)
- pnv_ioda_release_pe(slave);
- }
-
list_del(&pe->list);
switch (phb->type) {
case PNV_PHB_IODA1:
@@ -3422,7 +3417,26 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
pnv_ioda_release_pe_seg(pe);
pnv_ioda_deconfigure_pe(pe->phb, pe);
- pnv_ioda_free_pe(pe);
+
+ /* Release slave PEs in the compound PE */
+ if (pe->flags & PNV_IODA_PE_MASTER) {
+ list_for_each_entry_safe(slave, tmp, &pe->slaves, list) {
+ list_del(&slave->list);
+ pnv_ioda_free_pe(slave);
+ }
+ }
+
+ /*
+ * The PE for the root bus can be removed by hotplug during EEH
+ * recovery from a fenced PHB error. Mark that PE dead so it can be
+ * populated again in the PCI hot-add path, but don't destroy it:
+ * it is a globally reserved resource.
+ */
+ if (phb->ioda.root_pe_populated &&
+ phb->ioda.root_pe_idx == pe->pe_number)
+ phb->ioda.root_pe_populated = false;
+ else
+ pnv_ioda_free_pe(pe);
}
static void pnv_pci_release_device(struct pci_dev *pdev)
@@ -3438,7 +3452,17 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
return;
+ /*
+ * PCI hotplug can happen as part of EEH error recovery, in which
+ * case the @pdn isn't removed and re-added, so reset the PE number
+ * in @pdn to an invalid one here. Otherwise the PE's device count
+ * is decremented when devices are removed but never incremented
+ * when they are added back, leaving the count unbalanced and
+ * eventually breaking the normal PCI hotplug path.
+ */
pe = &phb->ioda.pe_array[pdn->pe_number];
+ pdn->pe_number = IODA_INVALID_PE;
+
WARN_ON(--pe->device_count < 0);
if (pe->device_count == 0)
pnv_ioda_release_pe(pe);
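Of the pci-ioda.c hunks, the pnv_ioda_free_pe() fix at the top is the classic use-after-clear shape, reduced to its essence:

    /* Before: the field is read after memset() already zeroed it,
     * so the wrong bit (bit 0) was cleared in the allocation map. */
    memset(pe, 0, sizeof(*pe));
    clear_bit(pe->pe_number, phb->ioda.pe_alloc);

    /* After: latch the value, then wipe the structure. */
    unsigned int pe_num = pe->pe_number;

    memset(pe, 0, sizeof(*pe));
    clear_bit(pe_num, phb->ioda.pe_alloc);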
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 4ffcaa6f8670..a39d20e8623d 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -41,7 +41,6 @@
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_pci.h>
-#include <linux/kexec.h>
#include <asm/mmu.h>
#include <asm/processor.h>
@@ -66,6 +65,7 @@
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
#include "pseries.h"
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 57d72f10a97f..9114243fa1b5 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -23,10 +23,10 @@
static void icp_opal_teardown_cpu(void)
{
- int cpu = smp_processor_id();
+ int hw_cpu = hard_smp_processor_id();
/* Clear any pending IPI */
- opal_int_set_mfrr(cpu, 0xff);
+ opal_int_set_mfrr(hw_cpu, 0xff);
}
static void icp_opal_flush_ipi(void)
@@ -101,14 +101,16 @@ static void icp_opal_eoi(struct irq_data *d)
static void icp_opal_cause_ipi(int cpu, unsigned long data)
{
- opal_int_set_mfrr(cpu, IPI_PRIORITY);
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+
+ opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
- int cpu = smp_processor_id();
+ int hw_cpu = hard_smp_processor_id();
- opal_int_set_mfrr(cpu, 0xff);
+ opal_int_set_mfrr(hw_cpu, 0xff);
return smp_ipi_demux();
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index e751fe25d6ab..c109f073d454 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -68,7 +68,6 @@ config DEBUG_RODATA
config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 26e0c7f08814..412b1bd21029 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_SCHED_TRACER=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 24879dab47bc..bec279eb4b93 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_KPROBE_EVENT is not set
CONFIG_TRACE_ENUM_MAP_FILE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index a5c1e5f2a0ca..1751446a5bbb 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 73610f2e3b4f..2d40ef0a6295 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 9b49cf1daa8f..52d7c8709279 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -266,28 +266,28 @@ int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
- unsigned char __x; \
+ unsigned char __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
- unsigned short __x; \
+ unsigned short __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
- unsigned int __x; \
+ unsigned int __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
- unsigned long long __x; \
+ unsigned long long __x = 0; \
__gu_err = __get_user_fn(&__x, ptr, \
sizeof(*(ptr))); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
@@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
@@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_to_user(to, from, n);
}
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
@@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
might_fault();
if (unlikely(sz != -1 && sz < n)) {
- copy_from_user_overflow();
+ if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
return n;
}
return __copy_from_user(to, from, n);
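The hunk above is the s390 instance of a tree-wide change: when the compiler can prove the destination too small and the length is a compile-time constant, the copy becomes a hard build failure via __bad_copy_user(); only non-constant lengths fall back to a runtime warning. Below is a minimal user-space sketch of that split using the GCC builtins the kernel wraps (__builtin_object_size, __builtin_constant_p, the error attribute); the helper names are illustrative, and it must be built with -O2 so the dead branch folds away.

#include <stdio.h>
#include <string.h>

/* fires at build time if a call survives optimization (GCC/clang) */
extern void __attribute__((error("usercopy buffer size is too small")))
bad_copy(void);

static void copy_overflow(int size, unsigned long count)
{
	fprintf(stderr, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define copy_checked(dst, src, n) do {					\
	size_t __sz = __builtin_object_size(dst, 0);			\
	size_t __n = (n);						\
	if (__sz != (size_t)-1 && __sz < __n) {				\
		if (__builtin_constant_p(n))				\
			bad_copy();	/* provably wrong: build error */ \
		else							\
			copy_overflow((int)__sz, __n); /* runtime warn */ \
	} else {							\
		memcpy(dst, src, __n);					\
	}								\
} while (0)

int main(void)
{
	char dst[8], src[16] = "0123456789abcde";
	size_t n = 12;			/* not a compile-time constant */

	copy_checked(dst, src, n);	/* warns at run time, copies nothing */
	/* copy_checked(dst, src, 16);	   would fail to compile */
	return 0;
}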
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f142215ed30d..607ec91966c7 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2231,9 +2231,10 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -EINVAL;
current->thread.fpu.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
- convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+ convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
+ (freg_t *) fpu->fprs);
else
- memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
+ memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
return 0;
}
@@ -2242,9 +2243,10 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
/* make sure we have the latest values */
save_fpu_regs();
if (MACHINE_HAS_VX)
- convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+ convert_vx_to_fp((freg_t *) fpu->fprs,
+ (__vector128 *) vcpu->run->s.regs.vrs);
else
- memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+ memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
fpu->fpc = current->thread.fpu.fpc;
return 0;
}
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index c106488b4137..d8673e243f13 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -584,7 +584,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* Validity 0x0044 will be checked by SIE */
if (rc)
goto unpin;
- scb_s->gvrd = hpa;
+ scb_s->riccbd = hpa;
}
return 0;
unpin:
diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
index 20a3591225cc..01aec8ccde83 100644
--- a/arch/score/include/asm/uaccess.h
+++ b/arch/score/include/asm/uaccess.h
@@ -163,7 +163,7 @@ do { \
__get_user_asm(val, "lw", ptr); \
break; \
case 8: \
- if ((copy_from_user((void *)&val, ptr, 8)) == 0) \
+ if (__copy_from_user((void *)&val, ptr, 8) == 0) \
__gu_err = 0; \
else \
__gu_err = -EFAULT; \
@@ -188,6 +188,8 @@ do { \
\
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
+ else \
+ (x) = 0; \
\
__gu_err; \
})
@@ -201,6 +203,7 @@ do { \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
+ "li %1, 0\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
@@ -298,35 +301,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long len)
{
- unsigned long over;
+ unsigned long res = len;
- if (access_ok(VERIFY_READ, from, len))
- return __copy_tofrom_user(to, from, len);
+ if (likely(access_ok(VERIFY_READ, from, len)))
+ res = __copy_tofrom_user(to, from, len);
- if ((unsigned long)from < TASK_SIZE) {
- over = (unsigned long)from + len - TASK_SIZE;
- return __copy_tofrom_user(to, from, len - over) + over;
- }
- return len;
+ if (unlikely(res))
+ memset(to + (len - res), 0, res);
+
+ return res;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long len)
{
- unsigned long over;
-
- if (access_ok(VERIFY_WRITE, to, len))
- return __copy_tofrom_user(to, from, len);
+ if (likely(access_ok(VERIFY_WRITE, to, len)))
+ len = __copy_tofrom_user(to, from, len);
- if ((unsigned long)to < TASK_SIZE) {
- over = (unsigned long)to + len - TASK_SIZE;
- return __copy_tofrom_user(to, from, len - over) + over;
- }
return len;
}
-#define __copy_from_user(to, from, len) \
- __copy_tofrom_user((to), (from), (len))
+static inline unsigned long
+__copy_from_user(void *to, const void *from, unsigned long len)
+{
+ unsigned long left = __copy_tofrom_user(to, from, len);
+ if (unlikely(left))
+ memset(to + (len - left), 0, left);
+ return left;
+}
#define __copy_to_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
@@ -340,17 +342,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
static inline unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long len)
{
- return __copy_from_user(to, from, len);
+ return __copy_tofrom_user(to, from, len);
}
-#define __copy_in_user(to, from, len) __copy_from_user(to, from, len)
+#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len)
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long len)
{
if (access_ok(VERIFY_READ, from, len) &&
access_ok(VERIFY_WRITE, to, len))
- return copy_from_user(to, from, len);
+ return __copy_tofrom_user(to, from, len);
}
/*
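The score rework above (and the sh/sparc hunks below) all converge on one hardening rule: if a copy from user space faults partway through, the uncopied tail of the kernel buffer must be zeroed so the caller can never read stale kernel memory through it. A self-contained sketch of that contract follows; raw_copy() is an illustrative stand-in that returns the number of bytes it failed to copy, here simulating a fault at the halfway point.

#include <assert.h>
#include <string.h>

/* Simulated arch primitive: "faults" after copying half the bytes and
 * reports the remainder, like __copy_tofrom_user() does on score. */
static unsigned long raw_copy(void *to, const void *from, unsigned long len)
{
	unsigned long done = len / 2;

	memcpy(to, from, done);
	return len - done;		/* bytes NOT copied */
}

static unsigned long
copy_from_user_sim(void *to, const void *from, unsigned long len)
{
	unsigned long res = raw_copy(to, from, len);

	if (res)			/* partial copy: zero the tail */
		memset((char *)to + (len - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];

	memset(dst, 'X', sizeof(dst));	/* pretend-stale kernel data */
	assert(copy_from_user_sim(dst, src, sizeof(dst)) == 4);
	assert(dst[7] == 0);		/* tail zeroed, nothing leaks */
	return 0;
}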
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index a49635c51266..92ade79ac427 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
__kernel_size_t __copy_size = (__kernel_size_t) n;
if (__copy_size && __access_ok(__copy_from, __copy_size))
- return __copy_user(to, from, __copy_size);
+ __copy_size = __copy_user(to, from, __copy_size);
+
+ if (unlikely(__copy_size))
+ memset(to + (n - __copy_size), 0, __copy_size);
return __copy_size;
}
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index c01376c76b86..ca5073dd4596 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -24,6 +24,7 @@
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
+ x = 0; \
switch (size) { \
case 1: \
retval = __get_user_asm_b((void *)&x, \
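Same idea on the __get_user() side: the sh64 hunk pre-clears the destination and the score fixup now emits "li %1, 0", so a faulting load hands back zero rather than whatever was on the caller's stack. A tiny sketch of that guarantee (get_user_sim() and its fault flag are illustrative):

#include <assert.h>
#include <errno.h>

static int get_user_sim(unsigned long *x, const unsigned long *ptr, int faults)
{
	*x = 0;				/* fixup guarantee: never stale */
	if (faults)
		return -EFAULT;		/* the load would have faulted */
	*x = *ptr;
	return 0;
}

int main(void)
{
	unsigned long val = 0xdeadbeef;	/* stale value in the caller */
	unsigned long src = 42;

	assert(get_user_sim(&val, &src, 1) == -EFAULT);
	assert(val == 0);		/* not 0xdeadbeef */
	assert(get_user_sim(&val, &src, 0) == 0 && val == 42);
	return 0;
}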
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 341a5a133f48..ea55f86d7ccd 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -249,8 +249,7 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
if (n && __access_ok((unsigned long) to, n)) {
- if (!__builtin_constant_p(n))
- check_object_size(from, n, true);
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
} else
return n;
@@ -258,19 +257,19 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (!__builtin_constant_p(n))
- check_object_size(from, n, true);
+ check_object_size(from, n, true);
return __copy_user(to, (__force void __user *) from, n);
}
static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (n && __access_ok((unsigned long) from, n)) {
- if (!__builtin_constant_p(n))
- check_object_size(to, n, false);
+ check_object_size(to, n, false);
return __copy_user((__force void __user *) to, from, n);
- } else
+ } else {
+ memset(to, 0, n);
return n;
+ }
}
static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 8bda94fab8e8..37a315d0ddd4 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -212,8 +212,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
{
unsigned long ret;
- if (!__builtin_constant_p(size))
- check_object_size(to, size, false);
+ check_object_size(to, size, false);
ret = ___copy_from_user(to, from, size);
if (unlikely(ret))
@@ -233,8 +232,8 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
{
unsigned long ret;
- if (!__builtin_constant_p(size))
- check_object_size(from, size, true);
+ check_object_size(from, size, true);
+
ret = ___copy_to_user(to, from, size);
if (unlikely(ret))
ret = copy_to_user_fixup(to, from, size);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4820a02838ac..78da75b670bc 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -4,7 +4,6 @@
config TILE
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 0a9c4265763b..a77369e91e54 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
return n;
}
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-/*
- * There are still unprovable places in the generic code as of 2.6.34, so this
- * option is not really compatible with -Werror, which is more useful in
- * general.
- */
-extern void copy_from_user_overflow(void)
- __compiletime_warning("copy_from_user() size is not provably correct");
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
@@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
if (likely(sz == -1 || sz >= n))
n = _copy_from_user(to, from, n);
+ else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
else
- copy_from_user_overflow();
+ __bad_copy_user();
return n;
}
-#else
-#define copy_from_user _copy_from_user
-#endif
#ifdef __tilegx__
/**
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index ef4b8f949b51..b783ac87d98a 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -21,21 +21,17 @@ void handle_syscall(struct uml_pt_regs *r)
PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
if (syscall_trace_enter(regs))
- return;
+ goto out;
/* Do the seccomp check after ptrace; failures should be fast. */
if (secure_computing(NULL) == -1)
- return;
+ goto out;
- /* Update the syscall number after orig_ax has potentially been updated
- * with ptrace.
- */
- UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
syscall = UPT_SYSCALL_NR(r);
-
if (syscall >= 0 && syscall <= __NR_syscall_max)
PT_REGS_SET_SYSCALL_RETURN(regs,
EXECUTE_SYSCALL(syscall, regs));
+out:
syscall_trace_leave(regs);
}
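The control-flow point of the skas change above: ptrace and seccomp rejections used to return straight out of handle_syscall(), so the tracer saw a syscall-entry event with no matching exit. Routing every early exit through a label keeps the enter/leave hooks paired. A minimal stand-alone sketch of that single-exit shape:

#include <stdio.h>

static int trace_enter(void) { return 1; }	/* tracer aborts the call */
static void trace_leave(void) { puts("trace_leave"); }

static void handle_syscall_sim(void)
{
	if (trace_enter())
		goto out;	/* was a bare return, skipping trace_leave() */

	puts("execute syscall");
out:
	trace_leave();		/* now runs on every path */
}

int main(void)
{
	handle_syscall_sim();
	return 0;
}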
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c580d8c33562..2a1f0ce7c59a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -24,7 +24,6 @@ config X86
select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ff574dad95cc..94dd4a31f5b3 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1004,79 +1004,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status;
}
-static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle, bool is64)
-{
- struct efi_info *efi = &boot_params->efi_info;
- unsigned long map_sz, key, desc_size;
- efi_memory_desc_t *mem_map;
+struct exit_boot_struct {
+ struct boot_params *boot_params;
+ struct efi_info *efi;
struct setup_data *e820ext;
- const char *signature;
__u32 e820ext_size;
- __u32 nr_desc, prev_nr_desc;
- efi_status_t status;
- __u32 desc_version;
- bool called_exit = false;
- u8 nr_entries;
- int i;
-
- nr_desc = 0;
- e820ext = NULL;
- e820ext_size = 0;
-
-get_map:
- status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
- &desc_version, &key);
-
- if (status != EFI_SUCCESS)
- return status;
-
- prev_nr_desc = nr_desc;
- nr_desc = map_sz / desc_size;
- if (nr_desc > prev_nr_desc &&
- nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
- u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
-
- status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
- if (status != EFI_SUCCESS)
- goto free_mem_map;
+ bool is64;
+};
- efi_call_early(free_pool, mem_map);
- goto get_map; /* Allocated memory, get map again */
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv)
+{
+ static bool first = true;
+ const char *signature;
+ __u32 nr_desc;
+ efi_status_t status;
+ struct exit_boot_struct *p = priv;
+
+ if (first) {
+ nr_desc = *map->buff_size / *map->desc_size;
+ if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
+ u32 nr_e820ext = nr_desc -
+ ARRAY_SIZE(p->boot_params->e820_map);
+
+ status = alloc_e820ext(nr_e820ext, &p->e820ext,
+ &p->e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+ first = false;
}
- signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
- memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+ signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- efi->efi_systab = (unsigned long)sys_table;
- efi->efi_memdesc_size = desc_size;
- efi->efi_memdesc_version = desc_version;
- efi->efi_memmap = (unsigned long)mem_map;
- efi->efi_memmap_size = map_sz;
+ p->efi->efi_systab = (unsigned long)sys_table_arg;
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ p->efi->efi_memmap = (unsigned long)*map->map;
+ p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
- efi->efi_systab_hi = (unsigned long)sys_table >> 32;
- efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
+ return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params,
+ void *handle, bool is64)
+{
+ unsigned long map_sz, key, desc_size, buff_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext;
+ __u32 e820ext_size;
+ efi_status_t status;
+ __u32 desc_version;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+ priv.e820ext = NULL;
+ priv.e820ext_size = 0;
+ priv.is64 = is64;
+
/* Might as well exit boot services now */
- status = efi_call_early(exit_boot_services, handle, key);
- if (status != EFI_SUCCESS) {
- /*
- * ExitBootServices() will fail if any of the event
- * handlers change the memory map. In which case, we
- * must be prepared to retry, but only once so that
- * we're guaranteed to exit on repeated failures instead
- * of spinning forever.
- */
- if (called_exit)
- goto free_mem_map;
-
- called_exit = true;
- efi_call_early(free_pool, mem_map);
- goto get_map;
- }
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+ e820ext = priv.e820ext;
+ e820ext_size = priv.e820ext_size;
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;
@@ -1085,10 +1093,6 @@ get_map:
return status;
return EFI_SUCCESS;
-
-free_mem_map:
- efi_call_early(free_pool, mem_map);
- return status;
}
/*
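Shape of the eboot.c refactor above: the open-coded get-map/ExitBootServices/retry loop moves into a generic efi_exit_boot_services() helper, and per-caller state travels in a priv struct handed to a callback that runs against each fresh memory map. The user-space sketch below only mimics that callback-plus-cookie pattern with a simulated one-shot retry; none of these names are the EFI API.

#include <stdio.h>

struct memmap { unsigned long size; int key; };

typedef int (*exit_cb)(struct memmap *map, void *priv);

/* Library-side helper: owns the get-map/exit/retry dance each EFI stub
 * used to open-code, retrying at most once on a simulated map change. */
static int exit_boot_services_sim(struct memmap *map, void *priv, exit_cb fn)
{
	int tries = 2;

	while (tries--) {
		map->key++;		/* "efi_get_memory_map" */
		int err = fn(map, priv);
		if (err)
			return err;
		if (map->key >= 2)	/* "ExitBootServices succeeded" */
			return 0;
		/* map changed under us: loop and try again */
	}
	return -1;
}

struct exit_priv { int calls; unsigned long seen_size; };

static int record_map(struct memmap *map, void *priv)
{
	struct exit_priv *p = priv;

	p->calls++;
	p->seen_size = map->size;	/* stash what the caller needs */
	return 0;
}

int main(void)
{
	struct memmap map = { 4096, 0 };
	struct exit_priv p = { 0, 0 };

	if (!exit_boot_services_sim(&map, &p, record_map))
		printf("exited after %d callback call(s)\n", p.calls);
	return 0;
}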
diff --git a/arch/x86/configs/tiny.config b/arch/x86/configs/tiny.config
index 4e2ecfa23c15..4b429df40d7a 100644
--- a/arch/x86/configs/tiny.config
+++ b/arch/x86/configs/tiny.config
@@ -1 +1,3 @@
CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e07a22bb9308..f5f4b3fbbbc2 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -119,8 +119,8 @@ static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index e6131d4454e6..65577f081d07 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -29,6 +29,8 @@
#define COUNTER_SHIFT 16
+static HLIST_HEAD(uncore_unused_list);
+
struct amd_uncore {
int id;
int refcnt;
@@ -39,7 +41,7 @@ struct amd_uncore {
cpumask_t *active_mask;
struct pmu *pmu;
struct perf_event *events[MAX_COUNTERS];
- struct amd_uncore *free_when_cpu_online;
+ struct hlist_node node;
};
static struct amd_uncore * __percpu *amd_uncore_nb;
@@ -306,6 +308,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
uncore_nb->active_mask = &amd_nb_active_mask;
uncore_nb->pmu = &amd_nb_pmu;
+ uncore_nb->id = -1;
*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
}
@@ -319,6 +322,7 @@ static int amd_uncore_cpu_up_prepare(unsigned int cpu)
uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
uncore_l2->active_mask = &amd_l2_active_mask;
uncore_l2->pmu = &amd_l2_pmu;
+ uncore_l2->id = -1;
*per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
}
@@ -348,7 +352,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this,
continue;
if (this->id == that->id) {
- that->free_when_cpu_online = this;
+ hlist_add_head(&this->node, &uncore_unused_list);
this = that;
break;
}
@@ -388,13 +392,23 @@ static int amd_uncore_cpu_starting(unsigned int cpu)
return 0;
}
+static void uncore_clean_online(void)
+{
+ struct amd_uncore *uncore;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
+ hlist_del(&uncore->node);
+ kfree(uncore);
+ }
+}
+
static void uncore_online(unsigned int cpu,
struct amd_uncore * __percpu *uncores)
{
struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
- kfree(uncore->free_when_cpu_online);
- uncore->free_when_cpu_online = NULL;
+ uncore_clean_online();
if (cpu == uncore->cpu)
cpumask_set_cpu(cpu, uncore->active_mask);
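The uncore fix above replaces a one-slot free_when_cpu_online pointer, which could leak objects when several CPUs of a node came up in one batch, with a list of superseded structures reclaimed at a safe point. A plain-C sketch of that deferred-free pattern, with a singly linked list standing in for the kernel's hlist:

#include <stdlib.h>

struct uncore {
	int id;
	struct uncore *next;		/* stand-in for hlist_node */
};

static struct uncore *unused_list;

static void park_unused(struct uncore *u)
{
	u->next = unused_list;		/* hlist_add_head() equivalent */
	unused_list = u;
}

static void reclaim_unused(void)	/* uncore_clean_online() shape */
{
	while (unused_list) {
		struct uncore *u = unused_list;

		unused_list = u->next;
		free(u);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {	/* three CPUs share one sibling */
		struct uncore *u = calloc(1, sizeof(*u));

		if (u)
			park_unused(u);
	}
	reclaim_unused();		/* none of the three leaks */
	return 0;
}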
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 0a6e393a2e62..bdcd6510992c 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -31,7 +31,17 @@
struct bts_ctx {
struct perf_output_handle handle;
struct debug_store ds_back;
- int started;
+ int state;
+};
+
+/* BTS context states: */
+enum {
+ /* no ongoing AUX transactions */
+ BTS_STATE_STOPPED = 0,
+ /* AUX transaction is on, BTS tracing is disabled */
+ BTS_STATE_INACTIVE,
+ /* AUX transaction is on, BTS tracing is running */
+ BTS_STATE_ACTIVE,
};
static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);
@@ -204,6 +214,15 @@ static void bts_update(struct bts_ctx *bts)
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);
+/*
+ * Ordering PMU callbacks wrt themselves and the PMI is done by means
+ * of bts::state, which:
+ * - is set when bts::handle::event is valid, that is, between
+ * perf_aux_output_begin() and perf_aux_output_end();
+ * - is zero otherwise;
+ * - is ordered against bts::handle::event with a compiler barrier.
+ */
+
static void __bts_event_start(struct perf_event *event)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
@@ -221,10 +240,13 @@ static void __bts_event_start(struct perf_event *event)
/*
* local barrier to make sure that ds configuration made it
- * before we enable BTS
+ * before we enable BTS and bts::state goes ACTIVE
*/
wmb();
+ /* INACTIVE/STOPPED -> ACTIVE */
+ WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);
+
intel_pmu_enable_bts(config);
}
@@ -251,9 +273,6 @@ static void bts_event_start(struct perf_event *event, int flags)
__bts_event_start(event);
- /* PMI handler: this counter is running and likely generating PMIs */
- ACCESS_ONCE(bts->started) = 1;
-
return;
fail_end_stop:
@@ -263,30 +282,34 @@ fail_stop:
event->hw.state = PERF_HES_STOPPED;
}
-static void __bts_event_stop(struct perf_event *event)
+static void __bts_event_stop(struct perf_event *event, int state)
{
+ struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+
+ /* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
+ WRITE_ONCE(bts->state, state);
+
/*
* No extra synchronization is mandated by the documentation to have
* BTS data stores globally visible.
*/
intel_pmu_disable_bts();
-
- if (event->hw.state & PERF_HES_STOPPED)
- return;
-
- ACCESS_ONCE(event->hw.state) |= PERF_HES_STOPPED;
}
static void bts_event_stop(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
- struct bts_buffer *buf = perf_get_aux(&bts->handle);
+ struct bts_buffer *buf = NULL;
+ int state = READ_ONCE(bts->state);
- /* PMI handler: don't restart this counter */
- ACCESS_ONCE(bts->started) = 0;
+ if (state == BTS_STATE_ACTIVE)
+ __bts_event_stop(event, BTS_STATE_STOPPED);
- __bts_event_stop(event);
+ if (state != BTS_STATE_STOPPED)
+ buf = perf_get_aux(&bts->handle);
+
+ event->hw.state |= PERF_HES_STOPPED;
if (flags & PERF_EF_UPDATE) {
bts_update(bts);
@@ -296,6 +319,7 @@ static void bts_event_stop(struct perf_event *event, int flags)
bts->handle.head =
local_xchg(&buf->data_size,
buf->nr_pages << PAGE_SHIFT);
+
perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
!!local_xchg(&buf->lost, 0));
}
@@ -310,8 +334,20 @@ static void bts_event_stop(struct perf_event *event, int flags)
void intel_bts_enable_local(void)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ int state = READ_ONCE(bts->state);
+
+ /*
+ * Here we transition from INACTIVE to ACTIVE;
+ * if we instead are STOPPED from the interrupt handler,
+ * stay that way. Can't be ACTIVE here though.
+ */
+ if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
+ return;
+
+ if (state == BTS_STATE_STOPPED)
+ return;
- if (bts->handle.event && bts->started)
+ if (bts->handle.event)
__bts_event_start(bts->handle.event);
}
@@ -319,8 +355,15 @@ void intel_bts_disable_local(void)
{
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
+ /*
+ * Here we transition from ACTIVE to INACTIVE;
+ * do nothing for STOPPED or INACTIVE.
+ */
+ if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
+ return;
+
if (bts->handle.event)
- __bts_event_stop(bts->handle.event);
+ __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}
static int
@@ -335,8 +378,6 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
return 0;
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
- if (WARN_ON_ONCE(head != local_read(&buf->head)))
- return -EINVAL;
phys = &buf->buf[buf->cur_buf];
space = phys->offset + phys->displacement + phys->size - head;
@@ -403,22 +444,37 @@ bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
int intel_bts_interrupt(void)
{
+ struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
struct perf_event *event = bts->handle.event;
struct bts_buffer *buf;
s64 old_head;
- int err;
+ int err = -ENOSPC, handled = 0;
- if (!event || !bts->started)
- return 0;
+ /*
+ * The only surefire way of knowing if this NMI is ours is by checking
+ * the write ptr against the PMI threshold.
+ */
+ if (ds->bts_index >= ds->bts_interrupt_threshold)
+ handled = 1;
+
+ /*
+ * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
+ * so we can only be INACTIVE or STOPPED
+ */
+ if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
+ return handled;
buf = perf_get_aux(&bts->handle);
+ if (!buf)
+ return handled;
+
/*
* Skip snapshot counters: they don't use the interrupt, but
* there's no other way of telling, because the pointer will
* keep moving
*/
- if (!buf || buf->snapshot)
+ if (buf->snapshot)
return 0;
old_head = local_read(&buf->head);
@@ -426,18 +482,27 @@ int intel_bts_interrupt(void)
/* no new data */
if (old_head == local_read(&buf->head))
- return 0;
+ return handled;
perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0),
!!local_xchg(&buf->lost, 0));
buf = perf_aux_output_begin(&bts->handle, event);
- if (!buf)
- return 1;
+ if (buf)
+ err = bts_buffer_reset(buf, &bts->handle);
+
+ if (err) {
+ WRITE_ONCE(bts->state, BTS_STATE_STOPPED);
- err = bts_buffer_reset(buf, &bts->handle);
- if (err)
- perf_aux_output_end(&bts->handle, 0, false);
+ if (buf) {
+ /*
+ * BTS_STATE_STOPPED should be visible before
+ * cleared handle::event
+ */
+ barrier();
+ perf_aux_output_end(&bts->handle, 0, false);
+ }
+ }
return 1;
}
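The BTS rework above serializes the start/stop paths and the PMI through a single three-state variable instead of the old started flag. Below is a sketch of the protocol using C11 atomics where the driver uses READ_ONCE/WRITE_ONCE: the interrupt only stops an ACTIVE tracer, and once it forces STOPPED a later enable must not resurrect tracing. Illustrative only, not the driver code.

#include <stdatomic.h>
#include <stdio.h>

enum { STATE_STOPPED, STATE_INACTIVE, STATE_ACTIVE };

static _Atomic int state = STATE_STOPPED;

static void tracer_start(void)
{
	atomic_store_explicit(&state, STATE_ACTIVE, memory_order_release);
}

static void pmi_stop(void)	/* interrupt: ACTIVE -> STOPPED */
{
	if (atomic_load_explicit(&state, memory_order_acquire) == STATE_ACTIVE)
		atomic_store_explicit(&state, STATE_STOPPED,
				      memory_order_release);
}

static void enable_local(void)	/* INACTIVE -> ACTIVE, never resurrect */
{
	int s = atomic_load_explicit(&state, memory_order_acquire);

	if (s == STATE_ACTIVE || s == STATE_STOPPED)
		return;		/* stay stopped if the PMI stopped us */
	tracer_start();
}

int main(void)
{
	atomic_store(&state, STATE_INACTIVE);
	pmi_stop();		/* no-op: not ACTIVE */
	enable_local();		/* INACTIVE -> ACTIVE */
	pmi_stop();		/* ACTIVE -> STOPPED */
	enable_local();		/* stays STOPPED */
	printf("final state: %d\n", atomic_load(&state));
	return 0;
}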
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2cbde2f449aa..4c9a79b9cd69 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1730,9 +1730,11 @@ static __initconst const u64 knl_hw_cache_extra_regs
* disabled state if called consecutively.
*
* During consecutive calls, the same disable value will be written to related
- * registers, so the PMU state remains unchanged. hw.state in
- * intel_bts_disable_local will remain PERF_HES_STOPPED too in consecutive
- * calls.
+ * registers, so the PMU state remains unchanged.
+ *
+ * intel_bts events don't coexist with intel PMU's BTS events because of
+ * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
+ * disabled around intel PMU's event batching etc, only inside the PMI handler.
*/
static void __intel_pmu_disable_all(void)
{
@@ -1742,8 +1744,6 @@ static void __intel_pmu_disable_all(void)
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
- else
- intel_bts_disable_local();
intel_pmu_pebs_disable_all();
}
@@ -1771,8 +1771,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
return;
intel_pmu_enable_bts(event->hw.config);
- } else
- intel_bts_enable_local();
+ }
}
static void intel_pmu_enable_all(int added)
@@ -2073,6 +2072,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
*/
if (!x86_pmu.late_ack)
apic_write(APIC_LVTPC, APIC_DM_NMI);
+ intel_bts_disable_local();
__intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
handled += intel_bts_interrupt();
@@ -2172,6 +2172,7 @@ done:
/* Only restore PMU state when it's active. See x86_pmu_disable(). */
if (cpuc->enabled)
__intel_pmu_enable_all(0, true);
+ intel_bts_enable_local();
/*
* Only unmask the NMI after the overflow counters
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 783c49ddef29..8f82b02934fa 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -458,6 +458,11 @@ static void __intel_cqm_event_count(void *info);
static void init_mbm_sample(u32 rmid, u32 evt_type);
static void __intel_mbm_event_count(void *info);
+static bool is_cqm_event(int e)
+{
+ return (e == QOS_L3_OCCUP_EVENT_ID);
+}
+
static bool is_mbm_event(int e)
{
return (e >= QOS_MBM_TOTAL_EVENT_ID && e <= QOS_MBM_LOCAL_EVENT_ID);
@@ -1366,6 +1371,10 @@ static int intel_cqm_event_init(struct perf_event *event)
(event->attr.config > QOS_MBM_LOCAL_EVENT_ID))
return -EINVAL;
+ if ((is_cqm_event(event->attr.config) && !cqm_enabled) ||
+ (is_mbm_event(event->attr.config) && !mbm_enabled))
+ return -EINVAL;
+
/* unsupported modes and filters */
if (event->attr.exclude_user ||
event->attr.exclude_kernel ||
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7ce9f3f669e6..9b983a474253 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1274,18 +1274,18 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
struct pebs_record_nhm *p = at;
u64 pebs_status;
- /* PEBS v3 has accurate status bits */
+ pebs_status = p->status & cpuc->pebs_enabled;
+ pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+
+ /* PEBS v3 has more accurate status bits */
if (x86_pmu.intel_cap.pebs_format >= 3) {
- for_each_set_bit(bit, (unsigned long *)&p->status,
- MAX_PEBS_EVENTS)
+ for_each_set_bit(bit, (unsigned long *)&pebs_status,
+ x86_pmu.max_pebs_events)
counts[bit]++;
continue;
}
- pebs_status = p->status & cpuc->pebs_enabled;
- pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
-
/*
* On some CPUs the PEBS status can be zero when PEBS is
* racing with clearing of GLOBAL_STATUS.
@@ -1333,8 +1333,11 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
continue;
event = cpuc->events[bit];
- WARN_ON_ONCE(!event);
- WARN_ON_ONCE(!event->attr.precise_ip);
+ if (WARN_ON_ONCE(!event))
+ continue;
+
+ if (WARN_ON_ONCE(!event->attr.precise_ip))
+ continue;
/* log dropped samples number */
if (error[bit])
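The ds.c change above masks the hardware status word by both the counters actually enabled and the number of counters that exist before walking its bits, so spurious status bits can no longer index past the event array. A small sketch of that mask-then-iterate step, with a ctz loop standing in for for_each_set_bit():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hw_status = 0xf5;	/* raw PEBS status from hardware */
	uint64_t enabled   = 0x0f;	/* counters we actually programmed */
	int max_events     = 4;		/* counters that exist at all */
	uint64_t status;

	status = hw_status & enabled & ((1ULL << max_events) - 1);

	while (status) {		/* prints counters 0 and 2 (0x05) */
		int bit = __builtin_ctzll(status);

		printf("attribute sample to counter %d\n", bit);
		status &= status - 1;	/* clear lowest set bit */
	}
	return 0;
}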
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 04bb5fb5a8d7..861a7d9cb60f 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -1074,6 +1074,11 @@ static void pt_addr_filters_fini(struct perf_event *event)
event->hw.addr_filters = NULL;
}
+static inline bool valid_kernel_ip(unsigned long ip)
+{
+ return virt_addr_valid(ip) && kernel_ip(ip);
+}
+
static int pt_event_addr_filters_validate(struct list_head *filters)
{
struct perf_addr_filter *filter;
@@ -1081,11 +1086,16 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
list_for_each_entry(filter, filters, entry) {
/* PT doesn't support single address triggers */
- if (!filter->range)
+ if (!filter->range || !filter->size)
return -EOPNOTSUPP;
- if (!filter->inode && !kernel_ip(filter->offset))
- return -EINVAL;
+ if (!filter->inode) {
+ if (!valid_kernel_ip(filter->offset))
+ return -EINVAL;
+
+ if (!valid_kernel_ip(filter->offset + filter->size))
+ return -EINVAL;
+ }
if (++range > pt_cap_get(PT_CAP_num_address_ranges))
return -EOPNOTSUPP;
@@ -1111,7 +1121,7 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
} else {
/* apply the offset */
msr_a = filter->offset + offs[range];
- msr_b = filter->size + msr_a;
+ msr_b = filter->size + msr_a - 1;
}
filters->filter[range].msr_a = msr_a;
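Two pt.c fixes above: zero-length filters are rejected up front, and the range end register gets base + size - 1, because the hardware treats the bound as inclusive and programming base + size silently covered one extra byte. A sketch of the corrected arithmetic (struct range and program_filter() are illustrative names):

#include <assert.h>
#include <stdint.h>

struct range { uint64_t a, b; };	/* inclusive [a, b] */

static int program_filter(uint64_t base, uint64_t size, struct range *r)
{
	if (!size)
		return -1;		/* -EOPNOTSUPP in the driver */
	r->a = base;
	r->b = base + size - 1;		/* inclusive end, not base + size */
	return 0;
}

int main(void)
{
	struct range r;

	assert(program_filter(0x1000, 0x100, &r) == 0);
	assert(r.b == 0x10ff);		/* last byte covered, no overrun */
	assert(program_filter(0x1000, 0, &r) < 0);
	return 0;
}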
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a0ae610b9280..2131c4ce7d8a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -433,7 +433,11 @@ do { \
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
asm volatile("1: mov"itype" %1,%"rtype"0\n" \
"2:\n" \
- _ASM_EXTABLE_EX(1b, 2b) \
+ ".section .fixup,\"ax\"\n" \
+ "3:xor"itype" %"rtype"0,%"rtype"0\n" \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE_EX(1b, 3b) \
: ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size) \
@@ -697,44 +701,15 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
unsigned n);
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
-
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
-#else
-
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
{
WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
int sz = __compiletime_object_size(to);
@@ -743,36 +718,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
kasan_check_write(to, n);
- /*
- * While we would like to have the compiler do the checking for us
- * even in the non-constant size case, any false positives there are
- * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
- * without - the [hopefully] dangerous looking nature of the warning
- * would make people go look at the respecitive call sites over and
- * over again just to find that there's no problem).
- *
- * And there are cases where it's just not realistic for the compiler
- * to prove the count to be in range. For example when multiple call
- * sites of a helper function - perhaps in different source files -
- * all doing proper range checking, yet the helper function not doing
- * so again.
- *
- * Therefore limit the compile time checking to the constant size
- * case, and do only runtime checking for non-constant sizes.
- */
-
if (likely(sz < 0 || sz >= n)) {
check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
- } else if (__builtin_constant_p(n))
- copy_from_user_overflow();
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
else
- __copy_from_user_overflow(sz, n);
+ __bad_copy_user();
return n;
}
-static inline unsigned long __must_check
+static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
int sz = __compiletime_object_size(from);
@@ -781,21 +738,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
might_fault();
- /* See the comment in copy_from_user() above. */
if (likely(sz < 0 || sz >= n)) {
check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
- } else if (__builtin_constant_p(n))
- copy_to_user_overflow();
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
else
- __copy_to_user_overflow(sz, n);
+ __bad_copy_user();
return n;
}
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
/*
* We rely on the nested NMI work to allow atomic faults from the NMI path; the
* nested NMI paths are careful to preserve CR2.
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 50c95af0f017..f3e9b2df4b16 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2093,7 +2093,6 @@ int generic_processor_info(int apicid, int version)
return -EINVAL;
}
- num_processors++;
if (apicid == boot_cpu_physical_apicid) {
/*
* x86_bios_cpu_apicid is required to have processors listed
@@ -2116,10 +2115,13 @@ int generic_processor_info(int apicid, int version)
pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
thiscpu, apicid);
+
disabled_cpus++;
return -ENOSPC;
}
+ num_processors++;
+
/*
* Validate version
*/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f5c69d8974e1..b81fe2d63e15 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
+#define MSR_AMD64_DE_CFG 0xC0011029
+
+static void init_amd_ln(struct cpuinfo_x86 *c)
+{
+ /*
+ * Apply erratum 665 fix unconditionally so machines without a BIOS
+ * fix work.
+ */
+ msr_set_bit(MSR_AMD64_DE_CFG, 31);
+}
+
static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;
@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
case 6: init_amd_k7(c); break;
case 0xf: init_amd_k8(c); break;
case 0x10: init_amd_gh(c); break;
+ case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
}
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index b816971f5da4..620ab06bcf45 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -54,6 +54,7 @@ static LIST_HEAD(pcache);
*/
static u8 *container;
static size_t container_size;
+static bool ucode_builtin;
static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
@@ -281,18 +282,22 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
void __init load_ucode_amd_bsp(unsigned int family)
{
struct cpio_data cp;
+ bool *builtin;
void **data;
size_t *size;
#ifdef CONFIG_X86_32
data = (void **)__pa_nodebug(&ucode_cpio.data);
size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+ builtin = (bool *)__pa_nodebug(&ucode_builtin);
#else
data = &ucode_cpio.data;
size = &ucode_cpio.size;
+ builtin = &ucode_builtin;
#endif
- if (!load_builtin_amd_microcode(&cp, family))
+ *builtin = load_builtin_amd_microcode(&cp, family);
+ if (!*builtin)
cp = find_ucode_in_initrd();
if (!(cp.data && cp.size))
@@ -373,7 +378,8 @@ void load_ucode_amd_ap(void)
return;
/* Add CONFIG_RANDOMIZE_MEMORY offset. */
- cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+ if (!ucode_builtin)
+ cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
eax = cpuid_eax(0x00000001);
eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
@@ -439,7 +445,8 @@ int __init save_microcode_in_initrd_amd(void)
container = cont_va;
/* Add CONFIG_RANDOMIZE_MEMORY offset. */
- container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+ if (!ucode_builtin)
+ container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1d39bfbd26bb..3692249a70f1 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -289,6 +289,7 @@ void __init kvmclock_init(void)
put_cpu();
x86_platform.calibrate_tsc = kvm_get_tsc_khz;
+ x86_platform.calibrate_cpu = kvm_get_tsc_khz;
x86_platform.get_wallclock = kvm_get_wallclock;
x86_platform.set_wallclock = kvm_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ad5bc9578a73..1acfd76e3e26 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
".popsection");
/* identity function, which can be inlined */
-u32 _paravirt_ident_32(u32 x)
+u32 notrace _paravirt_ident_32(u32 x)
{
return x;
}
-u64 _paravirt_ident_64(u64 x)
+u64 notrace _paravirt_ident_64(u64 x)
{
return x;
}
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 5f42d038fcb4..c7220ba94aa7 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -109,6 +109,7 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
bool new_val, old_val;
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+ struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
union kvm_ioapic_redirect_entry *e;
e = &ioapic->redirtbl[RTC_GSI];
@@ -117,16 +118,17 @@ static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
return;
new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
- old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+ old_val = test_bit(vcpu->vcpu_id, dest_map->map);
if (new_val == old_val)
return;
if (new_val) {
- __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+ __set_bit(vcpu->vcpu_id, dest_map->map);
+ dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
ioapic->rtc_status.pending_eoi++;
} else {
- __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
+ __clear_bit(vcpu->vcpu_id, dest_map->map);
ioapic->rtc_status.pending_eoi--;
rtc_status_pending_eoi_check_valid(ioapic);
}
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 39b91127ef07..cd944435dfbd 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -23,8 +23,8 @@
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
- [2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
- [3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
+ [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
+ [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 19f9f9e05c2a..699f8726539a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2743,16 +2743,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
- if (kvm_lapic_hv_timer_in_use(vcpu) &&
- kvm_x86_ops->set_hv_timer(vcpu,
- kvm_get_lapic_tscdeadline_msr(vcpu)))
- kvm_lapic_switch_to_sw_timer(vcpu);
if (check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
+ if (kvm_lapic_hv_timer_in_use(vcpu) &&
+ kvm_x86_ops->set_hv_timer(vcpu,
+ kvm_get_lapic_tscdeadline_msr(vcpu)))
+ kvm_lapic_switch_to_sw_timer(vcpu);
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ecb1b69c1651..170cc4ff057b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -927,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
}
/*
- * prot is passed in as a parameter for the new mapping. If the vma has a
- * linear pfn mapping for the entire range reserve the entire vma range with
- * single reserve_pfn_range call.
+ * prot is passed in as a parameter for the new mapping. If the vma has
+ * a linear pfn mapping for the entire range, or no vma is provided,
+ * reserve the entire pfn + size range with a single reserve_pfn_range
+ * call.
*/
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr, unsigned long size)
@@ -938,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
enum page_cache_mode pcm;
/* reserve the whole chunk starting from paddr */
- if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+ if (!vma || (addr == vma->vm_start
+ && size == (vma->vm_end - vma->vm_start))) {
int ret;
ret = reserve_pfn_range(paddr, size, prot, 0);
- if (!ret)
+ if (ret == 0 && vma)
vma->vm_flags |= VM_PAT;
return ret;
}
@@ -997,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
resource_size_t paddr;
unsigned long prot;
- if (!(vma->vm_flags & VM_PAT))
+ if (vma && !(vma->vm_flags & VM_PAT))
return;
/* free the chunk starting from pfn or the whole chunk */
@@ -1011,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
- vma->vm_flags &= ~VM_PAT;
+ if (vma)
+ vma->vm_flags &= ~VM_PAT;
}
/*
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 837ea36a837d..6d52b94f4bb9 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -553,15 +553,21 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
/*
- * Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+ * Device [8086:2fc0]
+ * Erratum HSE43
+ * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
*
- * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
- * entry BDF2.
+ * Devices [8086:6f60,6fa0,6fc0]
+ * Erratum BDF2
+ * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
*/
-static void pci_bdwep_bar(struct pci_dev *dev)
+static void pci_invalid_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index ebd4dd6ef73b..a7ef7b131e25 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -84,7 +84,10 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
case EAX:
case EIP:
case UESP:
+ break;
case ORIG_EAX:
+ /* Update the syscall number. */
+ UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
break;
case FS:
if (value && (value & 3) != 3)
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index faab418876ce..0b5c184dd5b3 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -78,7 +78,11 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
case RSI:
case RDI:
case RBP:
+ break;
+
case ORIG_RAX:
+ /* Update the syscall number. */
+ UPT_SYSCALL_NR(&child->thread.regs.regs) = value;
break;
case FS:
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 369999530108..a832426820e8 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
return blkcipher_walk_done(desc, walk, -EINVAL);
}
+ bsize = min(walk->walk_blocksize, n);
+
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
}
}
- bsize = min(walk->walk_blocksize, n);
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index cf8037a87b2d..0c654e59f215 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -631,9 +631,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct shash_desc *desc = cryptd_shash_desc(req);
+
+ desc->tfm = ctx->child;
+ desc->flags = req->base.flags;
- return crypto_shash_import(&rctx->desc, in);
+ return crypto_shash_import(desc, in);
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
@@ -733,13 +738,14 @@ static void cryptd_aead_crypt(struct aead_request *req,
rctx = aead_request_ctx(req);
compl = rctx->complete;
+ tfm = crypto_aead_reqtfm(req);
+
if (unlikely(err == -EINPROGRESS))
goto out;
aead_request_set_tfm(req, child);
err = crypt( req );
out:
- tfm = crypto_aead_reqtfm(req);
ctx = crypto_aead_ctx(tfm);
refcnt = atomic_read(&ctx->refcnt);
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 1b01fe98e91f..e3d889b122e0 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -1,8 +1,8 @@
/*
* echainiv: Encrypted Chain IV Generator
*
- * This generator generates an IV based on a sequence number by xoring it
- * with a salt and then encrypting it with the same key as used to encrypt
+ * This generator generates an IV based on a sequence number by multiplying
+ * it with a salt and then encrypting it with the same key as used to encrypt
* the plain text. This algorithm requires that the block size be equal
* to the IV size. It is mainly useful for CBC.
*
@@ -24,81 +24,17 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
+#include <linux/slab.h>
#include <linux/string.h>
-#define MAX_IV_SIZE 16
-
-static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
-
-/* We don't care if we get preempted and read/write IVs from the next CPU. */
-static void echainiv_read_iv(u8 *dst, unsigned size)
-{
- u32 *a = (u32 *)dst;
- u32 __percpu *b = echainiv_iv;
-
- for (; size >= 4; size -= 4) {
- *a++ = this_cpu_read(*b);
- b++;
- }
-}
-
-static void echainiv_write_iv(const u8 *src, unsigned size)
-{
- const u32 *a = (const u32 *)src;
- u32 __percpu *b = echainiv_iv;
-
- for (; size >= 4; size -= 4) {
- this_cpu_write(*b, *a);
- a++;
- b++;
- }
-}
-
-static void echainiv_encrypt_complete2(struct aead_request *req, int err)
-{
- struct aead_request *subreq = aead_request_ctx(req);
- struct crypto_aead *geniv;
- unsigned int ivsize;
-
- if (err == -EINPROGRESS)
- return;
-
- if (err)
- goto out;
-
- geniv = crypto_aead_reqtfm(req);
- ivsize = crypto_aead_ivsize(geniv);
-
- echainiv_write_iv(subreq->iv, ivsize);
-
- if (req->iv != subreq->iv)
- memcpy(req->iv, subreq->iv, ivsize);
-
-out:
- if (req->iv != subreq->iv)
- kzfree(subreq->iv);
-}
-
-static void echainiv_encrypt_complete(struct crypto_async_request *base,
- int err)
-{
- struct aead_request *req = base->data;
-
- echainiv_encrypt_complete2(req, err);
- aead_request_complete(req, err);
-}
-
static int echainiv_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
- crypto_completion_t compl;
- void *data;
+ __be64 nseqno;
+ u64 seqno;
u8 *info;
unsigned int ivsize = crypto_aead_ivsize(geniv);
int err;
@@ -108,8 +44,6 @@ static int echainiv_encrypt(struct aead_request *req)
aead_request_set_tfm(subreq, ctx->child);
- compl = echainiv_encrypt_complete;
- data = req;
info = req->iv;
if (req->src != req->dst) {
@@ -127,29 +61,30 @@ static int echainiv_encrypt(struct aead_request *req)
return err;
}
- if (unlikely(!IS_ALIGNED((unsigned long)info,
- crypto_aead_alignmask(geniv) + 1))) {
- info = kmalloc(ivsize, req->base.flags &
- CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
- GFP_ATOMIC);
- if (!info)
- return -ENOMEM;
-
- memcpy(info, req->iv, ivsize);
- }
-
- aead_request_set_callback(subreq, req->base.flags, compl, data);
+ aead_request_set_callback(subreq, req->base.flags,
+ req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen, info);
aead_request_set_ad(subreq, req->assoclen);
- crypto_xor(info, ctx->salt, ivsize);
+ memcpy(&nseqno, info + ivsize - 8, 8);
+ seqno = be64_to_cpu(nseqno);
+ memset(info, 0, ivsize);
+
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
- echainiv_read_iv(info, ivsize);
- err = crypto_aead_encrypt(subreq);
- echainiv_encrypt_complete2(req, err);
- return err;
+ do {
+ u64 a;
+
+ memcpy(&a, ctx->salt + ivsize - 8, 8);
+
+ a |= 1;
+ a *= seqno;
+
+ memcpy(info + ivsize - 8, &a, 8);
+ } while ((ivsize -= 8));
+
+ return crypto_aead_encrypt(subreq);
}
static int echainiv_decrypt(struct aead_request *req)
@@ -196,8 +131,7 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
- if (inst->alg.ivsize & (sizeof(u32) - 1) ||
- inst->alg.ivsize > MAX_IV_SIZE)
+ if (inst->alg.ivsize & (sizeof(u64) - 1) || !inst->alg.ivsize)
goto free_inst;
inst->alg.encrypt = echainiv_encrypt;
@@ -206,7 +140,6 @@ static int echainiv_aead_create(struct crypto_template *tmpl,
inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv;
- inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
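The core of the new echainiv above: each 64-bit word of the IV is computed as (salt_word | 1) * seqno. Forcing the low bit makes the multiplier odd, hence invertible modulo 2^64, so distinct sequence numbers always yield distinct IV words and no per-CPU IV state needs to be read back. A user-space sketch of just that construction; it assumes ivsize is a nonzero multiple of 8 (which the template now enforces) and elides the big-endian handling of the sequence number.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void echainiv_fill(uint8_t *iv, const uint8_t *salt,
			  unsigned int ivsize, uint64_t seqno)
{
	memset(iv, 0, ivsize);
	do {
		uint64_t a;

		memcpy(&a, salt + ivsize - 8, 8);
		a |= 1;			/* odd => invertible mod 2^64 */
		a *= seqno;
		memcpy(iv + ivsize - 8, &a, 8);
	} while ((ivsize -= 8));
}

int main(void)
{
	uint8_t salt[16] = "0123456789abcdef", iv[16];

	echainiv_fill(iv, salt, sizeof(iv), 42);
	for (unsigned int i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}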
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index 4c745bf389fe..161f91539ae6 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -42,7 +42,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
struct acpi_nfit_system_address *spa = nfit_spa->spa;
- if (nfit_spa_type(spa) == NFIT_SPA_PM)
+ if (nfit_spa_type(spa) != NFIT_SPA_PM)
continue;
/* find the spa that covers the mce addr */
if (spa->address > mce->addr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ad9fc84a8601..e878fc799af7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2054,7 +2054,7 @@ int __init acpi_scan_init(void)
static struct acpi_probe_entry *ape;
static int acpi_probe_count;
-static DEFINE_SPINLOCK(acpi_probe_lock);
+static DEFINE_MUTEX(acpi_probe_mutex);
static int __init acpi_match_madt(struct acpi_subtable_header *header,
const unsigned long end)
@@ -2073,7 +2073,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
if (acpi_disabled)
return 0;
- spin_lock(&acpi_probe_lock);
+ mutex_lock(&acpi_probe_mutex);
for (ape = ap_head; nr; ape++, nr--) {
if (ACPI_COMPARE_NAME(ACPI_SIG_MADT, ape->id)) {
acpi_probe_count = 0;
@@ -2086,7 +2086,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
count++;
}
}
- spin_unlock(&acpi_probe_lock);
+ mutex_unlock(&acpi_probe_mutex);
return count;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 7461a587b39b..dcf2c724fd06 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2524,7 +2524,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
/* Do not receive interrupts sent by dummy ports */
if (!pp) {
- disable_irq(irq + i);
+ disable_irq(irq);
continue;
}
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index 633aa2934a18..44f97ad3c88d 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -144,7 +144,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ap->ioaddr.altstatus_addr = base + 0x1E;
ap->ioaddr.bmdma_addr = base;
ata_sff_std_ports(&ap->ioaddr);
- ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
+ ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
ninja32_program(base);
/* FIXME: Should we disable them at remove ? */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 6339efd32697..f2aaf9e32a36 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1845,8 +1845,9 @@ static int eni_start(struct atm_dev *dev)
/* initialize memory management */
buffer_mem = eni_dev->mem - (buf - eni_dev->ram);
eni_dev->free_list_size = buffer_mem/MID_MIN_BUF_SIZE/2;
- eni_dev->free_list = kmalloc(
- sizeof(struct eni_free)*(eni_dev->free_list_size+1),GFP_KERNEL);
+ eni_dev->free_list = kmalloc_array(eni_dev->free_list_size + 1,
+ sizeof(*eni_dev->free_list),
+ GFP_KERNEL);
if (!eni_dev->free_list) {
printk(KERN_ERR DEV_LABEL "(itf %d): couldn't get free page\n",
dev->number);
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 0f5cb37636bc..31b513a23ae0 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -779,8 +779,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
G0_RBPS_BS + (group * 32));
/* bitmap table */
- he_dev->rbpl_table = kmalloc(BITS_TO_LONGS(RBPL_TABLE_SIZE)
- * sizeof(unsigned long), GFP_KERNEL);
+ he_dev->rbpl_table = kmalloc_array(BITS_TO_LONGS(RBPL_TABLE_SIZE),
+ sizeof(*he_dev->rbpl_table),
+ GFP_KERNEL);
if (!he_dev->rbpl_table) {
hprintk("unable to allocate rbpl bitmap table\n");
return -ENOMEM;
@@ -788,8 +789,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
bitmap_zero(he_dev->rbpl_table, RBPL_TABLE_SIZE);
/* rbpl_virt 64-bit pointers */
- he_dev->rbpl_virt = kmalloc(RBPL_TABLE_SIZE
- * sizeof(struct he_buff *), GFP_KERNEL);
+ he_dev->rbpl_virt = kmalloc_array(RBPL_TABLE_SIZE,
+ sizeof(*he_dev->rbpl_virt),
+ GFP_KERNEL);
if (!he_dev->rbpl_virt) {
hprintk("unable to allocate rbpl virt table\n");
goto out_free_rbpl_table;
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 809dd1e02091..b2756765950e 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1885,9 +1885,9 @@ static int open_tx(struct atm_vcc *vcc)
if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
return ret;
}
- }
- else
- printk("iadev: Non UBR, ABR and CBR traffic not supportedn");
+ } else {
+ printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
+ }
iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
IF_EVENT(printk("ia open_tx returning \n");)
@@ -1975,7 +1975,9 @@ static int tx_init(struct atm_dev *dev)
buf_desc_ptr++;
tx_pkt_start += iadev->tx_buf_sz;
}
- iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
+ iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
+ sizeof(*iadev->tx_buf),
+ GFP_KERNEL);
if (!iadev->tx_buf) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_dle;
@@ -1995,8 +1997,9 @@ static int tx_init(struct atm_dev *dev)
sizeof(*cpcs),
DMA_TO_DEVICE);
}
- iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
- sizeof(struct desc_tbl_t), GFP_KERNEL);
+ iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
+ sizeof(*iadev->desc_tbl),
+ GFP_KERNEL);
if (!iadev->desc_tbl) {
printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
goto err_free_all_tx_bufs;
@@ -2124,7 +2127,9 @@ static int tx_init(struct atm_dev *dev)
memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4);
vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
- iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
+ iadev->testTable = kmalloc_array(iadev->num_vc,
+ sizeof(*iadev->testTable),
+ GFP_KERNEL);
if (!iadev->testTable) {
printk("Get freepage failed\n");
goto err_free_desc_tbl;
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 700ed15c2362..c7296b583787 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -370,7 +370,8 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
- if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) {
+ card = kmalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
printk
("nicstar%d: can't allocate memory for device structure.\n",
i);
@@ -611,7 +612,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
for (j = 0; j < card->rct_size; j++)
ns_write_sram(card, j * 4, u32d, 4);
- memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
+ memset(card->vcmap, 0, sizeof(card->vcmap));
for (j = 0; j < NS_FRSCD_NUM; j++)
card->scd2vc[j] = NULL;
@@ -862,7 +863,7 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
return NULL;
- scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
+ scq = kmalloc(sizeof(*scq), GFP_KERNEL);
if (!scq)
return NULL;
scq->org = dma_alloc_coherent(&card->pcidev->dev,
@@ -871,8 +872,9 @@ static scq_info *get_scq(ns_dev *card, int size, u32 scd)
kfree(scq);
return NULL;
}
- scq->skb = kmalloc(sizeof(struct sk_buff *) *
- (size / NS_SCQE_SIZE), GFP_KERNEL);
+ scq->skb = kmalloc_array(size / NS_SCQE_SIZE,
+ sizeof(*scq->skb),
+ GFP_KERNEL);
if (!scq->skb) {
dma_free_coherent(&card->pcidev->dev,
2 * size, scq->org, scq->dma);
@@ -2021,7 +2023,8 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
cell = skb->data;
for (i = ns_rsqe_cellcount(rsqe); i; i--) {
- if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) {
+ sb = dev_alloc_skb(NS_SMSKBSIZE);
+ if (!sb) {
printk
("nicstar%d: Can't allocate buffers for aal0.\n",
card->index);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index cecfb943762f..d3dc95484161 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -598,12 +598,13 @@ static void close_rx(struct atm_vcc *vcc)
static int start_rx(struct atm_dev *dev)
{
struct zatm_dev *zatm_dev;
- int size,i;
+ int i;
-DPRINTK("start_rx\n");
+ DPRINTK("start_rx\n");
zatm_dev = ZATM_DEV(dev);
- size = sizeof(struct atm_vcc *)*zatm_dev->chans;
- zatm_dev->rx_map = kzalloc(size,GFP_KERNEL);
+ zatm_dev->rx_map = kcalloc(zatm_dev->chans,
+ sizeof(*zatm_dev->rx_map),
+ GFP_KERNEL);
if (!zatm_dev->rx_map) return -ENOMEM;
/* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
@@ -998,8 +999,9 @@ static int start_tx(struct atm_dev *dev)
DPRINTK("start_tx\n");
zatm_dev = ZATM_DEV(dev);
- zatm_dev->tx_map = kmalloc(sizeof(struct atm_vcc *)*
- zatm_dev->chans,GFP_KERNEL);
+ zatm_dev->tx_map = kmalloc_array(zatm_dev->chans,
+ sizeof(*zatm_dev->tx_map),
+ GFP_KERNEL);
if (!zatm_dev->tx_map) return -ENOMEM;
zatm_dev->tx_bw = ATM_OC3_PCR;
zatm_dev->free_shapers = (1 << NR_SHAPERS)-1;
@@ -1398,7 +1400,7 @@ static int zatm_open(struct atm_vcc *vcc)
DPRINTK(DEV_LABEL "(itf %d): open %d.%d\n",vcc->dev->number,vcc->vpi,
vcc->vci);
if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
- zatm_vcc = kmalloc(sizeof(struct zatm_vcc),GFP_KERNEL);
+ zatm_vcc = kmalloc(sizeof(*zatm_vcc), GFP_KERNEL);
if (!zatm_vcc) {
clear_bit(ATM_VF_ADDR,&vcc->flags);
return -ENOMEM;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index e097d355cc04..82a081ea4317 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -301,7 +301,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle(dev, rpmflags);
+ trace_rpm_idle_rcuidle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -337,7 +337,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int(dev, _THIS_IP_, 0);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
return 0;
}
@@ -352,7 +352,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
@@ -419,7 +419,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend(dev, rpmflags);
+ trace_rpm_suspend_rcuidle(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -549,7 +549,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval;
@@ -601,7 +601,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume(dev, rpmflags);
+ trace_rpm_resume_rcuidle(dev, rpmflags);
repeat:
if (dev->power.runtime_error)
@@ -764,7 +764,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int(dev, _THIS_IP_, retval);
+ trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
return retval;
}
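The switch to the _rcuidle tracepoint variants matters because runtime-PM transitions can run while the CPU is in an RCU-idle state, where the plain trace_rpm_*() entry points are not safe to call. For any tracepoint `foo`, the tracepoint machinery generates both forms (sketch):

    trace_foo(args);          /* normal contexts; RCU must be watching */
    trace_foo_rcuidle(args);  /* safe from idle; makes RCU watch briefly */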
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa56af87d941..b11af3f2c1db 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -404,6 +404,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int new_base_reg, new_top_reg;
unsigned int min, max;
unsigned int max_dist;
+ unsigned int dist, best_dist = UINT_MAX;
max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
map->cache_word_size;
@@ -423,24 +424,41 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
&base_reg, &top_reg);
if (base_reg <= max && top_reg >= min) {
- new_base_reg = min(reg, base_reg);
- new_top_reg = max(reg, top_reg);
- } else {
- if (max < base_reg)
- node = node->rb_left;
+ if (reg < base_reg)
+ dist = base_reg - reg;
+ else if (reg > top_reg)
+ dist = reg - top_reg;
else
- node = node->rb_right;
-
- continue;
+ dist = 0;
+ if (dist < best_dist) {
+ rbnode = rbnode_tmp;
+ best_dist = dist;
+ new_base_reg = min(reg, base_reg);
+ new_top_reg = max(reg, top_reg);
+ }
}
- ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+ /*
+ * Keep looking, we want to choose the closest block,
+ * otherwise we might end up creating overlapping
+ * blocks, which breaks the rbtree.
+ */
+ if (reg < base_reg)
+ node = node->rb_left;
+ else if (reg > top_reg)
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (rbnode) {
+ ret = regcache_rbtree_insert_to_block(map, rbnode,
new_base_reg,
new_top_reg, reg,
value);
if (ret)
return ret;
- rbtree_ctx->cached_rbnode = rbnode_tmp;
+ rbtree_ctx->cached_rbnode = rbnode;
return 0;
}
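The rewritten lookup no longer extends the first overlapping block it meets: it walks the whole search path, remembers the block whose range lies closest to `reg` (distance 0 if `reg` is already inside one), and only then inserts into that block. Extending a farther block could create two overlapping register ranges, which the rbtree cannot represent. The distance metric in isolation, as a sketch mirroring the names above:

    static unsigned int block_dist(unsigned int reg, unsigned int base_reg,
                                   unsigned int top_reg)
    {
            if (reg < base_reg)
                    return base_reg - reg;          /* below the block */
            if (reg > top_reg)
                    return reg - top_reg;           /* above the block */
            return 0;                               /* inside the block */
    }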
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index df7ff7290821..4e582561e1e7 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -38,10 +38,11 @@ static int regcache_hw_init(struct regmap *map)
/* calculate the size of reg_defaults */
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
- if (!regmap_volatile(map, i * map->reg_stride))
+ if (regmap_readable(map, i * map->reg_stride) &&
+ !regmap_volatile(map, i * map->reg_stride))
count++;
- /* all registers are volatile, so just bypass */
+ /* all registers are unreadable or volatile, so just bypass */
if (!count) {
map->cache_bypass = true;
return 0;
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 51fa7d66a393..25d26bb18970 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1474,6 +1474,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
ret = map->bus->write(map->bus_context, buf, len);
kfree(buf);
+ } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
+ regcache_drop_region(map, reg, reg + 1);
}
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
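The new error branch keeps the cache honest after a failed bus write: if the value never reached the hardware, the entry cached for that register is stale, and a later cached read would return data the device never saw. Dropping the affected region forces the next access back onto the bus. The shape of the fix, sketched in isolation with the condition names used above:

    ret = map->bus->write(map->bus_context, buf, len);
    if (ret != 0 && !map->cache_bypass && map->format.parse_val)
            regcache_drop_region(map, reg, reg + 1); /* forget stale entry */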
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 921ce1834673..b4f6520e74f0 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -36,12 +36,31 @@ u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
}
EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);
+static bool bcma_core_cc_has_pmu_watchdog(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573) {
+ WARN(bus->chipinfo.rev <= 1, "No watchdog available\n");
+ /* 53573B0 and 53573B1 have a buggy PMU watchdog. It can
+ * be enabled, but its timer can't be bumped. Use the CC
+ * one instead.
+ */
+ return false;
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
u32 nb;
- if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bcma_core_cc_has_pmu_watchdog(cc)) {
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
nb = 32;
else if (cc->core->id.rev < 26)
@@ -95,9 +114,16 @@ static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
{
+ struct bcma_bus *bus = cc->core->bus;
struct bcm47xx_wdt wdt = {};
struct platform_device *pdev;
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573 &&
+ bus->chipinfo.rev <= 1) {
+ pr_debug("No watchdog on 53573A0 / 53573A1\n");
+ return 0;
+ }
+
wdt.driver_data = cc;
wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
@@ -105,7 +131,7 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
- cc->core->bus->num, &wdt,
+ bus->num, &wdt,
sizeof(wdt));
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -217,7 +243,7 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
u32 maxt;
maxt = bcma_chipco_watchdog_get_max_timer(cc);
- if (cc->capabilities & BCMA_CC_CAP_PMU) {
+ if (bcma_core_cc_has_pmu_watchdog(cc)) {
if (ticks == 1)
ticks = 2;
else if (ticks > maxt)
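The new bcma_core_cc_has_pmu_watchdog() helper centralizes the choice of watchdog: PMU-capable chips normally use the PMU watchdog, but early BCM53573 revisions fall back to the ChipCommon one because their PMU watchdog timer cannot be re-armed, and the earliest revisions get no watchdog registered at all. Both call sites then collapse to the same shape (sketch):

    if (bcma_core_cc_has_pmu_watchdog(cc)) {
            /* program the PMU watchdog */
    } else {
            /* program the ChipCommon watchdog */
    }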
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 1f635471f318..2c1798e38abd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -209,6 +209,8 @@ static void bcma_of_fill_device(struct platform_device *parent,
core->dev.of_node = node;
core->irq = bcma_of_get_irq(parent, core, 0);
+
+ of_dma_configure(&core->dev, node);
}
unsigned int bcma_core_irq(struct bcma_device *core, int num)
@@ -248,12 +250,12 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
core->irq = bus->host_pci->irq;
break;
case BCMA_HOSTTYPE_SOC:
- core->dev.dma_mask = &core->dev.coherent_dma_mask;
- if (bus->host_pdev) {
+ if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
core->dma_dev = &bus->host_pdev->dev;
core->dev.parent = &bus->host_pdev->dev;
bcma_of_fill_device(bus->host_pdev, core);
} else {
+ core->dev.dma_mask = &core->dev.coherent_dma_mask;
core->dma_dev = &core->dev;
}
break;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index cf50fd2e96df..3cc9bff9d99d 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -180,6 +180,17 @@ config BT_HCIUART_AG6XX
Say Y here to compile support for Intel AG6XX protocol.
+config BT_HCIUART_MRVL
+ bool "Marvell protocol support"
+ depends on BT_HCIUART
+ select BT_HCIUART_H4
+ help
+ Marvell is a serial protocol for communication between the
+ Bluetooth device and the host. This protocol is required for
+ most Marvell Bluetooth devices with a UART interface.
+
+ Say Y here to compile support for HCI MRVL protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
@@ -331,4 +342,16 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_QCOMSMD
+ tristate "Qualcomm SMD based HCI support"
+ depends on QCOM_SMD && QCOM_WCNSS_CTRL
+ select BT_QCA
+ help
+ Qualcomm SMD based HCI driver.
+ This driver bridges HCI data over the shared memory
+ channels to the WCNSS core.
+
+ Say Y here to compile support for HCI over Qualcomm SMD into the
+ kernel or say M to compile as a module.
+
endmenu
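Both new drivers are wired up in the Makefile below; enabling them follows the usual Kconfig flow. An illustrative .config fragment, not part of the patch:

    CONFIG_BT_HCIUART=y
    CONFIG_BT_HCIUART_MRVL=y      # bool: hci_mrvl.o is built into hci_uart
    CONFIG_BT_QCOMSMD=m           # tristate: btqcomsmd.ko as a module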
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 9c18939fc5c9..b1fc29a697b7 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
@@ -37,6 +38,7 @@ hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o
hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o
hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o
hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o
+hci_uart-$(CONFIG_BT_HCIUART_MRVL) += hci_mrvl.o
hci_uart-objs := $(hci_uart-y)
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 5b0ef7bbe8ac..5ce6d4176dc3 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -185,10 +185,8 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id
data->state = BCM203X_LOAD_MINIDRV;
data->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!data->urb) {
- BT_ERR("Can't allocate URB");
+ if (!data->urb)
return -ENOMEM;
- }
if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) {
BT_ERR("Mini driver request failed");
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 4a6208168850..28afd5d585f9 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -55,8 +55,8 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl || !edl->data) {
- BT_ERR("%s: TLV with no header or no data", hdev->name);
+ if (!edl) {
+ BT_ERR("%s: TLV with no header", hdev->name);
err = -EILSEQ;
goto out;
}
@@ -224,8 +224,8 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
}
edl = (struct edl_event_hdr *)(skb->data);
- if (!edl || !edl->data) {
- BT_ERR("%s: TLV with no header or no data", hdev->name);
+ if (!edl) {
+ BT_ERR("%s: TLV with no header", hdev->name);
err = -EILSEQ;
goto out;
}
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
new file mode 100644
index 000000000000..08c2c93887c1
--- /dev/null
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2016, Linaro Ltd.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
+#include <linux/platform_device.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+struct btqcomsmd {
+ struct hci_dev *hdev;
+
+ struct qcom_smd_channel *acl_channel;
+ struct qcom_smd_channel *cmd_channel;
+};
+
+static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type,
+ const void *data, size_t count)
+{
+ struct sk_buff *skb;
+
+ /* Use GFP_ATOMIC as we're in IRQ context */
+ skb = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+ return -ENOMEM;
+ }
+
+ hci_skb_pkt_type(skb) = type;
+ memcpy(skb_put(skb, count), data, count);
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btqcomsmd_acl_callback(struct qcom_smd_channel *channel,
+ const void *data, size_t count)
+{
+ struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+
+ btq->hdev->stat.byte_rx += count;
+ return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count);
+}
+
+static int btqcomsmd_cmd_callback(struct qcom_smd_channel *channel,
+ const void *data, size_t count)
+{
+ struct btqcomsmd *btq = qcom_smd_get_drvdata(channel);
+
+ return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count);
+}
+
+static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btqcomsmd *btq = hci_get_drvdata(hdev);
+ int ret;
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_ACLDATA_PKT:
+ ret = qcom_smd_send(btq->acl_channel, skb->data, skb->len);
+ hdev->stat.acl_tx++;
+ hdev->stat.byte_tx += skb->len;
+ break;
+ case HCI_COMMAND_PKT:
+ ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len);
+ hdev->stat.cmd_tx++;
+ break;
+ default:
+ ret = -EILSEQ;
+ break;
+ }
+
+ kfree_skb(skb);
+
+ return ret;
+}
+
+static int btqcomsmd_open(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int btqcomsmd_close(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static int btqcomsmd_probe(struct platform_device *pdev)
+{
+ struct btqcomsmd *btq;
+ struct hci_dev *hdev;
+ void *wcnss;
+ int ret;
+
+ btq = devm_kzalloc(&pdev->dev, sizeof(*btq), GFP_KERNEL);
+ if (!btq)
+ return -ENOMEM;
+
+ wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL",
+ btqcomsmd_acl_callback);
+ if (IS_ERR(btq->acl_channel))
+ return PTR_ERR(btq->acl_channel);
+
+ btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
+ btqcomsmd_cmd_callback);
+ if (IS_ERR(btq->cmd_channel))
+ return PTR_ERR(btq->cmd_channel);
+
+ qcom_smd_set_drvdata(btq->acl_channel, btq);
+ qcom_smd_set_drvdata(btq->cmd_channel, btq);
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+
+ hci_set_drvdata(hdev, btq);
+ btq->hdev = hdev;
+ SET_HCIDEV_DEV(hdev, &pdev->dev);
+
+ hdev->bus = HCI_SMD;
+ hdev->open = btqcomsmd_open;
+ hdev->close = btqcomsmd_close;
+ hdev->send = btqcomsmd_send;
+ hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ ret = hci_register_dev(hdev);
+ if (ret < 0) {
+ hci_free_dev(hdev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, btq);
+
+ return 0;
+}
+
+static int btqcomsmd_remove(struct platform_device *pdev)
+{
+ struct btqcomsmd *btq = platform_get_drvdata(pdev);
+
+ hci_unregister_dev(btq->hdev);
+ hci_free_dev(btq->hdev);
+
+ return 0;
+}
+
+static const struct of_device_id btqcomsmd_of_match[] = {
+ { .compatible = "qcom,wcnss-bt", },
+ { },
+};
+
+static struct platform_driver btqcomsmd_driver = {
+ .probe = btqcomsmd_probe,
+ .remove = btqcomsmd_remove,
+ .driver = {
+ .name = "btqcomsmd",
+ .of_match_table = btqcomsmd_of_match,
+ },
+};
+
+module_platform_driver(btqcomsmd_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD HCI driver");
+MODULE_LICENSE("GPL v2");
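btqcomsmd binds as a child of the WCNSS control device and pulls its parent's state with dev_get_drvdata(pdev->dev.parent) before opening the ACL and CMD channels. A reduced sketch of that parent/child pattern; the error handling here is illustrative, not from the driver:

    static int child_probe(struct platform_device *pdev)
    {
            /* The parent stored its handle with dev_set_drvdata()
             * before registering this child device. */
            void *wcnss = dev_get_drvdata(pdev->dev.parent);

            if (!wcnss)
                    return -EPROBE_DEFER;
            return 0;
    }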
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 84288938f7f2..fc9b25703c67 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -33,6 +33,7 @@
#define RTL_ROM_LMP_8723B 0x8723
#define RTL_ROM_LMP_8821A 0x8821
#define RTL_ROM_LMP_8761A 0x8761
+#define RTL_ROM_LMP_8822B 0x8822
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
@@ -78,11 +79,15 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
const unsigned char *patch_length_base, *patch_offset_base;
u32 patch_offset = 0;
u16 patch_length, num_patches;
- const u16 project_id_to_lmp_subver[] = {
- RTL_ROM_LMP_8723A,
- RTL_ROM_LMP_8723B,
- RTL_ROM_LMP_8821A,
- RTL_ROM_LMP_8761A
+ static const struct {
+ __u16 lmp_subver;
+ __u8 id;
+ } project_id_to_lmp_subver[] = {
+ { RTL_ROM_LMP_8723A, 0 },
+ { RTL_ROM_LMP_8723B, 1 },
+ { RTL_ROM_LMP_8821A, 2 },
+ { RTL_ROM_LMP_8761A, 3 },
+ { RTL_ROM_LMP_8822B, 8 },
};
ret = rtl_read_rom_version(hdev, &rom_version);
@@ -134,14 +139,20 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
return -EINVAL;
}
- if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+ /* Find project_id in table */
+ for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
+ if (project_id == project_id_to_lmp_subver[i].id)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
BT_ERR("%s: unknown project id %d", hdev->name, project_id);
return -EINVAL;
}
- if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+ if (lmp_subver != project_id_to_lmp_subver[i].lmp_subver) {
BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
- project_id_to_lmp_subver[project_id], lmp_subver);
+ project_id_to_lmp_subver[i].lmp_subver, lmp_subver);
return -EINVAL;
}
@@ -257,6 +268,26 @@ out:
return ret;
}
+static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
+{
+ const struct firmware *fw;
+ int ret;
+
+ BT_INFO("%s: rtl: loading %s", hdev->name, name);
+ ret = request_firmware(&fw, name, &hdev->dev);
+ if (ret < 0) {
+ BT_ERR("%s: Failed to load %s", hdev->name, name);
+ return ret;
+ }
+
+ ret = fw->size;
+ *buff = kmemdup(fw->data, ret, GFP_KERNEL);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
{
const struct firmware *fw;
@@ -296,25 +327,74 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
unsigned char *fw_data = NULL;
const struct firmware *fw;
int ret;
+ int cfg_sz;
+ u8 *cfg_buff = NULL;
+ u8 *tbuff;
+ char *cfg_name = NULL;
+
+ switch (lmp_subver) {
+ case RTL_ROM_LMP_8723B:
+ cfg_name = "rtl_bt/rtl8723b_config.bin";
+ break;
+ case RTL_ROM_LMP_8821A:
+ cfg_name = "rtl_bt/rtl8821a_config.bin";
+ break;
+ case RTL_ROM_LMP_8761A:
+ cfg_name = "rtl_bt/rtl8761a_config.bin";
+ break;
+ case RTL_ROM_LMP_8822B:
+ cfg_name = "rtl_bt/rtl8822b_config.bin";
+ break;
+ default:
+ BT_ERR("%s: rtl: no config according to lmp_subver %04x",
+ hdev->name, lmp_subver);
+ break;
+ }
+
+ if (cfg_name) {
+ cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
+ if (cfg_sz < 0)
+ cfg_sz = 0;
+ } else
+ cfg_sz = 0;
BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
ret = request_firmware(&fw, fw_name, &hdev->dev);
if (ret < 0) {
BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
- return ret;
+ goto err_req_fw;
}
ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
if (ret < 0)
goto out;
+ if (cfg_sz) {
+ tbuff = kzalloc(ret + cfg_sz, GFP_KERNEL);
+ if (!tbuff) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(tbuff, fw_data, ret);
+ kfree(fw_data);
+
+ memcpy(tbuff + ret, cfg_buff, cfg_sz);
+ ret += cfg_sz;
+
+ fw_data = tbuff;
+ }
+
+ BT_INFO("cfg_sz %d, total size %d", cfg_sz, ret);
+
ret = rtl_download_firmware(hdev, fw_data, ret);
- kfree(fw_data);
- if (ret < 0)
- goto out;
out:
release_firmware(fw);
+ kfree(fw_data);
+err_req_fw:
+ if (cfg_sz)
+ kfree(cfg_buff);
return ret;
}
@@ -377,6 +457,9 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
case RTL_ROM_LMP_8761A:
return btrtl_setup_rtl8723b(hdev, lmp_subver,
"rtl_bt/rtl8761a_fw.bin");
+ case RTL_ROM_LMP_8822B:
+ return btrtl_setup_rtl8723b(hdev, lmp_subver,
+ "rtl_bt/rtl8822b_fw.bin");
default:
BT_INFO("rtl: assuming no firmware upload needed.");
return 0;
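btrtl_setup_rtl8723b() now appends an optional per-chip config blob to the parsed firmware before download; a missing config file is tolerated and cfg_sz falls back to 0. The concatenation step in isolation, with a hypothetical `fw_sz` standing in for the `ret` the code reuses:

    u8 *tbuff = kzalloc(fw_sz + cfg_sz, GFP_KERNEL);

    if (!tbuff)
            return -ENOMEM;
    memcpy(tbuff, fw_data, fw_sz);           /* parsed firmware first */
    memcpy(tbuff + fw_sz, cfg_buff, cfg_sz); /* config blob appended */
    /* download fw_sz + cfg_sz bytes from tbuff */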
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 811f9b97e360..9ebd73dd7915 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -62,6 +62,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_REALTEK 0x20000
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
+#define BTUSB_CW6622 0x100000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -248,6 +249,7 @@ static const struct usb_device_id blacklist_table[] = {
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
@@ -290,7 +292,8 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC },
/* CONWISE Technology based adapters with buggy SCO support */
- { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
+ { USB_DEVICE(0x0e5e, 0x6622),
+ .driver_info = BTUSB_BROKEN_ISOC | BTUSB_CW6622},
/* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
{ USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
@@ -2221,9 +2224,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
err = wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
- if (err == 1) {
+ if (err == -EINTR) {
BT_ERR("%s: Firmware loading interrupted", hdev->name);
- err = -EINTR;
goto done;
}
@@ -2275,7 +2277,7 @@ done:
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
BT_ERR("%s: Device boot interrupted", hdev->name);
return -EINTR;
}
@@ -2845,6 +2847,9 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
+ if (id->driver_info & BTUSB_CW6622)
+ set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+
if (id->driver_info & BTUSB_BCM2045)
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
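The `err == 1` → `err == -EINTR` changes here (and in hci_intel.c below) track the wait_on_bit_timeout() return convention: it reports 0 on success and a negative errno on failure, with -EINTR meaning the sleep was interrupted by a signal, rather than the positive 1 these callers previously tested for. The corrected caller shape, with hypothetical flag names:

    err = wait_on_bit_timeout(&data->flags, STATE_BIT, TASK_INTERRUPTIBLE,
                              msecs_to_jiffies(5000));
    if (err == -EINTR) {
            /* interrupted by a signal */
    } else if (err) {
            /* timed out: some other negative errno */
    }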
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 1c97eda8bae3..5ccb90ef0146 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -798,7 +798,7 @@ static int bcm_remove(struct platform_device *pdev)
static const struct hci_uart_proto bcm_proto = {
.id = HCI_UART_BCM,
- .name = "BCM",
+ .name = "Broadcom",
.manufacturer = 15,
.init_speed = 115200,
.oper_speed = 4000000,
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index ed0a4201b551..9e271286c5e5 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -128,7 +128,7 @@ static int intel_wait_booting(struct hci_uart *hu)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hu->hdev, "Device boot interrupted");
return -EINTR;
}
@@ -151,7 +151,7 @@ static int intel_wait_lpm_transaction(struct hci_uart *hu)
TASK_INTERRUPTIBLE,
msecs_to_jiffies(1000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hu->hdev, "LPM transaction interrupted");
return -EINTR;
}
@@ -813,7 +813,7 @@ static int intel_setup(struct hci_uart *hu)
err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
TASK_INTERRUPTIBLE,
msecs_to_jiffies(5000));
- if (err == 1) {
+ if (err == -EINTR) {
bt_dev_err(hdev, "Firmware loading interrupted");
err = -EINTR;
goto done;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index dda97398c59a..9a3aab67b6bb 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -810,6 +810,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_AG6XX
ag6xx_init();
#endif
+#ifdef CONFIG_BT_HCIUART_MRVL
+ mrvl_init();
+#endif
return 0;
}
@@ -845,6 +848,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_AG6XX
ag6xx_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_MRVL
+ mrvl_deinit();
+#endif
/* Release tty registration of line discipline */
err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
new file mode 100644
index 000000000000..bbc4b39b1dbf
--- /dev/null
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -0,0 +1,387 @@
+/*
+ *
+ * Bluetooth HCI UART driver for Marvell devices
+ *
+ * Copyright (C) 2016 Marvell International Ltd.
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define HCI_FW_REQ_PKT 0xA5
+#define HCI_CHIP_VER_PKT 0xAA
+
+#define MRVL_ACK 0x5A
+#define MRVL_NAK 0xBF
+#define MRVL_RAW_DATA 0x1F
+
+enum {
+ STATE_CHIP_VER_PENDING,
+ STATE_FW_REQ_PENDING,
+};
+
+struct mrvl_data {
+ struct sk_buff *rx_skb;
+ struct sk_buff_head txq;
+ struct sk_buff_head rawq;
+ unsigned long flags;
+ unsigned int tx_len;
+ u8 id, rev;
+};
+
+struct hci_mrvl_pkt {
+ __le16 lhs;
+ __le16 rhs;
+} __packed;
+#define HCI_MRVL_PKT_SIZE 4
+
+static int mrvl_open(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl;
+
+ BT_DBG("hu %p", hu);
+
+ mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
+ if (!mrvl)
+ return -ENOMEM;
+
+ skb_queue_head_init(&mrvl->txq);
+ skb_queue_head_init(&mrvl->rawq);
+
+ set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
+
+ hu->priv = mrvl;
+ return 0;
+}
+
+static int mrvl_close(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&mrvl->txq);
+ skb_queue_purge(&mrvl->rawq);
+ kfree_skb(mrvl->rx_skb);
+ kfree(mrvl);
+
+ hu->priv = NULL;
+ return 0;
+}
+
+static int mrvl_flush(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ BT_DBG("hu %p", hu);
+
+ skb_queue_purge(&mrvl->txq);
+ skb_queue_purge(&mrvl->rawq);
+
+ return 0;
+}
+
+static struct sk_buff *mrvl_dequeue(struct hci_uart *hu)
+{
+ struct mrvl_data *mrvl = hu->priv;
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&mrvl->txq);
+ if (!skb) {
+ /* Any raw data ? */
+ skb = skb_dequeue(&mrvl->rawq);
+ } else {
+ /* Prepend skb with frame type */
+ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+ }
+
+ return skb;
+}
+
+static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ skb_queue_tail(&mrvl->txq, skb);
+ return 0;
+}
+
+static void mrvl_send_ack(struct hci_uart *hu, unsigned char type)
+{
+ struct mrvl_data *mrvl = hu->priv;
+ struct sk_buff *skb;
+
+ /* No H4 payload, only 1 byte header */
+ skb = bt_skb_alloc(0, GFP_ATOMIC);
+ if (!skb) {
+ bt_dev_err(hu->hdev, "Unable to alloc ack/nak packet");
+ return;
+ }
+ hci_skb_pkt_type(skb) = type;
+
+ skb_queue_tail(&mrvl->txq, skb);
+ hci_uart_tx_wakeup(hu);
+}
+
+static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_mrvl_pkt *pkt = (void *)skb->data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ int ret = 0;
+
+ if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
+ bt_dev_err(hdev, "Corrupted mrvl header");
+ mrvl_send_ack(hu, MRVL_NAK);
+ ret = -EINVAL;
+ goto done;
+ }
+ mrvl_send_ack(hu, MRVL_ACK);
+
+ if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags)) {
+ bt_dev_err(hdev, "Received unexpected firmware request");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mrvl->tx_len = le16_to_cpu(pkt->lhs);
+
+ clear_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&mrvl->flags, STATE_FW_REQ_PENDING);
+
+done:
+ kfree_skb(skb);
+ return ret;
+}
+
+static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_mrvl_pkt *pkt = (void *)skb->data;
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ u16 version = le16_to_cpu(pkt->lhs);
+ int ret = 0;
+
+ if ((pkt->lhs ^ pkt->rhs) != 0xffff) {
+ bt_dev_err(hdev, "Corrupted mrvl header");
+ mrvl_send_ack(hu, MRVL_NAK);
+ ret = -EINVAL;
+ goto done;
+ }
+ mrvl_send_ack(hu, MRVL_ACK);
+
+ if (!test_bit(STATE_CHIP_VER_PENDING, &mrvl->flags)) {
+ bt_dev_err(hdev, "Received unexpected chip version");
+ goto done;
+ }
+
+ mrvl->id = version;
+ mrvl->rev = version >> 8;
+
+ bt_dev_info(hdev, "Controller id = %x, rev = %x", mrvl->id, mrvl->rev);
+
+ clear_bit(STATE_CHIP_VER_PENDING, &mrvl->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&mrvl->flags, STATE_CHIP_VER_PENDING);
+
+done:
+ kfree_skb(skb);
+ return ret;
+}
+
+#define HCI_RECV_CHIP_VER \
+ .type = HCI_CHIP_VER_PKT, \
+ .hlen = HCI_MRVL_PKT_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MRVL_PKT_SIZE
+
+#define HCI_RECV_FW_REQ \
+ .type = HCI_FW_REQ_PKT, \
+ .hlen = HCI_MRVL_PKT_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = HCI_MRVL_PKT_SIZE
+
+static const struct h4_recv_pkt mrvl_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = hci_recv_frame },
+ { HCI_RECV_FW_REQ, .recv = mrvl_recv_fw_req },
+ { HCI_RECV_CHIP_VER, .recv = mrvl_recv_chip_ver },
+};
+
+static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
+{
+ struct mrvl_data *mrvl = hu->priv;
+
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
+ mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
+ if (IS_ERR(mrvl->rx_skb)) {
+ int err = PTR_ERR(mrvl->rx_skb);
+ bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
+ mrvl->rx_skb = NULL;
+ return err;
+ }
+
+ return count;
+}
+
+static int mrvl_load_firmware(struct hci_dev *hdev, const char *name)
+{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ struct mrvl_data *mrvl = hu->priv;
+ const struct firmware *fw = NULL;
+ const u8 *fw_ptr, *fw_max;
+ int err;
+
+ err = request_firmware(&fw, name, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file %s", name);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_max = fw->data + fw->size;
+
+ bt_dev_info(hdev, "Loading %s", name);
+
+ set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+
+ while (fw_ptr <= fw_max) {
+ struct sk_buff *skb;
+
+ /* Controller drives the firmware load by sending firmware
+ * request packets containing the expected fragment size.
+ */
+ err = wait_on_bit_timeout(&mrvl->flags, STATE_FW_REQ_PENDING,
+ TASK_INTERRUPTIBLE,
+ msecs_to_jiffies(2000));
+ if (err == 1) {
+ bt_dev_err(hdev, "Firmware load interrupted");
+ err = -EINTR;
+ break;
+ } else if (err) {
+ bt_dev_err(hdev, "Firmware request timeout");
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ bt_dev_dbg(hdev, "Firmware request, expecting %d bytes",
+ mrvl->tx_len);
+
+ if (fw_ptr == fw_max) {
+ /* The controller requests a zero size once the firmware
+ * is fully loaded. If the controller still expects more
+ * data, there is an issue.
+ */
+ if (!mrvl->tx_len) {
+ bt_dev_info(hdev, "Firmware loading complete");
+ } else {
+ bt_dev_err(hdev, "Firmware loading failure");
+ err = -EINVAL;
+ }
+ break;
+ }
+
+ if (fw_ptr + mrvl->tx_len > fw_max) {
+ mrvl->tx_len = fw_max - fw_ptr;
+ bt_dev_dbg(hdev, "Adjusting tx_len to %d",
+ mrvl->tx_len);
+ }
+
+ skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL);
+ if (!skb) {
+ bt_dev_err(hdev, "Failed to alloc mem for FW packet");
+ err = -ENOMEM;
+ break;
+ }
+ bt_cb(skb)->pkt_type = MRVL_RAW_DATA;
+
+ memcpy(skb_put(skb, mrvl->tx_len), fw_ptr, mrvl->tx_len);
+ fw_ptr += mrvl->tx_len;
+
+ set_bit(STATE_FW_REQ_PENDING, &mrvl->flags);
+
+ skb_queue_tail(&mrvl->rawq, skb);
+ hci_uart_tx_wakeup(hu);
+ }
+
+ release_firmware(fw);
+ return err;
+}
+
+static int mrvl_setup(struct hci_uart *hu)
+{
+ int err;
+
+ hci_uart_set_flow_control(hu, true);
+
+ err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin");
+ if (err) {
+ bt_dev_err(hu->hdev, "Unable to download firmware helper");
+ return -EINVAL;
+ }
+
+ hci_uart_set_baudrate(hu, 3000000);
+ hci_uart_set_flow_control(hu, false);
+
+ err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin");
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct hci_uart_proto mrvl_proto = {
+ .id = HCI_UART_MRVL,
+ .name = "Marvell",
+ .init_speed = 115200,
+ .open = mrvl_open,
+ .close = mrvl_close,
+ .flush = mrvl_flush,
+ .setup = mrvl_setup,
+ .recv = mrvl_recv,
+ .enqueue = mrvl_enqueue,
+ .dequeue = mrvl_dequeue,
+};
+
+int __init mrvl_init(void)
+{
+ return hci_uart_register_proto(&mrvl_proto);
+}
+
+int __exit mrvl_deinit(void)
+{
+ return hci_uart_unregister_proto(&mrvl_proto);
+}
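Both vendor packet types carry the four-byte hci_mrvl_pkt header, where rhs is the bitwise complement of lhs; `(pkt->lhs ^ pkt->rhs) != 0xffff` is therefore a cheap integrity check before lhs is trusted as a fragment length or chip version. The invariant, sketched with an example value:

    u16 len = 64;                     /* example fragment size */
    struct hci_mrvl_pkt pkt = {
            .lhs = cpu_to_le16(len),
            .rhs = cpu_to_le16(~len), /* complement of lhs */
    };

    /* Holds for any len; complementing commutes with byte order. */
    WARN_ON((pkt.lhs ^ pkt.rhs) != 0xffff);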
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 683c2b642057..6c867fbc56a7 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -397,7 +397,7 @@ static int qca_open(struct hci_uart *hu)
skb_queue_head_init(&qca->txq);
skb_queue_head_init(&qca->tx_wait_q);
spin_lock_init(&qca->hci_ibs_lock);
- qca->workqueue = create_singlethread_workqueue("qca_wq");
+ qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
if (!qca->workqueue) {
BT_ERR("QCA Workqueue not initialized properly");
kfree(qca);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 839bad1d8152..070139513e65 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
#define HCIUARTGETFLAGS _IOR('U', 204, int)
/* UART protocols */
-#define HCI_UART_MAX_PROTO 10
+#define HCI_UART_MAX_PROTO 12
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
@@ -47,6 +47,8 @@
#define HCI_UART_BCM 7
#define HCI_UART_QCA 8
#define HCI_UART_AG6XX 9
+#define HCI_UART_NOKIA 10
+#define HCI_UART_MRVL 11
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
@@ -189,3 +191,8 @@ int qca_deinit(void);
int ag6xx_init(void);
int ag6xx_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_MRVL
+int mrvl_init(void);
+int mrvl_deinit(void);
+#endif
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 5755907f836f..ffa7c9dcbd7a 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -551,7 +551,7 @@ static struct attribute *cci5xx_pmu_event_attrs[] = {
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
- CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snopp_rq_stall_tt_full, 0xE),
+ CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
NULL
};
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 97a9185af433..884c0305e290 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -187,6 +187,7 @@ struct arm_ccn {
struct arm_ccn_component *xp;
struct arm_ccn_dt dt;
+ int mn_id;
};
static DEFINE_MUTEX(arm_ccn_mutex);
@@ -212,6 +213,7 @@ static int arm_ccn_node_to_xp_port(int node)
#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_BUS(_config) (((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)
@@ -241,6 +243,7 @@ static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(bus, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
@@ -253,6 +256,7 @@ static struct attribute *arm_ccn_pmu_format_attrs[] = {
&arm_ccn_pmu_format_attr_type.attr.attr,
&arm_ccn_pmu_format_attr_event.attr.attr,
&arm_ccn_pmu_format_attr_port.attr.attr,
+ &arm_ccn_pmu_format_attr_bus.attr.attr,
&arm_ccn_pmu_format_attr_vc.attr.attr,
&arm_ccn_pmu_format_attr_dir.attr.attr,
&arm_ccn_pmu_format_attr_mask.attr.attr,
@@ -328,6 +332,7 @@ struct arm_ccn_pmu_event {
static ssize_t arm_ccn_pmu_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
struct arm_ccn_pmu_event *event = container_of(attr,
struct arm_ccn_pmu_event, attr);
ssize_t res;
@@ -349,10 +354,17 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
break;
case CCN_TYPE_XP:
res += snprintf(buf + res, PAGE_SIZE - res,
- ",xp=?,port=?,vc=?,dir=?");
+ ",xp=?,vc=?");
if (event->event == CCN_EVENT_WATCHPOINT)
res += snprintf(buf + res, PAGE_SIZE - res,
- ",cmp_l=?,cmp_h=?,mask=?");
+ ",port=?,dir=?,cmp_l=?,cmp_h=?,mask=?");
+ else
+ res += snprintf(buf + res, PAGE_SIZE - res,
+ ",bus=?");
+
+ break;
+ case CCN_TYPE_MN:
+ res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
break;
default:
res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
@@ -383,9 +395,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
}
static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
- CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
- CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
- CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
@@ -733,9 +745,10 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
if (has_branch_stack(event) || event->attr.exclude_user ||
event->attr.exclude_kernel || event->attr.exclude_hv ||
- event->attr.exclude_idle) {
+ event->attr.exclude_idle || event->attr.exclude_host ||
+ event->attr.exclude_guest) {
dev_warn(ccn->dev, "Can't exclude execution levels!\n");
- return -EOPNOTSUPP;
+ return -EINVAL;
}
if (event->cpu < 0) {
@@ -759,6 +772,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
/* Validate node/xp vs topology */
switch (type) {
+ case CCN_TYPE_MN:
+ if (node_xp != ccn->mn_id) {
+ dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ break;
case CCN_TYPE_XP:
if (node_xp >= ccn->num_xps) {
dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
@@ -886,6 +905,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
struct arm_ccn_component *xp;
u32 val, dt_cfg;
+ /* Nothing to do for cycle counter */
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+ return;
+
if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
else
@@ -917,38 +940,17 @@ static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
arm_ccn_pmu_read_counter(ccn, hw->idx));
hw->state = 0;
- /*
- * Pin the timer, so that the overflows are handled by the chosen
- * event->cpu (this is the same one as presented in "cpumask"
- * attribute).
- */
- if (!ccn->irq)
- hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
- HRTIMER_MODE_REL_PINNED);
-
/* Set the DT bus input, engaging the counter */
arm_ccn_pmu_xp_dt_config(event, 1);
}
static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
- struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
struct hw_perf_event *hw = &event->hw;
- u64 timeout;
/* Disable counting, setting the DT bus to pass-through mode */
arm_ccn_pmu_xp_dt_config(event, 0);
- if (!ccn->irq)
- hrtimer_cancel(&ccn->dt.hrtimer);
-
- /* Let the DT bus drain */
- timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
- ccn->num_xps;
- while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
- timeout)
- cpu_relax();
-
if (flags & PERF_EF_UPDATE)
arm_ccn_pmu_event_update(event);
@@ -988,7 +990,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
/* Comparison values */
writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
- writel((cmp_l >> 32) & 0xefffffff,
+ writel((cmp_l >> 32) & 0x7fffffff,
source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
writel((cmp_h >> 32) & 0x0fffffff,
@@ -996,7 +998,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
/* Mask */
writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
- writel((mask_l >> 32) & 0xefffffff,
+ writel((mask_l >> 32) & 0x7fffffff,
source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
writel((mask_h >> 32) & 0x0fffffff,
@@ -1014,7 +1016,7 @@ static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
id = (CCN_CONFIG_VC(event->attr.config) << 4) |
- (CCN_CONFIG_PORT(event->attr.config) << 3) |
+ (CCN_CONFIG_BUS(event->attr.config) << 3) |
(CCN_CONFIG_EVENT(event->attr.config) << 0);
val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
@@ -1099,15 +1101,31 @@ static void arm_ccn_pmu_event_config(struct perf_event *event)
spin_unlock(&ccn->dt.config_lock);
}
+static int arm_ccn_pmu_active_counters(struct arm_ccn *ccn)
+{
+ return bitmap_weight(ccn->dt.pmu_counters_mask,
+ CCN_NUM_PMU_EVENT_COUNTERS + 1);
+}
+
static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
int err;
struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
err = arm_ccn_pmu_event_alloc(event);
if (err)
return err;
+ /*
+ * Pin the timer, so that the overflows are handled by the chosen
+ * event->cpu (this is the same one as presented in "cpumask"
+ * attribute).
+ */
+ if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
+ hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+ HRTIMER_MODE_REL_PINNED);
+
arm_ccn_pmu_event_config(event);
hw->state = PERF_HES_STOPPED;
@@ -1120,9 +1138,14 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+
arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
arm_ccn_pmu_event_release(event);
+
+ if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
+ hrtimer_cancel(&ccn->dt.hrtimer);
}
static void arm_ccn_pmu_event_read(struct perf_event *event)
@@ -1130,6 +1153,24 @@ static void arm_ccn_pmu_event_read(struct perf_event *event)
arm_ccn_pmu_event_update(event);
}
+static void arm_ccn_pmu_enable(struct pmu *pmu)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+ u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+ val |= CCN_DT_PMCR__PMU_EN;
+ writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
+static void arm_ccn_pmu_disable(struct pmu *pmu)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(pmu);
+
+ u32 val = readl(ccn->dt.base + CCN_DT_PMCR);
+ val &= ~CCN_DT_PMCR__PMU_EN;
+ writel(val, ccn->dt.base + CCN_DT_PMCR);
+}
+
static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
@@ -1252,6 +1293,8 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
.start = arm_ccn_pmu_event_start,
.stop = arm_ccn_pmu_event_stop,
.read = arm_ccn_pmu_event_read,
+ .pmu_enable = arm_ccn_pmu_enable,
+ .pmu_disable = arm_ccn_pmu_disable,
};
/* No overflow interrupt? Have to use a timer instead. */
@@ -1361,6 +1404,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
switch (type) {
case CCN_TYPE_MN:
+ ccn->mn_id = id;
+ return 0;
case CCN_TYPE_DT:
return 0;
case CCN_TYPE_XP:
@@ -1471,8 +1516,9 @@ static int arm_ccn_probe(struct platform_device *pdev)
/* Can set 'disable' bits, so can acknowledge interrupts */
writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
ccn->base + CCN_MN_ERRINT_STATUS);
- err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler, 0,
- dev_name(ccn->dev), ccn);
+ err = devm_request_irq(ccn->dev, irq, arm_ccn_irq_handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ dev_name(ccn->dev), ccn);
if (err)
return err;
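Starting the polling hrtimer in event_add() and cancelling it in event_del(), gated on the active-counter weight, makes the timer per-PMU instead of per-event: event_alloc() has already set this event's bit, so a weight of 1 means "first counter in", and a weight of 0 after release means "last counter out". Compactly (a sketch of the two call sites above):

    /* after arm_ccn_pmu_event_alloc() in event_add() */
    if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 1)
            hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
                          HRTIMER_MODE_REL_PINNED); /* first active counter */

    /* after arm_ccn_pmu_event_release() in event_del() */
    if (!ccn->irq && arm_ccn_pmu_active_counters(ccn) == 0)
            hrtimer_cancel(&ccn->dt.hrtimer);       /* last counter gone */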
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c
index c3cb76b363c6..9efdf1de4035 100644
--- a/drivers/bus/vexpress-config.c
+++ b/drivers/bus/vexpress-config.c
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node)
parent = class_find_device(vexpress_config_class, NULL, bridge,
vexpress_config_node_match);
+ of_node_put(bridge);
if (WARN_ON(!parent))
return -ENODEV;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 56ad5a5936a9..8c0770bf8881 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -244,7 +244,7 @@ config HW_RANDOM_TX4939
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
- depends on ARCH_HAS_RNGA
+ depends on SOC_IMX31
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 08c7e23ed535..0c75c3f1689f 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -957,7 +957,7 @@ int tpm2_auto_startup(struct tpm_chip *chip)
goto out;
rc = tpm2_do_selftest(chip);
- if (rc != TPM2_RC_INITIALIZE) {
+ if (rc != 0 && rc != TPM2_RC_INITIALIZE) {
dev_err(&chip->dev, "TPM self test failed\n");
goto out;
}
@@ -974,7 +974,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
}
- return rc;
out:
if (rc > 0)
rc = -ENODEV;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d2406fe25533..5da47e26a012 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -165,6 +165,12 @@ struct ports_device {
*/
struct virtqueue *c_ivq, *c_ovq;
+ /*
+ * A control packet buffer for guest->host requests, protected
+ * by c_ovq_lock.
+ */
+ struct virtio_console_control cpkt;
+
/* Array of per-port IO virtqueues */
struct virtqueue **in_vqs, **out_vqs;
@@ -560,28 +566,29 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
unsigned int event, unsigned int value)
{
struct scatterlist sg[1];
- struct virtio_console_control cpkt;
struct virtqueue *vq;
unsigned int len;
if (!use_multiport(portdev))
return 0;
- cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
- cpkt.event = cpu_to_virtio16(portdev->vdev, event);
- cpkt.value = cpu_to_virtio16(portdev->vdev, value);
-
vq = portdev->c_ovq;
- sg_init_one(sg, &cpkt, sizeof(cpkt));
-
spin_lock(&portdev->c_ovq_lock);
- if (virtqueue_add_outbuf(vq, sg, 1, &cpkt, GFP_ATOMIC) == 0) {
+
+ portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
+ portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
+ portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
+
+ sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
+
+ if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len)
&& !virtqueue_is_broken(vq))
cpu_relax();
}
+
spin_unlock(&portdev->c_ovq_lock);
return 0;
}
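Moving cpkt off the stack and into ports_device fixes a DMA-from-stack hazard: sg_init_one() needs a physically addressable buffer, and with CONFIG_VMAP_STACK the stack may be vmalloc'ed, so virtqueue_add_outbuf() could hand the device an address it cannot reach. Sharing one preallocated buffer is safe here because every user already holds c_ovq_lock. The before/after in one sketch:

    /* Unsafe with a vmapped stack: cpkt lives on the stack.
     *     struct virtio_console_control cpkt;
     *     sg_init_one(sg, &cpkt, sizeof(cpkt));
     */

    /* Safe: the buffer is part of the heap-allocated ports_device. */
    sg_init_one(sg, &portdev->cpkt, sizeof(portdev->cpkt));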
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index d359c92e13a6..e38bf60c0ff4 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -69,6 +69,7 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1),
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
+ DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
/* Core Clock Outputs */
DEF_FIXED("ztr", R8A7795_CLK_ZTR, CLK_PLL1_DIV2, 6, 1),
@@ -87,10 +88,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
- DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
- DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
- DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
+ DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x0268),
+ DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x026c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index c109d80e7a8a..cdfabeb9a034 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -833,9 +833,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* perihp */
GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
- RK3399_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(5), 0, GFLAGS),
COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK3399_CLKGATE_CON(5), 2, GFLAGS),
@@ -923,9 +923,9 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
RK3399_CLKGATE_CON(6), 14, GFLAGS),
GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
- RK3399_CLKGATE_CON(6), 12, GFLAGS),
- GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(6), 13, GFLAGS),
+ GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 12, GFLAGS),
COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
@@ -1071,7 +1071,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* vio */
COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
- RK3399_CLKGATE_CON(11), 10, GFLAGS),
+ RK3399_CLKGATE_CON(11), 0, GFLAGS),
COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
RK3399_CLKGATE_CON(11), 1, GFLAGS),
@@ -1484,6 +1484,7 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
"hclk_perilp1",
"hclk_perilp1_noc",
"aclk_dmac0_perilp",
+ "aclk_emmc_noc",
"gpll_hclk_perilp1_src",
"gpll_aclk_perilp0_src",
"gpll_aclk_perihp_src",
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 9af359544110..267f99523fbe 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -783,14 +783,14 @@ static struct ccu_reset_map sun8i_h3_ccu_resets[] = {
[RST_BUS_I2S1] = { 0x2d0, BIT(13) },
[RST_BUS_I2S2] = { 0x2d0, BIT(14) },
- [RST_BUS_I2C0] = { 0x2d4, BIT(0) },
- [RST_BUS_I2C1] = { 0x2d4, BIT(1) },
- [RST_BUS_I2C2] = { 0x2d4, BIT(2) },
- [RST_BUS_UART0] = { 0x2d4, BIT(16) },
- [RST_BUS_UART1] = { 0x2d4, BIT(17) },
- [RST_BUS_UART2] = { 0x2d4, BIT(18) },
- [RST_BUS_UART3] = { 0x2d4, BIT(19) },
- [RST_BUS_SCR] = { 0x2d4, BIT(20) },
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_SCR] = { 0x2d8, BIT(20) },
};
static const struct sunxi_ccu_desc sun8i_h3_ccu_desc = {
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index fc17b5295e16..51d4bac97ab3 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -31,7 +31,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
return;
WARN_ON(readl_relaxed_poll_timeout(common->base + common->reg, reg,
- !(reg & lock), 100, 70000));
+ reg & lock, 100, 70000));
}
int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index 4470ffc8cf0d..d6fafb397489 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -14,9 +14,9 @@
#include "ccu_gate.h"
#include "ccu_nk.h"
-void ccu_nk_find_best(unsigned long parent, unsigned long rate,
- unsigned int max_n, unsigned int max_k,
- unsigned int *n, unsigned int *k)
+static void ccu_nk_find_best(unsigned long parent, unsigned long rate,
+ unsigned int max_n, unsigned int max_k,
+ unsigned int *n, unsigned int *k)
{
unsigned long best_rate = 0;
unsigned int best_k = 0, best_n = 0;
diff --git a/drivers/clk/sunxi/clk-a10-pll2.c b/drivers/clk/sunxi/clk-a10-pll2.c
index 0ee1f363e4be..d8eab90ae661 100644
--- a/drivers/clk/sunxi/clk-a10-pll2.c
+++ b/drivers/clk/sunxi/clk-a10-pll2.c
@@ -73,7 +73,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
SUN4I_PLL2_PRE_DIV_WIDTH,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&sun4i_a10_pll2_lock);
- if (!prediv_clk) {
+ if (IS_ERR(prediv_clk)) {
pr_err("Couldn't register the prediv clock\n");
goto err_free_array;
}
@@ -106,7 +106,7 @@ static void __init sun4i_pll2_setup(struct device_node *node,
&mult->hw, &clk_multiplier_ops,
&gate->hw, &clk_gate_ops,
CLK_SET_RATE_PARENT);
- if (!base_clk) {
+ if (IS_ERR(base_clk)) {
pr_err("Couldn't register the base multiplier clock\n");
goto err_free_multiplier;
}
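
Both hunks in this file fix the same class of bug: clk registration helpers report failure through ERR_PTR(), never NULL, so an `if (!clk)` test can never observe the error (the sun8i-mbus hunk below makes the matching fix for of_io_request_and_map()). A minimal sketch of the convention, assuming only <linux/err.h> and <linux/clk-provider.h>:

    #include <linux/err.h>
    #include <linux/clk-provider.h>

    static int register_example_clk(void)
    {
            struct clk *clk;

            clk = clk_register_fixed_rate(NULL, "example", NULL, 0, 24000000);
            if (IS_ERR(clk))                /* a NULL check would miss this */
                    return PTR_ERR(clk);    /* e.g. -ENOMEM or -EINVAL */
            return 0;
    }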
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 411d3033a96e..b200ebf159ee 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -48,7 +48,7 @@ static void __init sun8i_a23_mbus_setup(struct device_node *node)
return;
reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (!reg) {
+ if (IS_ERR(reg)) {
pr_err("Could not get registers for sun8i-mbus-clk\n");
goto err_free_parents;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 64da7b79a6e4..933b5dd698b8 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -428,7 +428,7 @@ static struct tegra_clk_pll_params pll_d_params = {
.div_nmp = &pllp_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ TEGRA_PLL_HAS_LOCK_ENABLE,
};
static struct tegra_clk_pll_params pll_d2_params = {
@@ -446,7 +446,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
.div_nmp = &pllp_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
- TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ TEGRA_PLL_HAS_LOCK_ENABLE,
};
static const struct pdiv_map pllu_p[] = {
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index 3494bc5a21d5..7f0f5b26d8c5 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -240,6 +240,7 @@ static int __init at91sam926x_pit_common_init(struct pit_data *data)
static int __init at91sam926x_pit_dt_init(struct device_node *node)
{
struct pit_data *data;
+ int ret;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 0bb44d5b5df4..2ee40fd360ca 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -74,6 +74,8 @@ static const struct of_device_id machines[] __initconst = {
{ .compatible = "ti,omap5", },
{ .compatible = "xlnx,zynq-7000", },
+
+ { }
};
static int __init cpufreq_dt_platdev_init(void)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 6dc597126b79..b3044219772c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -556,7 +556,10 @@ skip_enc:
/* Read and write assoclen bytes */
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (alg->caam.geniv)
+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
+ else
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
/* Skip assoc data */
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -565,6 +568,14 @@ skip_enc:
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
KEY_VLF);
+ if (alg->caam.geniv) {
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ (ctx1_iv_off << LDST_OFFSET_SHIFT));
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+ (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
+ }
+
/* Load Counter into CONTEXT1 reg */
if (is_rfc3686)
append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -2150,7 +2161,7 @@ static void init_authenc_job(struct aead_request *req,
init_aead_job(req, edesc, all_contig, encrypt);
- if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+ if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
LDST_SRCDST_BYTE_CONTEXT |
@@ -2537,20 +2548,6 @@ static int aead_decrypt(struct aead_request *req)
return ret;
}
-static int aead_givdecrypt(struct aead_request *req)
-{
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- unsigned int ivsize = crypto_aead_ivsize(aead);
-
- if (req->cryptlen < ivsize)
- return -EINVAL;
-
- req->cryptlen -= ivsize;
- req->assoclen += ivsize;
-
- return aead_decrypt(req);
-}
-
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -3210,7 +3207,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3256,7 +3253,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3302,7 +3299,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3348,7 +3345,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3394,7 +3391,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3440,7 +3437,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3486,7 +3483,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3534,7 +3531,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3582,7 +3579,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3630,7 +3627,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3678,7 +3675,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -3726,7 +3723,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -3772,7 +3769,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -3818,7 +3815,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -3864,7 +3861,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -3910,7 +3907,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -3956,7 +3953,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -4002,7 +3999,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
@@ -4051,7 +4048,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = MD5_DIGEST_SIZE,
},
@@ -4102,7 +4099,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -4153,7 +4150,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA224_DIGEST_SIZE,
},
@@ -4204,7 +4201,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -4255,7 +4252,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA384_DIGEST_SIZE,
},
@@ -4306,7 +4303,7 @@ static struct caam_aead_alg driver_aeads[] = {
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.encrypt = aead_encrypt,
- .decrypt = aead_givdecrypt,
+ .decrypt = aead_decrypt,
.ivsize = CTR_RFC3686_IV_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2f6156b672ce..fb5f9bbfa09c 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -39,12 +39,10 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};
-static struct cxgb4_pci_uld_info chcr_uld_info = {
+static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
- .nrxq = 4,
+ .nrxq = MAX_ULD_QSETS,
.rxq_size = 1024,
- .nciq = 0,
- .ciq_size = 0,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
@@ -205,7 +203,7 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
- if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
+ if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
pr_err("ULD register fail: No chcr crypto support in cxgb4");
return -1;
}
@@ -228,7 +226,7 @@ static void __exit chcr_crypto_exit(void)
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
- cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}
module_init(chcr_crypto_init);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 769148dbaeb3..20f35df8a01f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -1260,8 +1260,8 @@ static struct crypto_alg qat_algs[] = { {
.setkey = qat_alg_ablkcipher_xts_setkey,
.decrypt = qat_alg_ablkcipher_decrypt,
.encrypt = qat_alg_ablkcipher_encrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
},
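
The qat fix reflects how XTS keys are laid out: the key handed to setkey is two equal-length AES keys back to back (a data key and a tweak key), so the cipher's key-size limits are double the plain AES ones. Illustrative values, assuming <crypto/aes.h>; the macro names are hypothetical:

    #include <crypto/aes.h>

    /* AES_MIN_KEY_SIZE is 16 and AES_MAX_KEY_SIZE is 32 bytes. */
    #define XTS_AES_128_KEY_SIZE (2 * AES_MIN_KEY_SIZE)  /* 32 bytes */
    #define XTS_AES_256_KEY_SIZE (2 * AES_MAX_KEY_SIZE)  /* 64 bytes */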
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index cfb25413917c..24353ec336c5 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -129,8 +129,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
blkcipher_walk_init(&walk, dst, src, nbytes);
- iv = (u8 *)walk.iv;
ret = blkcipher_walk_virt(desc, &walk);
+ iv = walk.iv;
memset(tweak, 0, AES_BLOCK_SIZE);
aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 803f3953b341..29f600f2c447 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -459,7 +459,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
}
pgoff = linear_page_index(vma, pmd_addr);
- phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+ phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) {
dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
pgoff);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe7bc5c..832cbd647145 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2067,7 +2067,7 @@ err_dma_unregister:
err_clk_disable:
clk_disable_unprepare(atxdmac->clk);
err_free_irq:
- free_irq(atxdmac->irq, atxdmac->dma.dev);
+ free_irq(atxdmac->irq, atxdmac);
return ret;
}
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
dma_async_device_unregister(&atxdmac->dma);
clk_disable_unprepare(atxdmac->clk);
- free_irq(atxdmac->irq, atxdmac->dma.dev);
+ free_irq(atxdmac->irq, atxdmac);
for (i = 0; i < atxdmac->dma.chancnt; i++) {
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index aad167eaaee8..de2a2a2b1d75 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
rc = of_property_read_u32(np, "reg", &off);
if (rc) {
dev_err(dev, "Reg property not found in JQ node\n");
+ of_node_put(np);
return -ENODEV;
}
/* Find out the Job Rings present under each JQ */
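
The fsl_raid hunk plugs a device-tree reference leak: the for_each_* OF iterators hold a reference on the node they hand you each pass, so bailing out of the loop early must drop it explicitly. A sketch of the pattern, assuming <linux/of.h>; the function name is hypothetical:

    #include <linux/of.h>

    static int check_jq_nodes(struct device_node *parent)
    {
            struct device_node *np;
            u32 off;

            for_each_child_of_node(parent, np) {
                    if (of_property_read_u32(np, "reg", &off)) {
                            of_node_put(np); /* drop the iterator's reference */
                            return -ENODEV;
                    }
            }
            return 0;   /* np is NULL once the loop runs to completion */
    }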
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index a4c53be482cf..624f1e1e9c55 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
struct resource *res;
- const struct of_device_id *match;
unsigned int i;
u32 val;
int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, mdma);
- match = of_match_device(mdc_dma_of_match, &pdev->dev);
- mdma->soc = match->data;
+ mdma->soc = of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdma->regs = devm_ioremap_resource(&pdev->dev, res);
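
The img-mdc-dma cleanup swaps the two-step of_match_device()/->data lookup for of_device_get_match_data(), which collapses the lookup into one call and returns NULL when nothing matched instead of leaving a match pointer to dereference. A sketch under those assumptions; the probe and soc-data names are hypothetical:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            const struct example_soc_data *soc;   /* hypothetical type */

            soc = of_device_get_match_data(&pdev->dev);
            if (!soc)
                    return -ENODEV;  /* no OF match data for this device */
            return 0;
    }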
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index dc7850a422b8..3f56f9ca4482 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
vd_last_issued = list_entry(vc->desc_issued.prev,
struct virt_dma_desc, node);
pxad_desc_chain(vd_last_issued, vd);
- if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+ if (is_chan_running(chan) || is_desc_completed(vd))
return true;
}
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
struct virt_dma_desc *vd, *tmp;
unsigned int dcsr;
unsigned long flags;
+ bool vd_completed;
dma_cookie_t last_started = 0;
BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
spin_lock_irqsave(&chan->vc.lock, flags);
list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+ vd_completed = is_desc_completed(vd);
dev_dbg(&chan->vc.chan.dev->device,
- "%s(): checking txd %p[%x]: completed=%d\n",
- __func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ "%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+ __func__, vd, vd->tx.cookie, vd_completed,
+ dcsr);
last_started = vd->tx.cookie;
if (to_pxad_sw_desc(vd)->cyclic) {
vchan_cyclic_callback(vd);
break;
}
- if (is_desc_completed(vd)) {
+ if (vd_completed) {
list_del(&vd->node);
vchan_cookie_complete(vd);
} else {
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 749f1bd5d65d..06ecdc38cee0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
{
struct usb_dmac_chan *chan = dev;
irqreturn_t ret = IRQ_NONE;
- u32 mask = USB_DMACHCR_TE;
- u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+ u32 mask = 0;
u32 chcr;
+ bool xfer_end = false;
spin_lock(&chan->vc.lock);
chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
- if (chcr & check_bits)
- mask |= USB_DMACHCR_DE | check_bits;
+ if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+ mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+ if (chcr & USB_DMACHCR_DE)
+ xfer_end = true;
+ ret |= IRQ_HANDLED;
+ }
if (chcr & USB_DMACHCR_NULL) {
/* An interruption of TE will happen after we set FTE */
mask |= USB_DMACHCR_NULL;
chcr |= USB_DMACHCR_FTE;
ret |= IRQ_HANDLED;
}
- usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+ if (mask)
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
- if (chcr & check_bits) {
+ if (xfer_end)
usb_dmac_isr_transfer_end(chan);
- ret |= IRQ_HANDLED;
- }
spin_unlock(&chan->vc.lock);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 438893762076..ce2bc2a38101 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev)
struct mbox_client *cl = &pchan->cl;
struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
- if (of_address_to_resource(shmem, 0, &res)) {
+ ret = of_address_to_resource(shmem, 0, &res);
+ of_node_put(shmem);
+ if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
- ret = -EINVAL;
goto err;
}
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 94a58a082b99..44c01390d035 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -229,14 +229,14 @@ static int __init dmi_id_init(void)
ret = device_register(dmi_dev);
if (ret)
- goto fail_free_dmi_dev;
+ goto fail_put_dmi_dev;
return 0;
-fail_free_dmi_dev:
- kfree(dmi_dev);
-fail_class_unregister:
+fail_put_dmi_dev:
+ put_device(dmi_dev);
+fail_class_unregister:
class_unregister(&dmi_class);
return ret;
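
The dmi-id fix follows the documented device_register() contract: once the call has been made, the embedded kobject is live even on failure, so the caller must drop its reference with put_device() and let the release callback free the memory; a raw kfree() would bypass that path. A minimal sketch:

    #include <linux/device.h>

    static int register_example(struct device *dev)
    {
            int ret = device_register(dev);

            if (ret) {
                    /* put_device() ends up in dev->release(), which frees */
                    put_device(dev);
                    return ret;
            }
            return 0;
    }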
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 5a2631af7410..7dd2e2d37231 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -657,9 +657,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
}
if (subnode) {
- node = of_get_flat_dt_subnode_by_name(node, subnode);
- if (node < 0)
+ int err = of_get_flat_dt_subnode_by_name(node, subnode);
+
+ if (err < 0)
return 0;
+
+ node = err;
}
return __find_uefi_params(node, info, dt_params[i].params);
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 3bd127f95315..aded10662020 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
struct file_info {
efi_file_handle_t *handle;
u64 size;
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
}
}
+static inline bool mmap_has_headroom(unsigned long buff_size,
+ unsigned long map_size,
+ unsigned long desc_size)
+{
+ unsigned long slack = buff_size - map_size;
+
+ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr)
+ struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
unsigned long key;
u32 desc_version;
- *map_size = sizeof(*m) * 32;
+ *map->desc_size = sizeof(*m);
+ *map->map_size = *map->desc_size * 32;
+ *map->buff_size = *map->map_size;
again:
- /*
- * Add an additional efi_memory_desc_t because we're doing an
- * allocation which may be in a new descriptor region.
- */
- *map_size += sizeof(*m);
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- *map_size, (void **)&m);
+ *map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- *desc_size = 0;
+ *map->desc_size = 0;
key = 0;
- status = efi_call_early(get_memory_map, map_size, m,
- &key, desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_call_early(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL ||
+ !mmap_has_headroom(*map->buff_size, *map->map_size,
+ *map->desc_size)) {
efi_call_early(free_pool, m);
+ /*
+ * Make sure there are some entries of headroom so that the
+ * buffer can be reused for a new map after allocations are
+ * no longer permitted. It's unlikely that the map will grow to
+ * exceed this headroom once we are ready to trigger
+ * ExitBootServices()
+ */
+ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ *map->buff_size = *map->map_size;
goto again;
}
if (status != EFI_SUCCESS)
efi_call_early(free_pool, m);
- if (key_ptr && status == EFI_SUCCESS)
- *key_ptr = key;
- if (desc_ver && status == EFI_SUCCESS)
- *desc_ver = desc_version;
+ if (map->key_ptr && status == EFI_SUCCESS)
+ *map->key_ptr = key;
+ if (map->desc_ver && status == EFI_SUCCESS)
+ *map->desc_ver = desc_version;
fail:
- *map = m;
+ *map->map = m;
return status;
}
@@ -113,13 +128,20 @@ fail:
unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
{
efi_status_t status;
- unsigned long map_size;
+ unsigned long map_size, buff_size;
unsigned long membase = EFI_ERROR;
struct efi_memory_map map;
efi_memory_desc_t *md;
+ struct efi_boot_memmap boot_map;
- status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
- &map_size, &map.desc_size, NULL, NULL);
+ boot_map.map = (efi_memory_desc_t **)&map.map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &map.desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
return membase;
@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
u64 max_addr = 0;
int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
- NULL, NULL);
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
int i;
+ struct efi_boot_memmap boot_map;
- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
- NULL, NULL);
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -704,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
*cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec. Obtains the current memory map, and returns that info after calling
+ * ExitBootServices. The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices. A
+ * client-specific structure may be passed to the function via priv. The client
+ * function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func)
+{
+ efi_status_t status;
+
+ status = efi_get_memory_map(sys_table_arg, map);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(sys_table_arg, map, priv);
+ if (status != EFI_SUCCESS)
+ goto free_map;
+
+ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+ if (status == EFI_INVALID_PARAMETER) {
+ /*
+ * The memory map changed between efi_get_memory_map() and
+ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
+ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+ * updated map, and try again. The spec implies one retry
+ * should be sufficient, which is confirmed against the EDK2
+ * implementation. Per the spec, we can only invoke
+ * get_memory_map() and exit_boot_services() - we cannot alloc
+ * so efi_get_memory_map() cannot be used, and we must reuse
+ * the buffer. For all practical purposes, the headroom in the
+ * buffer should account for any changes in the map so the call
+ * to get_memory_map() is expected to succeed here.
+ */
+ *map->map_size = *map->buff_size;
+ status = efi_call_early(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(sys_table_arg, map, priv);
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ }
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ return EFI_SUCCESS;
+
+free_map:
+ efi_call_early(free_pool, *map->map);
+fail:
+ return status;
+}
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index e58abfa953cc..a6a93116a8f0 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -152,6 +152,27 @@ fdt_set_fail:
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv)
+{
+ struct exit_boot_struct *p = priv;
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+ return EFI_SUCCESS;
+}
+
/*
* Allocate memory for a new FDT, then add EFI, commandline, and
* initrd related fields to the FDT. This routine increases the
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
u32 desc_ver;
unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size;
efi_status_t status;
int runtime_entry_count = 0;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_ver;
+ map.key_ptr = &mmap_key;
+ map.buff_size = &buff_size;
/*
* Get a copy of the current memory map that we will use to prepare
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
- status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
+ status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
return status;
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi(sys_table,
"Exiting boot services and installing virtual address map...\n");
+ map.map = &memory_map;
/*
* Estimate size of new FDT, and allocate memory for it. We
* will allocate a bigger buffer if this ends up being too
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* we can get the memory map key needed for
* exit_boot_services().
*/
- status = efi_get_memory_map(sys_table, &memory_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
+ status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
- /*
- * Update the memory map with virtual addresses. The function will also
- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
- * entries so that we can pass it straight into SetVirtualAddressMap()
- */
- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
- &runtime_entry_count);
-
- /* Now we are ready to exit_boot_services.*/
- status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+ sys_table->boottime->free_pool(memory_map);
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 53f6d3fe6d86..0c9f58c5ba50 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
unsigned long random_seed)
{
unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long buff_size;
efi_status_t status;
efi_memory_desc_t *memory_map;
int map_offset;
+ struct efi_boot_memmap map;
- status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
- &desc_size, NULL, NULL);
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &map);
if (status != EFI_SUCCESS)
return status;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 66a94103798b..24caedb00a7a 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1131,6 +1131,7 @@ menu "SPI or I2C GPIO expanders"
config GPIO_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
+ depends on OF_GPIO
select GPIOLIB_IRQCHIP
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index ac22efc1840e..99d37b56c258 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -564,7 +564,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.direction_output = mcp23s08_direction_output;
mcp->chip.set = mcp23s08_set;
mcp->chip.dbg_show = mcp23s08_dbg_show;
-#ifdef CONFIG_OF
+#ifdef CONFIG_OF_GPIO
mcp->chip.of_gpio_n_cells = 2;
mcp->chip.of_node = dev->of_node;
#endif
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 0c99e8fb9af3..8d8ee0ebf14c 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@ static int sa1100_gpio_irqdomain_map(struct irq_domain *d,
{
irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
handle_edge_irq);
- irq_set_noprobe(irq);
+ irq_set_probe(irq);
return 0;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 75e7b3919ea7..a28feb3edf33 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,7 +16,6 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/io-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index a31d7ef3032c..ec1282af2479 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -280,7 +280,7 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
unsigned i;
- int r;
+ int r, ret = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -301,10 +301,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
} else {
/* still not good, but we can live with it */
DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
+ ret = r;
}
}
}
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ee6466912497..77fdd9911c3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -52,6 +52,7 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
+static int cik_sdma_soft_reset(void *handle);
MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
@@ -1037,6 +1038,8 @@ static int cik_sdma_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(handle);
+
return cik_sdma_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index d869d058ef24..425413fcaf02 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2755,8 +2755,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
u64 wb_gpu_addr;
u32 *buf;
struct bonaire_mqd *mqd;
-
- gfx_v7_0_cp_compute_enable(adev, true);
+ struct amdgpu_ring *ring;
/* fix up chicken bits */
tmp = RREG32(mmCP_CPF_DEBUG);
@@ -2791,7 +2790,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
/* init the queues. Just two for now. */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+ ring = &adev->gfx.compute_ring[i];
if (ring->mqd_obj == NULL) {
r = amdgpu_bo_create(adev,
@@ -2970,6 +2969,13 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
amdgpu_bo_unreserve(ring->mqd_obj);
ring->ready = true;
+ }
+
+ gfx_v7_0_cp_compute_enable(adev, true);
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+
r = amdgpu_ring_test_ring(ring);
if (r)
ring->ready = false;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index a978381ef95b..9b17a66cf0e1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -387,7 +387,7 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
-void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+static void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
{
struct atmel_hlcdc_crtc_state *state;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 016c191221f3..52c527f6642a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -320,19 +320,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
- if (state->crtc_w < state->src_w)
+ if (state->crtc_h < state->src_h)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
- factor = ((8 * 256 * state->src_w) - (256 * 4)) /
- state->crtc_w;
+ factor = ((8 * 256 * state->src_h) - (256 * 4)) /
+ state->crtc_h;
factor++;
- max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
+ max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
2048;
- if (max_memsize > state->src_w)
+ if (max_memsize > state->src_h)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 57676f8d7ecf..a6289752be16 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -1015,6 +1015,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
return 0;
}
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
@@ -1071,6 +1072,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
return 0;
}
+#endif
static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
@@ -1104,7 +1106,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+#endif
};
/**
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index e0166403b4bd..40ce841eb952 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -55,11 +55,11 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
flags = exynos_gem->flags;
/*
- * without iommu support, not support physically non-continuous memory
- * for framebuffer.
+ * Physically non-contiguous memory type for framebuffer is not
+ * supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("cannot use this gem memory type for fb.\n");
+ DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 0525c56145db..147ef0d298cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1753,32 +1753,6 @@ static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int fimc_suspend(struct device *dev)
-{
- struct fimc_context *ctx = get_fimc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return fimc_clk_ctrl(ctx, false);
-}
-
-static int fimc_resume(struct device *dev)
-{
- struct fimc_context *ctx = get_fimc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (!pm_runtime_suspended(dev))
- return fimc_clk_ctrl(ctx, true);
-
- return 0;
-}
-#endif
-
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1799,7 +1773,8 @@ static int fimc_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops fimc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 4bf00f57ffe8..6eca8bb88648 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1475,8 +1475,8 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int g2d_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1490,25 +1490,6 @@ static int g2d_suspend(struct device *dev)
flush_work(&g2d->runqueue_work);
- return 0;
-}
-
-static int g2d_resume(struct device *dev)
-{
- struct g2d_data *g2d = dev_get_drvdata(dev);
-
- g2d->suspended = false;
- g2d_exec_runqueue(g2d);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int g2d_runtime_suspend(struct device *dev)
-{
- struct g2d_data *g2d = dev_get_drvdata(dev);
-
clk_disable_unprepare(g2d->gate_clk);
return 0;
@@ -1523,12 +1504,16 @@ static int g2d_runtime_resume(struct device *dev)
if (ret < 0)
dev_warn(dev, "failed to enable clock.\n");
+ g2d->suspended = false;
+ g2d_exec_runqueue(g2d);
+
return ret;
}
#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 5d20da8f957e..52a9d269484e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1760,34 +1760,7 @@ static int gsc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int gsc_suspend(struct device *dev)
-{
- struct gsc_context *ctx = get_gsc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return gsc_clk_ctrl(ctx, false);
-}
-
-static int gsc_resume(struct device *dev)
-{
- struct gsc_context *ctx = get_gsc_context(dev);
-
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
-
- if (!pm_runtime_suspended(dev))
- return gsc_clk_ctrl(ctx, true);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM
-static int gsc_runtime_suspend(struct device *dev)
+static int __maybe_unused gsc_runtime_suspend(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1796,7 +1769,7 @@ static int gsc_runtime_suspend(struct device *dev)
return gsc_clk_ctrl(ctx, false);
}
-static int gsc_runtime_resume(struct device *dev)
+static int __maybe_unused gsc_runtime_resume(struct device *dev)
{
struct gsc_context *ctx = get_gsc_context(dev);
@@ -1804,10 +1777,10 @@ static int gsc_runtime_resume(struct device *dev)
return gsc_clk_ctrl(ctx, true);
}
-#endif
static const struct dev_pm_ops gsc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 404367a430b5..6591e406084c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -794,29 +794,6 @@ static int rotator_clk_crtl(struct rot_context *rot, bool enable)
return 0;
}
-
-#ifdef CONFIG_PM_SLEEP
-static int rotator_suspend(struct device *dev)
-{
- struct rot_context *rot = dev_get_drvdata(dev);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- return rotator_clk_crtl(rot, false);
-}
-
-static int rotator_resume(struct device *dev)
-{
- struct rot_context *rot = dev_get_drvdata(dev);
-
- if (!pm_runtime_suspended(dev))
- return rotator_clk_crtl(rot, true);
-
- return 0;
-}
-#endif
-
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
@@ -833,7 +810,8 @@ static int rotator_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops rotator_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
NULL)
};
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 95ddd56b89f0..5de36d8dcc68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1281,6 +1281,11 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
+ /* Everything is in place, we can now relax! */
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver.name, driver.major, driver.minor, driver.patchlevel,
+ driver.date, pci_name(pdev), dev_priv->drm.primary->index);
+
intel_runtime_pm_put(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7a30af79d799..f38ceffd82c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -122,8 +122,11 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt =
IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
- if (intel_vgpu_active(dev_priv))
- has_full_ppgtt = false; /* emulation is too hard */
+ if (intel_vgpu_active(dev_priv)) {
+ /* emulation is too hard */
+ has_full_ppgtt = false;
+ has_full_48bit_ppgtt = false;
+ }
if (!has_aliasing_ppgtt)
return 0;
@@ -158,7 +161,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
+ if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index f6acb5a0e701..b81cfb3b22ec 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -65,9 +65,6 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- if (!IS_HASWELL(dev_priv))
- return;
-
magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 47bdf9dad0d3..b9e5a63a7c9e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -554,7 +554,6 @@ void intel_dvo_init(struct drm_device *dev)
return;
}
- drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dvo);
kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index adca262d591a..7acbbbf97833 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -1047,6 +1047,23 @@ err_out:
return err;
}
+static int intel_use_opregion_panel_type_callback(const struct dmi_system_id *id)
+{
+ DRM_INFO("Using panel type from OpRegion on %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_use_opregion_panel_type[] = {
+ {
+ .callback = intel_use_opregion_panel_type_callback,
+ .ident = "Conrac GmbH IX45GM2",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "Conrac GmbH"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "IX45GM2"),
+ },
+ },
+ { }
+};
+
int
intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
{
@@ -1073,6 +1090,16 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
}
/*
+ * So far we know that some machines must use it, others must not use it.
+ * There doesn't seem to be any way to determine which way to go, except
+ * via a quirk list :(
+ */
+ if (!dmi_check_system(intel_use_opregion_panel_type)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
+ /*
* FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
* low vswing for eDP, whereas the VBT panel type (2) gives us normal
* vswing instead. Low vswing results in some display flickers, so
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 53e13c10e4ea..2d2481392824 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7859,6 +7859,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
case GEN6_PCODE_ILLEGAL_CMD:
return -ENXIO;
case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
case GEN6_PCODE_TIMEOUT:
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2b0d1baf15b3..cf171b4b8c67 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -255,14 +255,14 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t max_sleep_time = 0x1f;
- /* Lately it was identified that depending on panel idle frame count
- * calculated at HW can be off by 1. So let's use what came
- * from VBT + 1.
- * There are also other cases where panel demands at least 4
- * but VBT is not being set. To cover these 2 cases lets use
- * at least 5 when VBT isn't set to be on the safest side.
+ /*
+ * Let's respect VBT in case VBT asks for a higher idle_frame value.
+ * Let's use 6 as the minimum to cover all known cases including
+ * the off-by-one issue that HW has in some cases. Also there are
+ * cases where the sink should be able to train
+ * with the 5 or 6 idle patterns.
*/
- uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
+ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
uint32_t val = EDP_PSR_ENABLE;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 9f7dafce3a4c..7bf90e9e6139 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -171,10 +171,34 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
}
+static int imx_drm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int ret;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Check modeset again in case crtc_state->mode_changed is
+ * updated in a plane's ->atomic_check callback.
+ */
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_fb_cma_create,
.output_poll_changed = imx_drm_output_poll_changed,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 08e188bc10fc..462056e4b9e4 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -76,6 +76,8 @@ static void ipu_crtc_disable(struct drm_crtc *crtc)
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
+
+ drm_crtc_vblank_off(crtc);
}
static void imx_drm_crtc_reset(struct drm_crtc *crtc)
@@ -175,6 +177,8 @@ static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ drm_crtc_vblank_on(crtc);
+
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 4ad67d015ec7..29423e757d36 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -319,13 +319,14 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
/*
- * since we cannot touch active IDMAC channels, we do not support
- * resizing the enabled plane or changing its format
+ * We support resizing an active plane or changing its format by
+ * forcing a CRTC mode change and disabling/re-enabling the plane in
+ * its ->atomic_update callback.
*/
if (old_fb && (state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
fb->pixel_format != old_fb->pixel_format))
- return -EINVAL;
+ crtc_state->mode_changed = true;
eba = drm_plane_state_to_eba(state);
@@ -336,7 +337,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (old_fb && fb->pitches[0] != old_fb->pitches[0])
- return -EINVAL;
+ crtc_state->mode_changed = true;
switch (fb->pixel_format) {
case DRM_FORMAT_YUV420:
@@ -372,7 +373,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (old_fb && old_fb->pitches[1] != fb->pitches[1])
- return -EINVAL;
+ crtc_state->mode_changed = true;
}
return 0;
@@ -392,8 +393,14 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
enum ipu_color_space ics;
if (old_state->fb) {
- ipu_plane_atomic_set_base(ipu_plane, old_state);
- return;
+ struct drm_crtc_state *crtc_state = state->crtc->state;
+
+ if (!crtc_state->mode_changed) {
+ ipu_plane_atomic_set_base(ipu_plane, old_state);
+ return;
+ }
+
+ ipu_disable_plane(plane);
}
switch (ipu_plane->dp_flow) {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b4bc7f1ef717..d0da52f2a806 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -157,6 +157,12 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+
+ /* Task holding struct_mutex; currently only used in the submit path
+ * to detect and reject faults from copy_from_user() for submit
+ * ioctl.
+ */
+ struct task_struct *struct_mutex_task;
};
struct msm_format {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6cd4af443139..85f3047e05ae 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -196,11 +196,20 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int ret;
+ /* This should only happen if userspace tries to pass a mmap'd
+ * but unfaulted gem bo vaddr into submit ioctl, triggering
+ * a page fault while struct_mutex is already held. This is
+ * not a valid use-case so just bail.
+ */
+ if (priv->struct_mutex_task == current)
+ return VM_FAULT_SIGBUS;
+
/* Make sure we don't parallel update on a fault, nor move or remove
* something from beneath our feet
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 9766f9ae4b7d..880d6a9af7c8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -64,6 +64,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
kfree(submit);
}
+static inline unsigned long __must_check
+copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user_inatomic(to, from, n);
+ return -EFAULT;
+}
+
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -71,6 +79,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
int ret = 0;
spin_lock(&file->table_lock);
+ pagefault_disable();
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
@@ -84,10 +93,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
submit->bos[i].flags = 0;
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret) {
- ret = -EFAULT;
- goto out_unlock;
+ ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
+ if (unlikely(ret)) {
+ pagefault_enable();
+ spin_unlock(&file->table_lock);
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret)
+ goto out;
+ spin_lock(&file->table_lock);
+ pagefault_disable();
}
if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
@@ -127,9 +141,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- submit->nr_bos = i;
+ pagefault_enable();
spin_unlock(&file->table_lock);
+out:
+ submit->nr_bos = i;
+
return ret;
}
@@ -377,6 +394,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
return ret;
+ priv->struct_mutex_task = current;
+
submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
@@ -468,6 +487,7 @@ out:
if (ret)
msm_gem_submit_free(submit);
out_unlock:
+ priv->struct_mutex_task = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
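The three msm hunks above implement one pattern: the submit ioctl copies user memory while holding both dev->struct_mutex and file->table_lock, and a page fault taken during that copy could recurse into msm_gem_fault(), which itself needs struct_mutex. The fix copies with page faults disabled and, on a fault, drops the spinlock, retries with a normal faulting copy, then reacquires; struct_mutex_task turns any fault that still sneaks through into SIGBUS instead of a deadlock. A minimal sketch of the shape, where lock, uptrs, tmp and n are illustrative placeholders rather than anything from the driver:

	/*
	 * Sketch only: non-faulting copy under a spinlock with a faulting
	 * slow path. pagefault_disable() makes __copy_from_user_inatomic()
	 * return nonzero instead of faulting, so it is safe under the lock.
	 */
	spin_lock(&lock);
	pagefault_disable();
	for (i = 0; i < n; i++) {
		void __user *uptr = &uptrs[i];

		if (copy_from_user_inatomic(&tmp, uptr, sizeof(tmp))) {
			/* faulted: drop the lock and do a normal copy */
			pagefault_enable();
			spin_unlock(&lock);
			if (copy_from_user(&tmp, uptr, sizeof(tmp)))
				return -EFAULT;
			spin_lock(&lock);
			pagefault_disable();
		}
		/* ... consume tmp ... */
	}
	pagefault_enable();
	spin_unlock(&lock);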
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f2ad17aa33f0..dc57b628e074 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -225,6 +225,17 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
if (!parent_pdev)
return false;
+ if (!parent_pdev->bridge_d3) {
+ /*
+ * Parent PCI bridge is currently not power managed.
+ * Since userspace can change these afterwards to be on
+ * the safe side we stick with _DSM and prevent usage of
+ * _PR3 from the bridge.
+ */
+ pci_d3cold_disable(pdev);
+ return false;
+ }
+
parent_adev = ACPI_COMPANION(&parent_pdev->dev);
if (!parent_adev)
return false;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 59adcf8532dd..3f6704cf6608 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -144,7 +144,7 @@ static struct list_head *vc4_get_cache_list_for_size(struct drm_device *dev,
return &vc4->bo_cache.size_list[page_index];
}
-void vc4_bo_cache_purge(struct drm_device *dev)
+static void vc4_bo_cache_purge(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8b42d31a7f0e..9ecef9385491 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -57,21 +57,21 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
switch (args->param) {
case DRM_VC4_PARAM_V3D_IDENT0:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
pm_runtime_put(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
pm_runtime_put(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret)
+ if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
pm_runtime_put(&vc4->v3d->pdev->dev);
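The reason for the vc4_get_param change: pm_runtime_get_sync() returns a negative errno on failure but 0 or 1 on success (1 when the device was already resumed), so "if (ret)" wrongly treated the already-active case as an error. Sketch of the usual calling convention; the put_noidle() on failure is the common idiom, not something this patch adds:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count is raised even when resume fails */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... device is powered, touch registers ... */
	pm_runtime_put(dev);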
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 489e3de0c050..428e24919ef1 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -321,6 +321,15 @@ vc4_first_render_job(struct vc4_dev *vc4)
struct vc4_exec_info, head);
}
+static inline struct vc4_exec_info *
+vc4_last_render_job(struct vc4_dev *vc4)
+{
+ if (list_empty(&vc4->render_job_list))
+ return NULL;
+ return list_last_entry(&vc4->render_job_list,
+ struct vc4_exec_info, head);
+}
+
/**
* struct vc4_texture_sample_info - saves the offsets into the UBO for texture
* setup parameters.
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6155e8aca1c6..b262c5c26f10 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -534,8 +534,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = kcalloc(exec->bo_count, sizeof(struct drm_gem_cma_object *),
- GFP_KERNEL);
+ exec->bo = drm_calloc_large(exec->bo_count,
+ sizeof(struct drm_gem_cma_object *));
if (!exec->bo) {
DRM_ERROR("Failed to allocate validated BO pointers\n");
return -ENOMEM;
@@ -572,8 +572,8 @@ vc4_cl_lookup_bos(struct drm_device *dev,
spin_unlock(&file_priv->table_lock);
fail:
- kfree(handles);
- return 0;
+ drm_free_large(handles);
+ return ret;
}
static int
@@ -608,7 +608,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
* read the contents back for validation, and I think the
* bo->vaddr is uncached access.
*/
- temp = kmalloc(temp_size, GFP_KERNEL);
+ temp = drm_malloc_ab(temp_size, 1);
if (!temp) {
DRM_ERROR("Failed to allocate storage for copying "
"in bin/render CLs.\n");
@@ -675,7 +675,7 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
ret = vc4_validate_shader_recs(dev, exec);
fail:
- kfree(temp);
+ drm_free_large(temp);
return ret;
}
@@ -688,7 +688,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
- kfree(exec->bo);
+ drm_free_large(exec->bo);
}
while (!list_empty(&exec->unref_list)) {
@@ -942,8 +942,8 @@ vc4_gem_destroy(struct drm_device *dev)
vc4->overflow_mem = NULL;
}
- vc4_bo_cache_destroy(dev);
-
if (vc4->hang_state)
vc4_free_hang_state(dev, vc4->hang_state);
+
+ vc4_bo_cache_destroy(dev);
}
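exec->bo_count and temp_size above derive from userspace-supplied counts, so plain kmalloc()/kcalloc() can fail or thrash on large requests. The drm_malloc_ab()/drm_calloc_large() helpers of this era have roughly the following shape (see drm_mem_util.h); this is a paraphrase for illustration, not the verbatim helper:

	/* Overflow-checked multiply, kmalloc for small sizes, vmalloc
	 * fallback for big user-controlled ones; pair with drm_free_large().
	 */
	static void *sketch_malloc_ab(size_t nmemb, size_t size)
	{
		if (size != 0 && nmemb > SIZE_MAX / size)
			return NULL;		/* multiply would overflow */
		if (size * nmemb <= PAGE_SIZE)
			return kmalloc(nmemb * size, GFP_KERNEL);
		return vmalloc(size * nmemb);
	}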
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b0104a346a74..094bc6a475c1 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -83,8 +83,10 @@ vc4_overflow_mem_work(struct work_struct *work)
spin_lock_irqsave(&vc4->job_lock, irqflags);
current_exec = vc4_first_bin_job(vc4);
+ if (!current_exec)
+ current_exec = vc4_last_render_job(vc4);
if (current_exec) {
- vc4->overflow_mem->seqno = vc4->finished_seqno + 1;
+ vc4->overflow_mem->seqno = current_exec->seqno;
list_add_tail(&vc4->overflow_mem->unref_head,
&current_exec->unref_list);
vc4->overflow_mem = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 46527e989ce3..2543cf5b8b51 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -309,8 +309,14 @@ validate_uniform_address_write(struct vc4_validated_shader_info *validated_shade
* of uniforms on each side. However, this scheme is easy to
* validate so it's all we allow for now.
*/
-
- if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
+ switch (QPU_GET_FIELD(inst, QPU_SIG)) {
+ case QPU_SIG_NONE:
+ case QPU_SIG_SCOREBOARD_UNLOCK:
+ case QPU_SIG_COLOR_LOAD:
+ case QPU_SIG_LOAD_TMU0:
+ case QPU_SIG_LOAD_TMU1:
+ break;
+ default:
DRM_ERROR("uniforms address change must be "
"normal math\n");
return false;
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index f98743277e3c..258cb9a40ab3 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -643,7 +643,7 @@ static int bcm_kona_i2c_xfer(struct i2c_adapter *adapter,
if (rc < 0) {
dev_err(dev->device,
"restart cmd failed rc = %d\n", rc);
- goto xfer_send_stop;
+ goto xfer_send_stop;
}
}
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 90bbd9f9dd8f..3c16a2f7c673 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -767,7 +767,7 @@ static int cdns_i2c_setclk(unsigned long clk_in, struct cdns_i2c *id)
* depending on the scaling direction.
*
* Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- * to acknowedge the change, NOTIFY_DONE if the notification is
+ * to acknowledge the change, NOTIFY_DONE if the notification is
* considered irrelevant.
*/
static int cdns_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index c6922b806fb7..fcd973d5131e 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -367,13 +367,17 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
/* Configure SDA Hold Time if required */
- if (dev->sda_hold_time) {
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
+ reg = dw_readl(dev, DW_IC_COMP_VERSION);
+ if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+ if (dev->sda_hold_time) {
dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- else
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.");
+ } else {
+ /* Keep previous hold time setting if no one set it */
+ dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+ }
+ } else {
+ dev_warn(dev->dev,
+ "Hardware too old to adjust SDA hold time.\n");
}
/* Configure Tx/Rx FIFO threshold levels */
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 52407f3c9e1c..9bd849dacee8 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -378,7 +378,7 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
}
dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (dma_mapping_error(chan->device->dev, dma_addr)) {
dev_dbg(dev, "dma map failed, using PIO\n");
return;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 2bc8b01153d6..5c5b7cada8be 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -918,7 +918,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
* Code adapted from i2c-cadence.c.
*
* Return: NOTIFY_STOP if the rate change should be aborted, NOTIFY_OK
- * to acknowedge the change, NOTIFY_DONE if the notification is
+ * to acknowledge the change, NOTIFY_DONE if the notification is
* considered irrelevant.
*/
static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
@@ -1111,6 +1111,15 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
return ret < 0 ? ret : num;
}
+static __maybe_unused int rk3x_i2c_resume(struct device *dev)
+{
+ struct rk3x_i2c *i2c = dev_get_drvdata(dev);
+
+ rk3x_i2c_adapt_div(i2c, clk_get_rate(i2c->clk));
+
+ return 0;
+}
+
static u32 rk3x_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
@@ -1334,12 +1343,15 @@ static int rk3x_i2c_remove(struct platform_device *pdev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(rk3x_i2c_pm_ops, NULL, rk3x_i2c_resume);
+
static struct platform_driver rk3x_i2c_driver = {
.probe = rk3x_i2c_probe,
.remove = rk3x_i2c_remove,
.driver = {
.name = "rk3x-i2c",
.of_match_table = rk3x_i2c_match,
+ .pm = &rk3x_i2c_pm_ops,
},
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 6fb3e2645992..05b1eeab9cf5 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -610,7 +610,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
return;
dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
- if (dma_mapping_error(pd->dev, dma_addr)) {
+ if (dma_mapping_error(chan->device->dev, dma_addr)) {
dev_dbg(pd->dev, "dma map failed, using PIO\n");
return;
}
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index 215ac87f606d..b3893f6282ba 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -37,8 +37,6 @@ struct i2c_demux_pinctrl_priv {
struct i2c_demux_pinctrl_chan chan[];
};
-static struct property status_okay = { .name = "status", .length = 3, .value = "ok" };
-
static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct i2c_demux_pinctrl_priv *priv = adap->algo_data;
@@ -107,6 +105,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
of_changeset_revert(&priv->chan[new_chan].chgset);
err:
dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret);
+ priv->cur_chan = -EINVAL;
return ret;
}
@@ -192,6 +191,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct i2c_demux_pinctrl_priv *priv;
+ struct property *props;
int num_chan, i, j, err;
num_chan = of_count_phandle_with_args(np, "i2c-parent", NULL);
@@ -202,7 +202,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv)
+ num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL);
- if (!priv)
+
+ props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
+
+ if (!priv || !props)
return -ENOMEM;
err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
@@ -220,8 +223,12 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
}
priv->chan[i].parent_np = adap_np;
+ props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
+ props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+ props[i].length = 3;
+
of_changeset_init(&priv->chan[i].chgset);
- of_changeset_update_property(&priv->chan[i].chgset, adap_np, &status_okay);
+ of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
}
priv->num_chan = num_chan;
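Background for the i2c-demux change: of_changeset_update_property() links the passed struct property into the target node via the changeset, so one static property shared by every channel ends up on multiple lists at once and corrupts them. The probe now allocates a private copy per channel; note that length = 3 covers "ok" plus its NUL terminator. The channel-switch lifecycle the driver relies on, sketched with the existing OF changeset calls:

	of_changeset_init(&priv->chan[i].chgset);
	of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
	/* ... later, per channel switch ... */
	of_changeset_apply(&priv->chan[i].chgset);	/* mark adapter "ok" */
	/* ... */
	of_changeset_revert(&priv->chan[i].chgset);	/* switch away again */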
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 89d78208de3f..78f148ea9d9f 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -20,6 +20,8 @@ config BMA180
config BMA220
tristate "Bosch BMA220 3-Axis Accelerometer Driver"
depends on SPI
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to add support for the Bosch BMA220 triaxial
acceleration sensor.
@@ -234,7 +236,8 @@ config STK8312
config STK8BA50
tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
depends on I2C
- depends on IIO_TRIGGER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to get support for the Sensortek STK8BA50 3-axis
accelerometer.
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 1098d10df8e8..5099f295dd37 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -253,7 +253,7 @@ static int bma220_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
bma220_trigger_handler, NULL);
if (ret < 0) {
dev_err(&spi->dev, "iio triggered buffer setup failed\n");
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index bf17aae66145..59b380dbf27f 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -67,6 +67,9 @@
#define BMC150_ACCEL_REG_PMU_BW 0x10
#define BMC150_ACCEL_DEF_BW 125
+#define BMC150_ACCEL_REG_RESET 0x14
+#define BMC150_ACCEL_RESET_VAL 0xB6
+
#define BMC150_ACCEL_REG_INT_MAP_0 0x19
#define BMC150_ACCEL_INT_MAP_0_BIT_SLOPE BIT(2)
@@ -1497,6 +1500,14 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
int ret, i;
unsigned int val;
+ /*
+ * Reset chip to get it in a known good state. A delay of 1.8ms after
+ * reset is required according to the data sheets of supported chips.
+ */
+ regmap_write(data->regmap, BMC150_ACCEL_REG_RESET,
+ BMC150_ACCEL_RESET_VAL);
+ usleep_range(1800, 2500);
+
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(dev, "Error: Reading chip id\n");
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 3a9f106787d2..9d72d4bcf5e9 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -160,11 +160,13 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
goto error_ret;
*val = ret;
+ ret = IIO_VAL_INT;
break;
case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret < 0)
goto error_ret;
+ *val = 0;
*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
ret = IIO_VAL_INT_PLUS_MICRO;
break;
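The kxsd9 fix illustrates the ->read_raw() contract: the return value is not the sample but an IIO_VAL_* code telling the core how to combine *val and *val2. Falling through with the SPI transfer's return value (the old bug) made the core misformat the result. Minimal shape of the two cases touched here, with sample and micro_scale as placeholders:

	case IIO_CHAN_INFO_RAW:
		*val = sample;
		return IIO_VAL_INT;		/* value is *val */
	case IIO_CHAN_INFO_SCALE:
		*val = 0;			/* integer part */
		*val2 = micro_scale;		/* micro part */
		return IIO_VAL_INT_PLUS_MICRO;	/* value is *val + *val2/1e6 */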
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 1de31bdd4ce4..767577298ee3 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -389,6 +389,7 @@ config QCOM_SPMI_VADC
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
+ depends on RESET_CONTROLLER
help
Say yes here to build support for the SARADC found in SoCs from
Rockchip.
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index b6163764489c..9704090b7908 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -527,6 +527,7 @@ static struct attribute_group ad799x_event_attrs_group = {
static const struct iio_info ad7991_info = {
.read_raw = &ad799x_read_raw,
.driver_module = THIS_MODULE,
+ .update_scan_mode = ad799x_update_scan_mode,
};
static const struct iio_info ad7993_4_7_8_noirq_info = {
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 52430ba171f3..0438c68015e8 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -381,8 +381,8 @@ static irqreturn_t at91_adc_rl_interrupt(int irq, void *private)
st->ts_bufferedmeasure = false;
input_report_key(st->ts_input, BTN_TOUCH, 0);
input_sync(st->ts_input);
- } else if (status & AT91_ADC_EOC(3)) {
- /* Conversion finished */
+ } else if (status & AT91_ADC_EOC(3) && st->ts_input) {
+ /* Conversion finished and we have a touchscreen */
if (st->ts_bufferedmeasure) {
/*
* Last measurement is always discarded, since it can
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index f9ad6c2d6821..85d701291654 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -21,6 +21,8 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -53,6 +55,7 @@ struct rockchip_saradc {
struct clk *clk;
struct completion completion;
struct regulator *vref;
+ struct reset_control *reset;
const struct rockchip_saradc_data *data;
u16 last_val;
};
@@ -190,6 +193,16 @@ static const struct of_device_id rockchip_saradc_match[] = {
};
MODULE_DEVICE_TABLE(of, rockchip_saradc_match);
+/*
+ * Reset the SARADC controller.
+ */
+static void rockchip_saradc_reset_controller(struct reset_control *reset)
+{
+ reset_control_assert(reset);
+ usleep_range(10, 20);
+ reset_control_deassert(reset);
+}
+
static int rockchip_saradc_probe(struct platform_device *pdev)
{
struct rockchip_saradc *info = NULL;
@@ -218,6 +231,20 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
+ /*
+ * The reset is optional, so the driver keeps working with old
+ * devicetrees that do not describe it
+ */
+ info->reset = devm_reset_control_get(&pdev->dev, "saradc-apb");
+ if (IS_ERR(info->reset)) {
+ ret = PTR_ERR(info->reset);
+ if (ret != -ENOENT)
+ return ret;
+
+ dev_dbg(&pdev->dev, "no reset control found\n");
+ info->reset = NULL;
+ }
+
init_completion(&info->completion);
irq = platform_get_irq(pdev, 0);
@@ -252,6 +279,9 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
return PTR_ERR(info->vref);
}
+ if (info->reset)
+ rockchip_saradc_reset_controller(info->reset);
+
/*
* Use a default value for the converter clock.
* This may become user-configurable in the future.
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 1ef398770a1f..066abaf80201 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -489,7 +489,8 @@ static struct iio_info ads1115_info = {
#ifdef CONFIG_OF
static int ads1015_get_channels_config_of(struct i2c_client *client)
{
- struct ads1015_data *data = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ads1015_data *data = iio_priv(indio_dev);
struct device_node *node;
if (!client->dev.of_node ||
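The ads1015 bug is the classic drvdata mix-up: probe stores the iio_dev with i2c_set_clientdata(), so consumers must go through iio_priv() to reach the driver state. The pairing, with the probe side shown for context (the probe lines are the usual idiom, not quoted from this file):

	/* probe */
	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
	data = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);	/* clientdata = iio_dev */

	/* anywhere else */
	struct iio_dev *indio_dev = i2c_get_clientdata(client);
	struct ads1015_data *data = iio_priv(indio_dev);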
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 8a368756881b..c3cfacca2541 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -32,6 +32,7 @@
struct tiadc_device {
struct ti_tscadc_dev *mfd_tscadc;
+ struct mutex fifo1_lock; /* to protect fifo access */
int channels;
u8 channel_line[8];
u8 channel_step[8];
@@ -359,6 +360,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
+ int ret = IIO_VAL_INT;
int i, map_val;
unsigned int fifo1count, read, stepid;
bool found = false;
@@ -372,13 +374,14 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
if (!step_en)
return -EINVAL;
+ mutex_lock(&adc_dev->fifo1_lock);
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
while (fifo1count--)
tiadc_readl(adc_dev, REG_FIFO1);
am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);
- timeout = jiffies + usecs_to_jiffies
+ timeout = jiffies + msecs_to_jiffies
(IDLE_TIMEOUT * adc_dev->channels);
/* Wait for Fifo threshold interrupt */
while (1) {
@@ -388,7 +391,8 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
if (time_after(jiffies, timeout)) {
am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto err_unlock;
}
}
map_val = adc_dev->channel_step[chan->scan_index];
@@ -414,8 +418,11 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
if (found == false)
- return -EBUSY;
- return IIO_VAL_INT;
+ ret = -EBUSY;
+
+err_unlock:
+ mutex_unlock(&adc_dev->fifo1_lock);
+ return ret;
}
static const struct iio_info tiadc_info = {
@@ -483,6 +490,7 @@ static int tiadc_probe(struct platform_device *pdev)
tiadc_step_config(indio_dev);
tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
+ mutex_init(&adc_dev->fifo1_lock);
err = tiadc_channel_init(indio_dev, adc_dev->channels);
if (err < 0)
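Two independent fixes above: FIFO1 reads are now serialized with fifo1_lock, and the timeout conversion switches from usecs_to_jiffies() to msecs_to_jiffies(). Assuming IDLE_TIMEOUT is specified in milliseconds in the tscadc MFD header (worth checking against your tree), the old conversion made the poll window roughly a thousand times too short. The corrected wait, sketched:

	unsigned long timeout = jiffies +
			msecs_to_jiffies(IDLE_TIMEOUT * adc_dev->channels);

	while (!tiadc_readl(adc_dev, REG_FIFO1CNT)) {
		if (time_after(jiffies, timeout)) {
			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
			ret = -EAGAIN;
			goto err_unlock;	/* drops fifo1_lock */
		}
	}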
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index ae038a59d256..407f141a1eee 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -434,7 +434,7 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
break;
case IIO_ELECTRICALCONDUCTIVITY:
*val = 1; /* 0.00001 */
- *val = 100000;
+ *val2 = 100000;
break;
case IIO_CONCENTRATION:
*val = 0; /* 0.000000001 */
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
index e81f434760f4..dc33c1dd5191 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
@@ -56,8 +56,8 @@ static struct {
{HID_USAGE_SENSOR_ALS, 0, 1, 0},
{HID_USAGE_SENSOR_ALS, HID_USAGE_SENSOR_UNITS_LUX, 1, 0},
- {HID_USAGE_SENSOR_PRESSURE, 0, 100000, 0},
- {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 1, 0},
+ {HID_USAGE_SENSOR_PRESSURE, 0, 100, 0},
+ {HID_USAGE_SENSOR_PRESSURE, HID_USAGE_SENSOR_UNITS_PASCAL, 0, 1000},
};
static int pow_10(unsigned power)
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
index 792a97164cb2..bebbd00304ce 100644
--- a/drivers/iio/dac/stx104.c
+++ b/drivers/iio/dac/stx104.c
@@ -65,6 +65,16 @@ struct stx104_gpio {
unsigned int out_state;
};
+/**
+ * struct stx104_dev - STX104 device private data structure
+ * @indio_dev: IIO device
+ * @chip: instance of the gpio_chip
+ */
+struct stx104_dev {
+ struct iio_dev *indio_dev;
+ struct gpio_chip *chip;
+};
+
static int stx104_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long mask)
{
@@ -107,6 +117,7 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
static int stx104_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
+ /* GPIO 0-3 are input only, while the rest are output only */
if (offset < 4)
return 1;
@@ -169,6 +180,7 @@ static int stx104_probe(struct device *dev, unsigned int id)
struct iio_dev *indio_dev;
struct stx104_iio *priv;
struct stx104_gpio *stx104gpio;
+ struct stx104_dev *stx104dev;
int err;
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
@@ -179,6 +191,10 @@ static int stx104_probe(struct device *dev, unsigned int id)
if (!stx104gpio)
return -ENOMEM;
+ stx104dev = devm_kzalloc(dev, sizeof(*stx104dev), GFP_KERNEL);
+ if (!stx104dev)
+ return -ENOMEM;
+
if (!devm_request_region(dev, base[id], STX104_EXTENT,
dev_name(dev))) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -199,12 +215,6 @@ static int stx104_probe(struct device *dev, unsigned int id)
outw(0, base[id] + 4);
outw(0, base[id] + 6);
- err = devm_iio_device_register(dev, indio_dev);
- if (err) {
- dev_err(dev, "IIO device registering failed (%d)\n", err);
- return err;
- }
-
stx104gpio->chip.label = dev_name(dev);
stx104gpio->chip.parent = dev;
stx104gpio->chip.owner = THIS_MODULE;
@@ -220,7 +230,9 @@ static int stx104_probe(struct device *dev, unsigned int id)
spin_lock_init(&stx104gpio->lock);
- dev_set_drvdata(dev, stx104gpio);
+ stx104dev->indio_dev = indio_dev;
+ stx104dev->chip = &stx104gpio->chip;
+ dev_set_drvdata(dev, stx104dev);
err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
if (err) {
@@ -228,14 +240,22 @@ static int stx104_probe(struct device *dev, unsigned int id)
return err;
}
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(dev, "IIO device registering failed (%d)\n", err);
+ gpiochip_remove(&stx104gpio->chip);
+ return err;
+ }
+
return 0;
}
static int stx104_remove(struct device *dev, unsigned int id)
{
- struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+ struct stx104_dev *const stx104dev = dev_get_drvdata(dev);
- gpiochip_remove(&stx104gpio->chip);
+ iio_device_unregister(stx104dev->indio_dev);
+ gpiochip_remove(stx104dev->chip);
return 0;
}
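The stx104 reshuffle is about teardown ordering: devm_iio_device_register() would only unregister after remove() has already run, i.e. after gpiochip_remove(). Registering the IIO device manually lets probe unwind the gpiochip on failure and lets remove() release resources in strict reverse order of acquisition:

	/* probe: gpiochip first, IIO device second */
	err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
	if (err)
		return err;
	err = iio_device_register(indio_dev);
	if (err) {
		gpiochip_remove(&stx104gpio->chip);	/* unwind */
		return err;
	}

	/* remove: reverse of probe */
	iio_device_unregister(stx104dev->indio_dev);
	gpiochip_remove(stx104dev->chip);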
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 738a86d9e4a9..d04124345992 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -6,6 +6,8 @@ menu "Humidity sensors"
config AM2315
tristate "Aosong AM2315 relative humidity and temperature sensor"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for the Aosong AM2315
relative humidity and ambient temperature sensor.
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index 3e200f69e886..ff96b6d0fdae 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -244,7 +244,7 @@ static int am2315_probe(struct i2c_client *client,
indio_dev->channels = am2315_channels;
indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
am2315_trigger_handler, NULL);
if (ret < 0) {
dev_err(&client->dev, "iio triggered buffer setup failed\n");
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index a03832a5fc95..e0c9c70c2a4a 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -142,7 +142,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
struct i2c_client *client = data->client;
int delay = data->adc_int_us[chan->address];
int ret;
- int val;
+ __be16 val;
/* start measurement */
ret = i2c_smbus_write_byte(client, chan->address);
@@ -154,26 +154,13 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
/* wait for integration time to pass */
usleep_range(delay, delay + 1000);
- /*
- * i2c_smbus_read_word_data cannot() be used here due to the command
- * value not being understood and causes NAKs preventing any reading
- * from being accessed.
- */
- ret = i2c_smbus_read_byte(client);
+ /* read measurement */
+ ret = i2c_master_recv(data->client, (char *)&val, sizeof(val));
if (ret < 0) {
- dev_err(&client->dev, "cannot read high byte measurement");
+ dev_err(&client->dev, "cannot read sensor data\n");
return ret;
}
- val = ret << 8;
-
- ret = i2c_smbus_read_byte(client);
- if (ret < 0) {
- dev_err(&client->dev, "cannot read low byte measurement");
- return ret;
- }
- val |= ret;
-
- return val;
+ return be16_to_cpu(val);
}
static int hdc100x_get_heater_status(struct hdc100x_data *data)
@@ -272,8 +259,8 @@ static int hdc100x_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct hdc100x_data *data;
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BYTE))
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA |
+ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
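The hdc100x conversion result is a big-endian 16-bit word that the chip will not serve via SMBus word reads (it NAKs the command phase), hence the raw two-byte transfer plus an explicit byte swap, and the added I2C_FUNC_I2C functionality check since i2c_master_recv() needs a genuinely I2C-capable adapter. Compact shape of the read path:

	__be16 val;
	int ret;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
		return -EOPNOTSUPP;	/* raw transfers unsupported */

	ret = i2c_master_recv(client, (char *)&val, sizeof(val));
	if (ret < 0)
		return ret;
	return be16_to_cpu(val);	/* wire order is big-endian */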
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 90462fcf5436..158aaf44dd95 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -107,9 +107,10 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
{
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
size_t datum_size;
size_t to_wait;
- int ret;
+ int ret = 0;
if (!indio_dev->info)
return -ENODEV;
@@ -131,19 +132,29 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
else
to_wait = min_t(size_t, n / datum_size, rb->watermark);
+ add_wait_queue(&rb->pollq, &wait);
do {
- ret = wait_event_interruptible(rb->pollq,
- iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
- if (ret)
- return ret;
+ if (!indio_dev->info) {
+ ret = -ENODEV;
+ break;
+ }
- if (!indio_dev->info)
- return -ENODEV;
+ if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT);
+ continue;
+ }
ret = rb->access->read_first_n(rb, n, buf);
if (ret == 0 && (filp->f_flags & O_NONBLOCK))
ret = -EAGAIN;
- } while (ret == 0);
+ } while (ret == 0);
+ remove_wait_queue(&rb->pollq, &wait);
return ret;
}
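Why the open-coded loop replaces wait_event_interruptible(): that macro evaluates its condition after marking the task TASK_INTERRUPTIBLE, and iio_buffer_ready() can sleep (its data_available path may take locks), which trips the "do not call blocking ops when !TASK_RUNNING" warning. The wait_woken() form checks the condition while still TASK_RUNNING and only then sleeps. Generic shape, with wq and condition() as placeholders:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	add_wait_queue(&wq, &wait);
	while (!condition()) {		/* evaluated in TASK_RUNNING */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&wq, &wait);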
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index f914d5d140e4..d2b889918c3e 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -613,9 +613,8 @@ ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
case IIO_VAL_FRACTIONAL:
tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
- vals[1] = do_div(tmp, 1000000000LL);
- vals[0] = tmp;
- return sprintf(buf, "%d.%09u\n", vals[0], vals[1]);
+ vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
+ return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));
case IIO_VAL_FRACTIONAL_LOG2:
tmp = (s64)vals[0] * 1000000000LL >> vals[1];
vals[1] = do_div(tmp, 1000000000LL);
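The IIO_VAL_FRACTIONAL fix matters for negative values: do_div() is only defined for unsigned dividends, so splitting a negative tmp with it mangled the sign. div_s64_rem() truncates toward zero, and printing the remainder through abs() keeps a single leading minus. Worked example:

	/* vals[0] = -3, vals[1] = 2, i.e. the fraction -3/2:
	 * tmp = -3 * 1000000000 / 2 = -1500000000
	 * div_s64_rem(tmp, 1000000000, &rem) -> quotient -1, rem -500000000
	 * "%d.%09u" with abs(rem) prints "-1.500000000"
	 */
	tmp = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
	vals[0] = (int)div_s64_rem(tmp, 1000000000, &vals[1]);
	return sprintf(buf, "%d.%09u\n", vals[0], abs(vals[1]));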
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 7c566f516572..3574945183fe 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -76,7 +76,6 @@ config BH1750
config BH1780
tristate "ROHM BH1780 ambient light sensor"
depends on I2C
- depends on !SENSORS_BH1780
help
Say Y here to build support for the ROHM BH1780GLI ambient
light sensor.
@@ -238,6 +237,8 @@ config MAX44000
tristate "MAX44000 Ambient and Infrared Proximity Sensor"
depends on I2C
select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here if you want to build support for Maxim Integrated's
MAX44000 ambient and infrared proximity sensor device.
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 6943688e66df..e5a533cbd53f 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -970,7 +970,7 @@ int bmp280_common_probe(struct device *dev,
data->vdda = devm_regulator_get(dev, "vdda");
if (IS_ERR(data->vdda)) {
dev_err(dev, "failed to get VDDA regulator\n");
- ret = PTR_ERR(data->vddd);
+ ret = PTR_ERR(data->vdda);
goto out_disable_vddd;
}
ret = regulator_enable(data->vdda);
@@ -1079,7 +1079,8 @@ EXPORT_SYMBOL(bmp280_common_remove);
#ifdef CONFIG_PM
static int bmp280_runtime_suspend(struct device *dev)
{
- struct bmp280_data *data = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmp280_data *data = iio_priv(indio_dev);
int ret;
ret = regulator_disable(data->vdda);
@@ -1090,7 +1091,8 @@ static int bmp280_runtime_suspend(struct device *dev)
static int bmp280_runtime_resume(struct device *dev)
{
- struct bmp280_data *data = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmp280_data *data = iio_priv(indio_dev);
int ret;
ret = regulator_enable(data->vddd);
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 2e3a70e1b245..5656deb17261 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -397,7 +397,7 @@ static int as3935_probe(struct spi_device *spi)
return ret;
}
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = iio_triggered_buffer_setup(indio_dev, iio_pollfunc_store_time,
&as3935_trigger_handler, NULL);
if (ret) {
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 3a3c5d73bbfc..51c79b2fb0b8 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -106,7 +106,6 @@ struct mcast_group {
atomic_t refcount;
enum mcast_group_state state;
struct ib_sa_query *query;
- int query_id;
u16 pkey_index;
u8 leave_state;
int retries;
@@ -340,11 +339,7 @@ static int send_join(struct mcast_group *group, struct mcast_member *member)
member->multicast.comp_mask,
3000, GFP_KERNEL, join_handler, group,
&group->query);
- if (ret >= 0) {
- group->query_id = ret;
- ret = 0;
- }
- return ret;
+ return (ret > 0) ? 0 : ret;
}
static int send_leave(struct mcast_group *group, u8 leave_state)
@@ -364,11 +359,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
IB_SA_MCMEMBER_REC_JOIN_STATE,
3000, GFP_KERNEL, leave_handler,
group, &group->query);
- if (ret >= 0) {
- group->query_id = ret;
- ret = 0;
- }
- return ret;
+ return (ret > 0) ? 0 : ret;
}
static void join_group(struct mcast_group *group, struct mcast_member *member,
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index 23f38cf2c5cd..afe8b28e0878 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+ select CHELSIO_LIB
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T4 and T5
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile
index e11cf7299945..fa40b685831b 100644
--- a/drivers/infiniband/hw/cxgb4/Makefile
+++ b/drivers/infiniband/hw/cxgb4/Makefile
@@ -1,4 +1,5 @@
ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb
obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b6a953aed7e8..71c8867ef66b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -49,6 +49,7 @@
#include <rdma/ib_addr.h>
+#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"
@@ -239,15 +240,13 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
- struct cpl_tid_release *req;
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
- skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ skb = get_skb(skb, len, GFP_KERNEL);
if (!skb)
return;
- req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+
+ cxgb_mk_tid_release(skb, len, hwtid, 0);
c4iw_ofld_send(rdev, skb);
return;
}
@@ -333,6 +332,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
spin_lock_irqsave(&ep->com.dev->lock, flags);
_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+ if (idr_is_empty(&ep->com.dev->hwtid_idr))
+ wake_up(&ep->com.dev->wait);
spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}
@@ -464,72 +465,6 @@ static struct net_device *get_real_dev(struct net_device *egress_dev)
return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
-static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
-{
- int i;
-
- egress_dev = get_real_dev(egress_dev);
- for (i = 0; i < dev->rdev.lldi.nports; i++)
- if (dev->rdev.lldi.ports[i] == egress_dev)
- return 1;
- return 0;
-}
-
-static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
- __u8 *peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos,
- __u32 sin6_scope_id)
-{
- struct dst_entry *dst = NULL;
-
- if (IS_ENABLED(CONFIG_IPV6)) {
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- memcpy(&fl6.daddr, peer_ip, 16);
- memcpy(&fl6.saddr, local_ip, 16);
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = sin6_scope_id;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
- dst_release(dst);
- dst = NULL;
- }
- }
-
-out:
- return dst;
-}
-
-static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
- __be32 peer_ip, __be16 local_port,
- __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
- struct neighbour *n;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- n = dst_neigh_lookup(&rt->dst, &peer_ip);
- if (!n)
- return NULL;
- if (!our_interface(dev, n->dev) &&
- !(n->dev->flags & IFF_LOOPBACK)) {
- neigh_release(n);
- dst_release(&rt->dst);
- return NULL;
- }
- neigh_release(n);
- return &rt->dst;
-}
-
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
pr_err(MOD "ARP failure\n");
@@ -704,56 +639,32 @@ static int send_flowc(struct c4iw_ep *ep)
static int send_halfclose(struct c4iw_ep *ep)
{
- struct cpl_close_con_req *req;
struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (WARN_ON(!skb))
return -ENOMEM;
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- ep->hwtid));
+ cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+ NULL, arp_failure_discard);
+
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep)
{
- struct cpl_abort_req *req;
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
if (WARN_ON(!req_skb))
return -ENOMEM;
- set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
- req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- req->cmd = CPL_ABORT_SEND_RST;
- return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
-}
+ cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
+ ep, abort_arp_failure);
-static void best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts, int ipv6)
-{
- unsigned short hdr_size = (ipv6 ?
- sizeof(struct ipv6hdr) :
- sizeof(struct iphdr)) +
- sizeof(struct tcphdr) +
- (use_ts ?
- round_up(TCPOLEN_TIMESTAMP, 4) : 0);
- unsigned short data_size = mtu - hdr_size;
-
- cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+ return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}
static int send_connect(struct c4iw_ep *ep)
@@ -768,7 +679,7 @@ static int send_connect(struct c4iw_ep *ep)
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
- int wscale;
+ u32 wscale;
int win, sizev4, sizev6, wrlen;
struct sockaddr_in *la = (struct sockaddr_in *)
&ep->com.local_addr;
@@ -815,10 +726,10 @@ static int send_connect(struct c4iw_ep *ep)
}
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -1445,9 +1356,9 @@ static void established_upcall(struct c4iw_ep *ep)
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
- struct cpl_rx_data_ack *req;
struct sk_buff *skb;
- int wrlen = roundup(sizeof *req, 16);
+ u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
@@ -1464,15 +1375,12 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
- req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
- memset(req, 0, wrlen);
- INIT_TP_WR(req, ep->hwtid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
- ep->hwtid));
- req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
- RX_DACK_CHANGE_F |
- RX_DACK_MODE_V(dack_mode));
- set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
+ credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
+ RX_DACK_MODE_V(dack_mode);
+
+ cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
+ credit_dack);
+
c4iw_ofld_send(&ep->com.dev->rdev, skb);
return credits;
}
@@ -1970,7 +1878,7 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
struct sk_buff *skb;
struct fw_ofld_connection_wr *req;
unsigned int mtu_idx;
- int wscale;
+ u32 wscale;
struct sockaddr_in *sin;
int win;
@@ -1995,10 +1903,10 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
req->tcb.tx_max = (__force __be32) jiffies;
req->tcb.rcv_adv = htons(1);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -2052,15 +1960,6 @@ static inline int act_open_has_tid(int status)
status != CPL_ERR_CONN_EXIST);
}
-/* Returns whether a CPL status conveys negative advice.
- */
-static int is_neg_adv(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE ||
- status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
static char *neg_adv_str(unsigned int status)
{
switch (status) {
@@ -2117,8 +2016,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
}
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, rt_tos2priority(tos));
- if (!ep->l2t)
+ if (!ep->l2t) {
+ dev_put(pdev);
goto out;
+ }
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
@@ -2214,16 +2115,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
/* find a route */
if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
- ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, ep->com.cm_id->tos);
+ ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
+ laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr,
+ laddr->sin_port,
+ raddr->sin_port, ep->com.cm_id->tos);
iptype = 4;
ra = (__u8 *)&raddr->sin_addr;
} else {
- ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
- raddr6->sin6_addr.s6_addr,
- laddr6->sin6_port, raddr6->sin6_port, 0,
- raddr6->sin6_scope_id);
+ ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
+ get_real_dev,
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+ raddr6->sin6_port, 0,
+ raddr6->sin6_scope_id);
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
}
@@ -2295,7 +2201,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
- if (is_neg_adv(status)) {
+ if (cxgb_is_neg_adv(status)) {
PDBG("%s Connection problems for atid %u status %u (%s)\n",
__func__, atid, status, neg_adv_str(status));
ep->stats.connect_neg_adv++;
@@ -2422,7 +2328,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
unsigned int mtu_idx;
u64 opt0;
u32 opt2;
- int wscale;
+ u32 wscale;
struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
int win;
enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
@@ -2443,10 +2349,10 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
ep->hwtid));
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
- enable_tcp_timestamps && req->tcpopt.tstamp,
- (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
- wscale = compute_wscale(rcv_win);
+ cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+ enable_tcp_timestamps && req->tcpopt.tstamp,
+ (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(rcv_win);
/*
* Specify the largest window that will fit in opt0. The
@@ -2518,42 +2424,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
return;
}
-static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
- int *iptype, __u8 *local_ip, __u8 *peer_ip,
- __be16 *local_port, __be16 *peer_port)
-{
- int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
- ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
- T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
- IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
- T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
- struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
- struct tcphdr *tcp = (struct tcphdr *)
- ((u8 *)(req + 1) + eth_len + ip_len);
-
- if (ip->version == 4) {
- PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
- ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 4;
- memcpy(peer_ip, &ip->saddr, 4);
- memcpy(local_ip, &ip->daddr, 4);
- } else {
- PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
- ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 6;
- memcpy(peer_ip, ip6->saddr.s6_addr, 16);
- memcpy(local_ip, ip6->daddr.s6_addr, 16);
- }
- *peer_port = tcp->source;
- *local_port = tcp->dest;
-
- return;
-}
-
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
@@ -2582,8 +2452,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype,
- local_ip, peer_ip, &local_port, &peer_port);
+ cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
+ &iptype, local_ip, peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
@@ -2591,18 +2461,19 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
- local_port, peer_port,
- tos);
+ dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ *(__be32 *)local_ip, *(__be32 *)peer_ip,
+ local_port, peer_port, tos);
} else {
PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
, __func__, parent_ep, hwtid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
- ((struct sockaddr_in6 *)
- &parent_ep->com.local_addr)->sin6_scope_id);
+ dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+ local_ip, peer_ip, local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &parent_ep->com.local_addr)->sin6_scope_id);
}
if (!dst) {
printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
@@ -2835,18 +2706,18 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
struct c4iw_ep *ep;
- struct cpl_abort_rpl *rpl;
struct sk_buff *rpl_skb;
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
unsigned int tid = GET_TID(req);
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
- if (is_neg_adv(req->status)) {
+ if (cxgb_is_neg_adv(req->status)) {
PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status,
neg_adv_str(req->status));
@@ -2939,11 +2810,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
release = 1;
goto out;
}
- set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
- INIT_TP_WR(rpl, ep->hwtid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
- rpl->cmd = CPL_ABORT_NO_RST;
+
+ cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
+
c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
if (release)
@@ -3375,9 +3244,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
__func__, &laddr->sin_addr, ntohs(laddr->sin_port),
ra, ntohs(raddr->sin_port));
- ep->dst = find_route(dev, laddr->sin_addr.s_addr,
- raddr->sin_addr.s_addr, laddr->sin_port,
- raddr->sin_port, cm_id->tos);
+ ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr,
+ laddr->sin_port,
+ raddr->sin_port, cm_id->tos);
} else {
iptype = 6;
ra = (__u8 *)&raddr6->sin6_addr;
@@ -3396,10 +3267,12 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
__func__, laddr6->sin6_addr.s6_addr,
ntohs(laddr6->sin6_port),
raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
- ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
- raddr6->sin6_addr.s6_addr,
- laddr6->sin6_port, raddr6->sin6_port, 0,
- raddr6->sin6_scope_id);
+ ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+ raddr6->sin6_port, 0,
+ raddr6->sin6_scope_id);
}
if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
@@ -4041,8 +3914,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos);
- dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
- iph->tos);
+ dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
+ iph->daddr, iph->saddr, tcph->dest,
+ tcph->source, iph->tos);
if (!dst) {
pr_err("%s - failed to find dst entry!\n",
__func__);
@@ -4317,7 +4191,7 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
kfree_skb(skb);
return 0;
}
- if (is_neg_adv(req->status)) {
+ if (cxgb_is_neg_adv(req->status)) {
PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status,
neg_adv_str(req->status));
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 071d7332ec06..93e3d270a98a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -872,9 +872,13 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
static void c4iw_dealloc(struct uld_ctx *ctx)
{
c4iw_rdev_close(&ctx->dev->rdev);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
idr_destroy(&ctx->dev->cqidr);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr));
idr_destroy(&ctx->dev->qpidr);
+ WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr));
idr_destroy(&ctx->dev->mmidr);
+ wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr));
idr_destroy(&ctx->dev->hwtid_idr);
idr_destroy(&ctx->dev->stid_idr);
idr_destroy(&ctx->dev->atid_idr);
@@ -992,6 +996,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
INIT_LIST_HEAD(&devp->db_fc_list);
+ init_waitqueue_head(&devp->wait);
devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
if (c4iw_debugfs_root) {
@@ -1475,6 +1480,10 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
static struct cxgb4_uld_info c4iw_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 511,
+ .ciq = true,
+ .lro = false,
.add = c4iw_uld_add,
.rx_handler = c4iw_uld_rx_handler,
.state_change = c4iw_uld_state_change,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index aa47e0ae80bc..cdcf3eeb6f4a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -263,6 +263,7 @@ struct c4iw_dev {
struct idr stid_idr;
struct list_head db_fc_list;
u32 avail_ird;
+ wait_queue_head_t wait;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -881,15 +882,6 @@ static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
return cm_id->provider_data;
}
-static inline int compute_wscale(int win)
-{
- int wscale = 0;
-
- while (wscale < 14 && (65535<<wscale) < win)
- wscale++;
- return wscale;
-}
-
static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
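compute_wscale() leaves iw_cxgb4.h because the same logic now lives in the shared libcxgb as cxgb_compute_wscale(). The math is unchanged: pick the smallest TCP window-scale shift, capped at 14 per RFC 7323, such that 65535 << wscale covers the receive window. Sketched with an example value:

	static u32 sketch_compute_wscale(u32 win)
	{
		u32 wscale = 0;

		/* e.g. win = 262144 (256 KiB): 65535<<2 = 262140 still
		 * falls short, so the result is 3.
		 */
		while (wscale < 14 && (65535U << wscale) < win)
			wscale++;
		return wscale;
	}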
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index edb1172b6f54..690435229be7 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -683,7 +683,7 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
return 0;
}
-void _free_qp(struct kref *kref)
+static void _free_qp(struct kref *kref)
{
struct c4iw_qp *qhp;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b32638d58ae8..cc38004cea42 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9490,6 +9490,78 @@ static void init_lcb(struct hfi1_devdata *dd)
write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
+/*
+ * Perform a test read on the QSFP. Return 0 on success, -ERRNO
+ * on error.
+ */
+static int test_qsfp_read(struct hfi1_pportdata *ppd)
+{
+ int ret;
+ u8 status;
+
+ /* report success if not a QSFP */
+ if (ppd->port_type != PORT_TYPE_QSFP)
+ return 0;
+
+ /* read byte 2, the status byte */
+ ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EIO;
+
+ return 0; /* success */
+}
+
+/*
+ * Values for QSFP retry.
+ *
+ * Give up after 10s (20 x 500ms). The overall timeout was empirically
+ * arrived at from experience on a large cluster.
+ */
+#define MAX_QSFP_RETRIES 20
+#define QSFP_RETRY_WAIT 500 /* msec */
+
+/*
+ * Try a QSFP read. If it fails, schedule a retry for later.
+ * Called on first link activation after driver load.
+ */
+static void try_start_link(struct hfi1_pportdata *ppd)
+{
+ if (test_qsfp_read(ppd)) {
+ /* read failed */
+ if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
+ dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
+ return;
+ }
+ dd_dev_info(ppd->dd,
+ "QSFP not responding, waiting and retrying %d\n",
+ (int)ppd->qsfp_retry_count);
+ ppd->qsfp_retry_count++;
+ queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
+ msecs_to_jiffies(QSFP_RETRY_WAIT));
+ return;
+ }
+ ppd->qsfp_retry_count = 0;
+
+ /*
+ * Tune the SerDes to a ballpark setting for optimal signal and bit
+ * error rate. Needs to be done before starting the link.
+ */
+ tune_serdes(ppd);
+ start_link(ppd);
+}
+
+/*
+ * Workqueue function to start the link after a delay.
+ */
+void handle_start_link(struct work_struct *work)
+{
+ struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
+ start_link_work.work);
+ try_start_link(ppd);
+}
+
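The retry plumbing above follows a stock kernel pattern: a transiently failing probe step is driven from a delayed_work that re-queues itself until a retry budget runs out. A minimal, self-contained sketch of the pattern, with hypothetical my_* names standing in for the hfi1 specifics:

/*
 * Hedged sketch of the requeue-with-budget retry pattern; my_probe_step()
 * and struct my_port are illustrative stand-ins, not hfi1 code.
 */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_MAX_RETRIES	20
#define MY_RETRY_MS	500

struct my_port {
	struct delayed_work retry_work;	/* INIT_DELAYED_WORK() at probe time */
	unsigned int retry_count;
};

static int my_probe_step(struct my_port *p);	/* 0 on success */

static void my_retry_fn(struct work_struct *work)
{
	struct my_port *p = container_of(work, struct my_port,
					 retry_work.work);

	if (my_probe_step(p)) {
		if (p->retry_count++ < MY_MAX_RETRIES)
			schedule_delayed_work(&p->retry_work,
					      msecs_to_jiffies(MY_RETRY_MS));
		return;	/* give up silently once the budget is spent */
	}
	p->retry_count = 0;
	/* ...continue with the rest of bring-up... */
}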
int bringup_serdes(struct hfi1_pportdata *ppd)
{
struct hfi1_devdata *dd = ppd->dd;
@@ -9525,14 +9597,8 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
set_qsfp_int_n(ppd, 1);
}
- /*
- * Tune the SerDes to a ballpark setting for
- * optimal signal and bit error rate
- * Needs to be done before starting the link
- */
- tune_serdes(ppd);
-
- return start_link(ppd);
+ try_start_link(ppd);
+ return 0;
}
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
@@ -9549,6 +9615,10 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
ppd->driver_link_ready = 0;
ppd->link_enabled = 0;
+ ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
+ flush_delayed_work(&ppd->start_link_work);
+ cancel_delayed_work_sync(&ppd->start_link_work);
+
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
@@ -12865,7 +12935,7 @@ fail:
*/
static int set_up_context_variables(struct hfi1_devdata *dd)
{
- int num_kernel_contexts;
+ unsigned long num_kernel_contexts;
int total_contexts;
int ret;
unsigned ngroups;
@@ -12894,9 +12964,9 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
*/
if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
dd_dev_err(dd,
- "Reducing # kernel rcv contexts to: %d, from %d\n",
+ "Reducing # kernel rcv contexts to: %d, from %lu\n",
(int)(dd->chip_send_contexts - num_vls - 1),
- (int)num_kernel_contexts);
+ num_kernel_contexts);
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
}
/*
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index ed11107c50fe..e29573769efc 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -706,6 +706,7 @@ void handle_link_up(struct work_struct *work);
void handle_link_down(struct work_struct *work);
void handle_link_downgrade(struct work_struct *work);
void handle_link_bounce(struct work_struct *work);
+void handle_start_link(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
void reset_qsfp(struct hfi1_pportdata *ppd);
void qsfp_event(struct work_struct *work);
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index a49cc88f08a2..5e9be16f6cd3 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -59,6 +59,40 @@
static struct dentry *hfi1_dbg_root;
+/* wrappers to enforce srcu in seq file */
+static ssize_t hfi1_seq_read(
+ struct file *file,
+ char __user *buf,
+ size_t size,
+ loff_t *ppos)
+{
+ struct dentry *d = file->f_path.dentry;
+ int srcu_idx;
+ ssize_t r;
+
+ r = debugfs_use_file_start(d, &srcu_idx);
+ if (likely(!r))
+ r = seq_read(file, buf, size, ppos);
+ debugfs_use_file_finish(srcu_idx);
+ return r;
+}
+
+static loff_t hfi1_seq_lseek(
+ struct file *file,
+ loff_t offset,
+ int whence)
+{
+ struct dentry *d = file->f_path.dentry;
+ int srcu_idx;
+ loff_t r;
+
+ r = debugfs_use_file_start(d, &srcu_idx);
+ if (likely(!r))
+ r = seq_lseek(file, offset, whence);
+ debugfs_use_file_finish(srcu_idx);
+ return r;
+}
+
#define private2dd(file) (file_inode(file)->i_private)
#define private2ppd(file) (file_inode(file)->i_private)
@@ -87,8 +121,8 @@ static int _##name##_open(struct inode *inode, struct file *s) \
static const struct file_operations _##name##_file_ops = { \
.owner = THIS_MODULE, \
.open = _##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
+ .read = hfi1_seq_read, \
+ .llseek = hfi1_seq_lseek, \
.release = seq_release \
}
@@ -105,11 +139,9 @@ do { \
DEBUGFS_FILE_CREATE(#name, parent, data, &_##name##_file_ops, S_IRUGO)
static void *_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
struct hfi1_opcode_stats_perctx *opstats;
- rcu_read_lock();
if (*pos >= ARRAY_SIZE(opstats->stats))
return NULL;
return pos;
@@ -126,9 +158,7 @@ static void *_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static int _opcode_stats_seq_show(struct seq_file *s, void *v)
@@ -285,12 +315,10 @@ DEBUGFS_SEQ_FILE_OPEN(qp_stats)
DEBUGFS_FILE_OPS(qp_stats);
static void *_sdes_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
struct hfi1_ibdev *ibd;
struct hfi1_devdata *dd;
- rcu_read_lock();
ibd = (struct hfi1_ibdev *)s->private;
dd = dd_from_dev(ibd);
if (!dd->per_sdma || *pos >= dd->num_sdma)
@@ -310,9 +338,7 @@ static void *_sdes_seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void _sdes_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static int _sdes_seq_show(struct seq_file *s, void *v)
@@ -339,11 +365,9 @@ static ssize_t dev_counters_read(struct file *file, char __user *buf,
struct hfi1_devdata *dd;
ssize_t rval;
- rcu_read_lock();
dd = private2dd(file);
avail = hfi1_read_cntrs(dd, NULL, &counters);
rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
- rcu_read_unlock();
return rval;
}
@@ -356,11 +380,9 @@ static ssize_t dev_names_read(struct file *file, char __user *buf,
struct hfi1_devdata *dd;
ssize_t rval;
- rcu_read_lock();
dd = private2dd(file);
avail = hfi1_read_cntrs(dd, &names, NULL);
rval = simple_read_from_buffer(buf, count, ppos, names, avail);
- rcu_read_unlock();
return rval;
}
@@ -383,11 +405,9 @@ static ssize_t portnames_read(struct file *file, char __user *buf,
struct hfi1_devdata *dd;
ssize_t rval;
- rcu_read_lock();
dd = private2dd(file);
avail = hfi1_read_portcntrs(dd->pport, &names, NULL);
rval = simple_read_from_buffer(buf, count, ppos, names, avail);
- rcu_read_unlock();
return rval;
}
@@ -400,11 +420,9 @@ static ssize_t portcntrs_debugfs_read(struct file *file, char __user *buf,
struct hfi1_pportdata *ppd;
ssize_t rval;
- rcu_read_lock();
ppd = private2ppd(file);
avail = hfi1_read_portcntrs(ppd, NULL, &counters);
rval = simple_read_from_buffer(buf, count, ppos, counters, avail);
- rcu_read_unlock();
return rval;
}
@@ -434,16 +452,13 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
int used;
int i;
- rcu_read_lock();
ppd = private2ppd(file);
dd = ppd->dd;
size = PAGE_SIZE;
used = 0;
tmp = kmalloc(size, GFP_KERNEL);
- if (!tmp) {
- rcu_read_unlock();
+ if (!tmp)
return -ENOMEM;
- }
scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
used += scnprintf(tmp + used, size - used,
@@ -470,7 +485,6 @@ static ssize_t asic_flags_read(struct file *file, char __user *buf,
used += scnprintf(tmp + used, size - used, "Write bits to clear\n");
ret = simple_read_from_buffer(buf, count, ppos, tmp, used);
- rcu_read_unlock();
kfree(tmp);
return ret;
}
@@ -486,15 +500,12 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
u64 scratch0;
u64 clear;
- rcu_read_lock();
ppd = private2ppd(file);
dd = ppd->dd;
buff = kmalloc(count + 1, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto do_return;
- }
+ if (!buff)
+ return -ENOMEM;
ret = copy_from_user(buff, buf, count);
if (ret > 0) {
@@ -527,8 +538,6 @@ static ssize_t asic_flags_write(struct file *file, const char __user *buf,
do_free:
kfree(buff);
- do_return:
- rcu_read_unlock();
return ret;
}
@@ -542,18 +551,14 @@ static ssize_t qsfp_debugfs_dump(struct file *file, char __user *buf,
char *tmp;
int ret;
- rcu_read_lock();
ppd = private2ppd(file);
tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmp) {
- rcu_read_unlock();
+ if (!tmp)
return -ENOMEM;
- }
ret = qsfp_dump(ppd, tmp, PAGE_SIZE);
if (ret > 0)
ret = simple_read_from_buffer(buf, count, ppos, tmp, ret);
- rcu_read_unlock();
kfree(tmp);
return ret;
}
@@ -569,7 +574,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
int offset;
int total_written;
- rcu_read_lock();
ppd = private2ppd(file);
/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -577,16 +581,12 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
offset = *ppos & 0xffff;
/* explicitly reject invalid address 0 to catch cp and cat */
- if (i2c_addr == 0) {
- ret = -EINVAL;
- goto _return;
- }
+ if (i2c_addr == 0)
+ return -EINVAL;
buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
+ if (!buff)
+ return -ENOMEM;
ret = copy_from_user(buff, buf, count);
if (ret > 0) {
@@ -606,8 +606,6 @@ static ssize_t __i2c_debugfs_write(struct file *file, const char __user *buf,
_free:
kfree(buff);
- _return:
- rcu_read_unlock();
return ret;
}
@@ -636,7 +634,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
int offset;
int total_read;
- rcu_read_lock();
ppd = private2ppd(file);
/* byte offset format: [offsetSize][i2cAddr][offsetHigh][offsetLow] */
@@ -644,16 +641,12 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
offset = *ppos & 0xffff;
/* explicitly reject invalid address 0 to catch cp and cat */
- if (i2c_addr == 0) {
- ret = -EINVAL;
- goto _return;
- }
+ if (i2c_addr == 0)
+ return -EINVAL;
buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
+ if (!buff)
+ return -ENOMEM;
total_read = i2c_read(ppd, target, i2c_addr, offset, buff, count);
if (total_read < 0) {
@@ -673,8 +666,6 @@ static ssize_t __i2c_debugfs_read(struct file *file, char __user *buf,
_free:
kfree(buff);
- _return:
- rcu_read_unlock();
return ret;
}
@@ -701,26 +692,20 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
int ret;
int total_written;
- rcu_read_lock();
- if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
- ret = -EINVAL;
- goto _return;
- }
+ if (*ppos + count > QSFP_PAGESIZE * 4) /* base page + page00-page03 */
+ return -EINVAL;
ppd = private2ppd(file);
buff = kmalloc(count, GFP_KERNEL);
- if (!buff) {
- ret = -ENOMEM;
- goto _return;
- }
+ if (!buff)
+ return -ENOMEM;
ret = copy_from_user(buff, buf, count);
if (ret > 0) {
ret = -EFAULT;
goto _free;
}
-
total_written = qsfp_write(ppd, target, *ppos, buff, count);
if (total_written < 0) {
ret = total_written;
@@ -733,8 +718,6 @@ static ssize_t __qsfp_debugfs_write(struct file *file, const char __user *buf,
_free:
kfree(buff);
- _return:
- rcu_read_unlock();
return ret;
}
@@ -761,7 +744,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
int ret;
int total_read;
- rcu_read_lock();
if (*ppos + count > QSFP_PAGESIZE * 4) { /* base page + page00-page03 */
ret = -EINVAL;
goto _return;
@@ -794,7 +776,6 @@ static ssize_t __qsfp_debugfs_read(struct file *file, char __user *buf,
_free:
kfree(buff);
_return:
- rcu_read_unlock();
return ret;
}
@@ -1010,7 +991,6 @@ void hfi1_dbg_ibdev_exit(struct hfi1_ibdev *ibd)
debugfs_remove_recursive(ibd->hfi1_ibdev_dbg);
out:
ibd->hfi1_ibdev_dbg = NULL;
- synchronize_rcu();
}
/*
@@ -1035,9 +1015,7 @@ static const char * const hfi1_statnames[] = {
};
static void *_driver_stats_names_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
- rcu_read_lock();
if (*pos >= ARRAY_SIZE(hfi1_statnames))
return NULL;
return pos;
@@ -1055,9 +1033,7 @@ static void *_driver_stats_names_seq_next(
}
static void _driver_stats_names_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static int _driver_stats_names_seq_show(struct seq_file *s, void *v)
@@ -1073,9 +1049,7 @@ DEBUGFS_SEQ_FILE_OPEN(driver_stats_names)
DEBUGFS_FILE_OPS(driver_stats_names);
static void *_driver_stats_seq_start(struct seq_file *s, loff_t *pos)
-__acquires(RCU)
{
- rcu_read_lock();
if (*pos >= ARRAY_SIZE(hfi1_statnames))
return NULL;
return pos;
@@ -1090,9 +1064,7 @@ static void *_driver_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
}
static void _driver_stats_seq_stop(struct seq_file *s, void *v)
-__releases(RCU)
{
- rcu_read_unlock();
}
static u64 hfi1_sps_ints(void)
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index a021e660d482..325ec211370f 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -605,6 +605,7 @@ struct hfi1_pportdata {
struct work_struct freeze_work;
struct work_struct link_downgrade_work;
struct work_struct link_bounce_work;
+ struct delayed_work start_link_work;
/* host link state variables */
struct mutex hls_lock;
u32 host_link_state;
@@ -659,6 +660,7 @@ struct hfi1_pportdata {
u8 linkinit_reason;
u8 local_tx_rate; /* rate given to 8051 firmware */
u8 last_pstate; /* info only */
+ u8 qsfp_retry_count;
/* placeholders for IB MAD packet settings */
u8 overrun_threshold;
@@ -1804,7 +1806,7 @@ extern unsigned int hfi1_max_mtu;
extern unsigned int hfi1_cu;
extern unsigned int user_credit_return_threshold;
extern int num_user_contexts;
-extern unsigned n_krcvqs;
+extern unsigned long n_krcvqs;
extern uint krcvqs[];
extern int krcvqsset;
extern uint kdeth_qp;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index b7935451093c..384b43d2fd49 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -94,7 +94,7 @@ module_param_array(krcvqs, uint, &krcvqsset, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "Array of the number of non-control kernel receive queues by VL");
/* computed based on above array */
-unsigned n_krcvqs;
+unsigned long n_krcvqs;
static unsigned hfi1_rcvarr_split = 25;
module_param_named(rcvarr_split, hfi1_rcvarr_split, uint, S_IRUGO);
@@ -500,6 +500,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
INIT_WORK(&ppd->sma_message_work, handle_sma_message);
INIT_WORK(&ppd->link_bounce_work, handle_link_bounce);
+ INIT_DELAYED_WORK(&ppd->start_link_work, handle_start_link);
INIT_WORK(&ppd->linkstate_active_work, receive_interrupt_work);
INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event);
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 39e42c373a01..7ffc14f21523 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -2604,7 +2604,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
u8 lq, num_vls;
u8 res_lli, res_ler;
u64 port_mask;
- unsigned long port_num;
+ u8 port_num;
unsigned long vl;
u32 vl_select_mask;
int vfi;
@@ -2638,9 +2638,9 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
- if ((u8)port_num != port) {
+ if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
return reply((struct ib_mad_hdr *)pmp);
}
@@ -2842,7 +2842,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3015,7 +3015,7 @@ static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
@@ -3252,7 +3252,7 @@ static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
*/
port_mask = be64_to_cpu(req->port_select_mask[3]);
port_num = find_first_bit((unsigned long *)&port_mask,
- sizeof(port_mask));
+ sizeof(port_mask) * 8);
if (port_num != port) {
pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
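All four pma_* hunks above fix the same bug class: find_first_bit() takes the search size in bits, while sizeof() yields bytes, so the old calls scanned only the first 8 bits of the 64-bit port mask. Schematically:

	u64 port_mask = be64_to_cpu(req->port_select_mask[3]);
	unsigned int port_num;

	/* wrong: sizeof(port_mask) == 8, so only bits 0..7 are searched */
	port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask));

	/* right: pass the width in bits */
	port_num = find_first_bit((unsigned long *)&port_mask, sizeof(port_mask) * 8);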
diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58849..3a1ef3056282 100644
--- a/drivers/infiniband/hw/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
@@ -771,6 +771,9 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
read_extra_bytes(pbuf, from, to_fill);
from += to_fill;
nbytes -= to_fill;
+ /* may not be enough valid bytes left to align */
+ if (extra > nbytes)
+ extra = nbytes;
/* ...now write carry */
dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
@@ -798,6 +801,15 @@ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
read_low_bytes(pbuf, from, extra);
from += extra;
nbytes -= extra;
+ /*
+ * If no bytes are left, return early - we are done.
+ * NOTE: This short-circuit is *required* because
+ * "extra" may have been reduced in size and "from"
+ * is not aligned, as required when leaving this
+ * if block.
+ */
+ if (nbytes == 0)
+ return;
}
/* at this point, from is QW aligned */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0ecf27903dc2..1694037d1eee 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -114,6 +114,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
#define KDETH_HCRC_LOWER_SHIFT 24
#define KDETH_HCRC_LOWER_MASK 0xff
+#define AHG_KDETH_INTR_SHIFT 12
+
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
@@ -1480,7 +1482,8 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
/* Clear KDETH.SH on last packet */
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
- INTR) >> 16);
+ INTR) <<
+ AHG_KDETH_INTR_SHIFT);
val &= cpu_to_le16(~(1U << 13));
AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
} else {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 3ee0cad96bc6..0c92a40b3e86 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -265,6 +265,7 @@ void i40iw_next_iw_state(struct i40iw_qp *iwqp,
info.dont_send_fin = false;
if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
info.reset_tcp_conn = true;
+ iwqp->hw_iwarp_state = state;
i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 0cbbe4038298..445e230d5ff8 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -100,7 +100,7 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
-static int i40iw_notifiers_registered;
+static atomic_t i40iw_notifiers_registered;
/**
* i40iw_find_i40e_handler - find a handler given a client info
@@ -1342,12 +1342,11 @@ exit:
*/
static void i40iw_register_notifiers(void)
{
- if (!i40iw_notifiers_registered) {
+ if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
register_inetaddr_notifier(&i40iw_inetaddr_notifier);
register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
register_netevent_notifier(&i40iw_net_notifier);
}
- i40iw_notifiers_registered++;
}
/**
@@ -1429,8 +1428,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
case INET_NOTIFIER:
- if (i40iw_notifiers_registered > 0) {
- i40iw_notifiers_registered--;
+ if (!atomic_dec_return(&i40iw_notifiers_registered)) {
unregister_netevent_notifier(&i40iw_net_notifier);
unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
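The i40iw change converts a plain counter, which races when two devices come up concurrently, into an atomic_t: atomic_inc_return() == 1 picks exactly one caller to register the notifiers, and atomic_dec_return() == 0 picks exactly one to unregister them. The idiom in isolation (my_register()/my_unregister() are hypothetical stand-ins):

#include <linux/atomic.h>

static atomic_t my_users = ATOMIC_INIT(0);

static void my_get(void)
{
	if (atomic_inc_return(&my_users) == 1)
		my_register();		/* only the first user registers */
}

static void my_put(void)
{
	if (atomic_dec_return(&my_users) == 0)
		my_unregister();	/* only the last user tears down */
}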
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 006db6436e3b..5df63dacaaa3 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -687,12 +687,6 @@ repoll:
is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR;
- if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
- is_send)) {
- pr_warn("Completion for NOP opcode detected!\n");
- return -EAGAIN;
- }
-
/* Resize CQ in progress */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
if (cq->resize_buf) {
@@ -718,12 +712,6 @@ repoll:
*/
mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
be32_to_cpu(cqe->vlan_my_qpn));
- if (unlikely(!mqp)) {
- pr_warn("CQ %06x with entry for unknown QPN %06x\n",
- cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
- return -EAGAIN;
- }
-
*cur_qp = to_mibqp(mqp);
}
@@ -736,11 +724,6 @@ repoll:
/* SRQ is also in the radix tree */
msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
srq_num);
- if (unlikely(!msrq)) {
- pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
- cq->mcq.cqn, srq_num);
- return -EAGAIN;
- }
}
if (is_send) {
@@ -891,7 +874,6 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct mlx4_ib_qp *cur_qp = NULL;
unsigned long flags;
int npolled;
- int err = 0;
struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
spin_lock_irqsave(&cq->lock, flags);
@@ -901,8 +883,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
}
for (npolled = 0; npolled < num_entries; ++npolled) {
- err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
- if (err)
+ if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled))
break;
}
@@ -911,10 +892,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
out:
spin_unlock_irqrestore(&cq->lock, flags);
- if (err == 0 || err == -EAGAIN)
- return npolled;
- else
- return err;
+ return npolled;
}
int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9c2e53d28f98..0f21c3a25552 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1128,6 +1128,27 @@ void handle_port_mgmt_change_event(struct work_struct *work)
/* Generate GUID changed event */
if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
+ if (mlx4_is_master(dev->dev)) {
+ union ib_gid gid;
+ int err = 0;
+
+ if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
+ err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
+ else
+ gid.global.subnet_prefix =
+ eqe->event.port_mgmt_change.params.port_info.gid_prefix;
+ if (err) {
+ pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
+ port, err);
+ } else {
+ pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
+ port,
+ (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
+ be64_to_cpu(gid.global.subnet_prefix));
+ atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
+ }
+ }
mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
/*if master, notify all slaves*/
if (mlx4_is_master(dev->dev))
@@ -2202,6 +2223,8 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
if (err)
goto demux_err;
dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
+ atomic64_set(&dev->sriov.demux[i].subnet_prefix,
+ be64_to_cpu(gid.global.subnet_prefix));
err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
&dev->sriov.sqps[i]);
if (err)
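Switching subnet_prefix to an atomic64_t (see the mlx4_ib.h hunk further below) lets the port-event handler publish a new 64-bit prefix while the data path reads it locklessly; on 32-bit hosts a plain __be64 store could otherwise be observed torn. A hedged sketch of the read/write pair, mirroring the conversions in this series:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic64_t subnet_prefix;	/* value kept in CPU byte order */

static void publish_prefix(__be64 wire_prefix)
{
	atomic64_set(&subnet_prefix, be64_to_cpu(wire_prefix));
}

static __be64 read_prefix(void)
{
	return cpu_to_be64(atomic64_read(&subnet_prefix));
}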
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 2af44c2de262..87ba9bca4181 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2202,6 +2202,9 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
bool per_port = !!(ibdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+ if (mlx4_is_slave(ibdev->dev))
+ return 0;
+
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
/* i == 1 means we are building port counters */
if (i && !per_port)
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 8f7ad07915b0..097bfcc4ee99 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -489,7 +489,7 @@ static u8 get_leave_state(struct mcast_group *group)
if (!group->members[i])
leave_state |= (1 << i);
- return leave_state & (group->rec.scope_join_state & 7);
+ return leave_state & (group->rec.scope_join_state & 0xf);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
@@ -564,8 +564,8 @@ static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
} else
mcg_warn_group(group, "DRIVER BUG\n");
} else if (group->state == MCAST_LEAVE_SENT) {
- if (group->rec.scope_join_state & 7)
- group->rec.scope_join_state &= 0xf8;
+ if (group->rec.scope_join_state & 0xf)
+ group->rec.scope_join_state &= 0xf0;
group->state = MCAST_IDLE;
mutex_unlock(&group->lock);
if (release_group(group, 1))
@@ -605,7 +605,7 @@ static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
static int handle_join_req(struct mcast_group *group, u8 join_mask,
struct mcast_req *req)
{
- u8 group_join_state = group->rec.scope_join_state & 7;
+ u8 group_join_state = group->rec.scope_join_state & 0xf;
int ref = 0;
u16 status;
struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
@@ -690,8 +690,8 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
u8 cur_join_state;
resp_join_state = ((struct ib_sa_mcmember_data *)
- group->response_sa_mad.data)->scope_join_state & 7;
- cur_join_state = group->rec.scope_join_state & 7;
+ group->response_sa_mad.data)->scope_join_state & 0xf;
+ cur_join_state = group->rec.scope_join_state & 0xf;
if (method == IB_MGMT_METHOD_GET_RESP) {
/* successful join */
@@ -710,7 +710,7 @@ process_requests:
req = list_first_entry(&group->pending_list, struct mcast_req,
group_list);
sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
- req_join_state = sa_data->scope_join_state & 0x7;
+ req_join_state = sa_data->scope_join_state & 0xf;
/* For a leave request, we will immediately answer the VF, and
* update our internal counters. The actual leave will be sent
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 7c5832ede4bd..686ab48ff644 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -448,7 +448,7 @@ struct mlx4_ib_demux_ctx {
struct workqueue_struct *wq;
struct workqueue_struct *ud_wq;
spinlock_t ud_lock;
- __be64 subnet_prefix;
+ atomic64_t subnet_prefix;
__be64 guid_cache[128];
struct mlx4_ib_dev *dev;
/* the following lock protects both mcg_table and mcg_mgid0_list */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 768085f59566..7fb9629bd12b 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2493,24 +2493,27 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr,
sqp->ud_header.grh.flow_label =
ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff);
sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit;
- if (is_eth)
+ if (is_eth) {
memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16);
- else {
- if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
- /* When multi-function is enabled, the ib_core gid
- * indexes don't necessarily match the hw ones, so
- * we must use our own cache */
- sqp->ud_header.grh.source_gid.global.subnet_prefix =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- subnet_prefix;
- sqp->ud_header.grh.source_gid.global.interface_id =
- to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
- guid_cache[ah->av.ib.gid_index];
- } else
- ib_get_cached_gid(ib_dev,
- be32_to_cpu(ah->av.ib.port_pd) >> 24,
- ah->av.ib.gid_index,
- &sqp->ud_header.grh.source_gid, NULL);
+ } else {
+ if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+ /* When multi-function is enabled, the ib_core gid
+ * indexes don't necessarily match the hw ones, so
+ * we must use our own cache
+ */
+ sqp->ud_header.grh.source_gid.global.subnet_prefix =
+ cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
+ demux[sqp->qp.port - 1].
+ subnet_prefix)));
+ sqp->ud_header.grh.source_gid.global.interface_id =
+ to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
+ guid_cache[ah->av.ib.gid_index];
+ } else {
+ ib_get_cached_gid(ib_dev,
+ be32_to_cpu(ah->av.ib.port_pd) >> 24,
+ ah->av.ib.gid_index,
+ &sqp->ud_header.grh.source_gid, NULL);
+ }
}
memcpy(sqp->ud_header.grh.destination_gid.raw,
ah->av.ib.dgid, 16);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 35a9f718e669..5de9a65f53bc 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -553,12 +553,6 @@ repoll:
* from the table.
*/
mqp = __mlx5_qp_lookup(dev->mdev, qpn);
- if (unlikely(!mqp)) {
- mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
- cq->mcq.cqn, qpn);
- return -EINVAL;
- }
-
*cur_qp = to_mibqp(mqp);
}
@@ -619,13 +613,6 @@ repoll:
read_lock(&dev->mdev->priv.mkey_table.lock);
mmkey = __mlx5_mr_lookup(dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
- if (unlikely(!mmkey)) {
- read_unlock(&dev->mdev->priv.mkey_table.lock);
- mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
- cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
- return -EINVAL;
- }
-
mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
@@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
unsigned long flags;
int soft_polled = 0;
int npolled;
- int err = 0;
spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
soft_polled = poll_soft_wc(cq, num_entries, wc);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
- err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
- if (err)
+ if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
break;
}
@@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
out:
spin_unlock_irqrestore(&cq->lock, flags);
- if (err == 0 || err == -EAGAIN)
- return soft_polled + npolled;
- else
- return err;
+ return soft_polled + npolled;
}
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8db6fdff092f..551aa0e789aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -284,7 +284,9 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
- return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ return 0;
}
enum {
@@ -1423,6 +1425,13 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
dmac_47_16),
ib_spec->eth.val.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ smac_47_16),
+ ib_spec->eth.mask.src_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ smac_47_16),
+ ib_spec->eth.val.src_mac);
+
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
vlan_tag, 1);
@@ -1844,6 +1853,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
int domain)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
struct mlx5_ib_flow_handler *handler = NULL;
struct mlx5_flow_destination *dst = NULL;
struct mlx5_ib_flow_prio *ft_prio;
@@ -1870,7 +1880,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
}
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
+ if (mqp->flags & MLX5_IB_QP_RSS)
+ dst->tir_num = mqp->rss_qp.tirn;
+ else
+ dst->tir_num = mqp->raw_packet_qp.rq.tirn;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 40df2cca0609..996b54e366b0 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -71,7 +71,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
addr = addr >> page_shift;
tmp = (unsigned long)addr;
- m = find_first_bit(&tmp, sizeof(tmp));
+ m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
i = 0;
@@ -81,7 +81,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
for (k = 0; k < len; k++) {
if (!(i & mask)) {
tmp = (unsigned long)pfn;
- m = min_t(unsigned long, m, find_first_bit(&tmp, sizeof(tmp)));
+ m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG));
skip = 1 << m;
mask = skip - 1;
base = pfn;
@@ -89,7 +89,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
} else {
if (base + p != pfn) {
tmp = (unsigned long)p;
- m = find_first_bit(&tmp, sizeof(tmp));
+ m = find_first_bit(&tmp, BITS_PER_LONG);
skip = 1 << m;
mask = skip - 1;
base = pfn;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a59034aaa297..67cc7416fdff 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -402,6 +402,7 @@ enum mlx5_ib_qp_flags {
/* QP uses 1 as its source QP number */
MLX5_IB_QP_SQPN_QP1 = 1 << 6,
MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
+ MLX5_IB_QP_RSS = 1 << 8,
};
struct mlx5_umr_wr {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index f3c943f6458e..9529b464fbdc 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1457,6 +1457,7 @@ create_tir:
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
+ qp->flags |= MLX5_IB_QP_RSS;
return 0;
err:
@@ -3656,12 +3657,8 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct ib_send_wr *wr, unsigned *idx,
int *size, int nreq)
{
- int err = 0;
-
- if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
- err = -ENOMEM;
- return err;
- }
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
*seg = mlx5_get_send_wqe(qp, *idx);
@@ -3677,7 +3674,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16;
- return err;
+ return 0;
}
static void finish_wqe(struct mlx5_ib_qp *qp,
@@ -3756,7 +3753,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
num_sge = wr->num_sge;
if (unlikely(num_sge > qp->sq.max_gs)) {
mlx5_ib_warn(dev, "\n");
- err = -ENOMEM;
+ err = -EINVAL;
*bad_wr = wr;
goto out;
}
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 80c4b6b401b8..46b64970058e 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -294,7 +294,7 @@ static void __rvt_free_mr(struct rvt_mr *mr)
{
rvt_deinit_mregion(&mr->mr);
rvt_free_lkey(&mr->mr);
- vfree(mr);
+ kfree(mr);
}
/**
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 55f0e8f0ca79..ddd59270ff6d 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -362,15 +362,34 @@ static int __init rxe_module_init(void)
return err;
}
- err = rxe_net_init();
+ err = rxe_net_ipv4_init();
if (err) {
- pr_err("rxe: unable to init\n");
+ pr_err("rxe: unable to init ipv4 tunnel\n");
rxe_cache_exit();
- return err;
+ goto exit;
+ }
+
+ err = rxe_net_ipv6_init();
+ if (err) {
+ pr_err("rxe: unable to init ipv6 tunnel\n");
+ rxe_cache_exit();
+ goto exit;
}
+
+ err = register_netdevice_notifier(&rxe_net_notifier);
+ if (err) {
+ pr_err("rxe: Failed to rigister netdev notifier\n");
+ goto exit;
+ }
+
pr_info("rxe: loaded\n");
return 0;
+
+exit:
+ rxe_release_udp_tunnel(recv_sockets.sk4);
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ return err;
}
static void __exit rxe_module_exit(void)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 36f67de44095..1c59ef2c67aa 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -689,7 +689,14 @@ int rxe_completer(void *arg)
qp->req.need_retry = 1;
rxe_run_task(&qp->req.task, 1);
}
+
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+
goto exit;
+
} else {
wqe->status = IB_WC_RETRY_EXC_ERR;
state = COMPST_ERROR;
@@ -716,6 +723,12 @@ int rxe_completer(void *arg)
case COMPST_ERROR:
do_complete(qp, wqe);
rxe_qp_error(qp);
+
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ }
+
goto exit;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 0b8d2ea8b41d..eedf2f1cafdf 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -275,9 +275,10 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
return sock;
}
-static void rxe_release_udp_tunnel(struct socket *sk)
+void rxe_release_udp_tunnel(struct socket *sk)
{
- udp_tunnel_sock_release(sk);
+ if (sk)
+ udp_tunnel_sock_release(sk);
}
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
@@ -658,51 +659,45 @@ out:
return NOTIFY_OK;
}
-static struct notifier_block rxe_net_notifier = {
+struct notifier_block rxe_net_notifier = {
.notifier_call = rxe_notify,
};
-int rxe_net_init(void)
+int rxe_net_ipv4_init(void)
{
- int err;
-
spin_lock_init(&dev_list_lock);
- recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
- htons(ROCE_V2_UDP_DPORT), true);
- if (IS_ERR(recv_sockets.sk6)) {
- recv_sockets.sk6 = NULL;
- pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
- return -1;
- }
-
recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
- htons(ROCE_V2_UDP_DPORT), false);
+ htons(ROCE_V2_UDP_DPORT), false);
if (IS_ERR(recv_sockets.sk4)) {
- rxe_release_udp_tunnel(recv_sockets.sk6);
recv_sockets.sk4 = NULL;
- recv_sockets.sk6 = NULL;
pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
return -1;
}
- err = register_netdevice_notifier(&rxe_net_notifier);
- if (err) {
- rxe_release_udp_tunnel(recv_sockets.sk6);
- rxe_release_udp_tunnel(recv_sockets.sk4);
- pr_err("rxe: Failed to rigister netdev notifier\n");
- }
-
- return err;
+ return 0;
}
-void rxe_net_exit(void)
+int rxe_net_ipv6_init(void)
{
- if (recv_sockets.sk6)
- rxe_release_udp_tunnel(recv_sockets.sk6);
+#if IS_ENABLED(CONFIG_IPV6)
- if (recv_sockets.sk4)
- rxe_release_udp_tunnel(recv_sockets.sk4);
+ spin_lock_init(&dev_list_lock);
+ recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
+ htons(ROCE_V2_UDP_DPORT), true);
+ if (IS_ERR(recv_sockets.sk6)) {
+ recv_sockets.sk6 = NULL;
+ pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+ return -1;
+ }
+#endif
+ return 0;
+}
+
+void rxe_net_exit(void)
+{
+ rxe_release_udp_tunnel(recv_sockets.sk6);
+ rxe_release_udp_tunnel(recv_sockets.sk4);
unregister_netdevice_notifier(&rxe_net_notifier);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.h b/drivers/infiniband/sw/rxe/rxe_net.h
index 7b06f76d16cc..0daf7f09e5b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.h
+++ b/drivers/infiniband/sw/rxe/rxe_net.h
@@ -44,10 +44,13 @@ struct rxe_recv_sockets {
};
extern struct rxe_recv_sockets recv_sockets;
+extern struct notifier_block rxe_net_notifier;
+void rxe_release_udp_tunnel(struct socket *sk);
struct rxe_dev *rxe_net_add(struct net_device *ndev);
-int rxe_net_init(void);
+int rxe_net_ipv4_init(void);
+int rxe_net_ipv6_init(void);
void rxe_net_exit(void);
#endif /* RXE_NET_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index 3d464c23e08b..144d2f129fcd 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -312,7 +312,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
* make a copy of the skb to post to the next qp
*/
skb_copy = (mce->qp_list.next != &mcg->qp_list) ?
- skb_clone(skb, GFP_KERNEL) : NULL;
+ skb_clone(skb, GFP_ATOMIC) : NULL;
pkt->qp = qp;
rxe_add_ref(qp);
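rxe_rcv_mcast_pkt() runs in packet-receive (softirq) context, where a GFP_KERNEL allocation may sleep and is therefore illegal; GFP_ATOMIC never sleeps but can fail, so the result must be checked. In outline:

	/* In softirq or under a spinlock: no sleeping allocations. */
	struct sk_buff *copy = skb_clone(skb, GFP_ATOMIC);

	if (!copy)
		goto drop;	/* atomic allocations can fail; handle it */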
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 33b2d9d77021..13a848a518e8 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -511,24 +511,21 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
static void update_wqe_state(struct rxe_qp *qp,
- struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt,
- enum wqe_state *prev_state)
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt)
{
- enum wqe_state prev_state_ = wqe->state;
-
if (pkt->mask & RXE_END_MASK) {
if (qp_type(qp) == IB_QPT_RC)
wqe->state = wqe_state_pending;
} else {
wqe->state = wqe_state_processing;
}
-
- *prev_state = prev_state_;
}
-static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
- struct rxe_pkt_info *pkt, int payload)
+static void update_wqe_psn(struct rxe_qp *qp,
+ struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt,
+ int payload)
{
/* number of packets left to send including current one */
int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
@@ -546,9 +543,34 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
else
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
+}
- qp->req.opcode = pkt->opcode;
+static void save_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+ struct rxe_qp *rollback_qp)
+{
+ rollback_wqe->state = wqe->state;
+ rollback_wqe->first_psn = wqe->first_psn;
+ rollback_wqe->last_psn = wqe->last_psn;
+ rollback_qp->req.psn = qp->req.psn;
+}
+static void rollback_state(struct rxe_send_wqe *wqe,
+ struct rxe_qp *qp,
+ struct rxe_send_wqe *rollback_wqe,
+ struct rxe_qp *rollback_qp)
+{
+ wqe->state = rollback_wqe->state;
+ wqe->first_psn = rollback_wqe->first_psn;
+ wqe->last_psn = rollback_wqe->last_psn;
+ qp->req.psn = rollback_qp->req.psn;
+}
+
+static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+ struct rxe_pkt_info *pkt, int payload)
+{
+ qp->req.opcode = pkt->opcode;
if (pkt->mask & RXE_END_MASK)
qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
@@ -571,7 +593,8 @@ int rxe_requester(void *arg)
int mtu;
int opcode;
int ret;
- enum wqe_state prev_state;
+ struct rxe_qp rollback_qp;
+ struct rxe_send_wqe rollback_wqe;
next_wqe:
if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
@@ -688,13 +711,21 @@ next_wqe:
goto err;
}
- update_wqe_state(qp, wqe, &pkt, &prev_state);
+ /*
+ * To prevent a race on wqe access between requester and completer,
+ * wqe members state and psn need to be set before calling
+ * rxe_xmit_packet().
+ * Otherwise, completer might initiate an unjustified retry flow.
+ */
+ save_state(wqe, qp, &rollback_wqe, &rollback_qp);
+ update_wqe_state(qp, wqe, &pkt);
+ update_wqe_psn(qp, wqe, &pkt, payload);
ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb);
if (ret) {
qp->need_req_skb = 1;
kfree_skb(skb);
- wqe->state = prev_state;
+ rollback_state(wqe, qp, &rollback_wqe, &rollback_qp);
if (ret == -EAGAIN) {
rxe_run_task(&qp->req.task, 1);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index ebb03b46e2ad..3e0f0f2baace 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -972,11 +972,13 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
+ memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
+
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
- res->first_psn = qp->resp.psn;
- res->last_psn = qp->resp.psn;
- res->cur_psn = qp->resp.psn;
+ res->first_psn = ack_pkt.psn;
+ res->last_psn = ack_pkt.psn;
+ res->cur_psn = ack_pkt.psn;
rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
if (rc) {
@@ -1116,8 +1118,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
rc = RESPST_CLEANUP;
goto out;
}
- bth_set_psn(SKB_TO_PKT(skb_copy),
- qp->resp.psn - 1);
+
/* Resend the result. */
rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
pkt, skb_copy);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 4f7d9b48df64..9dbfcc0ab577 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -478,6 +478,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ipoib_ah *address, u32 qpn);
void ipoib_reap_ah(struct work_struct *work);
+struct ipoib_path *__path_find(struct net_device *dev, void *gid);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 951d9abcca8b..4ad297d3de89 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1318,6 +1318,8 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
}
}
+#define QPN_AND_OPTIONS_OFFSET 4
+
static void ipoib_cm_tx_start(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
@@ -1326,6 +1328,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
+ struct ipoib_path *path;
int ret;
struct ib_sa_path_rec pathrec;
@@ -1338,7 +1341,19 @@ static void ipoib_cm_tx_start(struct work_struct *work)
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
+
qpn = IPOIB_QPN(neigh->daddr);
+ /*
+ * As long as the search is done under these two locks,
+ * path existence indicates its validity.
+ */
+ path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ if (!path) {
+ pr_info("%s ignore not valid path %pI6\n",
+ __func__,
+ neigh->daddr + QPN_AND_OPTIONS_OFFSET);
+ goto free_neigh;
+ }
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1350,6 +1365,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
spin_lock_irqsave(&priv->lock, flags);
if (ret) {
+free_neigh:
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dc6d241b9406..be11d5d5b8c1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1161,8 +1161,17 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_LIGHT) {
+ int oper_up;
ipoib_mark_paths_invalid(dev);
+ /* Set IPoIB operation as down to prevent races between
+ * the flush flow, which leaves the MCG, and on-the-fly
+ * joins that can happen during that time. The mcast
+ * restart task should deal with join requests we missed.
+ */
+ oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_dev_flush(dev);
+ if (oper_up)
+ set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_flush_ah(dev);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 74bcaa064226..cc1c1b062ea5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -485,7 +485,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
return -EINVAL;
}
-static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
+struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct rb_node *n = priv->path_tree.rb_node;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 7914c14478cd..cae9bbcc27e7 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -403,6 +403,7 @@ isert_init_conn(struct isert_conn *isert_conn)
INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
+ init_waitqueue_head(&isert_conn->rem_wait);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
INIT_WORK(&isert_conn->release_work, isert_release_work);
@@ -578,7 +579,8 @@ isert_connect_release(struct isert_conn *isert_conn)
BUG_ON(!device);
isert_free_rx_descriptors(isert_conn);
- if (isert_conn->cm_id)
+ if (isert_conn->cm_id &&
+ !isert_conn->dev_removed)
rdma_destroy_id(isert_conn->cm_id);
if (isert_conn->qp) {
@@ -593,7 +595,10 @@ isert_connect_release(struct isert_conn *isert_conn)
isert_device_put(device);
- kfree(isert_conn);
+ if (isert_conn->dev_removed)
+ wake_up_interruptible(&isert_conn->rem_wait);
+ else
+ kfree(isert_conn);
}
static void
@@ -753,6 +758,7 @@ static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
struct isert_np *isert_np = cma_id->context;
+ struct isert_conn *isert_conn;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
@@ -773,10 +779,21 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
- case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
ret = isert_disconnected_handler(cma_id, event->event);
break;
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ isert_conn = cma_id->qp->qp_context;
+ isert_conn->dev_removed = true;
+ isert_disconnected_handler(cma_id, event->event);
+ wait_event_interruptible(isert_conn->rem_wait,
+ isert_conn->state == ISER_CONN_DOWN);
+ kfree(isert_conn);
+ /*
+ * return non-zero from the callback to destroy
+ * the rdma cm id
+ */
+ return 1;
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index fc791efe3a10..c02ada57d7f5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -158,6 +158,8 @@ struct isert_conn {
struct work_struct release_work;
bool logout_posted;
bool snd_w_inv;
+ wait_queue_head_t rem_wait;
+ bool dev_removed;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 112e17c2768b..37f952dd9fc9 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -176,6 +176,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
{
struct irq_domain_chip_generic *dgc = d->gc;
struct irq_chip_generic *gc;
+ unsigned long flags;
unsigned smr;
int idx;
int ret;
@@ -194,11 +195,11 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
gc = dgc->gc[idx];
- irq_gc_lock(gc);
+ irq_gc_lock_irqsave(gc, flags);
smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
- irq_gc_unlock(gc);
+ irq_gc_unlock_irqrestore(gc, flags);
return ret;
}
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 4f0d068e1abe..2a624d87a035 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -258,6 +258,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
unsigned int *out_type)
{
struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
+ unsigned long flags;
unsigned smr;
int ret;
@@ -269,12 +270,12 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
if (ret)
return ret;
- irq_gc_lock(bgc);
+ irq_gc_lock_irqsave(bgc, flags);
irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
aic_common_set_priority(intspec[2], &smr);
irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
- irq_gc_unlock(bgc);
+ irq_gc_unlock_irqrestore(bgc, flags);
return ret;
}
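Both AIC hunks make the same correction: the xlate callback can run with interrupts enabled, so taking the generic-chip lock with plain irq_gc_lock() could deadlock against an interrupt handler contending for the same lock. The _irqsave/_irqrestore variants disable local interrupts for the critical section. Shape of the fix (the register offset is illustrative):

#include <linux/irq.h>

static void update_smr(struct irq_chip_generic *gc, unsigned int hwirq, u32 smr)
{
	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);			/* saves and disables irqs */
	irq_reg_writel(gc, smr, 0x30 + 4 * hwirq);	/* hypothetical offset */
	irq_gc_unlock_irqrestore(gc, flags);
}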
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 97c372908e78..7817d40d81e7 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -127,6 +127,7 @@ config XGENE_SLIMPRO_MBOX
config BCM_PDC_MBOX
tristate "Broadcom PDC Mailbox"
depends on ARM64 || COMPILE_TEST
+ depends on HAS_DMA
default ARCH_BCM_IPROC
help
Mailbox implementation for the Broadcom PDC ring manager,
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index cbe0c1ee4ba9..c19dd820ea9b 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -469,7 +469,7 @@ static const struct file_operations pdc_debugfs_stats = {
* this directory for a SPU.
* @pdcs: PDC state structure
*/
-void pdc_setup_debugfs(struct pdc_state *pdcs)
+static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
char spu_stats_name[16];
@@ -485,7 +485,7 @@ void pdc_setup_debugfs(struct pdc_state *pdcs)
&pdc_debugfs_stats);
}
-void pdc_free_debugfs(void)
+static void pdc_free_debugfs(void)
{
if (debugfs_dir && simple_empty(debugfs_dir)) {
debugfs_remove_recursive(debugfs_dir);
@@ -1191,10 +1191,11 @@ static void pdc_shutdown(struct mbox_chan *chan)
{
struct pdc_state *pdcs = chan->con_priv;
- if (pdcs)
- dev_dbg(&pdcs->pdev->dev,
- "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
+ if (!pdcs)
+ return;
+ dev_dbg(&pdcs->pdev->dev,
+ "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
pdc_ring_free(pdcs);
}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6fff794e0c72..13041ee37ad6 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -2183,19 +2183,29 @@ location_show(struct mddev *mddev, char *page)
static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
+ int rv;
+ rv = mddev_lock(mddev);
+ if (rv)
+ return rv;
if (mddev->pers) {
- if (!mddev->pers->quiesce)
- return -EBUSY;
- if (mddev->recovery || mddev->sync_thread)
- return -EBUSY;
+ if (!mddev->pers->quiesce) {
+ rv = -EBUSY;
+ goto out;
+ }
+ if (mddev->recovery || mddev->sync_thread) {
+ rv = -EBUSY;
+ goto out;
+ }
}
if (mddev->bitmap || mddev->bitmap_info.file ||
mddev->bitmap_info.offset) {
/* bitmap already configured. Only option is to clear it */
- if (strncmp(buf, "none", 4) != 0)
- return -EBUSY;
+ if (strncmp(buf, "none", 4) != 0) {
+ rv = -EBUSY;
+ goto out;
+ }
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
@@ -2214,21 +2224,25 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
/* nothing to be done */;
else if (strncmp(buf, "file:", 5) == 0) {
/* Not supported yet */
- return -EINVAL;
+ rv = -EINVAL;
+ goto out;
} else {
- int rv;
if (buf[0] == '+')
rv = kstrtoll(buf+1, 10, &offset);
else
rv = kstrtoll(buf, 10, &offset);
if (rv)
- return rv;
- if (offset == 0)
- return -EINVAL;
+ goto out;
+ if (offset == 0) {
+ rv = -EINVAL;
+ goto out;
+ }
if (mddev->bitmap_info.external == 0 &&
mddev->major_version == 0 &&
- offset != mddev->bitmap_info.default_offset)
- return -EINVAL;
+ offset != mddev->bitmap_info.default_offset) {
+ rv = -EINVAL;
+ goto out;
+ }
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
@@ -2245,7 +2259,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->pers->quiesce(mddev, 0);
if (rv) {
bitmap_destroy(mddev);
- return rv;
+ goto out;
}
}
}
@@ -2257,6 +2271,11 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
}
+ rv = 0;
+out:
+ mddev_unlock(mddev);
+ if (rv)
+ return rv;
return len;
}
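Once location_store() takes mddev_lock(), none of its error paths may return directly; the whole conversion above funnels every exit through one unlock point. The resulting shape, reduced to a hedged miniature with a hypothetical precondition check:

static ssize_t my_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv = mddev_lock(mddev);	/* may fail (interrupted); nothing held */

	if (rv)
		return rv;

	if (!my_precondition(mddev)) {	/* hypothetical check */
		rv = -EBUSY;
		goto out;		/* never return with the lock held */
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	return rv ? rv : len;
}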
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 6571c81465e1..8625040bae92 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1879,7 +1879,7 @@ static int __init dm_bufio_init(void)
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
+ dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
if (!dm_bufio_wq)
return -ENOMEM;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index eedba67b0e3e..874295757caa 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1453,7 +1453,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
unsigned i;
int err;
- cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
+ cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
GFP_KERNEL);
if (!cc->tfms)
return -ENOMEM;
@@ -1924,6 +1924,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+ /*
+ * Check if bio is too large, split as needed.
+ */
+ if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+ bio_data_dir(bio) == WRITE)
+ dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+
io = dm_per_bio_data(bio, cc->per_bio_data_size);
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
io->ctx.req = (struct skcipher_request *)(io + 1);
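dm_accept_partial_bio() tells device-mapper the target will only consume the first n_sectors of the bio; the core then resubmits the remainder as a fresh bio, so an oversized write is processed in BIO_MAX_PAGES-sized pieces. The size arithmetic, spelled out:

	/* Largest payload the target accepts per bio, in 512-byte sectors. */
	unsigned int max_sectors = (BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT;

	if (bio_sectors(bio) > max_sectors && bio_data_dir(bio) == WRITE)
		dm_accept_partial_bio(bio, max_sectors);	/* rest is resubmitted */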
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 4ab68033f9d1..49e4d8d4558f 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
goto out;
sector++;
- bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
+ atomic_inc(&lc->io_blocks);
+ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
}
- atomic_inc(&lc->io_blocks);
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = lc->logdev->bdev;
@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
if (ret != block->vecs[i].bv_len) {
atomic_inc(&lc->io_blocks);
submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
+ bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
if (!bio) {
DMERR("Couldn't alloc log bio");
goto error;
@@ -459,9 +459,9 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ret = -EINVAL;
lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
- if (!lc->log_kthread) {
+ if (IS_ERR(lc->log_kthread)) {
+ ret = PTR_ERR(lc->log_kthread);
ti->error = "Couldn't alloc kthread";
dm_put_device(ti, lc->dev);
dm_put_device(ti, lc->logdev);
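The kthread change above fixes a test that could never fire: kthread_run() reports failure with an ERR_PTR-encoded pointer, never NULL. The idiomatic check, with hypothetical worker names:

    struct task_struct *task;

    task = kthread_run(worker_fn, ctx, "example-worker");  /* hypothetical fn/ctx */
    if (IS_ERR(task))
            return PTR_ERR(task);   /* e.g. -ENOMEM or -EINTR; never NULL */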
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 41573f1f626f..34a840d9df76 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -834,8 +834,10 @@ static int join(struct mddev *mddev, int nodes)
goto err;
}
cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
- if (!cinfo->ack_lockres)
+ if (!cinfo->ack_lockres) {
+ ret = -ENOMEM;
goto err;
+ }
/* get sync CR lock on ACK. */
if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
@@ -849,8 +851,10 @@ static int join(struct mddev *mddev, int nodes)
pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
- if (!cinfo->bitmap_lockres)
+ if (!cinfo->bitmap_lockres) {
+ ret = -ENOMEM;
goto err;
+ }
if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
pr_err("Failed to get bitmap lock\n");
ret = -EINVAL;
@@ -858,8 +862,10 @@ static int join(struct mddev *mddev, int nodes)
}
cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
- if (!cinfo->resync_lockres)
+ if (!cinfo->resync_lockres) {
+ ret = -ENOMEM;
goto err;
+ }
return 0;
err:
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d646f6e444f0..915e84d631a2 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1604,11 +1604,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
mddev->new_chunk_sectors = mddev->chunk_sectors;
}
- if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) {
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
set_bit(MD_HAS_JOURNAL, &mddev->flags);
- if (mddev->recovery_cp == MaxSector)
- set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
- }
} else if (mddev->pers == NULL) {
/* Insist on a good event counter while assembling, except for
* spares (which don't need an event count) */
@@ -5851,6 +5848,9 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
working++;
if (test_bit(In_sync, &rdev->flags))
insync++;
+ else if (test_bit(Journal, &rdev->flags))
+ /* TODO: add journal count to md_u.h */
+ ;
else
spare++;
}
@@ -7610,16 +7610,12 @@ EXPORT_SYMBOL(unregister_md_cluster_operations);
int md_setup_cluster(struct mddev *mddev, int nodes)
{
- int err;
-
- err = request_module("md-cluster");
- if (err) {
- pr_err("md-cluster module not found.\n");
- return -ENOENT;
- }
-
+ if (!md_cluster_ops)
+ request_module("md-cluster");
spin_lock(&pers_lock);
+ /* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ pr_err("can't find md-cluster module or get it's reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -7862,6 +7858,7 @@ void md_do_sync(struct md_thread *thread)
*/
do {
+ int mddev2_minor = -1;
mddev->curr_resync = 2;
try_again:
@@ -7891,10 +7888,14 @@ void md_do_sync(struct md_thread *thread)
prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
mddev2->curr_resync >= mddev->curr_resync) {
- printk(KERN_INFO "md: delaying %s of %s"
- " until %s has finished (they"
- " share one or more physical units)\n",
- desc, mdname(mddev), mdname(mddev2));
+ if (mddev2_minor != mddev2->md_minor) {
+ mddev2_minor = mddev2->md_minor;
+ printk(KERN_INFO "md: delaying %s of %s"
+ " until %s has finished (they"
+ " share one or more physical units)\n",
+ desc, mdname(mddev),
+ mdname(mddev2));
+ }
mddev_put(mddev2);
if (signal_pending(current))
flush_signals(current);
@@ -8275,16 +8276,13 @@ no_add:
static void md_start_sync(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- int ret = 0;
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
if (!mddev->sync_thread) {
- if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
- printk(KERN_ERR "%s: could not start resync"
- " thread...\n",
- mdname(mddev));
+ printk(KERN_ERR "%s: could not start resync thread...\n",
+ mdname(mddev));
/* leave the spares where they are, it shouldn't hurt */
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0e4efcd10795..be1a9fca3b2d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1064,6 +1064,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
int max_sectors;
int sectors;
+ md_write_start(mddev, bio);
+
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -1445,8 +1447,6 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
return;
}
- md_write_start(mddev, bio);
-
do {
/*
@@ -2465,20 +2465,21 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
while (sect_to_write) {
struct bio *wbio;
+ sector_t wsector;
if (sectors > sect_to_write)
sectors = sect_to_write;
/* Write at 'sector' for 'sectors' */
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
- wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio, rdev) +
- (sector - r10_bio->sector));
+ wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
+ wbio->bi_iter.bi_sector = wsector +
+ choose_data_offset(r10_bio, rdev);
wbio->bi_bdev = rdev->bdev;
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0)
/* Failure! */
- ok = rdev_set_badblocks(rdev, sector,
+ ok = rdev_set_badblocks(rdev, wsector,
sectors, 0)
&& ok;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 51f76ddbe265..1b1ab4a1d132 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -96,7 +96,6 @@ struct r5l_log {
spinlock_t no_space_stripes_lock;
bool need_cache_flush;
- bool in_teardown;
};
/*
@@ -704,31 +703,22 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
mddev = log->rdev->mddev;
/*
- * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
- * wait for this thread to finish. This thread waits for
- * MD_CHANGE_PENDING clear, which is supposed to be done in
- * md_check_recovery(). md_check_recovery() tries to get
- * reconfig_mutex. Since r5l_quiesce already holds the mutex,
- * md_check_recovery() fails, so the PENDING never get cleared. The
- * in_teardown check workaround this issue.
+ * Discard could zero data, so before discarding we must make sure the
+ * superblock is updated to the new log tail. Updating the superblock
+ * (either by calling md_update_sb() directly or by depending on the md
+ * thread) must hold the reconfig mutex. On the other hand,
+ * raid5_quiesce is called with reconfig_mutex held. The first step of
+ * raid5_quiesce() is waiting for all IO to finish, hence waiting for
+ * the reclaim thread, while the reclaim thread is calling this
+ * function and waiting for the reconfig mutex. So there is a deadlock.
+ * We work around this issue with a trylock.
+ * FIXME: we could miss a discard if we can't take the reconfig mutex
*/
- if (!log->in_teardown) {
- set_mask_bits(&mddev->flags, 0,
- BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
- md_wakeup_thread(mddev->thread);
- wait_event(mddev->sb_wait,
- !test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
- log->in_teardown);
- /*
- * r5l_quiesce could run after in_teardown check and hold
- * mutex first. Superblock might get updated twice.
- */
- if (log->in_teardown)
- md_update_sb(mddev, 1);
- } else {
- WARN_ON(!mddev_is_locked(mddev));
- md_update_sb(mddev, 1);
- }
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
+ if (!mddev_trylock(mddev))
+ return;
+ md_update_sb(mddev, 1);
+ mddev_unlock(mddev);
/* discard IO error really doesn't matter, ignore it */
if (log->last_checkpoint < end) {
@@ -827,7 +817,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
if (!log || state == 2)
return;
if (state == 0) {
- log->in_teardown = 0;
/*
* This is a special case for hotadd. In suspend, the array has
* no journal. In resume, journal is initialized as well as the
@@ -838,11 +827,6 @@ void r5l_quiesce(struct r5l_log *log, int state)
log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
log->rdev->mddev, "reclaim");
} else if (state == 1) {
- /*
- * at this point all stripes are finished, so io_unit is at
- * least in STRIPE_END state
- */
- log->in_teardown = 1;
/* make sure r5l_write_super_and_discard_space exits */
mddev = log->rdev->mddev;
wake_up(&mddev->sb_wait);
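Reduced to its essentials, the workaround is the classic trylock-or-back-off shape: a thread that a lock holder may be waiting on must not sleep on that same lock. A generic sketch with a hypothetical mutex and update:

    /* Thread A holds shared_lock and waits for this thread to finish,
     * so a blocking mutex_lock() here would deadlock; try, and skip
     * the side effect on contention.
     */
    if (!mutex_trylock(&shared_lock))
            return;                 /* caller retries or tolerates the missed update */
    do_protected_update();          /* hypothetical */
    mutex_unlock(&shared_lock);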
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8912407a4dd0..ee7fc3701700 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -659,6 +659,7 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
{
struct stripe_head *sh;
int hash = stripe_hash_locks_hash(sector);
+ int inc_empty_inactive_list_flag;
pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
@@ -703,7 +704,12 @@ raid5_get_active_stripe(struct r5conf *conf, sector_t sector,
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&sh->lru) &&
!test_bit(STRIPE_EXPANDING, &sh->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
list_del_init(&sh->lru);
+ if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
if (sh->group) {
sh->group->stripes_cnt--;
sh->group = NULL;
@@ -762,6 +768,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
sector_t head_sector, tmp_sec;
int hash;
int dd_idx;
+ int inc_empty_inactive_list_flag;
/* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
tmp_sec = sh->sector;
@@ -779,7 +786,12 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
atomic_inc(&conf->active_stripes);
BUG_ON(list_empty(&head->lru) &&
!test_bit(STRIPE_EXPANDING, &head->state));
+ inc_empty_inactive_list_flag = 0;
+ if (!list_empty(conf->inactive_list + hash))
+ inc_empty_inactive_list_flag = 1;
list_del_init(&head->lru);
+ if (list_empty(conf->inactive_list + hash) && inc_empty_inactive_list_flag)
+ atomic_inc(&conf->empty_inactive_list_nr);
if (head->group) {
head->group->stripes_cnt--;
head->group = NULL;
@@ -993,7 +1005,6 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_reset(bi);
bi->bi_bdev = rdev->bdev;
bio_set_op_attrs(bi, op, op_flags);
bi->bi_end_io = op_is_write(op)
@@ -1045,7 +1056,6 @@ again:
set_bit(STRIPE_IO_STARTED, &sh->state);
- bio_reset(rbi);
rbi->bi_bdev = rrdev->bdev;
bio_set_op_attrs(rbi, op, op_flags);
BUG_ON(!op_is_write(op));
@@ -1978,9 +1988,11 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
+ int disks)
{
struct stripe_head *sh;
+ int i;
sh = kmem_cache_zalloc(sc, gfp);
if (sh) {
@@ -1989,6 +2001,17 @@ static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
INIT_LIST_HEAD(&sh->batch_list);
INIT_LIST_HEAD(&sh->lru);
atomic_set(&sh->count, 1);
+ for (i = 0; i < disks; i++) {
+ struct r5dev *dev = &sh->dev[i];
+
+ bio_init(&dev->req);
+ dev->req.bi_io_vec = &dev->vec;
+ dev->req.bi_max_vecs = 1;
+
+ bio_init(&dev->rreq);
+ dev->rreq.bi_io_vec = &dev->rvec;
+ dev->rreq.bi_max_vecs = 1;
+ }
}
return sh;
}
@@ -1996,7 +2019,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
{
struct stripe_head *sh;
- sh = alloc_stripe(conf->slab_cache, gfp);
+ sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size);
if (!sh)
return 0;
@@ -2167,7 +2190,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
mutex_lock(&conf->cache_size_mutex);
for (i = conf->max_nr_stripes; i; i--) {
- nsh = alloc_stripe(sc, GFP_KERNEL);
+ nsh = alloc_stripe(sc, GFP_KERNEL, newsize);
if (!nsh)
break;
@@ -2299,6 +2322,7 @@ static void raid5_end_read_request(struct bio * bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_error);
if (i == disks) {
+ bio_reset(bi);
BUG();
return;
}
@@ -2399,6 +2423,7 @@ static void raid5_end_read_request(struct bio * bi)
}
}
rdev_dec_pending(rdev, conf->mddev);
+ bio_reset(bi);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
raid5_release_stripe(sh);
@@ -2436,6 +2461,7 @@ static void raid5_end_write_request(struct bio *bi)
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
bi->bi_error);
if (i == disks) {
+ bio_reset(bi);
BUG();
return;
}
@@ -2472,6 +2498,7 @@ static void raid5_end_write_request(struct bio *bi)
if (sh->batch_head && bi->bi_error && !replacement)
set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
+ bio_reset(bi);
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -2485,16 +2512,6 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req);
- dev->req.bi_io_vec = &dev->vec;
- dev->req.bi_max_vecs = 1;
- dev->req.bi_private = sh;
-
- bio_init(&dev->rreq);
- dev->rreq.bi_io_vec = &dev->rvec;
- dev->rreq.bi_max_vecs = 1;
- dev->rreq.bi_private = sh;
-
dev->flags = 0;
dev->sector = raid5_compute_blocknr(sh, i, previous);
}
@@ -4628,7 +4645,9 @@ finish:
}
if (!bio_list_empty(&s.return_bi)) {
- if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) {
+ if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags) &&
+ (s.failed <= conf->max_degraded ||
+ conf->mddev->external == 0)) {
spin_lock_irq(&conf->device_lock);
bio_list_merge(&conf->return_bi, &s.return_bi);
spin_unlock_irq(&conf->device_lock);
@@ -6620,6 +6639,16 @@ static struct r5conf *setup_conf(struct mddev *mddev)
}
conf->min_nr_stripes = NR_STRIPES;
+ if (mddev->reshape_position != MaxSector) {
+ int stripes = max_t(int,
+ ((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4,
+ ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4);
+ conf->min_nr_stripes = max(NR_STRIPES, stripes);
+ if (conf->min_nr_stripes != NR_STRIPES)
+ printk(KERN_INFO
+ "md/raid:%s: force stripe size %d for reshape\n",
+ mdname(mddev), conf->min_nr_stripes);
+ }
memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
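To put numbers on the reshape sizing: STRIPE_SIZE is normally one page, so on a 4 KiB-page system a 512 KiB chunk (chunk_sectors = 1024) gives ((1024 << 9) / 4096) * 4 = 512 stripes, double the usual NR_STRIPES default of 256, hence the forced increase and the log message.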
@@ -6826,11 +6855,14 @@ static int raid5_run(struct mddev *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !journal_dev) {
- printk(KERN_ERR "md/raid:%s: journal disk is missing, force array readonly\n",
- mdname(mddev));
- mddev->ro = 1;
- set_disk_ro(mddev->gendisk, 1);
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ if (!journal_dev) {
+ pr_err("md/raid:%s: journal disk is missing, force array readonly\n",
+ mdname(mddev));
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+ } else if (mddev->recovery_cp == MaxSector)
+ set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
}
conf->min_offset_diff = min_offset_diff;
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c
index 70018247bdda..5719b991e340 100644
--- a/drivers/media/cec-edid.c
+++ b/drivers/media/cec-edid.c
@@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
u8 tag = edid[i] >> 5;
u8 len = edid[i] & 0x1f;
- if (tag == 3 && len >= 5 && i + len <= end)
+ if (tag == 3 && len >= 5 && i + len <= end &&
+ edid[i + 1] == 0x03 &&
+ edid[i + 2] == 0x0c &&
+ edid[i + 3] == 0x00)
return i + 4;
i += len + 1;
} while (i < end);
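The three added byte compares pin the match down to the HDMI vendor-specific data block: a CEA-861 vendor block (tag 3) whose payload begins with the HDMI Licensing IEEE OUI 0x000C03, stored least-significant byte first; the source physical address then sits at offset 4. The test as a hypothetical predicate:

    /* blk points at the CEA data block header byte */
    static bool is_hdmi_vsdb(const u8 *blk)
    {
            return (blk[0] >> 5) == 3 &&    /* vendor-specific tag */
                   blk[1] == 0x03 &&        /* OUI 0x000c03 ... */
                   blk[2] == 0x0c &&
                   blk[3] == 0x00;          /* ... LSB first */
    }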
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index efec2d1a7afd..4d080da7afaf 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index db987e5b93eb..59a4b5f7724e 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
ret = vb2_queue_init(q);
if (ret) {
vb2_dvb_dealloc_frontends(&dev->frontends);
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index ca417a454d67..791a5161809b 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err)
return err;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f25344bc7912..552b635cfce7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU
config VIDEO_MEDIATEK_VCODEC
tristate "Mediatek Video Codec driver"
depends on MTK_IOMMU || COMPILE_TEST
- depends on VIDEO_DEV && VIDEO_V4L2
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 94f0a425be42..3a8e6958adae 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -23,7 +23,6 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
-#include "mtk_vcodec_util.h"
#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 3ed3f2d31df5..2c5719ac23b2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret, i;
struct mtk_video_fmt *fmt;
- unsigned int pitch_w_div16;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
- pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16);
- if (pitch_w_div16 % 8 != 0) {
- /* Adjust returned width/height, so application could correctly
- * allocate hw required memory
- */
- q_data->visible_height += 32;
- vidioc_try_fmt(f, q_data->fmt);
- }
-
q_data->field = f->fmt.pix_mp.field;
ctx->colorspace = f->fmt.pix_mp.colorspace;
ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
@@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv)
{
struct mtk_vcodec_ctx *ctx = priv;
int ret;
- struct vb2_buffer *dst_buf;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
@@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv)
mtk_v4l2_err("venc_if_encode failed=%d", ret);
return -EINVAL;
}
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf) {
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+ } else {
+ mtk_v4l2_err("No timestamp for the header buffer.");
+ }
ctx->state = MTK_STATE_HEADER;
dst_buf->planes[0].bytesused = enc_result.bs_size;
@@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work)
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
int ret, i;
- struct vb2_v4l2_buffer *vb2_v4l2;
+ struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
/* check dst_buf, dst_buf may be removed in device_run
* to store the encode header, so we need to check dst_buf and
@@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work)
ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
&frm_buf, &bs_buf, &enc_result);
- vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
+ src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
+ dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
+
+ dst_buf->timestamp = src_buf->timestamp;
+ dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
+
if (enc_result.is_key_frm)
- vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
if (ret) {
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
@@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
- 0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+ 0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
@@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
{
- venc_if_deinit(ctx);
+ int ret = venc_if_deinit(ctx);
+
+ if (ret)
+ mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+
+ ctx->state = MTK_STATE_FREE;
}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index c7806ecda2dd..5cd2151431bf 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file)
mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
+ /*
+ * Call v4l2_m2m_ctx_release to make sure the worker thread is not
+ * running after venc_if_deinit.
+ */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
list_del_init(&ctx->list);
dev->num_instances--;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
index 33e890f5aa9c..12131855b46a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
@@ -16,7 +16,6 @@
#define _MTK_VCODEC_INTR_H_
#define MTK_INST_IRQ_RECEIVED 0x1
-#define MTK_INST_WORK_THREAD_ABORT_DONE 0x2
struct mtk_vcodec_ctx;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 9a600525b3c1..63d4be4ff327 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -61,6 +61,8 @@ enum venc_h264_bs_mode {
/*
* struct venc_h264_vpu_config - Structure for h264 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @input_fourcc: input fourcc
* @bitrate: target bitrate (in bps)
* @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -94,13 +96,13 @@ struct venc_h264_vpu_config {
/*
* struct venc_h264_vpu_buf - Structure for buffer information
- * @align: buffer alignment (in bytes)
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @iova: IO virtual address
* @vpua: VPU side memory addr which is used by RC_CODE
* @size: buffer size (in bytes)
*/
struct venc_h264_vpu_buf {
- u32 align;
u32 iova;
u32 vpua;
u32 size;
@@ -108,6 +110,8 @@ struct venc_h264_vpu_buf {
/*
* struct venc_h264_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* This structure is allocated in VPU side and shared to AP side.
* @config: h264 encoder configuration
* @work_bufs: working buffer information in VPU side
@@ -150,12 +154,6 @@ struct venc_h264_inst {
struct mtk_vcodec_ctx *ctx;
};
-static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr,
- u32 val)
-{
- writel(val, inst->hw_base + addr);
-}
-
static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
{
return readl(inst->hw_base + addr);
@@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
return 40;
case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
default:
mtk_vcodec_debug(inst, "unsupported level %d", level);
return 31;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 60bbcd2a0510..6d9758479f9a 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf {
/*
* struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @input_fourcc: input fourcc
* @bitrate: target bitrate (in bps)
* @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -83,14 +85,14 @@ struct venc_vp8_vpu_config {
};
/*
- * struct venc_vp8_vpu_buf -Structure for buffer information
- * @align: buffer alignment (in bytes)
+ * struct venc_vp8_vpu_buf - Structure for buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* @iova: IO virtual address
* @vpua: VPU side memory addr which is used by RC_CODE
* @size: buffer size (in bytes)
*/
struct venc_vp8_vpu_buf {
- u32 align;
u32 iova;
u32 vpua;
u32 size;
@@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf {
/*
* struct venc_vp8_vsi - Structure for VPU driver control and info share
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
* This structure is allocated in VPU side and shared to AP side.
* @config: vp8 encoder configuration
* @work_bufs: working buffer information in VPU side
@@ -138,12 +142,6 @@ struct venc_vp8_inst {
struct mtk_vcodec_ctx *ctx;
};
-static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr,
- u32 val)
-{
- writel(val, inst->hw_base + addr);
-}
-
static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
{
return readl(inst->hw_base + addr);
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 6a7bcc3028b1..bc50c69ee0c5 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
*/
int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
+ int error;
+
if (!fcp)
return 0;
- return pm_runtime_get_sync(fcp->dev);
+ error = pm_runtime_get_sync(fcp->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(rcar_fcp_enable);
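The extra variable is needed because pm_runtime_get_sync() returns 1, not 0, when the device was already active; passing that straight through made callers that test for non-zero treat a successful enable as an error. The safe shape:

    int error = pm_runtime_get_sync(dev);

    if (error < 0)
            return error;   /* genuine failure */
    return 0;               /* 0 and 1 both mean the device is powered */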
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 869c83fb3c5d..f00f3e742265 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
return 0;
}
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
{
int ret;
struct device_node *child;
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev)
else
ret = gpmc_probe_generic_child(pdev, child);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n",
+ child->name, ret);
+ }
}
-
- return 0;
}
#else
static int gpmc_probe_dt(struct platform_device *pdev)
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev)
return 0;
}
-static int gpmc_probe_dt_children(struct platform_device *pdev)
+static void gpmc_probe_dt_children(struct platform_device *pdev)
{
- return 0;
}
#endif /* CONFIG_OF */
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev)
goto setup_irq_failed;
}
- rc = gpmc_probe_dt_children(pdev);
- if (rc < 0) {
- dev_err(gpmc->dev, "failed to probe DT children\n");
- goto dt_children_failed;
- }
+ gpmc_probe_dt_children(pdev);
return 0;
-dt_children_failed:
- gpmc_free_irq(gpmc);
setup_irq_failed:
gpmc_gpio_exit(gpmc);
gpio_init_failed:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a216b4667742..d00252828966 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -345,16 +345,6 @@ config SENSORS_TSL2550
This driver can also be built as a module. If so, the module
will be called tsl2550.
-config SENSORS_BH1780
- tristate "ROHM BH1780GLI ambient light sensor"
- depends on I2C && SYSFS
- help
- If you say yes here you get support for the ROHM BH1780GLI
- ambient light sensor.
-
- This driver can also be built as a module. If so, the module
- will be called bh1780gli.
-
config SENSORS_BH1770
tristate "BH1770GLC / SFH7770 combined ALS - Proximity sensor"
depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 7410c6d9a34d..fb32516ddfe2 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o
-obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c
deleted file mode 100644
index 7f90ce5a569a..000000000000
--- a/drivers/misc/bh1780gli.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * bh1780gli.c
- * ROHM Ambient Light Sensor Driver
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Hemanth V <hemanthv@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#define BH1780_REG_CONTROL 0x80
-#define BH1780_REG_PARTID 0x8A
-#define BH1780_REG_MANFID 0x8B
-#define BH1780_REG_DLOW 0x8C
-#define BH1780_REG_DHIGH 0x8D
-
-#define BH1780_REVMASK (0xf)
-#define BH1780_POWMASK (0x3)
-#define BH1780_POFF (0x0)
-#define BH1780_PON (0x3)
-
-/* power on settling time in ms */
-#define BH1780_PON_DELAY 2
-
-struct bh1780_data {
- struct i2c_client *client;
- int power_state;
- /* lock for sysfs operations */
- struct mutex lock;
-};
-
-static int bh1780_write(struct bh1780_data *ddata, u8 reg, u8 val, char *msg)
-{
- int ret = i2c_smbus_write_byte_data(ddata->client, reg, val);
- if (ret < 0)
- dev_err(&ddata->client->dev,
- "i2c_smbus_write_byte_data failed error %d Register (%s)\n",
- ret, msg);
- return ret;
-}
-
-static int bh1780_read(struct bh1780_data *ddata, u8 reg, char *msg)
-{
- int ret = i2c_smbus_read_byte_data(ddata->client, reg);
- if (ret < 0)
- dev_err(&ddata->client->dev,
- "i2c_smbus_read_byte_data failed error %d Register (%s)\n",
- ret, msg);
- return ret;
-}
-
-static ssize_t bh1780_show_lux(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct bh1780_data *ddata = platform_get_drvdata(pdev);
- int lsb, msb;
-
- lsb = bh1780_read(ddata, BH1780_REG_DLOW, "DLOW");
- if (lsb < 0)
- return lsb;
-
- msb = bh1780_read(ddata, BH1780_REG_DHIGH, "DHIGH");
- if (msb < 0)
- return msb;
-
- return sprintf(buf, "%d\n", (msb << 8) | lsb);
-}
-
-static ssize_t bh1780_show_power_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct bh1780_data *ddata = platform_get_drvdata(pdev);
- int state;
-
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
-
- return sprintf(buf, "%d\n", state & BH1780_POWMASK);
-}
-
-static ssize_t bh1780_store_power_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct bh1780_data *ddata = platform_get_drvdata(pdev);
- unsigned long val;
- int error;
-
- error = kstrtoul(buf, 0, &val);
- if (error)
- return error;
-
- if (val < BH1780_POFF || val > BH1780_PON)
- return -EINVAL;
-
- mutex_lock(&ddata->lock);
-
- error = bh1780_write(ddata, BH1780_REG_CONTROL, val, "CONTROL");
- if (error < 0) {
- mutex_unlock(&ddata->lock);
- return error;
- }
-
- msleep(BH1780_PON_DELAY);
- ddata->power_state = val;
- mutex_unlock(&ddata->lock);
-
- return count;
-}
-
-static DEVICE_ATTR(lux, S_IRUGO, bh1780_show_lux, NULL);
-
-static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
- bh1780_show_power_state, bh1780_store_power_state);
-
-static struct attribute *bh1780_attributes[] = {
- &dev_attr_power_state.attr,
- &dev_attr_lux.attr,
- NULL
-};
-
-static const struct attribute_group bh1780_attr_group = {
- .attrs = bh1780_attributes,
-};
-
-static int bh1780_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- int ret;
- struct bh1780_data *ddata;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
- return -EIO;
-
- ddata = devm_kzalloc(&client->dev, sizeof(struct bh1780_data),
- GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- ddata->client = client;
- i2c_set_clientdata(client, ddata);
-
- ret = bh1780_read(ddata, BH1780_REG_PARTID, "PART ID");
- if (ret < 0)
- return ret;
-
- dev_info(&client->dev, "Ambient Light Sensor, Rev : %d\n",
- (ret & BH1780_REVMASK));
-
- mutex_init(&ddata->lock);
-
- return sysfs_create_group(&client->dev.kobj, &bh1780_attr_group);
-}
-
-static int bh1780_remove(struct i2c_client *client)
-{
- sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int bh1780_suspend(struct device *dev)
-{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
-
- ddata = i2c_get_clientdata(client);
- state = bh1780_read(ddata, BH1780_REG_CONTROL, "CONTROL");
- if (state < 0)
- return state;
-
- ddata->power_state = state & BH1780_POWMASK;
-
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, BH1780_POFF,
- "CONTROL");
-
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int bh1780_resume(struct device *dev)
-{
- struct bh1780_data *ddata;
- int state, ret;
- struct i2c_client *client = to_i2c_client(dev);
-
- ddata = i2c_get_clientdata(client);
- state = ddata->power_state;
- ret = bh1780_write(ddata, BH1780_REG_CONTROL, state,
- "CONTROL");
-
- if (ret < 0)
- return ret;
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(bh1780_pm, bh1780_suspend, bh1780_resume);
-
-static const struct i2c_device_id bh1780_id[] = {
- { "bh1780", 0 },
- { },
-};
-
-MODULE_DEVICE_TABLE(i2c, bh1780_id);
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_bh1780_match[] = {
- { .compatible = "rohm,bh1780gli", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, of_bh1780_match);
-#endif
-
-static struct i2c_driver bh1780_driver = {
- .probe = bh1780_probe,
- .remove = bh1780_remove,
- .id_table = bh1780_id,
- .driver = {
- .name = "bh1780",
- .pm = &bh1780_pm,
- .of_match_table = of_match_ptr(of_bh1780_match),
- },
-};
-
-module_i2c_driver(bh1780_driver);
-
-MODULE_DESCRIPTION("BH1780GLI Ambient Light Sensor Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Hemanth V <hemanthv@ti.com>");
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c
index 166b1db3969f..3564477b8c2d 100644
--- a/drivers/misc/lkdtm_rodata.c
+++ b/drivers/misc/lkdtm_rodata.c
@@ -4,7 +4,7 @@
*/
#include "lkdtm.h"
-void lkdtm_rodata_do_nothing(void)
+void notrace lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture-agnostic "return". */
}
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
index 5525a204db93..1dd611423d8b 100644
--- a/drivers/misc/lkdtm_usercopy.c
+++ b/drivers/misc/lkdtm_usercopy.c
@@ -9,7 +9,15 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-static size_t cache_size = 1024;
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies,
+ * and making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst = 0;
+static volatile size_t cache_size = 1024;
static struct kmem_cache *bad_cache;
static const unsigned char test_text[] = "This is a test.\n";
@@ -67,14 +75,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
if (to_user) {
pr_info("attempting good copy_to_user of local stack\n");
if (copy_to_user((void __user *)user_addr, good_stack,
- sizeof(good_stack))) {
+ unconst + sizeof(good_stack))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of distant stack\n");
if (copy_to_user((void __user *)user_addr, bad_stack,
- sizeof(good_stack))) {
+ unconst + sizeof(good_stack))) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
@@ -88,14 +96,14 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
pr_info("attempting good copy_from_user of local stack\n");
if (copy_from_user(good_stack, (void __user *)user_addr,
- sizeof(good_stack))) {
+ unconst + sizeof(good_stack))) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of distant stack\n");
if (copy_from_user(bad_stack, (void __user *)user_addr,
- sizeof(good_stack))) {
+ unconst + sizeof(good_stack))) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
@@ -109,7 +117,7 @@ static void do_usercopy_heap_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
- const size_t size = 1024;
+ size_t size = unconst + 1024;
one = kmalloc(size, GFP_KERNEL);
two = kmalloc(size, GFP_KERNEL);
@@ -285,13 +293,14 @@ void lkdtm_USERCOPY_KERNEL(void)
pr_info("attempting good copy_to_user from kernel rodata\n");
if (copy_to_user((void __user *)user_addr, test_text,
- sizeof(test_text))) {
+ unconst + sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user from kernel text\n");
- if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
+ unconst + PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
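The volatile zero is the whole trick: the compiler cannot fold unconst + sizeof(x) into a constant, so the length reaches the hardened-usercopy runtime checks instead of being proven safe at compile time. In miniature, with hypothetical uptr/buf:

    static volatile size_t unconst;     /* always 0, but not provably so */

    /* sizeof(buf) alone is a compile-time constant that the checks
     * would elide; the volatile add forces the runtime-checked path
     * without changing the copied length.
     */
    if (copy_to_user(uptr, buf, unconst + sizeof(buf)))
            pr_warn("copy_to_user failed\n");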
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e2fb44cc5c37..dc3a854e02d3 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1263,8 +1263,14 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
u32 reg;
- /* Read ME FW Status check for SPS Firmware */
- pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+ unsigned int devfn;
+
+ /*
+ * Read ME FW Status register to check for SPS Firmware
+ * The SPS FW is only signaled in PCI function 0
+ */
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_1, &reg);
trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
/* if bits [19:16] = 15, running SPS Firmware */
return (reg & 0xf0000) == 0xf0000;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 64e64da6da44..71cea9b296b2 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -85,8 +85,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index f23d65eb070d..be3c49fa7382 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1016,14 +1016,16 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
/* Only reconfigure if we have a different burst size */
if (*bp != burst) {
- struct dma_slave_config cfg;
-
- cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- cfg.src_maxburst = burst;
- cfg.dst_maxburst = burst;
+ struct dma_slave_config cfg = {
+ .src_addr = host->phys_base +
+ OMAP_MMC_REG(host, DATA),
+ .dst_addr = host->phys_base +
+ OMAP_MMC_REG(host, DATA),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .src_maxburst = burst,
+ .dst_maxburst = burst,
+ };
if (dmaengine_slave_config(c, &cfg))
goto use_pio;
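The designated initializer is a correctness fix as much as a style one: every dma_slave_config member not named, .direction and .device_fc among them, is now zero-initialized instead of carrying stack garbage into dmaengine_slave_config(). In miniature:

    struct dma_slave_config cfg = {
            .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
            /* all unnamed members (.direction, .device_fc, ...) are 0 */
    };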
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 24ebc9a8de89..5f2f24a7360d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1409,11 +1409,18 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
struct mmc_request *req)
{
- struct dma_slave_config cfg;
struct dma_async_tx_descriptor *tx;
int ret = 0, i;
struct mmc_data *data = req->data;
struct dma_chan *chan;
+ struct dma_slave_config cfg = {
+ .src_addr = host->mapbase + OMAP_HSMMC_DATA,
+ .dst_addr = host->mapbase + OMAP_HSMMC_DATA,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_maxburst = data->blksz / 4,
+ .dst_maxburst = data->blksz / 4,
+ };
/* Sanity check: all the SG entries must be aligned by block size. */
for (i = 0; i < data->sg_len; i++) {
@@ -1433,13 +1440,6 @@ static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,
chan = omap_hsmmc_get_dma_chan(host, data);
- cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
- cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
- cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- cfg.src_maxburst = data->blksz / 4;
- cfg.dst_maxburst = data->blksz / 4;
-
ret = dmaengine_slave_config(chan, &cfg);
if (ret)
return ret;
diff --git a/drivers/mmc/host/sdhci-st.c b/drivers/mmc/host/sdhci-st.c
index c95ba83366a0..ed92ce729dde 100644
--- a/drivers/mmc/host/sdhci-st.c
+++ b/drivers/mmc/host/sdhci-st.c
@@ -28,6 +28,7 @@
struct st_mmc_platform_data {
struct reset_control *rstc;
+ struct clk *icnclk;
void __iomem *top_ioaddr;
};
@@ -353,7 +354,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct st_mmc_platform_data *pdata;
struct sdhci_pltfm_host *pltfm_host;
- struct clk *clk;
+ struct clk *clk, *icnclk;
int ret = 0;
u16 host_version;
struct resource *res;
@@ -365,6 +366,11 @@ static int sdhci_st_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
+ /* ICN clock isn't compulsory, but use it if it's provided. */
+ icnclk = devm_clk_get(&pdev->dev, "icn");
+ if (IS_ERR(icnclk))
+ icnclk = NULL;
+
rstc = devm_reset_control_get(&pdev->dev, NULL);
if (IS_ERR(rstc))
rstc = NULL;
@@ -389,6 +395,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
clk_prepare_enable(clk);
+ clk_prepare_enable(icnclk);
/* Configure the FlashSS Top registers for setting eMMC TX/RX delay */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -400,6 +407,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
}
pltfm_host->clk = clk;
+ pdata->icnclk = icnclk;
/* Configure the Arasan HC inside the flashSS */
st_mmcss_cconfig(np, host);
@@ -422,6 +430,7 @@ static int sdhci_st_probe(struct platform_device *pdev)
return 0;
err_out:
+ clk_disable_unprepare(icnclk);
clk_disable_unprepare(clk);
err_of:
sdhci_pltfm_free(pdev);
@@ -442,6 +451,8 @@ static int sdhci_st_remove(struct platform_device *pdev)
ret = sdhci_pltfm_unregister(pdev);
+ clk_disable_unprepare(pdata->icnclk);
+
if (rstc)
reset_control_assert(rstc);
@@ -462,6 +473,7 @@ static int sdhci_st_suspend(struct device *dev)
if (pdata->rstc)
reset_control_assert(pdata->rstc);
+ clk_disable_unprepare(pdata->icnclk);
clk_disable_unprepare(pltfm_host->clk);
out:
return ret;
@@ -475,6 +487,7 @@ static int sdhci_st_resume(struct device *dev)
struct device_node *np = dev->of_node;
clk_prepare_enable(pltfm_host->clk);
+ clk_prepare_enable(pdata->icnclk);
if (pdata->rstc)
reset_control_deassert(pdata->rstc);
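Collapsing a missing "icn" clock to NULL works because the common clock framework accepts NULL as a no-op dummy clock, so the enable/disable calls sprinkled through probe, remove, suspend and resume can stay unconditional. The optional-clock idiom:

    struct clk *icnclk;

    icnclk = devm_clk_get(&pdev->dev, "icn");
    if (IS_ERR(icnclk))
            icnclk = NULL;              /* optional: carry on without it */

    clk_prepare_enable(icnclk);         /* no-op when icnclk == NULL */
    clk_disable_unprepare(icnclk);      /* likewise */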
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0c5415b05ea9..95c32f2d7601 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,6 +149,8 @@ config IPVLAN
tristate "IP-VLAN support"
depends on INET
depends on IPV6
+ depends on NETFILTER
+ depends on NET_L3_MASTER_DEV
---help---
This allows one to create virtual devices off of a main interface
and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c6a1309b2131..3f31ca32f52b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1341,9 +1341,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->name);
}
- /* already enslaved */
- if (slave_dev->flags & IFF_SLAVE) {
- netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
+ /* already in use? */
+ if (netdev_is_rx_handler_busy(slave_dev)) {
+ netdev_err(bond_dev,
+ "Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 41c0fc9f3b14..16f7cadda5c3 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = flexcan_chip_disable(priv);
- if (err)
- return err;
-
if (netif_running(dev)) {
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
netif_stop_queue(dev);
netif_device_detach(dev);
}
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
}
- return flexcan_chip_enable(priv);
+ return 0;
}
static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2d1d22eec750..368bb0710d8f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -81,6 +81,10 @@
#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
#define IFI_CANFD_TDELAY 0x1c
+#define IFI_CANFD_TDELAY_DEFAULT 0xb
+#define IFI_CANFD_TDELAY_MASK 0x3fff
+#define IFI_CANFD_TDELAY_ABS BIT(14)
+#define IFI_CANFD_TDELAY_EN BIT(15)
#define IFI_CANFD_ERROR 0x20
#define IFI_CANFD_ERROR_TX_OFFSET 0
@@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
- u16 brp, sjw, tseg1, tseg2;
+ u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
brp = bt->brp - 2;
@@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
(sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_FTIME);
+
+ /* Configure transmitter delay */
+ tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
+ priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
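As a worked example of the delay register value, assuming hypothetical data-phase bittiming of brp = 2 and phase_seg1 = 7: tdc = 2 * (7 + 1) = 16, well inside the 14-bit IFI_CANFD_TDELAY_MASK, and it is programmed together with the enable and absolute-mode bits.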
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index de6d04429a70..065984670ff1 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -25,4 +25,13 @@ source "drivers/net/dsa/b53/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ depends on NET_DSA
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ ---help---
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
+
endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index ca1e71b853a6..8346e4f9737a 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-y += b53/
obj-y += mv88e6xxx/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0afc2e5a04dc..7717b19dc806 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -764,11 +764,6 @@ static int b53_get_sset_count(struct dsa_switch *ds)
return b53_get_mib_size(dev);
}
-static int b53_set_addr(struct dsa_switch *ds, u8 *addr)
-{
- return 0;
-}
-
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -1407,16 +1402,12 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
}
}
-static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
- u8 state)
+static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
{
struct b53_device *dev = ds->priv;
- u8 hw_state, cur_hw_state;
+ u8 hw_state;
u8 reg;
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
- cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK;
-
switch (state) {
case BR_STATE_DISABLED:
hw_state = PORT_CTRL_DIS_STATE;
@@ -1438,26 +1429,20 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port,
return;
}
- /* Fast-age ARL entries if we are moving a port from Learning or
- * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
- * state (hw_state)
- */
- if (cur_hw_state != hw_state) {
- if (cur_hw_state >= PORT_CTRL_LEARN_STATE &&
- hw_state <= PORT_CTRL_LISTEN_STATE) {
- if (b53_fast_age_port(dev, port)) {
- dev_err(ds->dev, "fast ageing failed\n");
- return;
- }
- }
- }
-
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg);
reg &= ~PORT_CTRL_STP_STATE_MASK;
reg |= hw_state;
b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
}
+static void b53_br_fast_age(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ if (b53_fast_age_port(dev, port))
+ dev_err(ds->dev, "fast ageing failed\n");
+}
+
static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
{
return DSA_TAG_PROTO_NONE;
@@ -1466,7 +1451,6 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
static struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
- .set_addr = b53_set_addr,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
@@ -1478,6 +1462,7 @@ static struct dsa_switch_ops b53_switch_ops = {
.port_bridge_join = b53_br_join,
.port_bridge_leave = b53_br_leave,
.port_stp_state_set = b53_br_set_stp_state,
+ .port_fast_age = b53_br_fast_age,
.port_vlan_filtering = b53_vlan_filtering,
.port_vlan_prepare = b53_vlan_prepare,
.port_vlan_add = b53_vlan_add,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 76672dae412d..f192a673caba 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -372,7 +372,6 @@ static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
#ifdef CONFIG_BCM47XX
-#include <linux/version.h>
#include <linux/bcm47xx_nvram.h>
#include <bcm47xx_board.h>
static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 51f1fc0dddc5..e218887f18b7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -256,7 +256,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port));
reg &= ~PORT_VLAN_CTRL_MASK;
reg |= (1 << port);
- reg |= priv->port_sts[port].vlan_ctl_mask;
+ reg |= priv->dev->ports[port].vlan_ctl_mask;
core_writel(priv, reg, CORE_PORT_VLAN_CTL_PORT(port));
bcm_sf2_imp_vlan_setup(ds, cpu_port);
@@ -960,7 +960,7 @@ static int bcm_sf2_core_write64(struct b53_device *dev, u8 page, u8 reg,
return 0;
}
-struct b53_io_ops bcm_sf2_io_ops = {
+static struct b53_io_ops bcm_sf2_io_ops = {
.read8 = bcm_sf2_core_read8,
.read16 = bcm_sf2_core_read16,
.read32 = bcm_sf2_core_read32,
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 46c4ea796574..44692673e1d5 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -50,8 +50,6 @@ struct bcm_sf2_port_status {
unsigned int link;
struct ethtool_eee eee;
-
- u16 vlan_ctl_mask;
};
struct bcm_sf2_priv {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 70a812d159c9..122876cb926c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -216,6 +216,22 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
+int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 *val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
+ u16 val)
+{
+ int addr = chip->info->port_base_addr + port;
+
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val)
{
@@ -585,23 +601,23 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct mv88e6xxx_chip *chip = ds->priv;
- u32 reg;
- int ret;
+ u16 reg;
+ int err;
if (!phy_is_pseudo_fixed_link(phydev))
return;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
+ if (err)
goto out;
- reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
- PORT_PCS_CTRL_FORCE_LINK |
- PORT_PCS_CTRL_DUPLEX_FULL |
- PORT_PCS_CTRL_FORCE_DUPLEX |
- PORT_PCS_CTRL_UNFORCED);
+ reg &= ~(PORT_PCS_CTRL_LINK_UP |
+ PORT_PCS_CTRL_FORCE_LINK |
+ PORT_PCS_CTRL_DUPLEX_FULL |
+ PORT_PCS_CTRL_FORCE_DUPLEX |
+ PORT_PCS_CTRL_UNFORCED);
reg |= PORT_PCS_CTRL_FORCE_LINK;
if (phydev->link)
@@ -639,7 +655,7 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
}
- _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg);
+ mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
out:
mutex_unlock(&chip->reg_lock);
@@ -799,22 +815,22 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
{
u32 low;
u32 high = 0;
- int ret;
+ int err;
+ u16 reg;
u64 value;
switch (s->type) {
case PORT:
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, s->reg, &reg);
+ if (err)
return UINT64_MAX;
- low = ret;
+ low = reg;
if (s->sizeof_stat == 4) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port),
- s->reg + 1);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
+ if (err)
return UINT64_MAX;
- high = ret;
+ high = reg;
}
break;
case BANK0:
@@ -893,6 +909,8 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
struct ethtool_regs *regs, void *_p)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+ u16 reg;
u16 *p = _p;
int i;
@@ -903,11 +921,10 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
for (i = 0; i < 32; i++) {
- int ret;
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i);
- if (ret >= 0)
- p[i] = ret;
+ err = mv88e6xxx_port_read(chip, port, i, &reg);
+ if (!err)
+ p[i] = reg;
}
mutex_unlock(&chip->reg_lock);
@@ -938,7 +955,7 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- err = mv88e6xxx_read(chip, REG_PORT(port), PORT_STATUS, &reg);
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
if (err)
goto out;
@@ -1106,41 +1123,28 @@ static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port,
u8 state)
{
struct dsa_switch *ds = chip->ds;
- int reg, ret = 0;
+ u16 reg;
+ int err;
u8 oldstate;
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL);
- if (reg < 0)
- return reg;
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL, &reg);
+ if (err)
+ return err;
oldstate = reg & PORT_CONTROL_STATE_MASK;
- if (oldstate != state) {
- /* Flush forwarding database if we're moving a port
- * from Learning or Forwarding state to Disabled or
- * Blocking or Listening state.
- */
- if ((oldstate == PORT_CONTROL_STATE_LEARNING ||
- oldstate == PORT_CONTROL_STATE_FORWARDING) &&
- (state == PORT_CONTROL_STATE_DISABLED ||
- state == PORT_CONTROL_STATE_BLOCKING)) {
- ret = _mv88e6xxx_atu_remove(chip, 0, port, false);
- if (ret)
- return ret;
- }
+ reg &= ~PORT_CONTROL_STATE_MASK;
+ reg |= state;
- reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL,
- reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
- netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
- mv88e6xxx_port_state_names[state],
- mv88e6xxx_port_state_names[oldstate]);
- }
+ netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n",
+ mv88e6xxx_port_state_names[state],
+ mv88e6xxx_port_state_names[oldstate]);
- return ret;
+ return 0;
}
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
@@ -1149,7 +1153,8 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
const u16 mask = (1 << chip->info->num_ports) - 1;
struct dsa_switch *ds = chip->ds;
u16 output_ports = 0;
- int reg;
+ u16 reg;
+ int err;
int i;
/* allow CPU port or DSA link(s) to send frames to every port */
@@ -1170,14 +1175,14 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
- if (reg < 0)
- return reg;
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
reg &= ~mask;
reg |= output_ports & mask;
- return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg);
+ return mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
}
static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -1214,27 +1219,39 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
mv88e6xxx_port_state_names[stp_state]);
}
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = _mv88e6xxx_atu_remove(chip, 0, port, false);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ netdev_err(ds->ports[port].netdev, "failed to flush ATU\n");
+}
+
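The ATU flush that used to be buried in the STP state handler is now exposed as a dedicated fast-ageing operation. A minimal sketch of the assumed call site (hypothetical; the DSA core, not this patch, decides when to invoke the op):

	/* hypothetical caller: drop dynamically learned addresses when a
	 * bridged port stops forwarding */
	if (ds->ops->port_fast_age)
		ds->ops->port_fast_age(ds, port);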
static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port,
u16 *new, u16 *old)
{
struct dsa_switch *ds = chip->ds;
- u16 pvid;
- int ret;
+ u16 pvid, reg;
+ int err;
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_DEFAULT_VLAN, &reg);
+ if (err)
+ return err;
- pvid = ret & PORT_DEFAULT_VLAN_MASK;
+ pvid = reg & PORT_DEFAULT_VLAN_MASK;
if (new) {
- ret &= ~PORT_DEFAULT_VLAN_MASK;
- ret |= *new & PORT_DEFAULT_VLAN_MASK;
+ reg &= ~PORT_DEFAULT_VLAN_MASK;
+ reg |= *new & PORT_DEFAULT_VLAN_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_DEFAULT_VLAN, ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, reg);
+ if (err)
+ return err;
netdev_dbg(ds->ports[port].netdev,
"DefaultVID %d (was %d)\n", *new, pvid);
@@ -1613,7 +1630,8 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
struct dsa_switch *ds = chip->ds;
u16 upper_mask;
u16 fid;
- int ret;
+ u16 reg;
+ int err;
if (mv88e6xxx_num_databases(chip) == 4096)
upper_mask = 0xff;
@@ -1623,37 +1641,35 @@ static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port,
return -EOPNOTSUPP;
/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_BASE_VLAN, &reg);
+ if (err)
+ return err;
- fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
+ fid = (reg & PORT_BASE_VLAN_FID_3_0_MASK) >> 12;
if (new) {
- ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
- ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
+ reg &= ~PORT_BASE_VLAN_FID_3_0_MASK;
+ reg |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN,
- ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_BASE_VLAN, reg);
+ if (err)
+ return err;
}
/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_1, &reg);
+ if (err)
+ return err;
- fid |= (ret & upper_mask) << 4;
+ fid |= (reg & upper_mask) << 4;
if (new) {
- ret &= ~upper_mask;
- ret |= (*new >> 4) & upper_mask;
+ reg &= ~upper_mask;
+ reg |= (*new >> 4) & upper_mask;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
- ret);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, reg);
+ if (err)
+ return err;
netdev_dbg(ds->ports[port].netdev,
"FID %d (was %d)\n", *new, fid);
@@ -1865,26 +1881,26 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
- int ret;
+ u16 reg;
+ int err;
if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU))
return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2);
- if (ret < 0)
+ err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+ if (err)
goto unlock;
- old = ret & PORT_CONTROL_2_8021Q_MASK;
+ old = reg & PORT_CONTROL_2_8021Q_MASK;
if (new != old) {
- ret &= ~PORT_CONTROL_2_8021Q_MASK;
- ret |= new & PORT_CONTROL_2_8021Q_MASK;
+ reg &= ~PORT_CONTROL_2_8021Q_MASK;
+ reg |= new & PORT_CONTROL_2_8021Q_MASK;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2,
- ret);
- if (ret < 0)
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
goto unlock;
netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n",
@@ -1892,11 +1908,11 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
mv88e6xxx_port_8021q_mode_names[old]);
}
- ret = 0;
+ err = 0;
unlock:
mutex_unlock(&chip->reg_lock);
- return ret;
+ return err;
}
static int
@@ -2091,12 +2107,48 @@ static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip,
return _mv88e6xxx_atu_cmd(chip, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid,
+ struct mv88e6xxx_atu_entry *entry);
+
+static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
+ const u8 *addr, struct mv88e6xxx_atu_entry *entry)
+{
+ struct mv88e6xxx_atu_entry next;
+ int err;
+
+ eth_broadcast_addr(next.mac);
+
+ err = _mv88e6xxx_atu_mac_write(chip, next.mac);
+ if (err)
+ return err;
+
+ do {
+ err = _mv88e6xxx_atu_getnext(chip, fid, &next);
+ if (err)
+ return err;
+
+ if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+ break;
+
+ if (ether_addr_equal(next.mac, addr)) {
+ *entry = next;
+ return 0;
+ }
+ } while (!is_broadcast_ether_addr(next.mac));
+
+ memset(entry, 0, sizeof(*entry));
+ entry->fid = fid;
+ ether_addr_copy(entry->mac, addr);
+
+ return 0;
+}
+
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
- struct mv88e6xxx_atu_entry entry = { 0 };
struct mv88e6xxx_vtu_stu_entry vlan;
+ struct mv88e6xxx_atu_entry entry;
int err;
/* Null VLAN ID corresponds to the port private database */
@@ -2107,12 +2159,18 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- entry.fid = vlan.fid;
- entry.state = state;
- ether_addr_copy(entry.mac, addr);
- if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
- entry.trunk = false;
- entry.portv_trunkid = BIT(port);
+ err = mv88e6xxx_atu_get(chip, vlan.fid, addr, &entry);
+ if (err)
+ return err;
+
+ /* Purge the ATU entry only if no port is using it anymore */
+ if (state == GLOBAL_ATU_DATA_STATE_UNUSED) {
+ entry.portv_trunkid &= ~BIT(port);
+ if (!entry.portv_trunkid)
+ entry.state = GLOBAL_ATU_DATA_STATE_UNUSED;
+ } else {
+ entry.portv_trunkid |= BIT(port);
+ entry.state = state;
}
return _mv88e6xxx_atu_load(chip, &entry);
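A worked example of the new load/purge semantics (illustrative values, not from the patch): an address shared by ports 1 and 2 carries portv_trunkid = BIT(1) | BIT(2) = 0x6; purging it on port 1 leaves 0x4, so the entry stays loaded, and only when the last port bit is cleared does the state fall back to GLOBAL_ATU_DATA_STATE_UNUSED:

	entry.portv_trunkid = BIT(1) | BIT(2);	/* 0x6: ports 1 and 2 */
	entry.portv_trunkid &= ~BIT(1);		/* 0x4: port 2 still uses it */
	/* vector is non-zero, so the entry is reloaded, not purged */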
@@ -2364,19 +2422,20 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
struct gpio_desc *gpiod = chip->reset;
unsigned long timeout;
- int ret;
+ int err, ret;
+ u16 reg;
int i;
/* Set all ports to the disabled state. */
for (i = 0; i < chip->info->num_ports; i++) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, i, PORT_CONTROL, &reg);
+ if (err)
+ return err;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL,
- ret & 0xfffc);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, i, PORT_CONTROL,
+ reg & 0xfffc);
+ if (err)
+ return err;
}
/* Wait for transmit queues to drain. */
@@ -2395,11 +2454,11 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
* through global registers 0x18 and 0x19.
*/
if (ppu_active)
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000);
else
- ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
- if (ret)
- return ret;
+ err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400);
+ if (err)
+ return err;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
@@ -2413,11 +2472,11 @@ static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip)
usleep_range(1000, 2000);
}
if (time_after(jiffies, timeout))
- ret = -ETIMEDOUT;
+ err = -ETIMEDOUT;
else
- ret = 0;
+ err = 0;
- return ret;
+ return err;
}
static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
@@ -2438,21 +2497,10 @@ static int mv88e6xxx_serdes_power_on(struct mv88e6xxx_chip *chip)
return err;
}
-static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port,
- int reg, u16 *val)
-{
- int addr = chip->info->port_base_addr + port;
-
- if (port >= chip->info->num_ports)
- return -EINVAL;
-
- return mv88e6xxx_read(chip, addr, reg, val);
-}
-
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_switch *ds = chip->ds;
- int ret;
+ int err;
u16 reg;
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
@@ -2465,7 +2513,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* and all DSA ports to their maximum bandwidth and
* full duplex.
*/
- reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL);
+ err = mv88e6xxx_port_read(chip, port, PORT_PCS_CTRL, &reg);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
@@ -2480,10 +2528,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
reg |= PORT_PCS_CTRL_UNFORCED;
}
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PCS_CTRL, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PCS_CTRL, reg);
+ if (err)
+ return err;
}
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
@@ -2534,26 +2581,25 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (reg) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_CONTROL, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL, reg);
+ if (err)
+ return err;
}
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_SERDES)) {
- ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS);
- if (ret < 0)
- return ret;
- ret &= PORT_STATUS_CMODE_MASK;
- if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
- (ret == PORT_STATUS_CMODE_1000BASE_X) ||
- (ret == PORT_STATUS_CMODE_SGMII)) {
- ret = mv88e6xxx_serdes_power_on(chip);
- if (ret < 0)
- return ret;
+ err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+ if (err)
+ return err;
+ reg &= PORT_STATUS_CMODE_MASK;
+ if ((reg == PORT_STATUS_CMODE_100BASE_X) ||
+ (reg == PORT_STATUS_CMODE_1000BASE_X) ||
+ (reg == PORT_STATUS_CMODE_SGMII)) {
+ err = mv88e6xxx_serdes_power_on(chip);
+ if (err < 0)
+ return err;
}
}
@@ -2587,10 +2633,9 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_CONTROL_2, reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+ if (err)
+ return err;
}
/* Port Association Vector: when learning source addresses
@@ -2603,16 +2648,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (dsa_is_cpu_port(ds, port))
reg = 0;
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR,
- reg);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_ASSOC_VECTOR, reg);
+ if (err)
+ return err;
/* Egress rate control 2: disable egress rate control. */
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2,
- 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL_2, 0x0000);
+ if (err)
+ return err;
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
@@ -2621,96 +2664,89 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PAUSE_CTRL, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PAUSE_CTRL, 0x0000);
+ if (err)
+ return err;
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_ATU_CONTROL, 0x0000);
+ err = mv88e6xxx_port_write(chip, port, PORT_ATU_CONTROL,
+ 0x0000);
/* Priority Override: disable DA, SA and VTU priority
* override.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_PRI_OVERRIDE, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_PRI_OVERRIDE,
+ 0x0000);
+ if (err)
+ return err;
/* Port Ethertype: use the Ethertype DSA Ethertype
* value.
*/
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_EDSA)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_ETH_TYPE, ETH_P_EDSA);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_ETH_TYPE,
+ ETH_P_EDSA);
+ if (err)
+ return err;
}
/* Tag Remap: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_TAG_REGMAP_0123, 0x3210);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_0123,
+ 0x3210);
+ if (err)
+ return err;
/* Tag Remap 2: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_TAG_REGMAP_4567, 0x7654);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_TAG_REGMAP_4567,
+ 0x7654);
+ if (err)
+ return err;
}
/* Rate Control: disable ingress rate limiting. */
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
mv88e6xxx_6320_family(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_RATE_CONTROL, 0x0001);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
+ 0x0001);
+ if (err)
+ return err;
} else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) {
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port),
- PORT_RATE_CONTROL, 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_RATE_CONTROL,
+ 0x0000);
+ if (err)
+ return err;
}
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1,
- 0x0000);
- if (ret)
- return ret;
+ err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_1, 0x0000);
+ if (err)
+ return err;
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(chip, port, 0);
- if (ret)
- return ret;
+ err = _mv88e6xxx_port_fid_set(chip, port, 0);
+ if (err)
+ return err;
- ret = _mv88e6xxx_port_based_vlan_map(chip, port);
- if (ret)
- return ret;
+ err = _mv88e6xxx_port_based_vlan_map(chip, port);
+ if (err)
+ return err;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN,
- 0x0000);
- if (ret)
- return ret;
-
- return 0;
+ return mv88e6xxx_port_write(chip, port, PORT_DEFAULT_VLAN, 0x0000);
}
static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
@@ -3648,6 +3684,7 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_bridge_join = mv88e6xxx_port_bridge_join,
.port_bridge_leave = mv88e6xxx_port_bridge_leave,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_fast_age = mv88e6xxx_port_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
.port_vlan_add = mv88e6xxx_port_vlan_add,
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index 52f3f5231f51..827988397fd8 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -37,7 +37,6 @@
#define ADDR_SERDES 0x0f
#define SERDES_PAGE_FIBER 0x01
-#define REG_PORT(p) (0x10 + (p))
#define PORT_STATUS 0x00
#define PORT_STATUS_PAUSE_EN BIT(15)
#define PORT_STATUS_MY_PAUSE BIT(14)
@@ -453,36 +452,36 @@ enum mv88e6xxx_cap {
};
/* Bitmask of capabilities */
-#define MV88E6XXX_FLAG_EDSA BIT(MV88E6XXX_CAP_EDSA)
-#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
-
-#define MV88E6XXX_FLAG_SMI_CMD BIT(MV88E6XXX_CAP_SMI_CMD)
-#define MV88E6XXX_FLAG_SMI_DATA BIT(MV88E6XXX_CAP_SMI_DATA)
-
-#define MV88E6XXX_FLAG_PHY_PAGE BIT(MV88E6XXX_CAP_PHY_PAGE)
-
-#define MV88E6XXX_FLAG_SERDES BIT(MV88E6XXX_CAP_SERDES)
-
-#define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X)
-#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X)
-#define MV88E6XXX_FLAG_G2_IRL_CMD BIT(MV88E6XXX_CAP_G2_IRL_CMD)
-#define MV88E6XXX_FLAG_G2_IRL_DATA BIT(MV88E6XXX_CAP_G2_IRL_DATA)
-#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT(MV88E6XXX_CAP_G2_PVT_ADDR)
-#define MV88E6XXX_FLAG_G2_PVT_DATA BIT(MV88E6XXX_CAP_G2_PVT_DATA)
-#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT(MV88E6XXX_CAP_G2_SWITCH_MAC)
-#define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT)
-#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD)
-#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA)
-#define MV88E6XXX_FLAG_G2_SMI_PHY_CMD BIT(MV88E6XXX_CAP_G2_SMI_PHY_CMD)
-#define MV88E6XXX_FLAG_G2_SMI_PHY_DATA BIT(MV88E6XXX_CAP_G2_SMI_PHY_DATA)
-
-#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
-#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
-#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP)
-#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT)
-#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU)
+#define MV88E6XXX_FLAG_EDSA BIT_ULL(MV88E6XXX_CAP_EDSA)
+#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE)
+
+#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD)
+#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA)
+
+#define MV88E6XXX_FLAG_PHY_PAGE BIT_ULL(MV88E6XXX_CAP_PHY_PAGE)
+
+#define MV88E6XXX_FLAG_SERDES BIT_ULL(MV88E6XXX_CAP_SERDES)
+
+#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X)
+#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X)
+#define MV88E6XXX_FLAG_G2_IRL_CMD BIT_ULL(MV88E6XXX_CAP_G2_IRL_CMD)
+#define MV88E6XXX_FLAG_G2_IRL_DATA BIT_ULL(MV88E6XXX_CAP_G2_IRL_DATA)
+#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT_ULL(MV88E6XXX_CAP_G2_PVT_ADDR)
+#define MV88E6XXX_FLAG_G2_PVT_DATA BIT_ULL(MV88E6XXX_CAP_G2_PVT_DATA)
+#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT_ULL(MV88E6XXX_CAP_G2_SWITCH_MAC)
+#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
+#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT_ULL(MV88E6XXX_CAP_G2_EEPROM_CMD)
+#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT_ULL(MV88E6XXX_CAP_G2_EEPROM_DATA)
+#define MV88E6XXX_FLAG_G2_SMI_PHY_CMD BIT_ULL(MV88E6XXX_CAP_G2_SMI_PHY_CMD)
+#define MV88E6XXX_FLAG_G2_SMI_PHY_DATA BIT_ULL(MV88E6XXX_CAP_G2_SMI_PHY_DATA)
+
+#define MV88E6XXX_FLAG_PPU BIT_ULL(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE BIT_ULL(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
/* EEPROM Programming via Global2 with 16-bit data */
#define MV88E6XXX_FLAGS_EEPROM16 \
@@ -615,7 +614,7 @@ struct mv88e6xxx_info {
unsigned int num_ports;
unsigned int port_base_addr;
unsigned int age_time_coeff;
- unsigned long flags;
+ unsigned long long flags;
};
struct mv88e6xxx_atu_entry {
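Widening flags to unsigned long long and defining the flags with BIT_ULL() makes room for more than 32 capability bits. A minimal sketch, assuming mv88e6xxx_has() is the usual mask test (its definition sits outside this hunk):

	static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip,
					 unsigned long long flags)
	{
		return (chip->info->flags & flags) == flags;
	}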
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
new file mode 100644
index 000000000000..b3df70d07ff6
--- /dev/null
+++ b/drivers/net/dsa/qca8k.c
@@ -0,0 +1,1040 @@
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/if_bridge.h>
+#include <linux/mdio.h>
+#include <linux/etherdevice.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+static const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+};
+
+/* The 32-bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * MDIO writes.
+ */
+static u16 qca8k_current_page = 0xffff;
+
+static void
+qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
+{
+ regaddr >>= 1;
+ *r1 = regaddr & 0x1e;
+
+ regaddr >>= 5;
+ *r2 = regaddr & 0x7;
+
+ regaddr >>= 3;
+ *page = regaddr & 0x3ff;
+}
+
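A worked example of the split, computed from the code above: register 0x660 (QCA8K_PORT_LOOKUP_CTRL(0) in qca8k.h) yields r1 = 0x10, r2 = 1 and page = 3, so the 32-bit access goes to MDIO address 0x10 | 1 = 0x11, registers 0x10 (low word) and 0x11 (high word):

	/* reg = 0x660 */
	regaddr = 0x660 >> 1;			/* 0x330 */
	r1 = 0x330 & 0x1e;			/* 0x10 */
	r2 = (0x330 >> 5) & 0x7;		/* 0x19 & 0x7 = 1 */
	page = (0x330 >> 8) & 0x3ff;		/* 0x3 */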
+static u32
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)
+{
+ u32 val;
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret >= 0) {
+ val = ret;
+ ret = bus->read(bus, phy_id, regnum + 1);
+ val |= ret << 16;
+ }
+
+ if (ret < 0) {
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit register\n");
+ return ret;
+ }
+
+ return val;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ u16 lo, hi;
+ int ret;
+
+ lo = val & 0xffff;
+ hi = (u16)(val >> 16);
+
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret >= 0)
+ ret = bus->write(bus, phy_id, regnum + 1, hi);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit register\n");
+}
+
+static void
+qca8k_set_page(struct mii_bus *bus, u16 page)
+{
+ if (page == qca8k_current_page)
+ return;
+
+ if (bus->write(bus, 0x18, 0, page) < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to set qca8k page\n");
+ qca8k_current_page = page;
+}
+
+static u32
+qca8k_read(struct qca8k_priv *priv, u32 reg)
+{
+ u16 r1, r2, page;
+ u32 val;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ qca8k_set_page(priv->bus, page);
+ val = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ return val;
+}
+
+static void
+qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ u16 r1, r2, page;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ qca8k_set_page(priv->bus, page);
+ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, val);
+
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static u32
+qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 val)
+{
+ u16 r1, r2, page;
+ u32 ret;
+
+ qca8k_split_addr(reg, &r1, &r2, &page);
+
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ qca8k_set_page(priv->bus, page);
+ ret = qca8k_mii_read32(priv->bus, 0x10 | r2, r1);
+ ret &= ~mask;
+ ret |= val;
+ qca8k_mii_write32(priv->bus, 0x10 | r2, r1, ret);
+
+ mutex_unlock(&priv->bus->mdio_lock);
+
+ return ret;
+}
+
+static void
+qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ qca8k_rmw(priv, reg, 0, val);
+}
+
+static void
+qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ qca8k_rmw(priv, reg, val, 0);
+}
+
+static int
+qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+
+ *val = qca8k_read(priv, reg);
+
+ return 0;
+}
+
+static int
+qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+
+ qca8k_write(priv, reg, val);
+
+ return 0;
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+
+};
+
+static struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+static struct regmap_config qca8k_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x16ac, /* end MIB - Port6 range */
+ .reg_read = qca8k_regmap_read,
+ .reg_write = qca8k_regmap_write,
+ .rd_table = &qca8k_readable_table,
+};
+
+static int
+qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(20);
+
+ /* loop until the busy flag has cleared */
+ do {
+ u32 val = qca8k_read(priv, reg);
+ int busy = val & mask;
+
+ if (!busy)
+ break;
+ cond_resched();
+ } while (!time_after_eq(jiffies, timeout));
+
+ return time_after_eq(jiffies, timeout);
+}
+
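qca8k_busy_wait() returns non-zero only if the 20 ms window expires before the flag clears; callers treat that as a hard failure, as the FDB code further down does:

	if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY))
		return -1;	/* busy flag never cleared: give up */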
+static void
+qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[4];
+ int i;
+
+ /* load the ARL table into an array */
+ for (i = 0; i < 4; i++)
+ reg[i] = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));
+
+ /* vid - 83:72 */
+ fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
+ /* aging - 67:64 */
+ fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
+ /* portmask - 54:48 */
+ fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
+ /* mac - 47:0 */
+ fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
+ fdb->mac[1] = reg[1] & 0xff;
+ fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
+ fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
+ fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
+ fdb->mac[5] = reg[0] & 0xff;
+}
+
+static void
+qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
+ u8 aging)
+{
+ u32 reg[3] = { 0 };
+ int i;
+
+ /* vid - 83:72 */
+ reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
+ /* aging - 67:64 */
+ reg[2] |= aging & QCA8K_ATU_STATUS_M;
+ /* portmask - 54:48 */
+ reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
+ /* mac - 47:0 */
+ reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
+ reg[1] |= mac[1];
+ reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
+ reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
+ reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
+ reg[0] |= mac[5];
+
+ /* load the array into the ARL table */
+ for (i = 0; i < 3; i++)
+ qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+}
+
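A worked example of the packing (illustrative values): MAC 00:11:22:33:44:55 with VID 1, port mask 0x02 (port 1) and static aging 0xf ends up as:

	reg[2] = (1 << QCA8K_ATU_VID_S) | 0xf;		/* 0x0000010f */
	reg[1] = (0x02 << QCA8K_ATU_PORT_S) | 0x11;	/* 0x00020011 */
	reg[0] = 0x22334455;				/* mac[2]..mac[5] */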
+static int
+qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
+{
+ u32 reg;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
+ }
+
+ /* Write the function register triggering the table access */
+ qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+
+ /* wait for completion */
+ if (qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY))
+ return -1;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret >= 0)
+ qca8k_fdb_read(priv, fdb);
+
+ return ret;
+}
+
+static int
+qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
+ u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int
+qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static void
+qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static void
+qca8k_mib_init(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
+ qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
+{
+ u32 reg;
+
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ break;
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ break;
+ default:
+ pr_err("Can't set PAD_CTRL on port %d\n", port);
+ return -EINVAL;
+ }
+
+ /* Configure a port to be directly connected to an external
+ * PHY or MAC.
+ */
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ qca8k_write(priv, reg,
+ QCA8K_PORT_PAD_RGMII_EN |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY(3) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY(3));
+
+ /* According to the datasheet, RGMII delay is enabled through
+ * PORT5_PAD_CTRL for all ports, rather than individual port
+ * registers.
+ */
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+ break;
+ default:
+ pr_err("xMII mode %d not supported\n", mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC;
+
+ /* Port 0 and 6 have no internal PHY */
+ if ((port > 0) && (port < 6))
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+static int
+qca8k_setup(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int ret, i, phy_mode = -1;
+
+ /* Make sure that port 0 is the CPU port */
+ if (!dsa_is_cpu_port(ds, 0)) {
+ pr_err("port 0 is not the CPU port\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&priv->reg_mutex);
+
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap))
+ pr_warn("regmap initialization failed");
+
+ /* Initialize CPU port pad mode (xMII type, delays...) */
+ phy_mode = of_get_phy_mode(ds->ports[ds->dst->cpu_port].dn);
+ if (phy_mode < 0) {
+ pr_err("Can't find phy-mode for master device\n");
+ return phy_mode;
+ }
+ ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
+ if (ret < 0)
+ return ret;
+
+ /* Enable CPU Port */
+ qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
+ priv->port_sts[QCA8K_CPU_PORT].enabled = 1;
+
+ /* Enable MIB counters */
+ qca8k_mib_init(priv);
+
+ /* Enable QCA header mode on the CPU port */
+ qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+
+ /* Disable forwarding by default on all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
+
+ /* Disable MAC by default on all user ports */
+ for (i = 1; i < QCA8K_NUM_PORTS; i++)
+ if (ds->enabled_port_mask & BIT(i))
+ qca8k_port_set_status(priv, i, 0);
+
+ /* Forward all unknown frames to CPU port for Linux processing */
+ qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+
+ /* Set up the connection between CPU port & user ports */
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ ds->enabled_port_mask);
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (ds->enabled_port_mask & BIT(i)) {
+ int shift = 16 * (i % 2);
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(QCA8K_CPU_PORT));
+
+ /* Enable ARP Auto-learning by default */
+ qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ 0xffff << shift, 1 << shift);
+ qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(1) |
+ QCA8K_PORT_VLAN_SVID(1));
+ }
+ }
+
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
+
+ return 0;
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ return mdiobus_read(priv->bus, phy, regnum);
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ return mdiobus_write(priv->bus, phy, regnum, val);
+}
+
+static void
+qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static void
+qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i;
+ u64 hi;
+
+ for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ data[i] = qca8k_read(priv, reg);
+ if (mib->size == 2) {
+ hi = qca8k_read(priv, reg + 4);
+ data[i] |= hi << 32;
+ }
+ }
+}
+
+static int
+qca8k_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(ar8327_mib);
+}
+
+static void
+qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ u32 reg;
+
+ mutex_lock(&priv->reg_mutex);
+ reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
+ if (enable)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int
+qca8k_eee_init(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
+ int ret;
+
+ p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full);
+
+ ret = phy_init_eee(phy, 0);
+ if (ret)
+ return ret;
+
+ qca8k_eee_enable_set(ds, port, true);
+
+ return 0;
+}
+
+static int
+qca8k_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev,
+ struct ethtool_eee *e)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
+ int ret = 0;
+
+ p->eee_enabled = e->eee_enabled;
+
+ if (e->eee_enabled) {
+ p->eee_enabled = qca8k_eee_init(ds, port, phydev);
+ if (!p->eee_enabled)
+ ret = -EOPNOTSUPP;
+ }
+ qca8k_eee_enable_set(ds, port, p->eee_enabled);
+
+ return ret;
+}
+
+static int
+qca8k_get_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct ethtool_eee *p = &priv->port_sts[port].eee;
+ struct net_device *netdev = ds->ports[port].netdev;
+ int ret;
+
+ ret = phy_ethtool_get_eee(netdev->phydev, p);
+ if (!ret)
+ e->eee_active =
+ !!(p->supported & p->advertised & p->lp_advertised);
+ else
+ e->eee_active = 0;
+
+ e->eee_enabled = p->eee_enabled;
+
+ return ret;
+}
+
+static void
+qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+static int
+qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int port_mask = BIT(QCA8K_CPU_PORT);
+ int i;
+
+ priv->port_sts[port].bridge_dev = bridge;
+
+ for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ if (priv->port_sts[i].bridge_dev != bridge)
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ qca8k_reg_set(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+ /* Add all other ports to this port's portvlan mask */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return 0;
+}
+
+static void
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int i;
+
+ for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ if (priv->port_sts[i].bridge_dev !=
+ priv->port_sts[port].bridge_dev)
+ continue;
+ /* Remove this port from the portvlan mask of the other ports
+ * in the bridge
+ */
+ qca8k_reg_clear(priv,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+ priv->port_sts[port].bridge_dev = NULL;
+ /* Set the CPU port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+}
+
+static int
+qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_sts[port].enabled = 1;
+
+ return 0;
+}
+
+static void
+qca8k_port_disable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_sts[port].enabled = 0;
+}
+
+static int
+qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = 1;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+static int
+qca8k_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+
+ /* The FDB table for static and auto learned entries is the same. We
+ * need to reserve an entry with no port_mask set to make sure that
+ * when port_fdb_add is called an entry is still available. Otherwise
+ * the last free entry might have been used up by auto learning.
+ */
+ return qca8k_port_fdb_insert(priv, fdb->addr, 0, fdb->vid);
+}
+
+static void
+qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ /* Update the FDB entry adding the port_mask */
+ qca8k_port_fdb_insert(priv, fdb->addr, port_mask, fdb->vid);
+}
+
+static int
+qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+ u16 vid = fdb->vid;
+
+ if (!vid)
+ vid = 1;
+
+ return qca8k_fdb_del(priv, fdb->addr, port_mask, vid);
+}
+
+static int
+qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+
+ ether_addr_copy(fdb->addr, _fdb.mac);
+ fdb->vid = _fdb.vid;
+ if (_fdb.aging == QCA8K_ATU_STATUS_STATIC)
+ fdb->ndm_state = NUD_NOARP;
+ else
+ fdb->ndm_state = NUD_REACHABLE;
+
+ ret = cb(&fdb->obj);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds)
+{
+ return DSA_TAG_PROTO_QCA;
+}
+
+static struct dsa_switch_ops qca8k_switch_ops = {
+ .get_tag_protocol = qca8k_get_tag_protocol,
+ .setup = qca8k_setup,
+ .get_strings = qca8k_get_strings,
+ .phy_read = qca8k_phy_read,
+ .phy_write = qca8k_phy_write,
+ .get_ethtool_stats = qca8k_get_ethtool_stats,
+ .get_sset_count = qca8k_get_sset_count,
+ .get_eee = qca8k_get_eee,
+ .set_eee = qca8k_set_eee,
+ .port_enable = qca8k_port_enable,
+ .port_disable = qca8k_port_disable,
+ .port_stp_state_set = qca8k_port_stp_state_set,
+ .port_bridge_join = qca8k_port_bridge_join,
+ .port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fdb_prepare = qca8k_port_fdb_prepare,
+ .port_fdb_add = qca8k_port_fdb_add,
+ .port_fdb_del = qca8k_port_fdb_del,
+ .port_fdb_dump = qca8k_port_fdb_dump,
+};
+
+static int
+qca8k_sw_probe(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv;
+ u32 id;
+
+ /* allocate the private data struct so that we can probe the switch's
+ * ID register
+ */
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+
+ /* read the switch's ID register */
+ id = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
+ id >>= QCA8K_MASK_CTRL_ID_S;
+ id &= QCA8K_MASK_CTRL_ID_M;
+ if (id != QCA8K_ID_QCA8337)
+ return -ENODEV;
+
+ priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->priv = priv;
+ priv->ds->dev = &mdiodev->dev;
+ priv->ds->ops = &qca8k_switch_ops;
+ mutex_init(&priv->reg_mutex);
+ dev_set_drvdata(&mdiodev->dev, priv);
+
+ return dsa_register_switch(priv->ds, priv->ds->dev->of_node);
+}
+
+static void
+qca8k_sw_remove(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int i;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++)
+ qca8k_port_set_status(priv, i, 0);
+
+ dsa_unregister_switch(priv->ds);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void
+qca8k_set_pm(struct qca8k_priv *priv, int enable)
+{
+ int i;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (!priv->port_sts[i].enabled)
+ continue;
+
+ qca8k_port_set_status(priv, i, enable);
+ }
+}
+
+static int qca8k_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qca8k_priv *priv = platform_get_drvdata(pdev);
+
+ qca8k_set_pm(priv, 0);
+
+ return dsa_switch_suspend(priv->ds);
+}
+
+static int qca8k_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct qca8k_priv *priv = platform_get_drvdata(pdev);
+
+ qca8k_set_pm(priv, 1);
+
+ return dsa_switch_resume(priv->ds);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
+ qca8k_suspend, qca8k_resume);
+
+static const struct of_device_id qca8k_of_match[] = {
+ { .compatible = "qca,qca8337" },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver qca8kmdio_driver = {
+ .probe = qca8k_sw_probe,
+ .remove = qca8k_sw_remove,
+ .mdiodrv.driver = {
+ .name = "qca8k",
+ .of_match_table = qca8k_of_match,
+ .pm = &qca8k_pm_ops,
+ },
+};
+
+mdio_module_driver(qca8kmdio_driver);
+
+MODULE_AUTHOR("Mathieu Olivari, John Crispin <john@phrozen.org>");
+MODULE_DESCRIPTION("Driver for QCA8K ethernet switch family");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qca8k");
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
new file mode 100644
index 000000000000..201464719531
--- /dev/null
+++ b/drivers/net/dsa/qca8k.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+
+#define QCA8K_NUM_PORTS 7
+
+#define PHY_ID_QCA8337 0x004dd036
+#define QCA8K_ID_QCA8337 0x13
+
+#define QCA8K_NUM_FDB_RECORDS 2048
+
+#define QCA8K_CPU_PORT 0
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL 0x000
+#define QCA8K_MASK_CTRL_ID_M 0xff
+#define QCA8K_MASK_CTRL_ID_S 8
+#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_REG_PORT5_PAD_CTRL 0x008
+#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
+#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) \
+ ((0x8 + (x & 0x3)) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \
+ ((0x10 + (x & 0x3)) << 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+#define QCA8K_REG_MODULE_EN 0x030
+#define QCA8K_MODULE_EN_MIB BIT(0)
+#define QCA8K_REG_MIB 0x034
+#define QCA8K_MIB_FLUSH BIT(24)
+#define QCA8K_MIB_CPU_KEEP BIT(20)
+#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_GOL_MAC_ADDR0 0x60
+#define QCA8K_GOL_MAC_ADDR1 0x64
+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0)
+#define QCA8K_PORT_STATUS_SPEED_S 0
+#define QCA8K_PORT_STATUS_TXMAC BIT(2)
+#define QCA8K_PORT_STATUS_RXMAC BIT(3)
+#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
+#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
+#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
+#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+#define QCA8K_PORT_HDR_CTRL_RX_S 2
+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+#define QCA8K_PORT_HDR_CTRL_TX_S 0
+#define QCA8K_PORT_HDR_CTRL_ALL 2
+#define QCA8K_PORT_HDR_CTRL_MGMT 1
+#define QCA8K_PORT_HDR_CTRL_NONE 0
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL 0x100
+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
+#define QCA8K_PORT_VLAN_SVID(x) x
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+/* Lookup registers */
+#define QCA8K_REG_ATU_DATA0 0x600
+#define QCA8K_ATU_ADDR2_S 24
+#define QCA8K_ATU_ADDR3_S 16
+#define QCA8K_ATU_ADDR4_S 8
+#define QCA8K_REG_ATU_DATA1 0x604
+#define QCA8K_ATU_PORT_M 0x7f
+#define QCA8K_ATU_PORT_S 16
+#define QCA8K_ATU_ADDR0_S 8
+#define QCA8K_REG_ATU_DATA2 0x608
+#define QCA8K_ATU_VID_M 0xfff
+#define QCA8K_ATU_VID_S 8
+#define QCA8K_ATU_STATUS_M 0xf
+#define QCA8K_ATU_STATUS_STATIC 0xf
+#define QCA8K_REG_ATU_FUNC 0x60c
+#define QCA8K_ATU_FUNC_BUSY BIT(31)
+#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+#define QCA8K_ATU_FUNC_FULL BIT(12)
+#define QCA8K_ATU_FUNC_PORT_M 0xf
+#define QCA8K_ATU_FUNC_PORT_S 8
+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
+#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+
+/* Pkt edit registers */
+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL 0xe00
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
+#define QCA8K_HNAT_CONTROL 0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
+
+/* QCA specific MII registers */
+#define MII_ATH_MMD_ADDR 0x0d
+#define MII_ATH_MMD_DATA 0x0e
+
+enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+ QCA8K_PORT_SPEED_1000M = 2,
+ QCA8K_PORT_SPEED_ERR = 3,
+};
+
+enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+};
+
+struct ar8xxx_port_status {
+ struct ethtool_eee eee;
+ struct net_device *bridge_dev;
+ int enabled;
+};
+
+struct qca8k_priv {
+ struct regmap *regmap;
+ struct mii_bus *bus;
+ struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
+ struct dsa_switch *ds;
+ struct mutex reg_mutex;
+};
+
+struct qca8k_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct qca8k_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+};
+
+#endif /* __QCA8K_H */
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 25c55ab05c7d..9133e7926da5 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3089,7 +3089,7 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(new_mode, ioaddr + EL3_CMD);
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
Note that this must be done after each RxReset due to some backwards
compatibility logic in the Cyclone and Tornado ASICs */
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 1d1069641d81..8af2c88d5b33 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -66,7 +66,7 @@
*/
#define ZEROCOPY
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define VLAN_SUPPORT
#endif
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index dcf2a1f3643d..dc57f2759f44 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -45,14 +45,14 @@
#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
-#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#if IS_ENABLED(CONFIG_HPLANCE)
#include "hplance.h"
#undef WRITERAP
#undef WRITERDP
#undef READRDP
-#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME147_NET)
/* Lossage Factor Nine, Mr Sulu. */
#define WRITERAP(lp, x) (lp->writerap(lp, x))
@@ -86,7 +86,7 @@ static inline __u16 READRDP(struct lance_private *lp)
}
#endif
-#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+#endif /* IS_ENABLED(CONFIG_HPLANCE) */
/* debugging output macros, various flavours */
/* #define TEST_HITS */
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 94960055fa1f..f92cc97151ec 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -89,7 +89,7 @@ Revision History:
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define AMD8111E_VLAN_TAG_USED 1
#else
#define AMD8111E_VLAN_TAG_USED 0
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 22a7b26ca1d6..d372d4235c81 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -54,55 +54,68 @@ static void xgene_get_drvinfo(struct net_device *ndev,
sprintf(info->bus_info, "%s", pdev->name);
}
-static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
+ u32 supported;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
if (phydev == NULL)
return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
if (pdata->mdio_driver) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_gset(phydev, cmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
- cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_MII;
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, SPEED_1000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_ENABLE;
+ supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising,
+ supported);
+
+ cmd->base.speed = SPEED_1000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_ENABLE;
} else {
- cmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
- cmd->advertising = cmd->supported;
- ethtool_cmd_speed_set(cmd, SPEED_10000);
- cmd->duplex = DUPLEX_FULL;
- cmd->port = PORT_FIBRE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
+ supported = SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising,
+ supported);
+
+ cmd->base.speed = SPEED_10000;
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
}
return 0;
}
-static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+static int xgene_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
@@ -110,7 +123,7 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, cmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
}
@@ -152,12 +165,12 @@ static void xgene_get_ethtool_stats(struct net_device *ndev,
static const struct ethtool_ops xgene_ethtool_ops = {
.get_drvinfo = xgene_get_drvinfo,
- .get_settings = xgene_get_settings,
- .set_settings = xgene_set_settings,
.get_link = ethtool_op_get_link,
.get_strings = xgene_get_strings,
.get_sset_count = xgene_get_sset_count,
- .get_ethtool_stats = xgene_get_ethtool_stats
+ .get_ethtool_stats = xgene_get_ethtool_stats,
+ .get_link_ksettings = xgene_get_link_ksettings,
+ .set_link_ksettings = xgene_set_link_ksettings,
};
void xgene_enet_set_ethtool_ops(struct net_device *ndev)
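The legacy {get,set}_settings ethtool ops operate on a flat struct ethtool_cmd whose supported/advertising fields are u32 masks; the ksettings replacements keep link modes in bitmaps under cmd->link_modes and the scalar settings under cmd->base. The conversion pattern used in the hunk above, reduced to a minimal sketch (assumes only <linux/ethtool.h>):

	static void fill_fixed_1g(struct ethtool_link_ksettings *cmd)
	{
		u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_MII;

		/* translate the legacy u32 mask into the new bitmap format */
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
							supported);
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
							supported);

		cmd->base.speed = SPEED_1000;
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.autoneg = AUTONEG_ENABLE;
	}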
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index da413c8d3e19..c481f104a8fe 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -713,7 +713,7 @@ static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
- struct phy_device *phydev = pdata->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (phydev->link) {
if (pdata->phy_speed != phydev->speed) {
@@ -773,15 +773,13 @@ int xgene_enet_phy_connect(struct net_device *ndev)
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
-
- pdata->phy_dev = phy_dev;
} else {
#ifdef CONFIG_ACPI
struct acpi_device *adev = acpi_phy_find_device(dev);
if (adev)
- pdata->phy_dev = adev->driver_data;
-
- phy_dev = pdata->phy_dev;
+ phy_dev = adev->driver_data;
+ else
+ phy_dev = NULL;
if (!phy_dev ||
phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
@@ -849,8 +847,6 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (!phy)
return -EIO;
- pdata->phy_dev = phy;
-
return ret;
}
@@ -890,14 +886,18 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
- if (pdata->phy_dev)
- phy_disconnect(pdata->phy_dev);
+ struct net_device *ndev = pdata->ndev;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
}
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
- if (pdata->phy_dev)
- phy_disconnect(pdata->phy_dev);
+ struct net_device *ndev = pdata->ndev;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
mdiobus_unregister(pdata->mdio_bus);
mdiobus_free(pdata->mdio_bus);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 8a8d05500894..8456337a237d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -237,6 +237,8 @@ enum xgene_enet_rm {
#define TCPHDR_LEN 6
#define IPHDR_POS 6
#define IPHDR_LEN 6
+#define MSS_POS 20
+#define MSS_LEN 2
#define EC_POS 22 /* Enable checksum */
#define EC_LEN 1
#define ET_POS 23 /* Enable TSO */
@@ -253,6 +255,11 @@ enum xgene_enet_rm {
#define LAST_BUFFER (0x7800ULL << BUFDATALEN_POS)
+#define TSO_MSS0_POS 0
+#define TSO_MSS0_LEN 14
+#define TSO_MSS1_POS 16
+#define TSO_MSS1_LEN 14
+
struct xgene_enet_raw_desc {
__le64 m0;
__le64 m1;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index b8b9495e6da6..429f18fc5503 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -137,6 +137,7 @@ static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
struct xgene_enet_raw_desc *raw_desc)
{
+ struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
struct sk_buff *skb;
struct device *dev;
skb_frag_t *frag;
@@ -144,6 +145,7 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
u16 skb_index;
u8 status;
int i, ret = 0;
+ u8 mss_index;
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = cp_ring->cp_skb[skb_index];
@@ -160,6 +162,13 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
DMA_TO_DEVICE);
}
+ if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
+ mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
+ spin_lock(&pdata->mss_lock);
+ pdata->mss_refcnt[mss_index]--;
+ spin_unlock(&pdata->mss_lock);
+ }
+
/* Checking for error */
status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
if (unlikely(status > 2)) {
@@ -178,15 +187,53 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
return ret;
}
-static u64 xgene_enet_work_msg(struct sk_buff *skb)
+static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ bool mss_index_found = false;
+ int mss_index;
+ int i;
+
+ spin_lock(&pdata->mss_lock);
+
+ /* Reuse the slot if MSS matches */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (pdata->mss[i] == mss) {
+ pdata->mss_refcnt[i]++;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ /* Otherwise claim and program a slot whose ref_count is 0 */
+ for (i = 0; !mss_index_found && i < NUM_MSS_REG; i++) {
+ if (!pdata->mss_refcnt[i]) {
+ pdata->mss_refcnt[i]++;
+ pdata->mac_ops->set_mss(pdata, mss, i);
+ pdata->mss[i] = mss;
+ mss_index = i;
+ mss_index_found = true;
+ }
+ }
+
+ spin_unlock(&pdata->mss_lock);
+
+ /* No slots with ref_count = 0 available, return busy */
+ if (!mss_index_found)
+ return -EBUSY;
+
+ return mss_index;
+}
+
+static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
struct net_device *ndev = skb->dev;
struct iphdr *iph;
u8 l3hlen = 0, l4hlen = 0;
u8 ethhdr, proto = 0, csum_enable = 0;
- u64 hopinfo = 0;
u32 hdr_len, mss = 0;
u32 i, len, nr_frags;
+ int mss_index;
ethhdr = xgene_enet_hdr_len(skb->data);
@@ -226,7 +273,11 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
if (!mss || ((skb->len - hdr_len) <= mss))
goto out;
- hopinfo |= SET_BIT(ET);
+ mss_index = xgene_enet_setup_mss(ndev, mss);
+ if (unlikely(mss_index < 0))
+ return -EBUSY;
+
+ *hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
}
} else if (iph->protocol == IPPROTO_UDP) {
l4hlen = UDP_HDR_SIZE;
@@ -234,15 +285,15 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
}
out:
l3hlen = ip_hdrlen(skb) >> 2;
- hopinfo |= SET_VAL(TCPHDR, l4hlen) |
- SET_VAL(IPHDR, l3hlen) |
- SET_VAL(ETHHDR, ethhdr) |
- SET_VAL(EC, csum_enable) |
- SET_VAL(IS, proto) |
- SET_BIT(IC) |
- SET_BIT(TYPE_ETH_WORK_MESSAGE);
-
- return hopinfo;
+ *hopinfo |= SET_VAL(TCPHDR, l4hlen) |
+ SET_VAL(IPHDR, l3hlen) |
+ SET_VAL(ETHHDR, ethhdr) |
+ SET_VAL(EC, csum_enable) |
+ SET_VAL(IS, proto) |
+ SET_BIT(IC) |
+ SET_BIT(TYPE_ETH_WORK_MESSAGE);
+
+ return 0;
}
static u16 xgene_enet_encode_len(u16 len)
@@ -282,20 +333,22 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
skb_frag_t *frag;
u16 tail = tx_ring->tail;
- u64 hopinfo;
+ u64 hopinfo = 0;
u32 len, hw_len;
u8 ll = 0, nv = 0, idx = 0;
bool split = false;
u32 size, offset, ell_bytes = 0;
u32 i, fidx, nr_frags, count = 1;
+ int ret;
raw_desc = &tx_ring->raw_desc[tail];
tail = (tail + 1) & (tx_ring->slots - 1);
memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
- hopinfo = xgene_enet_work_msg(skb);
- if (!hopinfo)
- return -EINVAL;
+ ret = xgene_enet_work_msg(skb, &hopinfo);
+ if (ret)
+ return ret;
+
raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
hopinfo);
@@ -435,6 +488,9 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
count = xgene_enet_setup_tx_desc(tx_ring, skb);
+ if (count == -EBUSY)
+ return NETDEV_TX_BUSY;
+
if (count <= 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -748,8 +804,8 @@ static int xgene_enet_open(struct net_device *ndev)
if (ret)
return ret;
- if (pdata->phy_dev) {
- phy_start(pdata->phy_dev);
+ if (ndev->phydev) {
+ phy_start(ndev->phydev);
} else {
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
netif_carrier_off(ndev);
@@ -772,8 +828,8 @@ static int xgene_enet_close(struct net_device *ndev)
mac_ops->tx_disable(pdata);
mac_ops->rx_disable(pdata);
- if (pdata->phy_dev)
- phy_stop(pdata->phy_dev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
else
cancel_delayed_work_sync(&pdata->link_work);
@@ -1669,7 +1725,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ndev->features |= NETIF_F_TSO;
- pdata->mss = XGENE_ENET_MSS;
+ spin_lock_init(&pdata->mss_lock);
}
ndev->hw_features = ndev->features;
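The TSO rework above multiplexes at most NUM_MSS_REG (four) hardware MSS slots across all in-flight skbs: transmit setup first tries to reuse a slot already programmed with the same MSS, then claims any slot whose reference count has dropped to zero, and otherwise reports busy; TX completion decrements the count recorded in the descriptor's MSS field. The allocator's core, restated as a compact two-pass sketch (pdata and set_mss stand in for the driver's structures):

	static int mss_slot_get(struct xgene_enet_pdata *pdata, u32 mss)
	{
		int i;

		spin_lock(&pdata->mss_lock);
		for (i = 0; i < NUM_MSS_REG; i++)	/* pass 1: reuse a match */
			if (pdata->mss[i] == mss)
				goto found;
		for (i = 0; i < NUM_MSS_REG; i++)	/* pass 2: claim a free slot */
			if (!pdata->mss_refcnt[i]) {
				pdata->mac_ops->set_mss(pdata, mss, i);
				pdata->mss[i] = mss;
				goto found;
			}
		spin_unlock(&pdata->mss_lock);
		return -EBUSY;		/* mapped to NETDEV_TX_BUSY by the caller */
	found:
		pdata->mss_refcnt[i]++;
		spin_unlock(&pdata->mss_lock);
		return i;
	}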
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index b339fc1e8841..0cda58f5a840 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -47,7 +47,7 @@
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
#define MAX_EXP_BUFFS 256
-#define XGENE_ENET_MSS 1448
+#define NUM_MSS_REG 4
#define XGENE_MIN_ENET_FRAME_SIZE 60
#define XGENE_MAX_ENET_IRQ 16
@@ -143,7 +143,7 @@ struct xgene_mac_ops {
void (*rx_disable)(struct xgene_enet_pdata *pdata);
void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
- void (*set_mss)(struct xgene_enet_pdata *pdata);
+ void (*set_mss)(struct xgene_enet_pdata *pdata, u16 mss, u8 index);
void (*link_state)(struct work_struct *work);
};
@@ -174,7 +174,6 @@ struct xgene_cle_ops {
struct xgene_enet_pdata {
struct net_device *ndev;
struct mii_bus *mdio_bus;
- struct phy_device *phy_dev;
int phy_speed;
struct clk *clk;
struct platform_device *pdev;
@@ -213,7 +212,9 @@ struct xgene_enet_pdata {
u8 eth_bufnum;
u8 bp_bufnum;
u16 ring_num;
- u32 mss;
+ u32 mss[NUM_MSS_REG];
+ u32 mss_refcnt[NUM_MSS_REG];
+ spinlock_t mss_lock; /* protects mss[] and mss_refcnt[] */
u8 tx_delay;
u8 rx_delay;
bool mdio_driver;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 279ee27004f7..6475f383ba83 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -232,9 +232,22 @@ static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}
-static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
+ u16 mss, u8 index)
{
- xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+ u8 offset;
+ u32 data;
+
+ offset = (index < 2) ? 0 : 4;
+ xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);
+
+ if (!(index & 0x1))
+ data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
+ SET_VAL(TSO_MSS0, mss);
+ else
+ data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);
+
+ xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
@@ -258,7 +271,6 @@ static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
xgene_xgmac_set_mac_addr(pdata);
- xgene_xgmac_set_mss(pdata);
xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
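xgene_xgmac_set_mss() above packs two 14-bit MSS values per 32-bit TSIF register: slots 0/1 share XG_TSIF_MSS_REG0_ADDR and slots 2/3 the register 4 bytes above it, with the even slot at bit 0 (TSO_MSS0) and the odd slot at bit 16 (TSO_MSS1). The read-modify-write, restated with explicit masks instead of the driver's SET_VAL() helper:

	u32 off  = (index < 2) ? 0 : 4;	/* register holding this slot pair */
	u32 sh   = (index & 1) ? TSO_MSS1_POS : TSO_MSS0_POS;
	u32 mask = 0x3fff << sh;	/* 14-bit field */
	u32 data;

	xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + off, &data);
	data = (data & ~mask) | (((u32)mss & 0x3fff) << sh);	/* keep the sibling slot */
	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + off, data);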
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index 8fc93c5f6abc..6cac919272ea 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -76,11 +76,19 @@ enum alx_device_quirks {
ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
};
+#define ALX_FLAG_USING_MSIX BIT(0)
+#define ALX_FLAG_USING_MSI BIT(1)
+
struct alx_priv {
struct net_device *dev;
struct alx_hw hw;
+ /* msi-x vectors */
+ int num_vec;
+ struct msix_entry *msix_entries;
+ char irq_lbl[IFNAMSIZ + 8];
+
/* all descriptor memory */
struct {
dma_addr_t dma;
@@ -105,7 +113,7 @@ struct alx_priv {
u16 msg_enable;
- bool msi;
+ int flags;
/* protects hw.stats */
spinlock_t stats_lock;
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
index 1fe35e453d43..6ac40b0003a3 100644
--- a/drivers/net/ethernet/atheros/alx/hw.c
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -1031,6 +1031,20 @@ void alx_configure_basic(struct alx_hw *hw)
alx_write_mem32(hw, ALX_WRR, val);
}
+void alx_mask_msix(struct alx_hw *hw, int index, bool mask)
+{
+ u32 reg, val;
+
+ reg = ALX_MSIX_ENTRY_BASE + index * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ val = mask ? PCI_MSIX_ENTRY_CTRL_MASKBIT : 0;
+
+ alx_write_mem32(hw, reg, val);
+ alx_post_write(hw);
+}
+
bool alx_get_phy_info(struct alx_hw *hw)
{
u16 devs1, devs2;
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
index f289c05f5cb4..0191477ace51 100644
--- a/drivers/net/ethernet/atheros/alx/hw.h
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -562,6 +562,7 @@ int alx_reset_mac(struct alx_hw *hw);
void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
bool alx_phy_configured(struct alx_hw *hw);
void alx_configure_basic(struct alx_hw *hw);
+void alx_mask_msix(struct alx_hw *hw, int index, bool mask);
void alx_disable_rss(struct alx_hw *hw);
bool alx_get_phy_info(struct alx_hw *hw);
void alx_update_hw_stats(struct alx_hw *hw);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index d29a4f3102d6..c0f84b73574d 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -51,6 +51,9 @@
const char alx_drv_name[] = "alx";
+static bool msix = false;
+module_param(msix, bool, 0);
+MODULE_PARM_DESC(msix, "Enable msi-x interrupt support");
static void alx_free_txbuf(struct alx_priv *alx, int entry)
{
@@ -292,32 +295,29 @@ static int alx_poll(struct napi_struct *napi, int budget)
napi_complete(&alx->napi);
/* enable interrupt */
- spin_lock_irqsave(&alx->irq_lock, flags);
- alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
- alx_write_mem32(hw, ALX_IMR, alx->int_mask);
- spin_unlock_irqrestore(&alx->irq_lock, flags);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_mask_msix(hw, 1, false);
+ } else {
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+ }
alx_post_write(hw);
return work;
}
-static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
{
struct alx_hw *hw = &alx->hw;
- bool write_int_mask = false;
-
- spin_lock(&alx->irq_lock);
-
- /* ACK interrupt */
- alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
- intr &= alx->int_mask;
if (intr & ALX_ISR_FATAL) {
netif_warn(alx, hw, alx->dev,
"fatal interrupt 0x%x, resetting\n", intr);
alx_schedule_reset(alx);
- goto out;
+ return true;
}
if (intr & ALX_ISR_ALERT)
@@ -329,19 +329,32 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
* is cleared, the interrupt status could be cleared.
*/
alx->int_mask &= ~ALX_ISR_PHY;
- write_int_mask = true;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_schedule_link_check(alx);
}
+ return false;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ spin_lock(&alx->irq_lock);
+
+ /* ACK interrupt */
+ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+ intr &= alx->int_mask;
+
+ if (alx_intr_handle_misc(alx, intr))
+ goto out;
+
if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
napi_schedule(&alx->napi);
/* mask rx/tx interrupt, enable them when napi complete */
alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
- write_int_mask = true;
- }
-
- if (write_int_mask)
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ }
alx_write_mem32(hw, ALX_ISR, 0);
@@ -350,6 +363,46 @@ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
return IRQ_HANDLED;
}
+static irqreturn_t alx_intr_msix_ring(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+
+ /* mask interrupt to ACK chip */
+ alx_mask_msix(hw, 1, true);
+ /* clear interrupt status */
+ alx_write_mem32(hw, ALX_ISR, (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0));
+
+ napi_schedule(&alx->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msix_misc(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+ u32 intr;
+
+ /* mask interrupt to ACK chip */
+ alx_mask_msix(hw, 0, true);
+
+ /* read interrupt status */
+ intr = alx_read_mem32(hw, ALX_ISR);
+ intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);
+
+ if (alx_intr_handle_misc(alx, intr))
+ return IRQ_HANDLED;
+
+ /* clear interrupt status */
+ alx_write_mem32(hw, ALX_ISR, intr);
+
+ /* enable interrupt again */
+ alx_mask_msix(hw, 0, false);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t alx_intr_msi(int irq, void *data)
{
struct alx_priv *alx = data;
@@ -614,31 +667,136 @@ static void alx_free_rings(struct alx_priv *alx)
static void alx_config_vector_mapping(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ u32 tbl = 0;
+
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ tbl |= 1 << ALX_MSI_MAP_TBL1_TXQ0_SHIFT;
+ tbl |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
+ }
- alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl);
alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
}
+static bool alx_enable_msix(struct alx_priv *alx)
+{
+ int i, err, num_vec = 2;
+
+ alx->msix_entries = kcalloc(num_vec, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!alx->msix_entries) {
+ netdev_warn(alx->dev, "Allocation of msix entries failed!\n");
+ return false;
+ }
+
+ for (i = 0; i < num_vec; i++)
+ alx->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(alx->hw.pdev, alx->msix_entries, num_vec);
+ if (err) {
+ kfree(alx->msix_entries);
+ netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
+ return false;
+ }
+
+ alx->num_vec = num_vec;
+ return true;
+}
+
+static int alx_request_msix(struct alx_priv *alx)
+{
+ struct net_device *netdev = alx->dev;
+ int i, err, vector = 0, free_vector = 0;
+
+ err = request_irq(alx->msix_entries[0].vector, alx_intr_msix_misc,
+ 0, netdev->name, alx);
+ if (err)
+ goto out_err;
+
+ vector++;
+ sprintf(alx->irq_lbl, "%s-TxRx-0", netdev->name);
+
+ err = request_irq(alx->msix_entries[vector].vector,
+ alx_intr_msix_ring, 0, alx->irq_lbl, alx);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ free_irq(alx->msix_entries[free_vector++].vector, alx);
+
+ vector--;
+ for (i = 0; i < vector; i++)
+ free_irq(alx->msix_entries[free_vector++].vector, alx);
+
+out_err:
+ return err;
+}
+
+static void alx_init_intr(struct alx_priv *alx, bool msix)
+{
+ if (msix) {
+ if (alx_enable_msix(alx))
+ alx->flags |= ALX_FLAG_USING_MSIX;
+ }
+
+ if (!(alx->flags & ALX_FLAG_USING_MSIX)) {
+ alx->num_vec = 1;
+
+ if (!pci_enable_msi(alx->hw.pdev))
+ alx->flags |= ALX_FLAG_USING_MSI;
+ }
+}
+
+static void alx_disable_advanced_intr(struct alx_priv *alx)
+{
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ kfree(alx->msix_entries);
+ pci_disable_msix(alx->hw.pdev);
+ alx->flags &= ~ALX_FLAG_USING_MSIX;
+ }
+
+ if (alx->flags & ALX_FLAG_USING_MSI) {
+ pci_disable_msi(alx->hw.pdev);
+ alx->flags &= ~ALX_FLAG_USING_MSI;
+ }
+}
+
static void alx_irq_enable(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ int i;
/* level-1 interrupt switch */
alx_write_mem32(hw, ALX_ISR, 0);
alx_write_mem32(hw, ALX_IMR, alx->int_mask);
alx_post_write(hw);
+
+ if (alx->flags & ALX_FLAG_USING_MSIX)
+ /* enable all msix irqs */
+ for (i = 0; i < alx->num_vec; i++)
+ alx_mask_msix(hw, i, false);
}
static void alx_irq_disable(struct alx_priv *alx)
{
struct alx_hw *hw = &alx->hw;
+ int i;
alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
alx_write_mem32(hw, ALX_IMR, 0);
alx_post_write(hw);
- synchronize_irq(alx->hw.pdev->irq);
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ for (i = 0; i < alx->num_vec; i++) {
+ alx_mask_msix(hw, i, true);
+ synchronize_irq(alx->msix_entries[i].vector);
+ }
+ } else {
+ synchronize_irq(alx->hw.pdev->irq);
+ }
}
static int alx_request_irq(struct alx_priv *alx)
@@ -650,9 +808,18 @@ static int alx_request_irq(struct alx_priv *alx)
msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
- if (!pci_enable_msi(alx->hw.pdev)) {
- alx->msi = true;
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
+ err = alx_request_msix(alx);
+ if (!err)
+ goto out;
+ /* MSI-X request failed, release it and fall back to MSI/legacy */
+ alx_disable_advanced_intr(alx);
+ alx_init_intr(alx, false);
+ }
+
+ if (alx->flags & ALX_FLAG_USING_MSI) {
alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
msi_ctrl | ALX_MSI_MASK_SEL_LINE);
err = request_irq(pdev->irq, alx_intr_msi, 0,
@@ -660,6 +827,7 @@ static int alx_request_irq(struct alx_priv *alx)
if (!err)
goto out;
/* fall back to legacy interrupt */
+ alx->flags &= ~ALX_FLAG_USING_MSI;
pci_disable_msi(alx->hw.pdev);
}
@@ -669,19 +837,25 @@ static int alx_request_irq(struct alx_priv *alx)
out:
if (!err)
alx_config_vector_mapping(alx);
+ else
+ netdev_err(alx->dev, "IRQ registration failed!\n");
return err;
}
static void alx_free_irq(struct alx_priv *alx)
{
struct pci_dev *pdev = alx->hw.pdev;
+ int i;
- free_irq(pdev->irq, alx);
-
- if (alx->msi) {
- pci_disable_msi(alx->hw.pdev);
- alx->msi = false;
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ /* we have only 2 vectors without multi queue support */
+ for (i = 0; i < 2; i++)
+ free_irq(alx->msix_entries[i].vector, alx);
+ } else {
+ free_irq(pdev->irq, alx);
}
+
+ alx_disable_advanced_intr(alx);
}
static int alx_identify_hw(struct alx_priv *alx)
@@ -847,12 +1021,14 @@ static int __alx_open(struct alx_priv *alx, bool resume)
{
int err;
+ alx_init_intr(alx, msix);
+
if (!resume)
netif_carrier_off(alx->dev);
err = alx_alloc_rings(alx);
if (err)
- return err;
+ goto out_disable_adv_intr;
alx_configure(alx);
@@ -873,6 +1049,8 @@ static int __alx_open(struct alx_priv *alx, bool resume)
out_free_rings:
alx_free_rings(alx);
+out_disable_adv_intr:
+ alx_disable_advanced_intr(alx);
return err;
}
@@ -1236,7 +1414,10 @@ static void alx_poll_controller(struct net_device *netdev)
{
struct alx_priv *alx = netdev_priv(netdev);
- if (alx->msi)
+ if (alx->flags & ALX_FLAG_USING_MSIX) {
+ alx_intr_msix_misc(0, alx);
+ alx_intr_msix_ring(0, alx);
+ } else if (alx->flags & ALX_FLAG_USING_MSI)
alx_intr_msi(0, alx);
else
alx_intr_legacy(0, alx);
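The alx conversion uses exactly two MSI-X vectors, misc interrupts on table entry 0 and the single TxRx ring on entry 1, which is what alx_config_vector_mapping() encodes into ALX_MSI_MAP_TBL1. The PCI-core part of the flow, reduced to its essentials (pci_enable_msix() returns 0 on success; later kernels replace it with pci_enable_msix_range()):

	struct msix_entry entries[2];
	int i, err;

	for (i = 0; i < 2; i++)
		entries[i].entry = i;	/* request table entries 0 and 1 */

	err = pci_enable_msix(pdev, entries, 2);
	if (err)
		return err;		/* caller falls back to MSI, then INTx */

	/* entries[i].vector now holds the IRQ number to pass to request_irq() */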
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 74f0a37c4eb6..17aa33c5567d 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1486,7 +1486,7 @@ static int b44_open(struct net_device *dev)
b44_enable_ints(bp);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_start(bp->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
out:
@@ -1651,7 +1651,7 @@ static int b44_close(struct net_device *dev)
netif_stop_queue(dev);
if (bp->flags & B44_FLAG_EXTERNAL_PHY)
- phy_stop(bp->phydev);
+ phy_stop(dev->phydev);
napi_disable(&bp->napi);
@@ -1832,90 +1832,100 @@ static int b44_nway_reset(struct net_device *dev)
return r;
}
-static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ u32 supported, advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- return phy_ethtool_gset(bp->phydev, cmd);
+ BUG_ON(!dev->phydev);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
}
- cmd->supported = (SUPPORTED_Autoneg);
- cmd->supported |= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_MII);
+ supported = (SUPPORTED_Autoneg);
+ supported |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_MII);
- cmd->advertising = 0;
+ advertising = 0;
if (bp->flags & B44_FLAG_ADV_10HALF)
- cmd->advertising |= ADVERTISED_10baseT_Half;
+ advertising |= ADVERTISED_10baseT_Half;
if (bp->flags & B44_FLAG_ADV_10FULL)
- cmd->advertising |= ADVERTISED_10baseT_Full;
+ advertising |= ADVERTISED_10baseT_Full;
if (bp->flags & B44_FLAG_ADV_100HALF)
- cmd->advertising |= ADVERTISED_100baseT_Half;
+ advertising |= ADVERTISED_100baseT_Half;
if (bp->flags & B44_FLAG_ADV_100FULL)
- cmd->advertising |= ADVERTISED_100baseT_Full;
- cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
- SPEED_100 : SPEED_10));
- cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+ advertising |= ADVERTISED_100baseT_Full;
+ advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->port = 0;
- cmd->phy_address = bp->phy_addr;
- cmd->transceiver = (bp->flags & B44_FLAG_EXTERNAL_PHY) ?
- XCVR_EXTERNAL : XCVR_INTERNAL;
- cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+ cmd->base.port = 0;
+ cmd->base.phy_address = bp->phy_addr;
+ cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
AUTONEG_DISABLE : AUTONEG_ENABLE;
- if (cmd->autoneg == AUTONEG_ENABLE)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ advertising |= ADVERTISED_Autoneg;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
if (!netif_running(dev)){
- ethtool_cmd_speed_set(cmd, 0);
- cmd->duplex = 0xff;
+ cmd->base.speed = 0;
+ cmd->base.duplex = 0xff;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+
return 0;
}
-static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int b44_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct b44 *bp = netdev_priv(dev);
u32 speed;
int ret;
+ u32 advertising;
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
+ BUG_ON(!dev->phydev);
spin_lock_irq(&bp->lock);
if (netif_running(dev))
b44_setup_phy(bp);
- ret = phy_ethtool_sset(bp->phydev, cmd);
+ ret = phy_ethtool_ksettings_set(dev->phydev, cmd);
spin_unlock_irq(&bp->lock);
return ret;
}
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
/* We do not support gigabit. */
- if (cmd->autoneg == AUTONEG_ENABLE) {
- if (cmd->advertising &
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (advertising &
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
return -EINVAL;
} else if ((speed != SPEED_100 &&
speed != SPEED_10) ||
- (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL)) {
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL)) {
return -EINVAL;
}
spin_lock_irq(&bp->lock);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
bp->flags &= ~(B44_FLAG_FORCE_LINK |
B44_FLAG_100_BASE_T |
B44_FLAG_FULL_DUPLEX |
@@ -1923,19 +1933,19 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
- if (cmd->advertising == 0) {
+ if (advertising == 0) {
bp->flags |= (B44_FLAG_ADV_10HALF |
B44_FLAG_ADV_10FULL |
B44_FLAG_ADV_100HALF |
B44_FLAG_ADV_100FULL);
} else {
- if (cmd->advertising & ADVERTISED_10baseT_Half)
+ if (advertising & ADVERTISED_10baseT_Half)
bp->flags |= B44_FLAG_ADV_10HALF;
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->flags |= B44_FLAG_ADV_10FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half)
+ if (advertising & ADVERTISED_100baseT_Half)
bp->flags |= B44_FLAG_ADV_100HALF;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->flags |= B44_FLAG_ADV_100FULL;
}
} else {
@@ -1943,7 +1953,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
if (speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
- if (cmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
@@ -2110,8 +2120,6 @@ static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops b44_ethtool_ops = {
.get_drvinfo = b44_get_drvinfo,
- .get_settings = b44_get_settings,
- .set_settings = b44_set_settings,
.nway_reset = b44_nway_reset,
.get_link = ethtool_op_get_link,
.get_wol = b44_get_wol,
@@ -2125,6 +2133,8 @@ static const struct ethtool_ops b44_ethtool_ops = {
.get_strings = b44_get_strings,
.get_sset_count = b44_get_sset_count,
.get_ethtool_stats = b44_get_ethtool_stats,
+ .get_link_ksettings = b44_get_link_ksettings,
+ .set_link_ksettings = b44_set_link_ksettings,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -2137,8 +2147,8 @@ static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_lock_irq(&bp->lock);
if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
- BUG_ON(!bp->phydev);
- err = phy_mii_ioctl(bp->phydev, ifr, cmd);
+ BUG_ON(!dev->phydev);
+ err = phy_mii_ioctl(dev->phydev, ifr, cmd);
} else {
err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
}
@@ -2206,7 +2216,7 @@ static const struct net_device_ops b44_netdev_ops = {
static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phydev;
+ struct phy_device *phydev = dev->phydev;
bool status_changed = 0;
BUG_ON(!phydev);
@@ -2303,7 +2313,6 @@ static int b44_register_phy_one(struct b44 *bp)
SUPPORTED_MII);
phydev->advertising = phydev->supported;
- bp->phydev = phydev;
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
@@ -2323,9 +2332,10 @@ err_out:
static void b44_unregister_phy_one(struct b44 *bp)
{
+ struct net_device *dev = bp->dev;
struct mii_bus *mii_bus = bp->mii_bus;
- phy_disconnect(bp->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
}
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index 65d88d7c5581..89d2cf341163 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -404,7 +404,6 @@ struct b44 {
u32 tx_pending;
u8 phy_addr;
u8 force_copybreak;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int old_link;
struct mii_if_info mii_if;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6c8bc5fadac7..ae364c74baf3 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -791,7 +791,7 @@ static void bcm_enet_adjust_phy_link(struct net_device *dev)
int status_changed;
priv = netdev_priv(dev);
- phydev = priv->phydev;
+ phydev = dev->phydev;
status_changed = 0;
if (priv->old_link != phydev->link) {
@@ -913,7 +913,6 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
- priv->phydev = phydev;
}
/* mask all interrupts and request them */
@@ -1085,7 +1084,7 @@ static int bcm_enet_open(struct net_device *dev)
ENETDMAC_IRMASK, priv->tx_chan);
if (priv->has_phy)
- phy_start(priv->phydev);
+ phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1127,7 +1126,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1190,7 +1189,7 @@ static int bcm_enet_stop(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
del_timer_sync(&priv->rx_timeout);
/* mask all interrupts */
@@ -1234,10 +1233,8 @@ static int bcm_enet_stop(struct net_device *dev)
free_irq(dev->irq, dev);
/* release phy */
- if (priv->has_phy) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
+ if (priv->has_phy)
+ phy_disconnect(dev->phydev);
return 0;
}
@@ -1437,64 +1434,68 @@ static int bcm_enet_nway_reset(struct net_device *dev)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(dev->phydev);
}
return -EOPNOTSUPP;
}
-static int bcm_enet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
+ u32 supported, advertising;
priv = netdev_priv(dev);
- cmd->maxrxpkt = 0;
- cmd->maxtxpkt = 0;
-
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_gset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
} else {
- cmd->autoneg = 0;
- ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
- ? SPEED_100 : SPEED_10));
- cmd->duplex = (priv->force_duplex_full) ?
+ cmd->base.autoneg = 0;
+ cmd->base.speed = (priv->force_speed_100) ?
+ SPEED_100 : SPEED_10;
+ cmd->base.duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
- cmd->supported = ADVERTISED_10baseT_Half |
+ supported = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full;
- cmd->advertising = 0;
- cmd->port = PORT_MII;
- cmd->transceiver = XCVR_EXTERNAL;
+ advertising = 0;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
+ cmd->base.port = PORT_MII;
}
return 0;
}
-static int bcm_enet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcm_enet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bcm_enet_priv *priv;
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_sset(priv->phydev, cmd);
+ return phy_ethtool_ksettings_set(dev->phydev, cmd);
} else {
- if (cmd->autoneg ||
- (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
- cmd->port != PORT_MII)
+ if (cmd->base.autoneg ||
+ (cmd->base.speed != SPEED_100 &&
+ cmd->base.speed != SPEED_10) ||
+ cmd->base.port != PORT_MII)
return -EINVAL;
- priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
- priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+ priv->force_speed_100 =
+ (cmd->base.speed == SPEED_100) ? 1 : 0;
+ priv->force_duplex_full =
+ (cmd->base.duplex == DUPLEX_FULL) ? 1 : 0;
if (netif_running(dev))
bcm_enet_adjust_link(dev);
@@ -1588,14 +1589,14 @@ static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
.nway_reset = bcm_enet_nway_reset,
- .get_settings = bcm_enet_get_settings,
- .set_settings = bcm_enet_set_settings,
.get_drvinfo = bcm_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = bcm_enet_get_ringparam,
.set_ringparam = bcm_enet_set_ringparam,
.get_pauseparam = bcm_enet_get_pauseparam,
.set_pauseparam = bcm_enet_set_pauseparam,
+ .get_link_ksettings = bcm_enet_get_link_ksettings,
+ .set_link_ksettings = bcm_enet_set_link_ksettings,
};
static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1604,9 +1605,9 @@ static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
priv = netdev_priv(dev);
if (priv->has_phy) {
- if (!priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
} else {
struct mii_if_info mii;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index f55af4310085..0a1b7b2e55bd 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -290,7 +290,6 @@ struct bcm_enet_priv {
/* used when a phy is connected (phylib used) */
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_link;
int old_duplex;
int old_pause;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8fc3f3c137f8..27f11a5d5fe2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -50,7 +50,7 @@
#include <linux/log2.h>
#include <linux/aer.h>
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#if IS_ENABLED(CONFIG_CNIC)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto out;
-
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
- bnx2_release_firmware(bp);
goto out;
}
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto error;
+
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
+ bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e68fadecfdb..243cb9748d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -492,7 +492,8 @@ int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
-int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6f9104b5d599..dab61a81a3ba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff));
+ if (pci_channel_offline(bp->pdev)) {
+ BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+ return;
+ }
+
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
/* Release IRQs */
bnx2x_free_irq(bp);
- /* Reset the chip */
- rc = bnx2x_reset_hw(bp, reset_code);
- if (rc)
- BNX2X_ERR("HW_RESET failed\n");
+ /* Reset the chip, unless the PCI function is offline. If we reach
+ * this point following PCI error handling, the device is really in a
+ * bad state and we're about to remove it, so resetting the chip is
+ * not a good idea.
+ */
+ if (!pci_channel_offline(bp->pdev)) {
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, keep_link);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6c586b045d1d..3f77d0863543 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -2521,7 +2521,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
for_each_vf(bp, vfidx) {
bulletin = BP_VF_BULLETIN(bp, vfidx);
if (bulletin->valid_bitmap & (1 << VLAN_VALID))
- bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
+ htons(ETH_P_8021Q));
}
}
@@ -2781,7 +2782,8 @@ static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
return 0;
}
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct pf_vf_bulletin_content *bulletin = NULL;
struct bnx2x *bp = netdev_priv(dev);
@@ -2796,6 +2798,9 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
vfidx, vlan, 0);
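The new __be16 vlan_proto argument tracks the ndo_set_vf_vlan() prototype change that allows 802.1ad (QinQ) VF tagging; a driver that can only offload 802.1Q tags is expected to reject anything else up front, which is all the added guard does:

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;	/* only 802.1Q offload supported */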
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2cf79100c9cb..a9f9f3738022 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -32,6 +32,7 @@
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/rtc.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -93,50 +94,49 @@ enum board_idx {
BCM57404_NPAR,
BCM57406_NPAR,
BCM57407_SFP,
+ BCM57407_NPAR,
BCM57414_NPAR,
BCM57416_NPAR,
- BCM57304_VF,
- BCM57404_VF,
- BCM57414_VF,
- BCM57314_VF,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
};
/* indexed by enum above */
static const struct {
char *name;
} board_info[] = {
- { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
- { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
- { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
- { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+ { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
- { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
@@ -160,13 +160,19 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
- { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
- { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
- { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
- { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
{ 0 }
};
@@ -189,8 +195,7 @@ static const u16 bnxt_async_events_arr[] = {
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF ||
- idx == BCM57314_VF || idx == BCM57414_VF);
+ return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -353,8 +358,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
- __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
- push_len - 16);
+ __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ (push_len - 16) << 1);
} else {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
push_len);
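A note on the count arithmetic in this hunk: push_len is measured in 64-bit words ((length + sizeof(*tx_push) + 7) / 8). After the first 16 quadwords go out via __iowrite64_copy(), the tail of (push_len - 16) quadwords equals (push_len - 16) * 2 dwords, hence the << 1 passed to __iowrite32_copy(); for example push_len = 20 leaves 4 quadwords = 8 dwords. The 32-bit variant is presumably needed because the tail starts at tx_doorbell + 4, an offset that is not 64-bit aligned.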
@@ -3419,10 +3424,10 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
if (set_rss) {
- vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
- BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
- BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
+ vnic->hash_type = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
req.hash_type = cpu_to_le32(vnic->hash_type);
@@ -4156,6 +4161,11 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_func_qcaps_exit;
+ bp->tx_push_thresh = 0;
+ if (resp->flags &
+ cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -4187,12 +4197,6 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- if (is_valid_ether_addr(vf->mac_addr))
- /* overwrite netdev dev_addr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- else
- random_ether_addr(bp->dev->dev_addr);
vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -4204,14 +4208,21 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
vf->max_vnics = le16_to_cpu(resp->max_vnics);
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+ return rc;
#endif
}
- bp->tx_push_thresh = 0;
- if (resp->flags &
- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
- bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
-
hwrm_func_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4249,6 +4260,9 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
if (bp->max_tc > BNXT_MAX_QUEUE)
bp->max_tc = BNXT_MAX_QUEUE;
+ if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
+ bp->max_tc = 1;
+
qptr = &resp->queue_id0;
for (i = 0; i < bp->max_tc; i++) {
bp->q_info[i].queue_id = *qptr++;
@@ -4307,6 +4321,31 @@ hwrm_ver_get_exit:
return rc;
}
+int bnxt_hwrm_fw_set_time(struct bnxt *bp)
+{
+#if IS_ENABLED(CONFIG_RTC_LIB)
+ struct hwrm_fw_set_time_input req = {0};
+ struct rtc_time tm;
+ struct timeval tv;
+
+ if (bp->hwrm_spec_code < 0x10400)
+ return -EOPNOTSUPP;
+
+ do_gettimeofday(&tv);
+ rtc_time_to_tm(tv.tv_sec, &tm);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
+ req.year = cpu_to_le16(1900 + tm.tm_year);
+ req.month = 1 + tm.tm_mon;
+ req.day = tm.tm_mday;
+ req.hour = tm.tm_hour;
+ req.minute = tm.tm_min;
+ req.second = tm.tm_sec;
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
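(Worked example for bnxt_hwrm_fw_set_time() above: rtc_time_to_tm() counts tm_year from 1900 and tm_mon from 0, hence the "1900 +" and "1 +" fixups when filling the HWRM request; tv.tv_sec = 1462060800 yields tm_year = 116, tm_mon = 4, tm_mday = 1, so the request carries year = 2016, month = 5, day = 1, i.e. 2016-05-01 UTC.)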
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
int rc;
@@ -6804,6 +6843,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err;
+ bnxt_hwrm_fw_set_time(bp);
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 23e04a6142fb..51b164a0e844 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.3.0"
+#define DRV_MODULE_VERSION "1.5.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 3
+#define DRV_VER_MIN 5
#define DRV_VER_UPD 0
struct tx_bd {
@@ -106,11 +106,11 @@ struct tx_cmp {
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
#define CMP_TYPE_ERROR_STATUS 48
- #define CMPL_BASE_TYPE_STAT_EJECT (0x1aUL << 0)
- #define CMPL_BASE_TYPE_HWRM_DONE (0x20UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ (0x22UL << 0)
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP (0x24UL << 0)
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
+ #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define TX_CMP_FLAGS_ERROR (1 << 6)
#define TX_CMP_FLAGS_PUSH (1 << 7)
@@ -389,11 +389,6 @@ struct rx_tpa_end_cmp_ext {
#define INVALID_HW_RING_ID ((u16)-1)
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV4 0x01
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 0x02
-#define BNXT_RSS_HASH_TYPE_FLAG_IPV6 0x04
-#define BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6 0x08
-
/* The hardware supports certain page sizes. Use the supported page sizes
* to allocate the rings.
*/
@@ -418,7 +413,7 @@ struct rx_tpa_end_cmp_ext {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
-#define BNXT_MIN_PKT_SIZE 45
+#define BNXT_MIN_PKT_SIZE 52
#define BNXT_NUM_TESTS(bp) 0
@@ -1225,6 +1220,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b83e17403d6c..a7e04ff4eaed 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -21,6 +21,8 @@
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
+#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen);
@@ -346,7 +348,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
- channel->max_combined = max_rx_rings;
+ channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
@@ -404,8 +406,8 @@ static int bnxt_set_channels(struct net_device *dev,
if (tcs > 1)
max_tx_rings /= tcs;
- if (sh && (channel->combined_count > max_rx_rings ||
- channel->combined_count > max_tx_rings))
+ if (sh &&
+ channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
return -ENOMEM;
if (!sh && (channel->rx_count > max_rx_rings ||
@@ -428,8 +430,10 @@ static int bnxt_set_channels(struct net_device *dev,
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
- bp->rx_nr_rings = channel->combined_count;
- bp->tx_nr_rings_per_tc = channel->combined_count;
+ bp->rx_nr_rings = min_t(int, channel->combined_count,
+ max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
+ max_tx_rings);
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count;
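
With shared rings, the combined count is now validated against the larger of the RX and TX maxima, and each direction is then clamped to its own limit, so a request sized for the bigger side no longer fails on the smaller one. The same arithmetic in miniature, with stand-in min_t/max_t macros:

/* Sketch: shared-ring sizing. The combined count may exceed one
 * direction's maximum; each side is clamped independently.
 * min_t/max_t are stand-ins for the kernel macros. */
#include <stdio.h>

#define max_t(t, a, b)  ((t)(a) > (t)(b) ? (t)(a) : (t)(b))
#define min_t(t, a, b)  ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
        int max_rx = 8, max_tx = 16, combined = 12;

        if (combined > max_t(int, max_rx, max_tx))
                return 1;       /* the driver returns -ENOMEM */

        printf("rx=%d tx=%d\n",
               min_t(int, combined, max_rx),    /* 8 */
               min_t(int, combined, max_tx));   /* 12 */
        return 0;
}
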
@@ -1028,6 +1032,10 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+
static int bnxt_flash_nvram(struct net_device *dev,
u16 dir_type,
u16 dir_ordinal,
@@ -1179,7 +1187,6 @@ static int bnxt_flash_firmware(struct net_device *dev,
(unsigned long)calculated_crc);
return -EINVAL;
}
- /* TODO: Validate digital signature (RSA-encrypted SHA-256 hash) here */
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw_data, fw_size);
if (rc == 0) /* Firmware update successful */
@@ -1188,6 +1195,57 @@ static int bnxt_flash_firmware(struct net_device *dev,
return rc;
}
+static int bnxt_flash_microcode(struct net_device *dev,
+ u16 dir_type,
+ const u8 *fw_data,
+ size_t fw_size)
+{
+ struct bnxt_ucode_trailer *trailer;
+ u32 calculated_crc;
+ u32 stored_crc;
+ int rc = 0;
+
+ if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode file size: %u\n",
+ (unsigned int)fw_size);
+ return -EINVAL;
+ }
+ trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
+ sizeof(*trailer)));
+ if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
+ netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
+ le32_to_cpu(trailer->sig));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->dir_type) != dir_type) {
+ netdev_err(dev, "Expected microcode type: %d, read: %d\n",
+ dir_type, le16_to_cpu(trailer->dir_type));
+ return -EINVAL;
+ }
+ if (le16_to_cpu(trailer->trailer_length) <
+ sizeof(struct bnxt_ucode_trailer)) {
+ netdev_err(dev, "Invalid microcode trailer length: %d\n",
+ le16_to_cpu(trailer->trailer_length));
+ return -EINVAL;
+ }
+
+ /* Confirm the CRC32 checksum of the file: */
+ stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
+ sizeof(stored_crc)));
+ calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
+ if (calculated_crc != stored_crc) {
+ netdev_err(dev,
+ "CRC32 (%08lX) does not match calculated: %08lX\n",
+ (unsigned long)stored_crc,
+ (unsigned long)calculated_crc);
+ return -EINVAL;
+ }
+ rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
+ 0, 0, fw_data, fw_size);
+
+ return rc;
+}
+
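
bnxt_flash_microcode() validates a trailing little-endian CRC32: the checksum covers everything except the last four bytes and is compared against them; the kernel expression ~crc32(~0, ...) is the standard reflected CRC-32 (poly 0xEDB88320). A self-checking sketch:

/* Sketch: validate a trailing little-endian CRC32 as
 * bnxt_flash_microcode() does: checksum everything except the last
 * four bytes, then compare. crc32_ieee() is the standard reflected
 * CRC-32 (poly 0xEDB88320), equivalent to ~crc32(~0, ...) above. */
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32_ieee(const uint8_t *p, size_t n)
{
        uint32_t c = ~0u;

        while (n--) {
                int k;

                c ^= *p++;
                for (k = 0; k < 8; k++)
                        c = (c >> 1) ^ (0xedb88320u & -(c & 1));
        }
        return ~c;
}

static int trailing_crc_ok(const uint8_t *data, size_t size)
{
        uint32_t stored;

        if (size < 4)
                return 0;
        stored = data[size - 4] | data[size - 3] << 8 |
                 data[size - 2] << 16 | (uint32_t)data[size - 1] << 24;
        return crc32_ieee(data, size - 4) == stored;
}

int main(void)
{
        uint8_t buf[8] = { 'T', 'r', 'l', 'r' };
        uint32_t c = crc32_ieee(buf, 4);
        int i;

        for (i = 0; i < 4; i++)
                buf[4 + i] = c >> (8 * i);      /* append LE checksum */
        return trailing_crc_ok(buf, sizeof(buf)) ? 0 : 1;
}
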
static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
switch (dir_type) {
@@ -1206,7 +1264,7 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
return false;
}
-static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
+static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
switch (dir_type) {
case BNX_DIR_TYPE_AVS:
@@ -1227,7 +1285,7 @@ static bool bnxt_dir_type_is_unprotected_exec_format(u16 dir_type)
static bool bnxt_dir_type_is_executable(u16 dir_type)
{
return bnxt_dir_type_is_ape_bin_format(dir_type) ||
- bnxt_dir_type_is_unprotected_exec_format(dir_type);
+ bnxt_dir_type_is_other_exec_format(dir_type);
}
static int bnxt_flash_firmware_from_file(struct net_device *dev,
@@ -1237,10 +1295,6 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (dir_type != BNX_DIR_TYPE_UPDATE &&
- bnxt_dir_type_is_executable(dir_type) == false)
- return -EINVAL;
-
rc = request_firmware(&fw, filename, &dev->dev);
if (rc != 0) {
netdev_err(dev, "Error %d requesting firmware file: %s\n",
@@ -1249,6 +1303,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
if (bnxt_dir_type_is_ape_bin_format(dir_type) == true)
rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
+ else if (bnxt_dir_type_is_other_exec_format(dir_type) == true)
+ rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
else
rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
0, 0, fw->data, fw->size);
@@ -1257,10 +1313,83 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
}
static int bnxt_flash_package_from_file(struct net_device *dev,
- char *filename)
+ char *filename, u32 install_type)
{
- netdev_err(dev, "packages are not yet supported\n");
- return -EINVAL;
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_nvm_install_update_input install = {0};
+ const struct firmware *fw;
+ u32 item_len;
+ u16 index;
+ int rc;
+
+ bnxt_hwrm_fw_set_time(bp);
+
+ if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, &item_len, NULL) != 0) {
+ netdev_err(dev, "PKG update area not created in nvram\n");
+ return -ENOBUFS;
+ }
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "PKG error %d requesting file: %s\n",
+ rc, filename);
+ return rc;
+ }
+
+ if (fw->size > item_len) {
+ netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
+ (unsigned long)fw->size);
+ rc = -EFBIG;
+ } else {
+ dma_addr_t dma_handle;
+ u8 *kmem;
+ struct hwrm_nvm_modify_input modify = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);
+
+ modify.dir_idx = cpu_to_le16(index);
+ modify.len = cpu_to_le32(fw->size);
+
+ kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
+ &dma_handle, GFP_KERNEL);
+ if (!kmem) {
+ netdev_err(dev,
+ "dma_alloc_coherent failure, length = %u\n",
+ (unsigned int)fw->size);
+ rc = -ENOMEM;
+ } else {
+ memcpy(kmem, fw->data, fw->size);
+ modify.host_src_addr = cpu_to_le64(dma_handle);
+
+ rc = hwrm_send_message(bp, &modify, sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
+ dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
+ dma_handle);
+ }
+ }
+ release_firmware(fw);
+ if (rc)
+ return rc;
+
+ if ((install_type & 0xffff) == 0)
+ install_type >>= 16;
+ bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
+ install.install_type = cpu_to_le32(install_type);
+
+ rc = hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (rc)
+ return -EOPNOTSUPP;
+
+ if (resp->result) {
+ netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
+ (s8)resp->result, (int)resp->problem_item);
+ return -ENOPKG;
+ }
+ return 0;
}
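
Package flashing above is a two-step HWRM conversation: NVM_MODIFY streams the file into the pre-created update area through a DMA-coherent bounce buffer, then NVM_INSTALL_UPDATE asks firmware to unpack it. The ethtool region value doubles as the install type; when its low 16 bits are zero the selector sits in the high half, hence the shift. A sketch of that decode (the example values are illustrative, not from the HWRM spec):

/* Sketch: install-type decode used above. ethtool hands over one
 * 32-bit region value; a selector stored in the upper half is
 * shifted down when the lower half is zero. Example values are
 * illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t decode_install_type(uint32_t region)
{
        if ((region & 0xffff) == 0)
                region >>= 16;
        return region;
}

int main(void)
{
        printf("%u\n", decode_install_type(0x00010000));  /* 1 */
        printf("%u\n", decode_install_type(0x00000003));  /* 3 */
        return 0;
}
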
static int bnxt_flash_device(struct net_device *dev,
@@ -1271,8 +1400,10 @@ static int bnxt_flash_device(struct net_device *dev,
return -EINVAL;
}
- if (flash->region == ETHTOOL_FLASH_ALL_REGIONS)
- return bnxt_flash_package_from_file(dev, flash->data);
+ if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
+ flash->region > 0xffff)
+ return bnxt_flash_package_from_file(dev, flash->data,
+ flash->region);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -1516,7 +1647,7 @@ static int bnxt_set_eeprom(struct net_device *dev,
/* Create or re-write an NVM item: */
if (bnxt_dir_type_is_executable(type) == true)
- return -EINVAL;
+ return -EOPNOTSUPP;
ext = eeprom->magic & 0xffff;
ordinal = eeprom->offset >> 16;
attr = eeprom->offset & 0xffff;
@@ -1718,6 +1849,25 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
return rc;
}
+static int bnxt_nway_reset(struct net_device *dev)
+{
+ int rc = 0;
+
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+ return -EINVAL;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, true, false);
+
+ return rc;
+}
+
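
The new nway_reset hook (ethtool -r) restarts autonegotiation by re-sending the current link settings. Note the guard ordering: the capability error (-EOPNOTSUPP) is reported before the configuration error (-EINVAL), and the hardware is only touched while the netdev is up. A sketch of that ordering:

/* Sketch: guard ordering of the autoneg restart above. Capability
 * errors come before configuration errors, and the hardware is only
 * touched while the interface is up. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int restart_autoneg(bool single_pf, bool autoneg_on, bool up)
{
        if (!single_pf)
                return -EOPNOTSUPP;     /* not ours to renegotiate */
        if (!autoneg_on)
                return -EINVAL;         /* forced speed, nothing to do */
        if (!up)
                return 0;               /* applied at next open */
        /* here the driver re-sends the current link settings */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               restart_autoneg(false, true, true),     /* -EOPNOTSUPP */
               restart_autoneg(true, false, true),     /* -EINVAL */
               restart_autoneg(true, true, true));     /* 0 */
        return 0;
}
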
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -1750,4 +1900,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .nway_reset = bnxt_nway_reset
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index 82bf44ab811b..cad30ddc6936 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -11,6 +11,7 @@
#define __BNXT_FW_HDR_H__
#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */
+#define BNXT_UCODE_TRAILER_SIGNATURE 0x726c7254 /* "Trlr" */
enum SUPPORTED_FAMILY {
DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */
@@ -85,7 +86,7 @@ enum SUPPORTED_MEDIA {
struct bnxt_fw_header {
__le32 signature; /* contains the constant value of
- * BNXT_Firmware_Bin_Signatures
+ * BNXT_FIRMWARE_BIN_SIGNATURE
*/
u8 flags; /* reserved for ChiMP use */
u8 code_type; /* enum SUPPORTED_CODE */
@@ -102,4 +103,17 @@ struct bnxt_fw_header {
u8 major_ver;
};
+/* Microcode and pre-boot software/firmware trailer: */
+struct bnxt_ucode_trailer {
+ u8 rsa_sig[256];
+ __le16 flags;
+ u8 version_format;
+ u8 version_length;
+ u8 version[16];
+ __le16 dir_type;
+ __le16 trailer_length;
+ __le32 sig; /* BNXT_UCODE_TRAILER_SIGNATURE */
+ __le32 chksum; /* CRC-32 */
+};
+
#endif
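
The trailer is fixed-size and sits at the very end of the image, so it is located by subtracting sizeof() from the file length, exactly as bnxt_flash_microcode() does. A layout sketch with fixed-width stand-ins for the __le types; the 288-byte size assumes no compiler padding, which holds because every member lands on its natural alignment:

/* Sketch: locating the fixed-size trailer at the tail of an image.
 * Fixed-width types stand in for the kernel's __le16/__le32; the
 * 288-byte size assumes no compiler padding, which holds because
 * every member lands on its natural alignment. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct ucode_trailer {
        uint8_t  rsa_sig[256];
        uint16_t flags;
        uint8_t  version_format;
        uint8_t  version_length;
        uint8_t  version[16];
        uint16_t dir_type;
        uint16_t trailer_length;
        uint32_t sig;           /* 0x726c7254 = "Trlr" little-endian */
        uint32_t chksum;        /* CRC-32 over the rest of the file */
};

static_assert(sizeof(struct ucode_trailer) == 288, "unexpected padding");

static const struct ucode_trailer *find_trailer(const uint8_t *fw, size_t n)
{
        if (n < sizeof(struct ucode_trailer))
                return NULL;
        return (const struct ucode_trailer *)
                (fw + n - sizeof(struct ucode_trailer));
}

int main(void)
{
        static const uint8_t image[300];

        return find_trailer(image, sizeof(image)) ? 0 : 1;
}
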
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 517567f6d651..04a96cc3498a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -39,7 +39,7 @@ struct eject_cmpl {
__le16 type;
#define EJECT_CMPL_TYPE_MASK 0x3fUL
#define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT (0x1aUL << 0)
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
__le16 len;
__le32 opaque;
__le32 v;
@@ -52,7 +52,7 @@ struct hwrm_cmpl {
__le16 type;
#define HWRM_CMPL_TYPE_MASK 0x3fUL
#define HWRM_CMPL_TYPE_SFT 0
- #define HWRM_CMPL_TYPE_HWRM_DONE (0x20UL << 0)
+ #define HWRM_CMPL_TYPE_HWRM_DONE 0x20UL
__le16 sequence_id;
__le32 unused_1;
__le32 v;
@@ -65,7 +65,7 @@ struct hwrm_fwd_req_cmpl {
__le16 req_len_type;
#define HWRM_FWD_REQ_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (0x22UL << 0)
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
#define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
__le16 source_id;
@@ -81,7 +81,7 @@ struct hwrm_fwd_resp_cmpl {
__le16 type;
#define HWRM_FWD_RESP_CMPL_TYPE_MASK 0x3fUL
#define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
- #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP (0x24UL << 0)
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
__le16 source_id;
__le16 resp_len;
__le16 unused_1;
@@ -96,25 +96,26 @@ struct hwrm_async_event_cmpl {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_V 0x1UL
@@ -130,9 +131,9 @@ struct hwrm_async_event_cmpl_link_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE (0x0UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
@@ -156,9 +157,9 @@ struct hwrm_async_event_cmpl_link_mtu_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
@@ -176,9 +177,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
@@ -200,8 +201,7 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -211,9 +211,9 @@ struct hwrm_async_event_cmpl_dcb_config_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
@@ -231,9 +231,9 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
@@ -258,9 +258,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
@@ -278,9 +278,9 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
@@ -300,9 +300,9 @@ struct hwrm_async_event_cmpl_func_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
@@ -320,9 +320,9 @@ struct hwrm_async_event_cmpl_func_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
@@ -340,9 +340,9 @@ struct hwrm_async_event_cmpl_pf_drvr_unload {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
@@ -362,9 +362,9 @@ struct hwrm_async_event_cmpl_pf_drvr_load {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD (0x21UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
@@ -384,9 +384,9 @@ struct hwrm_async_event_cmpl_vf_flr {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR (0x30UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
@@ -404,9 +404,9 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
@@ -424,9 +424,9 @@ struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
@@ -443,9 +443,9 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
__le32 event_data2;
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
@@ -465,15 +465,15 @@ struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
__le16 event_id;
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR (0xffUL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
__le32 event_data2;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
- #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
@@ -485,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.3.0 */
+/* HW Resource Manager Specification 1.5.1 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 3
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 5
+#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_STR "1.3.0"
+#define HWRM_VERSION_STR "1.5.1"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -556,8 +556,8 @@ struct cmd_nums {
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
- #define HWRM_QUEUE_BUFFERS_QCFG (0x33UL)
- #define HWRM_QUEUE_BUFFERS_CFG (0x34UL)
+ #define RESERVED2 (0x33UL)
+ #define RESERVED3 (0x34UL)
#define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
#define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
#define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
@@ -574,6 +574,7 @@ struct cmd_nums {
#define HWRM_VNIC_RSS_QCFG (0x47UL)
#define HWRM_VNIC_PLCMODES_CFG (0x48UL)
#define HWRM_VNIC_PLCMODES_QCFG (0x49UL)
+ #define HWRM_VNIC_QCAPS (0x4aUL)
#define HWRM_RING_ALLOC (0x50UL)
#define HWRM_RING_FREE (0x51UL)
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (0x52UL)
@@ -581,13 +582,15 @@ struct cmd_nums {
#define HWRM_RING_RESET (0x5eUL)
#define HWRM_RING_GRP_ALLOC (0x60UL)
#define HWRM_RING_GRP_FREE (0x61UL)
+ #define RESERVED5 (0x64UL)
+ #define RESERVED6 (0x65UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (0x70UL)
#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (0x71UL)
#define HWRM_CFA_L2_FILTER_ALLOC (0x90UL)
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED3 (0x94UL)
+ #define RESERVED4 (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
@@ -607,6 +610,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_CLR_STATS (0xb3UL)
#define HWRM_FW_RESET (0xc0UL)
#define HWRM_FW_QSTATUS (0xc1UL)
+ #define HWRM_FW_SET_TIME (0xc8UL)
+ #define HWRM_FW_GET_TIME (0xc9UL)
#define HWRM_EXEC_FWD_RESP (0xd0UL)
#define HWRM_REJECT_FWD_RESP (0xd1UL)
#define HWRM_FWD_RESP (0xd2UL)
@@ -615,11 +620,13 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_ALLOC (0xf0UL)
#define HWRM_WOL_FILTER_FREE (0xf1UL)
#define HWRM_WOL_FILTER_QCFG (0xf2UL)
+ #define HWRM_WOL_REASON_QCFG (0xf3UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_NVM_INSTALL_UPDATE (0xfff3UL)
#define HWRM_NVM_MODIFY (0xfff4UL)
#define HWRM_NVM_VERIFY_UPDATE (0xfff5UL)
#define HWRM_NVM_GET_DEV_INFO (0xfff6UL)
@@ -824,7 +831,9 @@ struct hwrm_ver_get_output {
u8 netctrl_fw_min;
u8 netctrl_fw_bld;
u8 netctrl_fw_rsvd;
- __le32 reserved1;
+ __le32 dev_caps_cfg;
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -839,9 +848,9 @@ struct hwrm_ver_get_output {
u8 chip_metal;
u8 chip_bond_id;
u8 chip_platform_type;
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC (0x0UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA (0x1UL << 0)
- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM (0x2UL << 0)
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
__le16 max_req_win_len;
__le16 max_resp_len;
__le16 def_req_timeout;
@@ -863,10 +872,10 @@ struct hwrm_func_reset_input {
#define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
__le16 vf_id;
u8 func_reset_level;
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL (0x0UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME (0x1UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN (0x2UL << 0)
- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF (0x3UL << 0)
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
u8 unused_0;
};
@@ -1028,6 +1037,10 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1047,9 +1060,8 @@ struct hwrm_func_qcaps_output {
__le32 max_mcast_filters;
__le32 max_flow_id;
__le32 max_hw_ring_grps;
+ __le16 max_sp_tx_rings;
u8 unused_0;
- u8 unused_1;
- u8 unused_2;
u8 valid;
};
@@ -1077,6 +1089,7 @@ struct hwrm_func_qcfg_output {
__le16 flags;
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
#define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1089,29 +1102,46 @@ struct hwrm_func_qcfg_output {
__le16 mru;
__le16 stat_ctx_id;
u8 port_partition_type;
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
u8 unused_0;
__le16 dflt_vnic_id;
u8 unused_1;
u8 unused_2;
__le32 min_bw;
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_QCFG_RESP_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
u8 evb_mode;
- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
u8 unused_3;
- __le16 unused_4;
+ __le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
- u8 unused_5;
- u8 unused_6;
- u8 unused_7;
+ __le16 alloc_sp_tx_rings;
+ u8 unused_4;
u8 valid;
};
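
min_bw/max_bw pack a 28-bit magnitude plus a 3-bit unit selector (bit 28 reserved) into one word; the _MASK/_SFT pairs above are the usual extract recipe, with unit 0 = Mbps and unit 1 = 1/100 percent. Decoding in miniature:

/* Sketch: unpacking the bandwidth word described above; value in
 * bits 0-27, unit in bits 29-31 (0 = Mbps, 1 = 1/100 percent). */
#include <stdint.h>
#include <stdio.h>

#define BW_VALUE_MASK   0x0fffffffu
#define BW_UNIT_MASK    0xe0000000u
#define BW_UNIT_SFT     29

int main(void)
{
        uint32_t bw = (0x1u << BW_UNIT_SFT) | 2500;     /* 25.00% */

        printf("value=%u unit=%u\n",
               bw & BW_VALUE_MASK,
               (bw & BW_UNIT_MASK) >> BW_UNIT_SFT);
        return 0;
}
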
@@ -1171,18 +1201,36 @@ struct hwrm_func_cfg_input {
__le16 dflt_vlan;
__be32 dflt_ip_addr[4];
__le32 min_bw;
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MIN_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 max_bw;
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define FUNC_CFG_REQ_MAX_BW_RSVD 0x10000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
__le16 async_event_cr;
u8 vlan_antispoof_mode;
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK (0x0UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN (0x1UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE (0x2UL << 0)
- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN (0x3UL << 0)
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
u8 allowed_vlan_pris;
u8 evb_mode;
- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB (0x0UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEB (0x1UL << 0)
- #define FUNC_CFG_REQ_EVB_MODE_VEPA (0x2UL << 0)
+ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
u8 unused_2;
__le16 num_mcast_filters;
};
@@ -1341,16 +1389,16 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
#define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
__le16 os_type;
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1415,13 +1463,13 @@ struct hwrm_func_buf_rgtr_input {
__le16 vf_id;
__le16 req_buf_num_pages;
__le16 req_buf_page_size;
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B (0x4UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
__le16 req_buf_len;
__le16 resp_buf_len;
u8 unused_0;
@@ -1473,16 +1521,16 @@ struct hwrm_func_drv_qver_output {
__le16 seq_id;
__le16 resp_len;
__le16 os_type;
- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI (0x68UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 (0x73UL << 0)
- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 (0x74UL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
u8 ver_maj;
u8 ver_min;
u8 ver_upd;
@@ -1528,44 +1576,44 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_duplex;
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1582,12 +1630,12 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
@@ -1641,25 +1689,25 @@ struct hwrm_port_phy_qcfg_output {
__le16 seq_id;
__le16 resp_len;
u8 link;
- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_LINK (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
u8 unused_0;
__le16 link_speed;
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
u8 duplex;
- #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL
u8 pause;
#define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
@@ -1679,39 +1727,39 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
u8 auto_mode;
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB (0x14UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB (0x19UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB (0x64UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB (0xc8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1728,46 +1776,46 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
u8 lpbk;
- #define PORT_PHY_QCFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
u8 module_status;
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
u8 media_type;
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
u8 xcvr_pkg_type;
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
@@ -1796,11 +1844,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
@@ -1859,7 +1907,7 @@ struct hwrm_port_mac_cfg_input {
__le64 resp_addr;
__le32 flags;
#define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
@@ -1868,28 +1916,50 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
#define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_IVLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
__le16 port_id;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE (0x2UL << 0)
- u8 ivlan_pri2cos_map_pri;
- u8 lcos_map_pri;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ u8 vlan_pri2cos_map_pri;
+ u8 reserved1;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
__le16 rx_ts_capture_ptp_msg_type;
__le16 tx_ts_capture_ptp_msg_type;
- __le32 unused_0;
+ u8 cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ u8 unused_0[3];
};
/* Output (16 bytes) */
@@ -1902,9 +1972,9 @@ struct hwrm_port_mac_cfg_output {
__le16 mtu;
u8 ipg;
u8 lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE (0x0UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL (0x1UL << 0)
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE (0x2UL << 0)
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
u8 unused_0;
u8 valid;
};
@@ -2163,8 +2233,8 @@ struct hwrm_queue_qportcfg_input {
__le64 resp_addr;
__le32 flags;
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
@@ -2179,50 +2249,51 @@ struct hwrm_queue_qportcfg_output {
u8 max_configurable_queues;
u8 max_configurable_lossless_queues;
u8 queue_cfg_allowed;
- u8 queue_buffers_cfg_allowed;
+ u8 queue_cfg_info;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
u8 queue_pfcenable_cfg_allowed;
u8 queue_pri2cos_cfg_allowed;
u8 queue_cos2bw_cfg_allowed;
u8 queue_id0;
u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id1;
u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id2;
u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id3;
u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id4;
u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id5;
u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id6;
u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 queue_id7;
u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 valid;
};
@@ -2235,19 +2306,21 @@ struct hwrm_queue_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
+ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
__le32 queue_id;
__le32 dflt_len;
u8 service_profile;
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY (0x0UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS (0x1UL << 0)
- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN (0xffUL << 0)
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
u8 unused_0[7];
};
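/*
 * Illustrative sketch, not part of the patch: the hunk above widens the
 * hwrm_queue_cfg PATH flag from a single bit to a two-bit field with
 * MASK/SFT macros and a new BIDIR value. A caller would now compose the
 * field like this; the helper name and kernel context are assumptions.
 */
static void example_queue_cfg_path(struct hwrm_queue_cfg_input *req)
{
	u32 flags;

	/* PATH occupies bits [1:0]; select the new bidirectional value. */
	flags = (QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
		 << QUEUE_CFG_REQ_FLAGS_PATH_SFT) &
		QUEUE_CFG_REQ_FLAGS_PATH_MASK;
	req->flags = cpu_to_le32(flags);
}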
@@ -2264,50 +2337,6 @@ struct hwrm_queue_cfg_output {
u8 valid;
};
-/* hwrm_queue_buffers_cfg */
-/* Input (56 bytes) */
-struct hwrm_queue_buffers_cfg_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le32 flags;
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
- __le32 enables;
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XOFF 0x4UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_XON 0x8UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_FULL 0x10UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_NOTFULL 0x20UL
- #define QUEUE_BUFFERS_CFG_REQ_ENABLES_MAX 0x40UL
- __le32 queue_id;
- __le32 reserved;
- __le32 shared;
- __le32 xoff;
- __le32 xon;
- __le32 full;
- __le32 notfull;
- __le32 max;
-};
-
-/* Output (16 bytes) */
-struct hwrm_queue_buffers_cfg_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_queue_pfcenable_cfg */
/* Input (24 bytes) */
struct hwrm_queue_pfcenable_cfg_input {
@@ -2351,12 +2380,22 @@ struct hwrm_queue_pri2cos_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
__le32 enables;
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
u8 port_id;
u8 pri0_cos_queue_id;
u8 pri1_cos_queue_id;
@@ -2404,82 +2443,226 @@ struct hwrm_queue_cos2bw_cfg_input {
u8 queue_id0;
u8 unused_0;
__le32 queue_id0_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id0_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id0_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
__le32 queue_id1_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id1_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id1_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
__le32 queue_id2_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id2_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id2_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
__le32 queue_id3_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id3_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id3_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
__le32 queue_id4_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id4_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id4_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
__le32 queue_id5_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id5_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id5_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
__le32 queue_id6_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id6_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id6_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
__le32 queue_id7_min_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
__le32 queue_id7_max_bw;
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD 0x10000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
u8 queue_id7_tsa_assign;
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
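/*
 * Illustrative sketch, not part of the patch: encoding one of the new
 * COS2BW min/max bandwidth words. The bandwidth value occupies bits
 * [27:0] and the unit bits [31:29]; assuming PERCENT1_100 denotes
 * hundredths of a percent, a 25% guarantee would be encoded as 2500.
 * The helper name is hypothetical.
 */
static __le32 example_cos2bw_encode(u32 hundredths_of_percent)
{
	u32 bw;

	bw = (hundredths_of_percent <<
	      QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT) &
	     QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
	bw |= QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100;
	return cpu_to_le32(bw);
}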
@@ -2563,6 +2746,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
#define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2615,18 +2799,18 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
__le16 vnic_id;
__le16 max_agg_segs;
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX (0x1fUL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
__le16 max_aggs;
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 (0x0UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 (0x1UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 (0x2UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 (0x3UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 (0x4UL << 0)
- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX (0x7UL << 0)
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
u8 unused_0;
u8 unused_1;
__le32 max_agg_timer;
@@ -2780,15 +2964,15 @@ struct hwrm_ring_alloc_input {
__le64 resp_addr;
__le32 enables;
#define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
- #define RING_ALLOC_REQ_ENABLES_RESERVED2 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
#define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
#define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
#define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
#define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
u8 ring_type;
- #define RING_ALLOC_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_ALLOC_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 unused_1;
__le64 page_tbl_addr;
@@ -2804,18 +2988,36 @@ struct hwrm_ring_alloc_input {
u8 unused_4;
u8 unused_5;
__le32 reserved1;
- __le16 reserved2;
+ __le16 ring_arb_cfg;
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
u8 unused_6;
u8 unused_7;
__le32 reserved3;
__le32 stat_ctx_id;
__le32 reserved4;
__le32 max_bw;
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+ #define RING_ALLOC_REQ_MAX_BW_RSVD 0x10000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
u8 int_mode;
- #define RING_ALLOC_REQ_INT_MODE_LEGACY (0x0UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_RSVD (0x1UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_MSIX (0x2UL << 0)
- #define RING_ALLOC_REQ_INT_MODE_POLL (0x3UL << 0)
+ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
u8 unused_8[3];
};
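/*
 * Illustrative sketch, not part of the patch: the hunk above turns
 * reserved2 into ring_arb_cfg, with the arbitration policy in bits
 * [3:0] and its parameter (e.g. a WFQ weight) in bits [15:8]. The
 * weight value 16 is arbitrary and the helper name is hypothetical.
 */
static void example_ring_arb_cfg(struct hwrm_ring_alloc_input *req)
{
	u16 arb;

	arb = RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ;
	arb |= (16 << RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT) &
	       RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK;
	req->ring_arb_cfg = cpu_to_le16(arb);
	/* the field is only honored when its enable bit is set */
	req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_RING_ARB_CFG);
}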
@@ -2842,9 +3044,9 @@ struct hwrm_ring_free_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_FREE_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_FREE_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_FREE_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_FREE_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -2942,9 +3144,9 @@ struct hwrm_ring_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 ring_type;
- #define RING_RESET_REQ_RING_TYPE_CMPL (0x0UL << 0)
- #define RING_RESET_REQ_RING_TYPE_TX (0x1UL << 0)
- #define RING_RESET_REQ_RING_TYPE_RX (0x2UL << 0)
+ #define RING_RESET_REQ_RING_TYPE_CMPL 0x0UL
+ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
u8 unused_0;
__le16 ring_id;
__le32 unused_1;
@@ -3068,36 +3270,36 @@ struct hwrm_cfa_l2_filter_alloc_input {
__le16 t_l2_ivlan;
__le16 t_l2_ivlan_mask;
u8 src_type;
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG (0x7UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
u8 unused_6;
__le32 src_id;
u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_7;
__le16 dst_id;
__le16 mirror_vnic_id;
u8 pri_hint;
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER (0x1UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER (0x2UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX (0x3UL << 0)
- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN (0x4UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
u8 unused_8;
__le32 unused_9;
__le64 l2_filter_id_hint;
@@ -3246,16 +3448,16 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 unused_0;
__le32 vni;
__le32 dst_vnic_id;
@@ -3311,14 +3513,14 @@ struct hwrm_cfa_encap_record_alloc_input {
__le32 flags;
#define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
u8 encap_type;
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN (0x1UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE (0x2UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE (0x3UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP (0x4UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE (0x5UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS (0x6UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN (0x7UL << 0)
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE (0x8UL << 0)
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
u8 unused_0;
__le16 unused_1;
__le32 encap_data[16];
@@ -3397,32 +3599,32 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
u8 src_macaddr[6];
__be16 ethertype;
u8 ip_addr_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 (0x6UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
u8 ip_protocol;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP (0x11UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP (0x4UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS (0x6UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT (0x7UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE (0x8UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL (0xffUL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
u8 pri_hint;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER (0x0UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE (0x1UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW (0x2UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST (0x3UL << 0)
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST (0x4UL << 0)
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
__be32 src_ipaddr[4];
__be32 src_ipaddr_mask[4];
__be32 dst_ipaddr[4];
@@ -3511,8 +3713,8 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0[7];
};
@@ -3539,8 +3741,8 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__be16 tunnel_dst_port_val;
__le32 unused_1;
@@ -3570,8 +3772,8 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0)
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0)
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
u8 unused_0;
__le16 tunnel_dst_port_id;
__le32 unused_1;
@@ -3720,15 +3922,15 @@ struct hwrm_fw_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 selfrst_status;
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
__le16 unused_0[3];
};
@@ -3739,9 +3941,9 @@ struct hwrm_fw_reset_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3759,11 +3961,11 @@ struct hwrm_fw_qstatus_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
u8 unused_0[7];
};
@@ -3774,9 +3976,9 @@ struct hwrm_fw_qstatus_output {
__le16 seq_id;
__le16 resp_len;
u8 selfrst_status;
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
u8 unused_0;
__le16 unused_1;
u8 unused_2;
@@ -3785,6 +3987,42 @@ struct hwrm_fw_qstatus_output {
u8 valid;
};
+/* hwrm_fw_set_time */
+/* Input (32 bytes) */
+struct hwrm_fw_set_time_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 year;
+ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 unused_0;
+ __le16 millisecond;
+ __le16 zone;
+ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_set_time_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
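/*
 * Illustrative sketch, not part of the patch: populating the new
 * hwrm_fw_set_time request from broken-down UTC time. The field order
 * and widths follow the structure added above; the helper name is
 * hypothetical.
 */
static void example_fw_set_time(struct hwrm_fw_set_time_input *req,
				u16 year, u8 month, u8 day,
				u8 hour, u8 minute, u8 second)
{
	req->year = cpu_to_le16(year);	/* 0 = FW_SET_TIME_REQ_YEAR_UNKNOWN */
	req->month = month;
	req->day = day;
	req->hour = hour;
	req->minute = minute;
	req->second = second;
	req->millisecond = cpu_to_le16(0);
	req->zone = cpu_to_le16(FW_SET_TIME_REQ_ZONE_UTC);
}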
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
@@ -3921,32 +4159,6 @@ struct hwrm_temp_monitor_query_output {
u8 valid;
};
-/* hwrm_nvm_raw_write_blk */
-/* Input (32 bytes) */
-struct hwrm_nvm_raw_write_blk_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_src_addr;
- __le32 dest_addr;
- __le32 len;
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_raw_write_blk_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- __le32 unused_0;
- u8 unused_1;
- u8 unused_2;
- u8 unused_3;
- u8 valid;
-};
-
/* hwrm_nvm_read */
/* Input (40 bytes) */
struct hwrm_nvm_read_input {
@@ -4132,9 +4344,9 @@ struct hwrm_nvm_find_dir_entry_input {
u8 opt_ordinal;
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL
#define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ (0x0UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE (0x1UL << 0)
- #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT (0x2UL << 0)
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL
+ #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL
u8 unused_1[3];
};
@@ -4266,4 +4478,41 @@ struct hwrm_nvm_verify_update_output {
u8 valid;
};
+/* hwrm_nvm_install_update */
+/* Input (24 bytes) */
+struct hwrm_nvm_install_update_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 install_type;
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
+ #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
+ __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_nvm_install_update_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 installed_items;
+ u8 result;
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ u8 problem_item;
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
+ u8 reset_required;
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
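/*
 * Illustrative sketch, not part of the patch: interpreting the new
 * hwrm_nvm_install_update response using only the fields defined
 * above. The helper name and return convention are assumptions.
 */
static int example_check_install(struct hwrm_nvm_install_update_output *resp)
{
	if (resp->result != NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS)
		return -EIO;
	if (resp->reset_required != NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE)
		/* a PCI or power reset may be needed before the update applies */
		return 1;
	return 0;
}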
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 50d2007a2640..ec6cd18842c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -19,6 +19,45 @@
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
+static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
+ struct bnxt_vf_info *vf, u16 event_id)
+{
+ struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_fwd_async_event_cmpl_input req = {0};
+ struct hwrm_async_event_cmpl *async_cmpl;
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+ if (vf)
+ req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+ else
+ /* broadcast this async event to all VFs */
+ req.encap_async_event_target_id = cpu_to_le16(0xffff);
+ async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+ async_cmpl->type =
+ cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
+ async_cmpl->event_id = cpu_to_le16(event_id);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ if (rc) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
+ rc);
+ goto fwd_async_event_cmpl_exit;
+ }
+
+ if (resp->error_code) {
+ netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
+ resp->error_code);
+ rc = -1;
+ }
+
+fwd_async_event_cmpl_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
@@ -135,7 +174,8 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
+int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
{
struct hwrm_func_cfg_input req = {0};
struct bnxt *bp = netdev_priv(dev);
@@ -146,6 +186,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
if (bp->hwrm_spec_code < 0x10201)
return -ENOTSUPP;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -243,8 +286,9 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
rc = -EINVAL;
break;
}
- /* CHIMP TODO: send msg to VF to update new link state */
-
+ if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+ rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
return rc;
}
@@ -525,46 +569,6 @@ err_out1:
return rc;
}
-static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
- struct bnxt_vf_info *vf,
- u16 event_id)
-{
- int rc = 0;
- struct hwrm_fwd_async_event_cmpl_input req = {0};
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
- struct hwrm_async_event_cmpl *async_cmpl;
-
- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
- if (vf)
- req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
- else
- /* broadcast this async event to all VFs */
- req.encap_async_event_target_id = cpu_to_le16(0xffff);
- async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
- async_cmpl->type =
- cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
- async_cmpl->event_id = cpu_to_le16(event_id);
-
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
- rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
- return rc;
-}
-
void bnxt_sriov_disable(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 0392670ab49c..1ab72e4820af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -12,7 +12,7 @@
int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
int bnxt_set_vf_mac(struct net_device *, int, u8 *);
-int bnxt_set_vf_vlan(struct net_device *, int, u16, u8);
+int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 46f904392f88..2013474bfdbf 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -450,8 +450,8 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static int bcmgenet_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
if (!netif_running(dev))
return -EINVAL;
@@ -459,11 +459,11 @@ static int bcmgenet_get_settings(struct net_device *dev,
if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_gset(dev->phydev, cmd);
+ return phy_ethtool_ksettings_get(dev->phydev, cmd);
}
-static int bcmgenet_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bcmgenet_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
if (!netif_running(dev))
return -EINVAL;
@@ -471,7 +471,7 @@ static int bcmgenet_set_settings(struct net_device *dev,
if (!dev->phydev)
return -ENODEV;
- return phy_ethtool_sset(dev->phydev, cmd);
+ return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
static int bcmgenet_set_rx_csum(struct net_device *dev,
@@ -977,8 +977,6 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
- .get_settings = bcmgenet_get_settings,
- .set_settings = bcmgenet_set_settings,
.get_drvinfo = bcmgenet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = bcmgenet_get_msglevel,
@@ -990,6 +988,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.nway_reset = bcmgenet_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
+ .get_link_ksettings = bcmgenet_get_link_ksettings,
+ .set_link_ksettings = bcmgenet_set_link_ksettings,
};
/* Power down the unimac, based on mode. */
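The hunks above follow the standard migration from the legacy ethtool get_settings/set_settings ops to the link_ksettings pair, which replaces the 32-bit masks of struct ethtool_cmd with link-mode bitmaps. For a phylib-backed driver the ops reduce to thin wrappers; a minimal sketch (illustrative, not bcmgenet code -- the example_ names are hypothetical):

/* Sketch: phylib does all the work for the ksettings ops. */
static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	if (!dev->phydev)
		return -ENODEV;
	return phy_ethtool_ksettings_get(dev->phydev, cmd);
}

static int example_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	if (!dev->phydev)
		return -ENODEV;
	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}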
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 659261218d9f..a2551bcd1027 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14012,6 +14012,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
(!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+ (!ec->tx_coalesce_usecs) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
(ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14022,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
(ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
return -EINVAL;
- /* No rx interrupts will be generated if both are zero */
- if ((ec->rx_coalesce_usecs == 0) &&
- (ec->rx_max_coalesced_frames == 0))
- return -EINVAL;
-
- /* No tx interrupts will be generated if both are zero */
- if ((ec->tx_coalesce_usecs == 0) &&
- (ec->tx_max_coalesced_frames == 0))
- return -EINVAL;
-
/* Only copy relevant parameters, ignore all others. */
tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
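The net effect of this hunk: tx_coalesce_usecs must now be nonzero, mirroring the existing rx check, which makes the two "both are zero" rejections redundant so they are dropped. A condensed sketch of the surviving constraint (illustrative only, not tg3 code):

/* Both directions now need a nonzero tick value on their own. */
static bool coal_usecs_valid(const struct ethtool_coalesce *ec)
{
	return ec->rx_coalesce_usecs && ec->tx_coalesce_usecs;
}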
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 0e4fdc3dd729..31f61a744d66 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -31,15 +31,10 @@
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5
-#define BNAD_ETHTOOL_STATS_NUM \
- (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
- sizeof(struct bnad_drv_stats) / sizeof(u64) + \
- offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_dropped",
"multicast",
"collisions",
-
"rx_length_errors",
- "rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
- "rx_fifo_errors",
- "rx_missed_errors",
-
- "tx_aborted_errors",
- "tx_carrier_errors",
"tx_fifo_errors",
- "tx_heartbeat_errors",
- "tx_window_errors",
-
- "rx_compressed",
- "tx_compressed",
"netif_queue_stop",
"netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"fc_tx_fid_parity_errors",
};
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_allocbuf_failed", q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
sprintf(string, "rxq%d_allocbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index",
q_num);
string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *buf)
{
struct bnad *bnad = netdev_priv(netdev);
- int i, j, bi;
+ int i, j, bi = 0;
unsigned long flags;
- struct rtnl_link_stats64 *net_stats64;
+ struct rtnl_link_stats64 net_stats64;
u64 *stats64;
u32 bmap;
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
* under the same lock
*/
spin_lock_irqsave(&bnad->bna_lock, flags);
- bi = 0;
- memset(buf, 0, stats->n_stats * sizeof(u64));
-
- net_stats64 = (struct rtnl_link_stats64 *)buf;
- bnad_netdev_qstats_fill(bnad, net_stats64);
- bnad_netdev_hwstats_fill(bnad, net_stats64);
- bi = sizeof(*net_stats64) / sizeof(u64);
+ memset(&net_stats64, 0, sizeof(net_stats64));
+ bnad_netdev_qstats_fill(bnad, &net_stats64);
+ bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+ buf[bi++] = net_stats64.rx_packets;
+ buf[bi++] = net_stats64.tx_packets;
+ buf[bi++] = net_stats64.rx_bytes;
+ buf[bi++] = net_stats64.tx_bytes;
+ buf[bi++] = net_stats64.rx_errors;
+ buf[bi++] = net_stats64.tx_errors;
+ buf[bi++] = net_stats64.rx_dropped;
+ buf[bi++] = net_stats64.tx_dropped;
+ buf[bi++] = net_stats64.multicast;
+ buf[bi++] = net_stats64.collisions;
+ buf[bi++] = net_stats64.rx_length_errors;
+ buf[bi++] = net_stats64.rx_crc_errors;
+ buf[bi++] = net_stats64.rx_frame_errors;
+ buf[bi++] = net_stats64.tx_fifo_errors;
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 32568392b9f9..63144bb413d1 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1339,6 +1339,24 @@ dma_error:
return 0;
}
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+ /* no change for packets without checksum offloading */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* make sure we can modify the header */
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ /* initialize checksum field
+ * This is required - at least for Zynq, which otherwise calculates
+ * wrong UDP header checksums for UDP packets with UDP data len <=2
+ */
+ *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+ return 0;
+}
+
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
@@ -1378,6 +1396,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
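For reference, macb_clear_csum() zeroes the transport checksum field that CHECKSUM_PARTIAL skbs carry; the stack records where that field lives via csum_start/csum_offset. A sketch of the relationship (illustrative, not macb code):

/* The field macb_clear_csum() zeroes: csum_offset bytes past the
 * point where hardware checksumming starts -- for UDP, the 'check'
 * member of the UDP header.
 */
static __sum16 *csum_field(struct sk_buff *skb)
{
	return (__sum16 *)(skb_checksum_start(skb) + skb->csum_offset);
}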
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dd63f961827a..30426109711c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -258,6 +258,7 @@ struct nicvf {
u8 sqs_id;
bool sqs_mode;
bool hw_tso;
+ bool t88;
/* Receive buffer alloc */
u32 rb_page_offset;
@@ -304,7 +305,7 @@ struct nicvf {
bool msix_enabled;
u8 num_vec;
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
- char irq_name[NIC_VF_MSIX_VECTORS][20];
+ char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 25618d203931..2bbf4cbf08b2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -282,9 +282,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
u16 sdevid;
u64 lmac_cfg;
- /* Max value that can be set is 60 */
- if (size > 60)
- size = 60;
+ /* There is an issue in HW wherein, while sending GSO sized
+ * pkts as part of TSO, if pkt len falls below this size the NIC
+ * will zero-pad the packet and also update the IP total length.
+ * Hence set this value to less than the min pkt size of
+ * MAC+IP+TCP headers; BGX will do the padding to transmit a 64 byte pkt.
+ */
+ if (size > 52)
+ size = 52;
pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
/* 81xx's RGX has only one LMAC */
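The 52-byte cap can be sanity-checked against the smallest realistic header stack (assuming an untagged IPv4/TCP frame; a back-of-envelope note, not driver code):

/* 14 (Ethernet) + 20 (IPv4) + 20 (TCP) = 54 bytes of headers, so a
 * pad threshold of 52 never zero-pads into real header bytes; BGX
 * then pads the frame up to the 64-byte minimum on the wire.
 */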
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 06c014edf762..45a13f718863 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -516,12 +516,14 @@ static int nicvf_init_resources(struct nicvf *nic)
static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct cmp_queue *cq,
struct cqe_send_t *cqe_tx,
- int cqe_type, int budget)
+ int cqe_type, int budget,
+ unsigned int *tx_pkts, unsigned int *tx_bytes)
{
struct sk_buff *skb = NULL;
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
sq = &nic->qs->sq[cqe_tx->sq_idx];
@@ -536,17 +538,23 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
- /* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and free them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ }
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
+ (*tx_pkts)++;
+ *tx_bytes += skb->len;
napi_consume_skb(skb, budget);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
- /* In case of HW TSO, HW sends a CQE for each segment of a TSO
- * packet instead of a single CQE for the whole TSO packet
- * transmitted. Each of this CQE points to the same SQE, so
- * avoid freeing same SQE multiple times.
+ /* In case of SW TSO on 88xx, only the last segment will have
+ * an SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -657,6 +665,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
struct cmp_queue *cq = &qs->cq[cq_idx];
struct cqe_rx_t *cq_desc;
struct netdev_queue *txq;
+ unsigned int tx_pkts = 0, tx_bytes = 0;
spin_lock_bh(&cq->lock);
loop:
@@ -696,7 +705,7 @@ loop:
case CQE_TYPE_SEND:
nicvf_snd_pkt_handler(netdev, cq,
(void *)cq_desc, CQE_TYPE_SEND,
- budget);
+ budget, &tx_pkts, &tx_bytes);
tx_done++;
break;
case CQE_TYPE_INVALID:
@@ -725,6 +734,9 @@ done:
netdev = nic->pnicvf->netdev;
txq = netdev_get_tx_queue(netdev,
nicvf_netdev_qidx(nic, cq_idx));
+ if (tx_pkts)
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
@@ -1155,6 +1167,9 @@ int nicvf_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
/* Free resources */
nicvf_config_data_transfer(nic, false);
@@ -1516,6 +1531,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
+ u16 sdevid;
err = pci_enable_device(pdev);
if (err) {
@@ -1588,6 +1604,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if (sdevid == 0xA134)
+ nic->t88 = true;
+
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
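The tx_pkts/tx_bytes plumbing above, together with netdev_tx_completed_queue() in the CQ handler and netdev_tx_reset_queue() in nicvf_stop(), wires the driver into byte queue limits (BQL); the matching netdev_tx_sent_queue() lands in nicvf_queues.c below. The three-call contract, sketched out of context (illustrative, not nicvf code):

/* BQL contract sketch:
 *  1. account bytes as descriptors are queued (xmit path),
 *  2. complete them when the device reports transmission (IRQ/NAPI),
 *  3. reset the counters whenever the queue is torn down (ndo_stop).
 */
netdev_tx_sent_queue(txq, skb->len);
netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
netdev_tx_reset_queue(txq);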
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 7d90856c9783..a4fc50155881 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -953,6 +953,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
return num_edescs + sh->gso_segs;
}
+#define POST_CQE_DESC_COUNT 2
+
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
@@ -963,6 +965,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
return subdesc_cnt;
}
+ /* Dummy descriptors to get TSO pkt completion notification */
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+ subdesc_cnt += POST_CQE_DESC_COUNT;
+
if (skb_shinfo(skb)->nr_frags)
subdesc_cnt += skb_shinfo(skb)->nr_frags;
@@ -980,14 +986,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
struct sq_hdr_subdesc *hdr;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
- sq->skbuff[qentry] = (u64)skb;
-
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
- /* Enable notification via CQE after processing SQE */
- hdr->post_cqe = 1;
- /* No of subdescriptors following this */
- hdr->subdesc_cnt = subdesc_cnt;
+
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+ * segment transmitted on 88xx.
+ */
+ hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+ } else {
+ sq->skbuff[qentry] = (u64)skb;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ }
hdr->tot_len = len;
/* Offload checksum calculation to HW */
@@ -1038,6 +1051,55 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
gather->addr = data;
}
+/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of
+ * the TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+ int tso_sqe, struct sk_buff *skb)
+{
+ struct sq_imm_subdesc *imm;
+ struct sq_hdr_subdesc *hdr;
+
+ sq->skbuff[qentry] = (u64)skb;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* There is no packet to transmit here */
+ hdr->dont_send = 1;
+ hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+ hdr->tot_len = 1;
+ /* Actual TSO header SQE index, needed for cleanup */
+ hdr->rsvd2 = tso_sqe;
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(imm, 0, SND_QUEUE_DESC_SIZE);
+ imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+ imm->len = 1;
+}
+
+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+ int sq_num, int desc_cnt)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+ skb_get_queue_mapping(skb));
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, desc_cnt);
+}
+
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1097,12 +1159,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
/* Save SKB in the last segment for freeing */
sq->skbuff[hdr_qentry] = (u64)skb;
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- /* Inform HW to xmit all TSO segments */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, desc_cnt);
nic->drv_stats.tx_tso++;
return 1;
}
@@ -1111,7 +1169,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
int i, size;
- int subdesc_cnt;
+ int subdesc_cnt, tso_sqe = 0;
int sq_num, qentry;
struct queue_set *qs;
struct snd_queue *sq;
@@ -1146,6 +1204,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
+ tso_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1169,12 +1228,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
}
doorbell:
- /* make sure all memory stores are done before ringing doorbell */
- smp_wmb();
+ if (nic->t88 && skb_shinfo(skb)->gso_size) {
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ }
+
+ nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
- /* Inform HW to xmit new packet */
- nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
- sq_num, subdesc_cnt);
return 1;
append_fail:
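Putting the pieces together: with HW TSO on 88xx the hardware would post one CQE per emitted segment, so the real TSO header descriptor is written with post_cqe = 0 and a two-descriptor trailer is appended whose only job is to raise a single completion. The resulting send-queue layout (sketch):

/*   [HDR post_cqe=0, subdesc_cnt=N] [GATHER] ... [GATHER]
 *   [HDR dont_send=1 post_cqe=1, rsvd2 = index of first HDR]
 *   [IMMEDIATE len=1]
 *
 * The completion handler follows rsvd2 back to the real descriptors
 * and frees them (see nicvf_snd_pkt_handler() above).
 */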
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 246129650967..c6b71f656992 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
-cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 45955691edc7..28e653e9c856 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -422,8 +422,8 @@ struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -437,11 +437,6 @@ enum {
MAX_ETH_QSETS = 32, /* # of Ethernet Tx/Rx queue sets */
MAX_OFLD_QSETS = 16, /* # of offload Tx, iscsi Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
- MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
- MAX_RDMA_CIQS = 32, /* # of RDMA concentrator IQs */
-
- /* # of streaming iSCSIT Rx queues */
- MAX_ISCSIT_QUEUES = MAX_OFLD_QSETS,
};
enum {
@@ -458,8 +453,7 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
- MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES +
- MAX_RDMA_CIQS + MAX_ISCSIT_QUEUES + INGQ_EXTRAS,
+ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
};
struct adapter;
@@ -704,10 +698,6 @@ struct sge {
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
- struct sge_ofld_rxq iscsirxq[MAX_OFLD_QSETS];
- struct sge_ofld_rxq iscsitrxq[MAX_ISCSIT_QUEUES];
- struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
- struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_uld_rxq_info **uld_rxq_info;
@@ -717,15 +707,8 @@ struct sge {
u16 max_ethqsets; /* # of available Ethernet queue sets */
u16 ethqsets; /* # of active Ethernet queue sets */
u16 ethtxq_rover; /* Tx queue to clean up next */
- u16 iscsiqsets; /* # of active iSCSI queue sets */
- u16 niscsitq; /* # of available iSCST Rx queues */
- u16 rdmaqs; /* # of available RDMA Rx queues */
- u16 rdmaciqs; /* # of available RDMA concentrator IQs */
+ u16 ofldqsets; /* # of active ofld queue sets */
u16 nqs_per_uld; /* # of Rx queues per ULD */
- u16 iscsi_rxq[MAX_OFLD_QSETS];
- u16 iscsit_rxq[MAX_ISCSIT_QUEUES];
- u16 rdma_rxq[MAX_RDMA_QUEUES];
- u16 rdma_ciq[MAX_RDMA_CIQS];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -749,10 +732,7 @@ struct sge {
};
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
-#define for_each_iscsirxq(sge, i) for (i = 0; i < (sge)->iscsiqsets; i++)
-#define for_each_iscsitrxq(sge, i) for (i = 0; i < (sge)->niscsitq; i++)
-#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
-#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
+#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
struct l2t_data;
@@ -786,6 +766,7 @@ struct uld_msix_bmap {
struct uld_msix_info {
unsigned short vec;
char desc[IFNAMSIZ + 10];
+ unsigned int idx;
};
struct vf_info {
@@ -818,7 +799,7 @@ struct adapter {
} msix_info[MAX_INGQ + 1];
struct uld_msix_info *msix_info_ulds; /* msix info for uld's */
struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */
- unsigned int msi_idx;
+ int msi_idx;
struct doorbell_stats db_stats;
struct sge sge;
@@ -836,9 +817,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
- struct cxgb4_pci_uld_info *uld;
+ struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
+ unsigned int num_ofld_uld;
struct list_head list_node;
struct list_head rcu_node;
struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */
@@ -858,6 +840,8 @@ struct adapter {
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;
+ struct mutex uld_mutex;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -867,6 +851,9 @@ struct adapter {
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
+
+ /* TC u32 offload */
+ struct cxgb4_tc_u32_table *tc_u32;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1041,6 +1028,32 @@ enum {
VLAN_REWRITE
};
+/* Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command. The use of bit-field structure elements is purely to
+ * remind ourselves of the field size limitations and save memory in the case
+ * where the filter table is large.
+ */
+struct filter_entry {
+ /* Administrative fields for filter. */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+
+ u32 pending:1; /* filter action is pending firmware reply */
+ u32 smtidx:8; /* Source MAC Table index for smac */
+ struct filter_ctx *ctx; /* Caller's completion hook */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+ struct net_device *dev; /* Associated net device */
+ u32 tid; /* This will store the actual tid */
+
+ /* The filter itself. Most of this is a straight copy of information
+ * provided by the extended ioctl(). Some fields are translated to
+ * internal forms -- for instance the Ingress Queue ID passed in from
+ * the ioctl() is translated into the Absolute Ingress Queue ID.
+ */
+ struct ch_filter_specification fs;
+};
+
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
@@ -1051,6 +1064,11 @@ static inline int is_pci_uld(const struct adapter *adap)
return adap->params.crypto;
}
+static inline int is_uld(const struct adapter *adap)
+{
+ return (adap->params.offload || adap->params.crypto);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -1277,6 +1295,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
struct net_device *dev, unsigned int iqid,
unsigned int cmplqid);
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid);
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid);
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
@@ -1635,7 +1655,9 @@ void t4_idma_monitor(struct adapter *adapter,
int hz, int ticks);
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
unsigned int naddr, u8 *addr);
-void uld_mem_free(struct adapter *adap);
-int uld_mem_alloc(struct adapter *adap);
+void t4_uld_mem_free(struct adapter *adap);
+int t4_uld_mem_alloc(struct adapter *adap);
+void t4_uld_clean_up(struct adapter *adap);
+void t4_register_netevent_notifier(void);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 91fb50850fff..20455d082cb8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2432,17 +2432,11 @@ static int sge_qinfo_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
- int iscsi_entries = DIV_ROUND_UP(adap->sge.iscsiqsets, 4);
- int iscsit_entries = DIV_ROUND_UP(adap->sge.niscsitq, 4);
- int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
- int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
+ int ofld_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
int i, r = (uintptr_t)v - 1;
- int iscsi_idx = r - eth_entries;
- int iscsit_idx = iscsi_idx - iscsi_entries;
- int rdma_idx = iscsit_idx - iscsit_entries;
- int ciq_idx = rdma_idx - rdma_entries;
- int ctrl_idx = ciq_idx - ciq_entries;
+ int ofld_idx = r - eth_entries;
+ int ctrl_idx = ofld_idx - ofld_entries;
int fq_idx = ctrl_idx - ctrl_entries;
if (r)
@@ -2518,119 +2512,17 @@ do { \
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- } else if (iscsi_idx < iscsi_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsirxq[iscsi_idx * 4];
+ } else if (ofld_idx < ofld_entries) {
const struct sge_ofld_txq *tx =
- &adap->sge.ofldtxq[iscsi_idx * 4];
- int n = min(4, adap->sge.iscsiqsets - 4 * iscsi_idx);
+ &adap->sge.ofldtxq[ofld_idx * 4];
+ int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
- S("QType:", "iSCSI");
+ S("QType:", "OFLD-Txq");
T("TxQ ID:", q.cntxt_id);
T("TxQ size:", q.size);
T("TxQ inuse:", q.in_use);
T("TxQ CIDX:", q.cidx);
T("TxQ PIDX:", q.pidx);
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (iscsit_idx < iscsit_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.iscsitrxq[iscsit_idx * 4];
- int n = min(4, adap->sge.niscsitq - 4 * iscsit_idx);
-
- S("QType:", "iSCSIT");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (rdma_idx < rdma_entries) {
- const struct sge_ofld_rxq *rx =
- &adap->sge.rdmarxq[rdma_idx * 4];
- int n = min(4, adap->sge.rdmaqs - 4 * rdma_idx);
-
- S("QType:", "RDMA-CPL");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- R("FL ID:", fl.cntxt_id);
- R("FL size:", fl.size - 8);
- R("FL pend:", fl.pend_cred);
- R("FL avail:", fl.avail);
- R("FL PIDX:", fl.pidx);
- R("FL CIDX:", fl.cidx);
- RL("RxPackets:", stats.pkts);
- RL("RxImmPkts:", stats.imm);
- RL("RxNoMem:", stats.nomem);
- RL("FLAllocErr:", fl.alloc_failed);
- RL("FLLrgAlcErr:", fl.large_alloc_failed);
- RL("FLMapErr:", fl.mapping_err);
- RL("FLLow:", fl.low);
- RL("FLStarving:", fl.starving);
-
- } else if (ciq_idx < ciq_entries) {
- const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
- int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
-
- S("QType:", "RDMA-CIQ");
- S("Interface:",
- rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
- R("RspQ ID:", rspq.abs_id);
- R("RspQ size:", rspq.size);
- R("RspQE size:", rspq.iqe_len);
- R("RspQ CIDX:", rspq.cidx);
- R("RspQ Gen:", rspq.gen);
- S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
- S3("u", "Intr pktcnt:",
- adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
- RL("RxAN:", stats.an);
- RL("RxNoMem:", stats.nomem);
} else if (ctrl_idx < ctrl_entries) {
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
@@ -2672,10 +2564,7 @@ do { \
static int sge_queue_entries(const struct adapter *adap)
{
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- DIV_ROUND_UP(adap->sge.iscsiqsets, 4) +
- DIV_ROUND_UP(adap->sge.niscsitq, 4) +
- DIV_ROUND_UP(adap->sge.rdmaqs, 4) +
- DIV_ROUND_UP(adap->sge.rdmaciqs, 4) +
+ DIV_ROUND_UP(adap->sge.ofldqsets, 4) +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
@@ -2859,12 +2748,6 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
-static int blocked_fl_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -2908,7 +2791,7 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
static const struct file_operations blocked_fl_fops = {
.owner = THIS_MODULE,
- .open = blocked_fl_open,
+ .open = simple_open,
.read = blocked_fl_read,
.write = blocked_fl_write,
.llseek = generic_file_llseek,
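simple_open() is the stock libfs helper that does what the removed blocked_fl_open() did, so this is a no-op cleanup. For reference, its body is roughly (paraphrased from fs/libfs.c):

int simple_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}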
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
new file mode 100644
index 000000000000..2a616171cb68
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -0,0 +1,721 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "l2t.h"
+#include "t4fw_api.h"
+#include "cxgb4_filter.h"
+
+static inline bool is_field_set(u32 val, u32 mask)
+{
+ return val || mask;
+}
+
+static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
+{
+ return !(conf & conf_mask) && is_field_set(val, mask);
+}
+
+/* Validate filter spec against configuration done on the card. */
+static int validate_filter(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ u32 fconf, iconf;
+
+ /* Check for unconfigured fields being used. */
+ fconf = adapter->params.tp.vlan_pri_map;
+ iconf = adapter->params.tp.ingress_config;
+
+ if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
+ unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
+ unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
+ unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
+ fs->mask.ethtype) ||
+ unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
+ unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
+ fs->mask.matchtype) ||
+ unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
+ unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
+ fs->mask.pfvf_vld) ||
+ unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
+ fs->mask.ovlan_vld) ||
+ unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
+ return -EOPNOTSUPP;
+
+ /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
+ * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
+ * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
+ * below. Additionally, since the T4 firmware interface also
+ * carries that overlap, we need to translate any PF/VF
+ * specification into that internal format below.
+ */
+ if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+ is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+ return -EOPNOTSUPP;
+ if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
+ (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+ (iconf & VNIC_F)))
+ return -EOPNOTSUPP;
+ if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
+ return -ERANGE;
+ fs->mask.pf &= 0x7;
+ fs->mask.vf &= 0x7f;
+
+ /* If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* Don't allow various trivially obvious bogus out-of-range values... */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ /* T4 doesn't support removing VLAN Tags for loop back filters. */
+ if (is_t4(adapter->params.chip) &&
+ fs->action == FILTER_SWITCH &&
+ (fs->newvlan == VLAN_REMOVE ||
+ fs->newvlan == VLAN_REWRITE))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static unsigned int get_filter_steerq(struct net_device *dev,
+ struct ch_filter_specification *fs)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int iq;
+
+ /* If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ if (fs->iq)
+ return -EINVAL;
+ iq = 0;
+ } else {
+ struct port_info *pi = netdev_priv(dev);
+
+ /* If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->nqsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+
+ if (test_bit(fidx, t->ftid_bmap)) {
+ spin_unlock_bh(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == PF_INET)
+ __set_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+
+ spin_unlock_bh(&t->ftid_lock);
+ return 0;
+}
+
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ spin_lock_bh(&t->ftid_lock);
+ if (family == PF_INET)
+ __clear_bit(fidx, t->ftid_bmap);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ spin_unlock_bh(&t->ftid_lock);
+}
+
+/* Delete the filter at a specified index. */
+static int del_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ len = sizeof(*fwr);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, len);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(adapter, skb);
+ return 0;
+}
+
+/* Send a Work Request to write the filter at a specified index. We construct
+ * a Firmware Filter Work Request to have the work done and put the indicated
+ * filter into "pending" mode which will prevent any further actions against
+ * it till we get a reply from the firmware on the completion status of the
+ * request.
+ */
+int set_filter_wr(struct adapter *adapter, int fidx)
+{
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct fw_filter_wr *fwr;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan) {
+ /* allocate L2T entry for new filter */
+ f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ }
+
+ fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
+ memset(fwr, 0, sizeof(*fwr));
+
+ /* It would be nice to put most of the following in t4_hw.c but most
+ * of the work is translating the cxgbtool ch_filter_specification
+ * into the Work Request and the definition of that structure is
+ * currently in cxgbtool.h which isn't appropriate to pull into the
+ * common code. We may eventually try to come up with a more neutral
+ * filter specification structure but for now it's easiest to simply
+ * put this fairly direct code in line ...
+ */
+ fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
+ fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ htonl(FW_FILTER_WR_TID_V(f->tid) |
+ FW_FILTER_WR_RQTYPE_V(f->fs.type) |
+ FW_FILTER_WR_NOREPLY_V(0) |
+ FW_FILTER_WR_IQ_V(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
+ FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
+ FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
+ FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
+ FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
+ FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
+ FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
+ FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
+ FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
+ FW_FILTER_WR_PRIO_V(f->fs.prio) |
+ FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = htons(f->fs.val.ethtype);
+ fwr->ethtypem = htons(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
+ FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
+ FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
+ FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
+ FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ htons(FW_FILTER_WR_RX_CHAN_V(0) |
+ FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
+ fwr->maci_to_matchtypem =
+ htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
+ FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
+ FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
+ FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
+ FW_FILTER_WR_PORT_V(f->fs.val.iport) |
+ FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
+ FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
+ FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = htons(f->fs.val.ivlan);
+ fwr->ivlanm = htons(f->fs.mask.ivlan);
+ fwr->ovlan = htons(f->fs.val.ovlan);
+ fwr->ovlanm = htons(f->fs.mask.ovlan);
+ memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = htons(f->fs.val.lport);
+ fwr->lpm = htons(f->fs.mask.lport);
+ fwr->fp = htons(f->fs.val.fport);
+ fwr->fpm = htons(f->fs.mask.fport);
+ if (f->fs.newsmac)
+ memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
+ t4_ofld_send(adapter, skb);
+ return 0;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Delete the filter at the specified index (if valid). This checks for all
+ * the common problems with doing this, like the filter being locked or
+ * currently pending in another operation, etc.
+ */
+int delete_filter(struct adapter *adapter, unsigned int fidx)
+{
+ struct filter_entry *f;
+ int ret;
+
+ if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
+ return -EINVAL;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+ if (f->valid)
+ return del_filter_wr(adapter, fidx);
+
+ return 0;
+}
+
+/* Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct adapter *adap, struct filter_entry *f)
+{
+ /* If the new or old filter has loopback rewriting rules then we'll
+ * need to free any existing Layer Two Table (L2T) entries of the old
+ * filter rule. The firmware will handle freeing up any Source MAC
+ * Table (SMT) entries used for rewriting Source MAC Addresses in
+ * loopback rules.
+ */
+ if (f->l2t)
+ cxgb4_l2t_release(f->l2t);
+
+ /* The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags, l2t pointer, etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+void clear_all_filters(struct adapter *adapter)
+{
+ unsigned int i;
+
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ unsigned int max_ftid = adapter->tids.nftids +
+ adapter->tids.nsftids;
+
+ for (i = 0; i < max_ftid; i++, f++)
+ if (f->valid || f->pending)
+ clear_filter(adapter, f);
+ }
+}
+
+/* Fill in default masks for any match fields that have a value set. */
+static void fill_default_mask(struct ch_filter_specification *fs)
+{
+ unsigned int lip = 0, lip_mask = 0;
+ unsigned int fip = 0, fip_mask = 0;
+ unsigned int i;
+
+ if (fs->val.iport && !fs->mask.iport)
+ fs->mask.iport |= ~0;
+ if (fs->val.fcoe && !fs->mask.fcoe)
+ fs->mask.fcoe |= ~0;
+ if (fs->val.matchtype && !fs->mask.matchtype)
+ fs->mask.matchtype |= ~0;
+ if (fs->val.macidx && !fs->mask.macidx)
+ fs->mask.macidx |= ~0;
+ if (fs->val.ethtype && !fs->mask.ethtype)
+ fs->mask.ethtype |= ~0;
+ if (fs->val.ivlan && !fs->mask.ivlan)
+ fs->mask.ivlan |= ~0;
+ if (fs->val.ovlan && !fs->mask.ovlan)
+ fs->mask.ovlan |= ~0;
+ if (fs->val.frag && !fs->mask.frag)
+ fs->mask.frag |= ~0;
+ if (fs->val.tos && !fs->mask.tos)
+ fs->mask.tos |= ~0;
+ if (fs->val.proto && !fs->mask.proto)
+ fs->mask.proto |= ~0;
+
+ for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
+ lip |= fs->val.lip[i];
+ lip_mask |= fs->mask.lip[i];
+ fip |= fs->val.fip[i];
+ fip_mask |= fs->mask.fip[i];
+ }
+
+ if (lip && !lip_mask)
+ memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
+
+ if (fip && !fip_mask)
+ memset(fs->mask.fip, ~0, sizeof(fs->mask.lip));
+
+ if (fs->val.lport && !fs->mask.lport)
+ fs->mask.lport = ~0;
+ if (fs->val.fport && !fs->mask.fport)
+ fs->mask.fport = ~0;
+}
+
+/* Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int max_fidx, fidx, iq;
+ struct filter_entry *f;
+ u32 iconf;
+ int ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ fill_default_mask(fs);
+
+ ret = validate_filter(dev, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+ if (iq < 0)
+ return iq;
+
+ /* IPv6 filters occupy four slots and must be aligned on
+ * four-slot boundaries. IPv4 filters only occupy a single
+ * slot and have no alignment requirements but writing a new
+ * IPv4 filter into the middle of an existing IPv6 filter
+ * requires clearing the old IPv6 filter and hence we prevent
+ * insertion.
+ */
+ if (fs->type == 0) { /* IPv4 */
+ /* If our IPv4 filter isn't being written to a
+ * multiple of four filter index and there's an IPv6
+ * filter at the multiple of 4 base slot, then we
+ * prevent insertion.
+ */
+ fidx = filter_id & ~0x3;
+ if (fidx != filter_id &&
+ adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
+ fidx, fidx + 3);
+ return -EINVAL;
+ }
+ }
+ } else { /* IPv6 */
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
+
+ /* Check all except the base overlapping IPv4 filter slots. */
+ for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = filter_id + adapter->tids.ftid_base;
+ ret = cxgb4_set_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ if (ret)
+ return ret;
+
+ /* Check to make sure the filter requested is writable ... */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ return ret;
+ }
+
+ /* Clear out any old resources being used by the filter before
+ * we start constructing the new filter.
+ */
+ if (f->valid)
+ clear_filter(adapter, f);
+
+ /* Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ iconf = adapter->params.tp.ingress_config;
+ if (iconf & VNIC_F) {
+ f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
+ f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
+ /* Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(adapter, filter_id);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ fs->type ? PF_INET6 : PF_INET);
+ clear_filter(adapter, f);
+ }
+
+ return ret;
+}
+
+/* Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ struct filter_entry *f;
+ unsigned int max_fidx;
+ int ret;
+
+ max_fidx = adapter->tids.nftids;
+ if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
+ filter_id >= max_fidx)
+ return -E2BIG;
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgb4_clear_ftid(&adapter->tids, filter_id,
+ f->fs.type ? PF_INET6 : PF_INET);
+ return del_filter_wr(adapter, filter_id);
+ }
+
+ /* If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ complete(&ctx->completion);
+ }
+ return ret;
+}
+
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
+
+int cxgb4_del_filter(struct net_device *dev, int filter_id)
+{
+ struct filter_ctx ctx;
+ int ret;
+
+ init_completion(&ctx.completion);
+
+ ret = __cxgb4_del_filter(dev, filter_id, &ctx);
+ if (ret)
+ goto out;
+
+ /* Wait for reply */
+ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = ctx.result;
+out:
+ return ret;
+}
+
+/* Handle a filter write/deletion reply. */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ unsigned int tid = GET_TID(rpl);
+ struct filter_entry *f = NULL;
+ unsigned int max_fidx;
+ int idx;
+
+ max_fidx = adap->tids.nftids + adap->tids.nsftids;
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = TCB_COOKIE_G(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /* Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /* Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = 0;
+ } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
+ dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
+ idx);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -ENOMEM;
+ } else if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
+ }
+ } else {
+ /* Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(adap, f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+ if (ctx)
+ complete(&ctx->completion);
+ }
+}
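The new file also gives code outside the driver core a small synchronous API. A hypothetical caller, steering local TCP port 80 traffic to the first ingress queue (field names follow struct ch_filter_specification as used above; the function name and parameter choices are illustrative only):

/* Sketch only: add an exact-match filter on local TCP port 80. */
static int example_add_http_filter(struct net_device *dev, int filter_id)
{
	struct ch_filter_specification fs = { };

	fs.val.proto = IPPROTO_TCP;
	fs.mask.proto = ~0;
	fs.val.lport = 80;
	fs.mask.lport = ~0;
	fs.dirsteer = 1;	/* steer matches to fs.iq */
	fs.iq = 0;		/* first qset of this port */

	return cxgb4_set_filter(dev, filter_id, &fs);
}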
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
new file mode 100644
index 000000000000..23742cb1c69f
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_FILTER_H
+#define __CXGB4_FILTER_H
+
+#include "t4_msg.h"
+
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct adapter *adap, struct filter_entry *f);
+
+int set_filter_wr(struct adapter *adapter, int fidx);
+int delete_filter(struct adapter *adapter, unsigned int fidx);
+
+int writable_filter(struct filter_entry *f);
+void clear_all_filters(struct adapter *adapter);
+#endif /* __CXGB4_FILTER_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 44cc9767936f..eaa7fa98a205 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -67,6 +67,7 @@
#include <linux/crash_dump.h>
#include "cxgb4.h"
+#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
@@ -77,6 +78,7 @@
#include "clip_tbl.h"
#include "l2t.h"
#include "sched.h"
+#include "cxgb4_tc_u32.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -87,30 +89,6 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
-/* Host shadow copy of ingress filter entry. This is in host native format
- * and doesn't match the ordering or bit order, etc. of the hardware of the
- * firmware command. The use of bit-field structure elements is purely to
- * remind ourselves of the field size limitations and save memory in the case
- * where the filter table is large.
- */
-struct filter_entry {
- /* Administrative fields for filter.
- */
- u32 valid:1; /* filter allocated and valid */
- u32 locked:1; /* filter is administratively locked */
-
- u32 pending:1; /* filter action is pending firmware reply */
- u32 smtidx:8; /* Source MAC Table index for smac */
- struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
-
- /* The filter itself. Most of this is a straight copy of information
- * provided by the extended ioctl(). Some fields are translated to
- * internal forms -- for instance the Ingress Queue ID passed in from
- * the ioctl() is translated into the Absolute Ingress Queue ID.
- */
- struct ch_filter_specification fs;
-};
-
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -226,11 +204,6 @@ static struct dentry *cxgb4_debugfs_root;
LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
-/* Adapter list to be accessed from atomic context */
-static LIST_HEAD(adap_rcu_list);
-static DEFINE_SPINLOCK(adap_rcu_lock);
-static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
-static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
@@ -532,66 +505,6 @@ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
-/* Clear a filter and release any of its resources that we own. This also
- * clears the filter's "pending" status.
- */
-static void clear_filter(struct adapter *adap, struct filter_entry *f)
-{
- /* If the new or old filter have loopback rewriteing rules then we'll
- * need to free any existing Layer Two Table (L2T) entries of the old
- * filter rule. The firmware will handle freeing up any Source MAC
- * Table (SMT) entries used for rewriting Source MAC Addresses in
- * loopback rules.
- */
- if (f->l2t)
- cxgb4_l2t_release(f->l2t);
-
- /* The zeroing of the filter rule below clears the filter valid,
- * pending, locked flags, l2t pointer, etc. so it's all we need for
- * this operation.
- */
- memset(f, 0, sizeof(*f));
-}
-
-/* Handle a filter write/deletion reply.
- */
-static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
-{
- unsigned int idx = GET_TID(rpl);
- unsigned int nidx = idx - adap->tids.ftid_base;
- unsigned int ret;
- struct filter_entry *f;
-
- if (idx >= adap->tids.ftid_base && nidx <
- (adap->tids.nftids + adap->tids.nsftids)) {
- idx = nidx;
- ret = TCB_COOKIE_G(rpl->cookie);
- f = &adap->tids.ftid_tab[idx];
-
- if (ret == FW_FILTER_WR_FLT_DELETED) {
- /* Clear the filter when we get confirmation from the
- * hardware that the filter has been deleted.
- */
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
- dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
- idx);
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_FLT_ADDED) {
- f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
- f->pending = 0; /* asynchronous setup completed */
- f->valid = 1;
- } else {
- /* Something went wrong. Issue a warning about the
- * problem and clear everything out.
- */
- dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
- idx, ret);
- clear_filter(adap, f);
- }
- }
-}
-
/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
@@ -678,56 +591,6 @@ out:
return 0;
}
-/* Flush the aggregated lro sessions */
-static void uldrx_flush_handler(struct sge_rspq *q)
-{
- if (ulds[q->uld].lro_flush)
- ulds[q->uld].lro_flush(&q->lro_mgr);
-}
-
-/**
- * uldrx_handler - response queue handler for ULD queues
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the offload message
- * @gl: the gather list of packet fragments
- *
- * Deliver an ingress offload packet to a ULD. All processing is done by
- * the ULD, we just maintain statistics.
- */
-static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *gl)
-{
- struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
- int ret;
-
- /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
- */
- if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
- ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
- rsp += 2;
-
- if (q->flush_handler)
- ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl, &q->lro_mgr,
- &q->napi);
- else
- ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
- rsp, gl);
-
- if (ret) {
- rxq->stats.nomem++;
- return -1;
- }
-
- if (gl == NULL)
- rxq->stats.imm++;
- else if (gl == CXGB4_MSG_AN)
- rxq->stats.an++;
- else
- rxq->stats.pkts++;
- return 0;
-}
-
static void disable_msi(struct adapter *adapter)
{
if (adapter->flags & USING_MSIX) {
@@ -779,30 +642,12 @@ static void name_msix_vecs(struct adapter *adap)
snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
d->name, i);
}
-
- /* offload queues */
- for_each_iscsirxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
- adap->port[0]->name, i);
-
- for_each_iscsitrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
- adap->port[0]->name, i);
-
- for_each_rdmarxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
- adap->port[0]->name, i);
-
- for_each_rdmaciq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
- adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
- int iscsitqidx = 0;
+ int err, ethqidx;
int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
@@ -819,57 +664,9 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
- for_each_iscsirxq(s, iscsiqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsirxq[iscsiqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_iscsitrxq(s, iscsitqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->iscsitrxq[iscsitqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmarxq[rdmaqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmaciq(s, rdmaciqqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmaciq[rdmaciqqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
return 0;
unwind:
- while (--rdmaciqqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmaciq[rdmaciqqidx].rspq);
- while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmarxq[rdmaqidx].rspq);
- while (--iscsitqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsitrxq[iscsitqidx].rspq);
- while (--iscsiqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->iscsirxq[iscsiqidx].rspq);
while (--ethqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->ethrxq[ethqidx].rspq);
@@ -885,16 +682,6 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[1].vec, &s->fw_evtq);
for_each_ethrxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_iscsirxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsirxq[i].rspq);
- for_each_iscsitrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec,
- &s->iscsitrxq[i].rspq);
- for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
- for_each_rdmaciq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1033,42 +820,11 @@ static void enable_rx(struct adapter *adap)
}
}
-static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
- unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids, bool lro)
-{
- int i, err;
- for (i = 0; i < nq; i++, q++) {
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler,
- lro ? uldrx_flush_handler : NULL,
- 0);
- if (err)
- return err;
- memset(&q->stats, 0, sizeof(q->stats));
- if (ids)
- ids[i] = q->rspq.abs_id;
- }
- return 0;
-}
-
-/**
- * setup_sge_queues - configure SGE Tx/Rx/response queues
- * @adap: the adapter
- *
- * Determines how many sets of SGE queues to use and initializes them.
- * We support multiple queue sets per port if we have MSI-X, otherwise
- * just one queue set per port.
- */
-static int setup_sge_queues(struct adapter *adap)
+static int setup_fw_sge_queues(struct adapter *adap)
{
- int err, i, j;
struct sge *s = &adap->sge;
+ int err = 0;
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
@@ -1083,25 +839,27 @@ static int setup_sge_queues(struct adapter *adap)
adap->msi_idx = -((int)s->intrq.abs_id + 1);
}
- /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
- * don't forget to update the following which need to be
- * synchronized to and changes here.
- *
- * 1. The calculations of MAX_INGQ in cxgb4.h.
- *
- * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
- * to accommodate any new/deleted Ingress Queues
- * which need MSI-X Vectors.
- *
- * 3. Update sge_qinfo_show() to include information on the
- * new/deleted queues.
- */
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
- if (err) {
-freeout: t4_free_sge_resources(adap);
- return err;
- }
+ if (err)
+ t4_free_sge_resources(adap);
+ return err;
+}
+
+/**
+ * setup_sge_queues - configure SGE Tx/Rx/response queues
+ * @adap: the adapter
+ *
+ * Determines how many sets of SGE queues to use and initializes them.
+ * We support multiple queue sets per port if we have MSI-X, otherwise
+ * just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+ int err, i, j;
+ struct sge *s = &adap->sge;
+ struct sge_uld_rxq_info *rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
+ unsigned int cmplqid = 0;
for_each_port(adap, i) {
struct net_device *dev = adap->port[i];
@@ -1132,8 +890,8 @@ freeout: t4_free_sge_resources(adap);
}
}
- j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
- for_each_iscsirxq(s, i) {
+ j = s->ofldqsets / adap->params.nports; /* offload queues per channel */
+ for_each_ofldtxq(s, i) {
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
adap->port[i / j],
s->fw_evtq.cntxt_id);
@@ -1141,30 +899,15 @@ freeout: t4_free_sge_resources(adap);
goto freeout;
}
-#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \
- if (err) \
- goto freeout; \
- if (adap->msi_idx > 0) \
- adap->msi_idx += nq; \
-} while (0)
-
- ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
- ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
- j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
-
-#undef ALLOC_OFLD_RXQS
-
for_each_port(adap, i) {
- /*
- * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+ /* Note that cmplqid below is 0 if we don't
* have RDMA queues, and that's the right value.
*/
+ if (rxq_info)
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+
err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
- s->fw_evtq.cntxt_id,
- s->rdmarxq[i].rspq.cntxt_id);
+ s->fw_evtq.cntxt_id, cmplqid);
if (err)
goto freeout;
}
@@ -1175,6 +918,9 @@ freeout: t4_free_sge_resources(adap);
RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
return 0;
+freeout:
+ t4_free_sge_resources(adap);
+ return err;
}
/*
@@ -1198,151 +944,6 @@ void t4_free_mem(void *addr)
kvfree(addr);
}
-/* Send a Work Request to write the filter at a specified index. We construct
- * a Firmware Filter Work Request to have the work done and put the indicated
- * filter into "pending" mode which will prevent any further actions against
- * it till we get a reply from the firmware on the completion status of the
- * request.
- */
-static int set_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int ftid;
-
- skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- /* If the new filter requires loopback Destination MAC and/or VLAN
- * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
- * the filter.
- */
- if (f->fs.newdmac || f->fs.newvlan) {
- /* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
- f->fs.eport, f->fs.dmac);
- if (f->l2t == NULL) {
- kfree_skb(skb);
- return -ENOMEM;
- }
- }
-
- ftid = adapter->tids.ftid_base + fidx;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
-
- /* It would be nice to put most of the following in t4_hw.c but most
- * of the work is translating the cxgbtool ch_filter_specification
- * into the Work Request and the definition of that structure is
- * currently in cxgbtool.h which isn't appropriate to pull into the
- * common code. We may eventually try to come up with a more neutral
- * filter specification structure but for now it's easiest to simply
- * put this fairly direct code in line ...
- */
- fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
- fwr->tid_to_iq =
- htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_RQTYPE_V(f->fs.type) |
- FW_FILTER_WR_NOREPLY_V(0) |
- FW_FILTER_WR_IQ_V(f->fs.iq));
- fwr->del_filter_to_l2tix =
- htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
- FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
- FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
- FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
- FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
- FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
- FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
- FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
- FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
- FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
- FW_FILTER_WR_PRIO_V(f->fs.prio) |
- FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
- fwr->ethtype = htons(f->fs.val.ethtype);
- fwr->ethtypem = htons(f->fs.mask.ethtype);
- fwr->frag_to_ovlan_vldm =
- (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
- FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
- FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
- FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
- fwr->rx_chan_rx_rpl_iq =
- htons(FW_FILTER_WR_RX_CHAN_V(0) |
- FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
- fwr->maci_to_matchtypem =
- htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
- FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
- FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
- FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
- FW_FILTER_WR_PORT_V(f->fs.val.iport) |
- FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
- FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
- FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
- fwr->ptcl = f->fs.val.proto;
- fwr->ptclm = f->fs.mask.proto;
- fwr->ttyp = f->fs.val.tos;
- fwr->ttypm = f->fs.mask.tos;
- fwr->ivlan = htons(f->fs.val.ivlan);
- fwr->ivlanm = htons(f->fs.mask.ivlan);
- fwr->ovlan = htons(f->fs.val.ovlan);
- fwr->ovlanm = htons(f->fs.mask.ovlan);
- memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
- memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
- memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
- memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
- fwr->lp = htons(f->fs.val.lport);
- fwr->lpm = htons(f->fs.mask.lport);
- fwr->fp = htons(f->fs.val.fport);
- fwr->fpm = htons(f->fs.mask.fport);
- if (f->fs.newsmac)
- memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
- t4_ofld_send(adapter, skb);
- return 0;
-}
-
-/* Delete the filter at a specified index.
- */
-static int del_filter_wr(struct adapter *adapter, int fidx)
-{
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int len, ftid;
-
- len = sizeof(*fwr);
- ftid = adapter->tids.ftid_base + fidx;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
- t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
-
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- t4_mgmt_tx(adapter, skb);
- return 0;
-}
-
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -1724,19 +1325,22 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
*/
static int tid_init(struct tid_info *t)
{
- size_t size;
- unsigned int stid_bmap_size;
- unsigned int natids = t->natids;
struct adapter *adap = container_of(t, struct adapter, tids);
+ unsigned int max_ftids = t->nftids + t->nsftids;
+ unsigned int natids = t->natids;
+ unsigned int stid_bmap_size;
+ unsigned int ftid_bmap_size;
+ size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
+ ftid_bmap_size = BITS_TO_LONGS(t->nftids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
- t->nftids * sizeof(*t->ftid_tab) +
- t->nsftids * sizeof(*t->ftid_tab);
+ max_ftids * sizeof(*t->ftid_tab) +
+ ftid_bmap_size * sizeof(long);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
@@ -1746,8 +1350,10 @@ static int tid_init(struct tid_info *t)
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
+ t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
+ spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
t->sftids_in_use = 0;
@@ -1762,12 +1368,16 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
- bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
- /* Reserve stid 0 for T4/T5 adapters */
- if (!t->stid_base &&
- (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
- __set_bit(0, t->stid_bmap);
+ if (is_offload(adap)) {
+ bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ __set_bit(0, t->stid_bmap);
+ }
+
+ bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
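tid_init() sizes a single contiguous allocation and carves it into the tid/atid/stid tables, the stid bitmap, the (now larger) ftid table, and the new ftid bitmap. A userspace toy of the same carving arithmetic for the ftid portion; the sizes are illustrative and calloc() stands in for t4_alloc_mem():

#include <stdio.h>
#include <stdlib.h>

#define BITS_TO_LONGS(n) (((n) + 8 * sizeof(long) - 1) / (8 * sizeof(long)))

struct filter_entry { int dummy; };

int main(void)
{
	unsigned int nftids = 496, nsftids = 16;	/* illustrative */
	unsigned int max_ftids = nftids + nsftids;
	size_t bmap_size = BITS_TO_LONGS(nftids);
	size_t size = max_ftids * sizeof(struct filter_entry) +
		      bmap_size * sizeof(long);
	char *base = calloc(1, size);

	if (!base)
		return 1;

	/* Carve the region exactly as tid_init() does for the ftid part:
	 * the bitmap starts right after the last filter entry.
	 */
	struct filter_entry *ftid_tab = (struct filter_entry *)base;
	unsigned long *ftid_bmap = (unsigned long *)&ftid_tab[max_ftids];

	printf("one %zu-byte block: %u entries then %zu bitmap longs\n",
	       size, max_ftids, bmap_size);
	printf("bitmap offset = %td bytes\n", (char *)ftid_bmap - base);
	free(base);
	return 0;
}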
@@ -2317,7 +1927,7 @@ static void disable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
disable_txq_db(&adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
disable_txq_db(&adap->sge.ctrlq[i].q);
@@ -2329,7 +1939,7 @@ static void enable_dbs(struct adapter *adap)
for_each_ethrxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
@@ -2337,9 +1947,10 @@ static void enable_dbs(struct adapter *adap)
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
+ enum cxgb4_uld type = CXGB4_ULD_RDMA;
+
+ if (adap->uld && adap->uld[type].handle)
+ adap->uld[type].control(adap->uld[type].handle, cmd);
}
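With the file-scope ulds[] array gone, control notifications are dispatched through the adapter's own uld table, guarded by both the table pointer and the per-type handle. A minimal standalone model of that dispatch pattern, with the types trimmed to the essentials:

#include <stdio.h>

enum uld_type { ULD_RDMA, ULD_MAX };

struct uld_info {
	void *handle;
	void (*control)(void *handle, int cmd);
};

struct adapter {
	struct uld_info *uld;	/* NULL until uld memory is allocated */
};

static void notify_uld(struct adapter *adap, enum uld_type type, int cmd)
{
	/* Both guards matter: the table may not exist yet, and a slot
	 * may exist without a registered driver behind it.
	 */
	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void rdma_control(void *handle, int cmd)
{
	printf("rdma control: cmd=%d\n", cmd);
}

int main(void)
{
	struct uld_info table[ULD_MAX] = { 0 };
	struct adapter adap = { .uld = NULL };

	notify_uld(&adap, ULD_RDMA, 1);		/* silently skipped */
	adap.uld = table;
	notify_uld(&adap, ULD_RDMA, 1);		/* still skipped: no handle */
	table[ULD_RDMA].handle = &adap;
	table[ULD_RDMA].control = rdma_control;
	notify_uld(&adap, ULD_RDMA, 1);		/* dispatched */
	return 0;
}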
static void process_db_full(struct work_struct *work)
@@ -2393,13 +2004,14 @@ out:
if (ret)
CH_WARN(adap, "DB drop recovery failed.\n");
}
+
static void recover_all_queues(struct adapter *adap)
{
int i;
for_each_ethrxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_iscsirxq(&adap->sge, i)
+ for_each_ofldtxq(&adap->sge, i)
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
for_each_port(adap, i)
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
@@ -2464,94 +2076,12 @@ void t4_db_dropped(struct adapter *adap)
queue_work(adap->workq, &adap->db_drop_task);
}
-static void uld_attach(struct adapter *adap, unsigned int uld)
-{
- void *handle;
- struct cxgb4_lld_info lli;
- unsigned short i;
-
- lli.pdev = adap->pdev;
- lli.pf = adap->pf;
- lli.l2t = adap->l2t;
- lli.tids = &adap->tids;
- lli.ports = adap->port;
- lli.vr = &adap->vres;
- lli.mtus = adap->params.mtus;
- if (uld == CXGB4_ULD_RDMA) {
- lli.rxq_ids = adap->sge.rdma_rxq;
- lli.ciq_ids = adap->sge.rdma_ciq;
- lli.nrxq = adap->sge.rdmaqs;
- lli.nciq = adap->sge.rdmaciqs;
- } else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.iscsi_rxq;
- lli.nrxq = adap->sge.iscsiqsets;
- } else if (uld == CXGB4_ULD_ISCSIT) {
- lli.rxq_ids = adap->sge.iscsit_rxq;
- lli.nrxq = adap->sge.niscsitq;
- }
- lli.ntxq = adap->sge.iscsiqsets;
- lli.nchan = adap->params.nports;
- lli.nports = adap->params.nports;
- lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
- lli.iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
- lli.iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
- lli.iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
- lli.iscsi_ppm = &adap->iscsi_ppm;
- lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << adap->params.sge.eq_qpp;
- lli.ucq_density = 1 << adap->params.sge.iq_qpp;
- lli.filt_mode = adap->params.tp.vlan_pri_map;
- /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
- for (i = 0; i < NCHAN; i++)
- lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
- lli.fw_vers = adap->params.fw_vers;
- lli.dbfifo_int_thresh = dbfifo_int_thresh;
- lli.sge_ingpadboundary = adap->sge.fl_align;
- lli.sge_egrstatuspagesize = adap->sge.stat_len;
- lli.sge_pktshift = adap->sge.pktshift;
- lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
- lli.max_ordird_qp = adap->params.max_ordird_qp;
- lli.max_ird_adapter = adap->params.max_ird_adapter;
- lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
- lli.nodeid = dev_to_node(adap->pdev_dev);
-
- handle = ulds[uld].add(&lli);
- if (IS_ERR(handle)) {
- dev_warn(adap->pdev_dev,
- "could not attach to the %s driver, error %ld\n",
- uld_str[uld], PTR_ERR(handle));
- return;
- }
-
- adap->uld_handle[uld] = handle;
-
+void t4_register_netevent_notifier(void)
+{
if (!netevent_registered) {
register_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = true;
}
-
- if (adap->flags & FULL_INIT_DONE)
- ulds[uld].state_change(handle, CXGB4_STATE_UP);
-}
-
-static void attach_ulds(struct adapter *adap)
-{
- unsigned int i;
-
- spin_lock(&adap_rcu_lock);
- list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
- spin_unlock(&adap_rcu_lock);
-
- mutex_lock(&uld_mutex);
- list_add_tail(&adap->list_node, &adapter_list);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (ulds[i].add)
- uld_attach(adap, i);
- mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
@@ -2561,12 +2091,6 @@ static void detach_ulds(struct adapter *adap)
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i]) {
- ulds[i].state_change(adap->uld_handle[i],
- CXGB4_STATE_DETACH);
- adap->uld_handle[i] = NULL;
- }
- for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle) {
adap->uld[i].state_change(adap->uld[i].handle,
CXGB4_STATE_DETACH);
@@ -2577,10 +2101,6 @@ static void detach_ulds(struct adapter *adap)
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
-
- spin_lock(&adap_rcu_lock);
- list_del_rcu(&adap->rcu_node);
- spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -2589,65 +2109,12 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i])
- ulds[i].state_change(adap->uld_handle[i], new_state);
- for (i = 0; i < CXGB4_PCI_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle)
adap->uld[i].state_change(adap->uld[i].handle,
new_state);
mutex_unlock(&uld_mutex);
}
-/**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
- *
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
- */
-int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
-{
- int ret = 0;
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- if (ulds[type].add) {
- ret = -EBUSY;
- goto out;
- }
- ulds[type] = *p;
- list_for_each_entry(adap, &adapter_list, list_node)
- uld_attach(adap, type);
-out: mutex_unlock(&uld_mutex);
- return ret;
-}
-EXPORT_SYMBOL(cxgb4_register_uld);
-
-/**
- * cxgb4_unregister_uld - unregister an upper-layer driver
- * @type: the ULD type
- *
- * Unregisters an existing upper-layer driver.
- */
-int cxgb4_unregister_uld(enum cxgb4_uld type)
-{
- struct adapter *adap;
-
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node)
- adap->uld_handle[type] = NULL;
- ulds[type].add = NULL;
- mutex_unlock(&uld_mutex);
- return 0;
-}
-EXPORT_SYMBOL(cxgb4_unregister_uld);
-
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
unsigned long event, void *data)
@@ -2752,7 +2219,6 @@ static int cxgb_up(struct adapter *adap)
adap->msix_info[0].desc, adap);
if (err)
goto irq_err;
-
err = request_msix_queue_irqs(adap);
if (err) {
free_irq(adap->msix_info[0].vec, adap);
@@ -2830,40 +2296,6 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
-/* Return an error number if the indicated filter isn't writable ...
- */
-static int writable_filter(struct filter_entry *f)
-{
- if (f->locked)
- return -EPERM;
- if (f->pending)
- return -EBUSY;
-
- return 0;
-}
-
-/* Delete the filter at the specified index (if valid). The checks for all
- * the common problems with doing this like the filter being locked, currently
- * pending in another operation, etc.
- */
-static int delete_filter(struct adapter *adapter, unsigned int fidx)
-{
- struct filter_entry *f;
- int ret;
-
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
- return -EINVAL;
-
- f = &adapter->tids.ftid_tab[fidx];
- ret = writable_filter(f);
- if (ret)
- return ret;
- if (f->valid)
- return del_filter_wr(adapter, fidx);
-
- return 0;
-}
-
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned char port, unsigned char mask)
@@ -3280,6 +2712,35 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+ if (!(adap->flags & FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+ return -EINVAL;
+ }
+
+ if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
+ tc->type == TC_SETUP_CLSU32) {
+ switch (tc->cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return cxgb4_config_knode(dev, proto, tc->cls_u32);
+ case TC_CLSU32_DELETE_KNODE:
+ return cxgb4_delete_knode(dev, proto, tc->cls_u32);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
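cxgb_setup_tc() only accepts classifiers hung off the ingress qdisc, which it detects by comparing the major part of the tc handle with TC_H_INGRESS. A standalone check, with the constants copied from the linux/pkt_sched.h uapi purely to illustrate the comparison:

#include <stdio.h>

#define TC_H_MAJ_MASK 0xFFFF0000U
#define TC_H_MAJ(h)   ((h) & TC_H_MAJ_MASK)
#define TC_H_INGRESS  0xFFFFFFF1U

static int is_ingress(unsigned int handle)
{
	return TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
}

int main(void)
{
	/* ffff:0000 is the ingress qdisc; 8001:0000 is an ordinary qdisc. */
	printf("0xffff0000 -> %d\n", is_ingress(0xffff0000U));	/* 1 */
	printf("0x80010000 -> %d\n", is_ingress(0x80010000U));	/* 0 */
	return 0;
}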
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -3303,6 +2764,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_busy_poll = cxgb_busy_poll,
#endif
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
+ .ndo_setup_tc = cxgb_setup_tc,
};
#ifdef CONFIG_PCI_IOV
@@ -4262,6 +3724,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ofldq_wr_cred = val[5];
adap->params.offload = 1;
+ adap->num_ofld_uld += 1;
}
if (caps_cmd.rdmacaps) {
params[0] = FW_PARAM_PFVF(STAG_START);
@@ -4314,6 +3777,7 @@ static int adap_init0(struct adapter *adap)
"max_ordird_qp %d max_ird_adapter %d\n",
adap->params.max_ordird_qp,
adap->params.max_ird_adapter);
+ adap->num_ofld_uld += 2;
}
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -4324,6 +3788,8 @@ static int adap_init0(struct adapter *adap)
goto bye;
adap->vres.iscsi.start = val[0];
adap->vres.iscsi.size = val[1] - val[0] + 1;
+ /* LIO target and cxgb4i initiator */
+ adap->num_ofld_uld += 2;
}
if (caps_cmd.cryptocaps) {
/* Should query params here...TODO */
@@ -4505,10 +3971,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
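Rather than enumerating every fast speed, the reworked is_x_10g_port() masks off the two sub-10G bits and treats anything left over as high speed, so 25G/100G (and future speeds) need no further edits here. A sketch with illustrative bit assignments; the real FW_PORT_CAP_SPEED_* values live in t4fw_api.h:

#include <stdio.h>

/* Illustrative bit positions only; see t4fw_api.h for the real ones. */
#define SPEED_100M 0x01
#define SPEED_1G   0x02
#define SPEED_10G  0x04
#define SPEED_25G  0x08
#define SPEED_40G  0x10
#define SPEED_100G 0x20

static int is_high_speed(unsigned int supported)
{
	unsigned int high = supported & ~(SPEED_100M | SPEED_1G);

	return high != 0;
}

int main(void)
{
	printf("100M+1G   -> %d\n", is_high_speed(SPEED_100M | SPEED_1G)); /* 0 */
	printf("1G+25G    -> %d\n", is_high_speed(SPEED_1G | SPEED_25G));  /* 1 */
	printf("100G only -> %d\n", is_high_speed(SPEED_100G));            /* 1 */
	return 0;
}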
/*
@@ -4523,14 +3996,14 @@ static void cfg_queues(struct adapter *adap)
#ifndef CONFIG_CHELSIO_T4_DCB
int q10g = 0;
#endif
- int ciq_size;
/* Reduce memory usage in kdump environment, disable all offload.
*/
if (is_kdump_kernel()) {
adap->params.offload = 0;
adap->params.crypto = 0;
- } else if (adap->num_uld && uld_mem_alloc(adap)) {
+ } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) {
+ adap->params.offload = 0;
adap->params.crypto = 0;
}
@@ -4576,33 +4049,18 @@ static void cfg_queues(struct adapter *adap)
s->ethqsets = qidx;
s->max_ethqsets = qidx; /* MSI-X may lower it later */
- if (is_offload(adap)) {
+ if (is_uld(adap)) {
/*
* For offload we use 1 queue/channel if all ports are up to 1G,
* otherwise we divide all available queues amongst the channels
* capped by the number of available cores.
*/
if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->iscsirxq),
- num_online_cpus());
- s->iscsiqsets = roundup(i, adap->params.nports);
- } else
- s->iscsiqsets = adap->params.nports;
- /* For RDMA one Rx queue per channel suffices */
- s->rdmaqs = adap->params.nports;
- /* Try and allow at least 1 CIQ per cpu rounding down
- * to the number of ports, with a minimum of 1 per port.
- * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
- * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
- * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
- */
- s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
- s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
- adap->params.nports;
- s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
-
- if (!is_t4(adap->params.chip))
- s->niscsitq = s->iscsiqsets;
+ i = num_online_cpus();
+ s->ofldqsets = roundup(i, adap->params.nports);
+ } else {
+ s->ofldqsets = adap->params.nports;
+ }
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
@@ -4621,47 +4079,8 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsirxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSI;
- r->fl.size = 72;
- }
-
- if (!is_t4(adap->params.chip)) {
- for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
- struct sge_ofld_rxq *r = &s->iscsitrxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSIT;
- r->fl.size = 72;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
- struct sge_ofld_rxq *r = &s->rdmarxq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, 511, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- r->fl.size = 72;
- }
-
- ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
- if (ciq_size > SGE_MAX_IQ_SIZE) {
- CH_WARN(adap, "CIQ size too small for available IQs\n");
- ciq_size = SGE_MAX_IQ_SIZE;
- }
-
- for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
- struct sge_ofld_rxq *r = &s->rdmaciq[i];
-
- init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- }
-
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
- init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}
/*
@@ -4695,7 +4114,15 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int get_msix_info(struct adapter *adap)
{
struct uld_msix_info *msix_info;
- int max_ingq = (MAX_OFLD_QSETS * adap->num_uld);
+ unsigned int max_ingq = 0;
+
+ if (is_offload(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
+ if (is_pci_uld(adap))
+ max_ingq += MAX_OFLD_QSETS * adap->num_uld;
+
+ if (!max_ingq)
+ goto out;
msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
if (!msix_info)
@@ -4709,12 +4136,13 @@ static int get_msix_info(struct adapter *adap)
}
spin_lock_init(&adap->msix_bmap_ulds.lock);
adap->msix_info_ulds = msix_info;
+out:
return 0;
}
static void free_msix_info(struct adapter *adap)
{
- if (!adap->num_uld)
+ if (!(adap->num_uld && adap->num_ofld_uld))
return;
kfree(adap->msix_info_ulds);
@@ -4733,32 +4161,32 @@ static int enable_msix(struct adapter *adap)
struct msix_entry *entries;
int max_ingq = MAX_INGQ;
- max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
+ if (is_pci_uld(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
+ if (is_offload(adap))
+ max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
entries = kmalloc(sizeof(*entries) * (max_ingq + 1),
GFP_KERNEL);
if (!entries)
return -ENOMEM;
/* map for msix */
- if (is_pci_uld(adap) && get_msix_info(adap))
+ if (get_msix_info(adap)) {
+ adap->params.offload = 0;
adap->params.crypto = 0;
+ }
for (i = 0; i < max_ingq + 1; ++i)
entries[i].entry = i;
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
- s->niscsitq;
- /* need nchan for each possible ULD */
- if (is_t4(adap->params.chip))
- ofld_need = 3 * nchan;
- else
- ofld_need = 4 * nchan;
+ want += adap->num_ofld_uld * s->ofldqsets;
+ ofld_need = adap->num_ofld_uld * nchan;
}
if (is_pci_uld(adap)) {
- want += netif_get_num_default_rss_queues() * nchan;
- uld_need = nchan;
+ want += adap->num_uld * s->ofldqsets;
+ uld_need = adap->num_uld * nchan;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
@@ -4786,43 +4214,25 @@ static int enable_msix(struct adapter *adap)
if (i < s->ethqsets)
reduce_ethqs(adap, i);
}
- if (is_pci_uld(adap)) {
+ if (is_uld(adap)) {
if (allocated < want)
s->nqs_per_uld = nchan;
else
- s->nqs_per_uld = netif_get_num_default_rss_queues() *
- nchan;
- }
-
- if (is_offload(adap)) {
- if (allocated < want) {
- s->rdmaqs = nchan;
- s->rdmaciqs = nchan;
-
- if (!is_t4(adap->params.chip))
- s->niscsitq = nchan;
- }
-
- /* leftovers go to OFLD */
- i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs - s->niscsitq;
- if (is_pci_uld(adap))
- i -= s->nqs_per_uld * adap->num_uld;
- s->iscsiqsets = (i / nchan) * nchan; /* round down */
-
+ s->nqs_per_uld = s->ofldqsets;
}
- for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i)
+ for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
adap->msix_info[i].vec = entries[i].vector;
- if (is_pci_uld(adap)) {
- for (j = 0 ; i < allocated; ++i, j++)
+ if (is_uld(adap)) {
+ for (j = 0 ; i < allocated; ++i, j++) {
adap->msix_info_ulds[j].vec = entries[i].vector;
+ adap->msix_info_ulds[j].idx = i;
+ }
adap->msix_bmap_ulds.mapsize = j;
}
dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
- "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n",
- allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
- s->rdmaciqs, s->nqs_per_uld);
+ "nic %d per uld %d\n",
+ allocated, s->max_ethqsets, s->nqs_per_uld);
kfree(entries);
return 0;
@@ -5005,8 +4415,12 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
@@ -5034,6 +4448,7 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_cleanup_sched(adapter);
t4_free_mem(adapter->tids.tid_tab);
+ cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
@@ -5378,7 +4793,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_TC;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5462,10 +4878,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
i);
}
- if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+ if (tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
adapter->params.offload = 0;
+ } else {
+ adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
+ CXGB4_MAX_LINK_HANDLE);
+ if (!adapter->tc_u32)
+ dev_warn(&pdev->dev,
+ "could not offload tc u32, continuing\n");
}
if (is_offload(adapter)) {
@@ -5535,10 +4957,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
pdev->needs_freset = 1;
- if (is_offload(adapter))
- attach_ulds(adapter);
+ if (is_uld(adapter)) {
+ mutex_lock(&uld_mutex);
+ list_add_tail(&adapter->list_node, &adapter_list);
+ mutex_unlock(&uld_mutex);
+ }
print_adapter_info(adapter);
+ setup_fw_sge_queues(adapter);
return 0;
sriov:
@@ -5593,8 +5019,8 @@ sriov:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
free_msix_info(adapter);
- if (adapter->num_uld)
- uld_mem_free(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
out_unmap_bar:
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
@@ -5631,7 +5057,7 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_offload(adapter))
+ if (is_uld(adapter))
detach_ulds(adapter);
disable_interrupts(adapter);
@@ -5645,21 +5071,15 @@ static void remove_one(struct pci_dev *pdev)
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
- if (adapter->tids.ftid_tab) {
- struct filter_entry *f = &adapter->tids.ftid_tab[0];
- for (i = 0; i < (adapter->tids.nftids +
- adapter->tids.nsftids); i++, f++)
- if (f->valid)
- clear_filter(adapter, f);
- }
+ clear_all_filters(adapter);
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
if (adapter->flags & USING_MSIX)
free_msix_info(adapter);
- if (adapter->num_uld)
- uld_mem_free(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
@@ -5690,12 +5110,58 @@ static void remove_one(struct pci_dev *pdev)
#endif
}
+/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
+ * delivery. This is essentially a stripped down version of the PCI remove()
+ * function where we do the minimal amount of work necessary to shutdown any
+ * further activity.
+ */
+static void shutdown_one(struct pci_dev *pdev)
+{
+ struct adapter *adapter = pci_get_drvdata(pdev);
+
+ /* As with remove_one() above (see extended comment), we only want to
+ * do cleanup on PCI Devices which went all the way through init_one()
+ * ...
+ */
+ if (!adapter) {
+ pci_release_regions(pdev);
+ return;
+ }
+
+ if (adapter->pf == 4) {
+ int i;
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
+ disable_interrupts(adapter);
+ disable_msi(adapter);
+
+ t4_sge_stop(adapter);
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ }
+#ifdef CONFIG_PCI_IOV
+ else {
+ if (adapter->port[0])
+ unregister_netdev(adapter->port[0]);
+ iounmap(adapter->regs);
+ kfree(adapter->vfinfo);
+ kfree(adapter);
+ pci_disable_sriov(pdev);
+ pci_release_regions(pdev);
+ }
+#endif
+}
+
static struct pci_driver cxgb4_driver = {
.name = KBUILD_MODNAME,
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
- .shutdown = remove_one,
+ .shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
.sriov_configure = cxgb4_iov_configure,
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
new file mode 100644
index 000000000000..49d2debb334e
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -0,0 +1,483 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "cxgb4.h"
+#include "cxgb4_tc_u32_parse.h"
+#include "cxgb4_tc_u32.h"
+
+/* Fill ch_filter_specification with parsed match value/mask pair. */
+static int fill_match_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls,
+ const struct cxgb4_match_field *entry,
+ bool next_header)
+{
+ unsigned int i, j;
+ u32 val, mask;
+ int off, err;
+ bool found;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ mask = cls->knode.sel->keys[i].mask;
+
+ if (next_header) {
+ /* For next headers, parse only keys with offmask */
+ if (!cls->knode.sel->keys[i].offmask)
+ continue;
+ } else {
+ /* For the remaining, parse only keys without offmask */
+ if (cls->knode.sel->keys[i].offmask)
+ continue;
+ }
+
+ found = false;
+
+ for (j = 0; entry[j].val; j++) {
+ if (off == entry[j].off) {
+ found = true;
+ err = entry[j].val(fs, val, mask);
+ if (err)
+ return err;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Fill ch_filter_specification with parsed action. */
+static int fill_action_fields(struct adapter *adap,
+ struct ch_filter_specification *fs,
+ struct tc_cls_u32_offload *cls)
+{
+ unsigned int num_actions = 0;
+ const struct tc_action *a;
+ struct tcf_exts *exts;
+ LIST_HEAD(actions);
+
+ exts = cls->knode.exts;
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ /* Don't allow more than one action per rule. */
+ if (num_actions)
+ return -EINVAL;
+
+ /* Drop in hardware. */
+ if (is_tcf_gact_shot(a)) {
+ fs->action = FILTER_DROP;
+ num_actions++;
+ continue;
+ }
+
+ /* Re-direct to specified port in hardware. */
+ if (is_tcf_mirred_redirect(a)) {
+ struct net_device *n_dev;
+ unsigned int i, index;
+ bool found = false;
+
+ index = tcf_mirred_ifindex(a);
+ for_each_port(adap, i) {
+ n_dev = adap->port[i];
+ if (index == n_dev->ifindex) {
+ fs->action = FILTER_SWITCH;
+ fs->eport = i;
+ found = true;
+ break;
+ }
+ }
+
+ /* Interface doesn't belong to any port of
+ * the underlying hardware.
+ */
+ if (!found)
+ return -EINVAL;
+
+ num_actions++;
+ continue;
+ }
+
+ /* Un-supported action. */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ const struct cxgb4_match_field *start, *link_start = NULL;
+ struct adapter *adapter = netdev2adap(dev);
+ struct ch_filter_specification fs;
+ struct cxgb4_tc_u32_table *t;
+ struct cxgb4_link *link;
+ unsigned int filter_id;
+ u32 uhtid, link_uhtid;
+ bool is_ipv6 = false;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to insert the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for insertion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+ link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+ * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Ensure link handle uhtid is sane, if specified. */
+ if (link_uhtid >= t->size)
+ return -EINVAL;
+
+ memset(&fs, 0, sizeof(fs));
+
+ if (protocol == htons(ETH_P_IPV6)) {
+ start = cxgb4_ipv6_fields;
+ is_ipv6 = true;
+ } else {
+ start = cxgb4_ipv4_fields;
+ is_ipv6 = false;
+ }
+
+ if (uhtid != 0x800) {
+ /* Link must exist from root node before insertion. */
+ if (!t->table[uhtid - 1].link_handle)
+ return -EINVAL;
+
+ /* Link must have a valid supported next header. */
+ link_start = t->table[uhtid - 1].match_field;
+ if (!link_start)
+ return -EINVAL;
+ }
+
+ /* Parse links and record them for subsequent jumps to valid
+ * next headers.
+ */
+ if (link_uhtid) {
+ const struct cxgb4_next_header *next;
+ bool found = false;
+ unsigned int i, j;
+ u32 val, mask;
+ int off;
+
+ if (t->table[link_uhtid - 1].link_handle) {
+ dev_err(adapter->pdev_dev,
+ "Link handle exists for: 0x%x\n",
+ link_uhtid);
+ return -EINVAL;
+ }
+
+ next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
+
+ /* Try to find matches that allow jumps to next header. */
+ for (i = 0; next[i].jump; i++) {
+ if (next[i].offoff != cls->knode.sel->offoff ||
+ next[i].shift != cls->knode.sel->offshift ||
+ next[i].mask != cls->knode.sel->offmask ||
+ next[i].offset != cls->knode.sel->off)
+ continue;
+
+ /* Found a possible candidate. Find a key that
+ * matches the corresponding offset, value, and
+ * mask to jump to next header.
+ */
+ for (j = 0; j < cls->knode.sel->nkeys; j++) {
+ off = cls->knode.sel->keys[j].off;
+ val = cls->knode.sel->keys[j].val;
+ mask = cls->knode.sel->keys[j].mask;
+
+ if (next[i].match_off == off &&
+ next[i].match_val == val &&
+ next[i].match_mask == mask) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue; /* Try next candidate. */
+
+ /* Candidate to jump to next header found.
+ * Translate all keys to internal specification
+ * and store them in jump table. This spec is copied
+ * later to set the actual filters.
+ */
+ ret = fill_match_fields(adapter, &fs, cls,
+ start, false);
+ if (ret)
+ goto out;
+
+ link = &t->table[link_uhtid - 1];
+ link->match_field = next[i].jump;
+ link->link_handle = cls->knode.handle;
+ memcpy(&link->fs, &fs, sizeof(fs));
+ break;
+ }
+
+ /* No candidate found to jump to next header. */
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ /* Fill ch_filter_specification match fields to be shipped to hardware.
+ * Copy the linked spec (if any) first. And then update the spec as
+ * needed.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
+ /* Copy linked ch_filter_specification */
+ memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
+ ret = fill_match_fields(adapter, &fs, cls,
+ link_start, true);
+ if (ret)
+ goto out;
+ }
+
+ ret = fill_match_fields(adapter, &fs, cls, start, false);
+ if (ret)
+ goto out;
+
+ /* Fill ch_filter_specification action fields to be shipped to
+ * hardware.
+ */
+ ret = fill_action_fields(adapter, &fs, cls);
+ if (ret)
+ goto out;
+
+ /* The filter spec has been completely built from the info
+ * provided from u32. We now set some default fields in the
+ * spec for sanity.
+ */
+
+ /* Match only packets coming from the ingress port where this
+ * filter will be created.
+ */
+ fs.val.iport = netdev2pinfo(dev)->port_id;
+ fs.mask.iport = ~0;
+
+ /* Enable filter hit counts. */
+ fs.hitcnts = 1;
+
+ /* Set type of filter - IPv6 or IPv4 */
+ fs.type = is_ipv6 ? 1 : 0;
+
+ /* Set the filter */
+ ret = cxgb4_set_filter(dev, filter_id, &fs);
+ if (ret)
+ goto out;
+
+ /* If this is a linked bucket, then set the corresponding
+ * entry in the bitmap to mark it as belonging to this linked
+ * bucket.
+ */
+ if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
+ set_bit(filter_id, t->table[uhtid - 1].tid_map);
+
+out:
+ return ret;
+}
+
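cxgb4_config_knode() above splits the 32-bit u32 handle two ways: the low 20 bits locate the filter, and TC_U32_USERHTID() pulls the user hash-table id from the top 12 bits, with 0x800 denoting the root table. Decoding a pair of sample handles, with the macros copied from net/pkt_cls.h:

#include <stdio.h>

#define TC_U32_HTID(h)     ((h) & 0xFFF00000)
#define TC_U32_USERHTID(h) (TC_U32_HTID(h) >> 20)

int main(void)
{
	unsigned int handles[] = { 0x80000007, 0x00100003 };
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int h = handles[i];

		/* Low 20 bits: where the filter lands in the table. */
		printf("handle 0x%08x: uhtid 0x%03x filter_id %u%s\n",
		       h, TC_U32_USERHTID(h), h & 0xFFFFF,
		       TC_U32_USERHTID(h) == 0x800 ? " (root)" : " (link)");
	}
	return 0;
}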
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls)
+{
+ struct adapter *adapter = netdev2adap(dev);
+ unsigned int filter_id, max_tids, i, j;
+ struct cxgb4_link *link = NULL;
+ struct cxgb4_tc_u32_table *t;
+ u32 handle, uhtid;
+ int ret;
+
+ if (!can_tc_u32_offload(dev))
+ return -EOPNOTSUPP;
+
+ /* Fetch the location to delete the filter. */
+ filter_id = cls->knode.handle & 0xFFFFF;
+
+ if (filter_id > adapter->tids.nftids) {
+ dev_err(adapter->pdev_dev,
+ "Location %d out of range for deletion. Max: %d\n",
+ filter_id, adapter->tids.nftids);
+ return -ERANGE;
+ }
+
+ t = adapter->tc_u32;
+ handle = cls->knode.handle;
+ uhtid = TC_U32_USERHTID(cls->knode.handle);
+
+ /* Ensure that uhtid is either root u32 (i.e. 0x800)
+ * or a valid linked bucket.
+ */
+ if (uhtid != 0x800 && uhtid >= t->size)
+ return -EINVAL;
+
+ /* Delete the specified filter */
+ if (uhtid != 0x800) {
+ link = &t->table[uhtid - 1];
+ if (!link->link_handle)
+ return -EINVAL;
+
+ if (!test_bit(filter_id, link->tid_map))
+ return -EINVAL;
+ }
+
+ ret = cxgb4_del_filter(dev, filter_id);
+ if (ret)
+ goto out;
+
+ if (link)
+ clear_bit(filter_id, link->tid_map);
+
+ /* If a link is being deleted, then delete all filters
+ * associated with the link.
+ */
+ max_tids = adapter->tids.nftids;
+ for (i = 0; i < t->size; i++) {
+ link = &t->table[i];
+
+ if (link->link_handle == handle) {
+ for (j = 0; j < max_tids; j++) {
+ if (!test_bit(j, link->tid_map))
+ continue;
+
+ ret = __cxgb4_del_filter(dev, j, NULL);
+ if (ret)
+ goto out;
+
+ clear_bit(j, link->tid_map);
+ }
+
+ /* Clear the link state */
+ link->match_field = NULL;
+ link->link_handle = 0;
+ memset(&link->fs, 0, sizeof(link->fs));
+ break;
+ }
+ }
+
+out:
+ return ret;
+}
+
+void cxgb4_cleanup_tc_u32(struct adapter *adap)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!adap->tc_u32)
+ return;
+
+ /* Free up all allocated memory. */
+ t = adap->tc_u32;
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ t4_free_mem(link->tid_map);
+ }
+ t4_free_mem(adap->tc_u32);
+}
+
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size)
+{
+ struct cxgb4_tc_u32_table *t;
+ unsigned int i;
+
+ if (!size)
+ return NULL;
+
+ t = t4_alloc_mem(sizeof(*t) +
+ (size * sizeof(struct cxgb4_link)));
+ if (!t)
+ return NULL;
+
+ t->size = size;
+
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+ unsigned int bmap_size;
+ unsigned int max_tids;
+
+ max_tids = adap->tids.nftids;
+ bmap_size = BITS_TO_LONGS(max_tids);
+ link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
+ if (!link->tid_map)
+ goto out_no_mem;
+ bitmap_zero(link->tid_map, max_tids);
+ }
+
+ return t;
+
+out_no_mem:
+ for (i = 0; i < t->size; i++) {
+ struct cxgb4_link *link = &t->table[i];
+
+ if (link->tid_map)
+ t4_free_mem(link->tid_map);
+ }
+
+ if (t)
+ t4_free_mem(t);
+
+ return NULL;
+}
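cxgb4_init_tc_u32() allocates one tid bitmap per possible link and, on any failure, walks the whole table to free whatever was already set up before dropping the table itself. The same unwind shape in a standalone toy, with calloc()/free() standing in for t4_alloc_mem()/t4_free_mem():

#include <stdlib.h>

struct link { unsigned long *tid_map; };

struct table {
	unsigned int size;
	struct link table[];	/* flexible array, like cxgb4_link[] */
};

static struct table *init_table(unsigned int size, size_t bmap_longs)
{
	struct table *t;
	unsigned int i;

	t = calloc(1, sizeof(*t) + size * sizeof(struct link));
	if (!t)
		return NULL;
	t->size = size;

	for (i = 0; i < size; i++) {
		t->table[i].tid_map = calloc(bmap_longs, sizeof(long));
		if (!t->table[i].tid_map)
			goto out_no_mem;
	}
	return t;

out_no_mem:
	/* Release only what was actually allocated, then the table;
	 * entries never reached are still NULL and free(NULL) is a no-op.
	 */
	for (i = 0; i < size; i++)
		free(t->table[i].tid_map);
	free(t);
	return NULL;
}

int main(void)
{
	struct table *t = init_table(32, 8);

	if (t) {
		unsigned int i;

		for (i = 0; i < t->size; i++)
			free(t->table[i].tid_map);
		free(t);
	}
	return 0;
}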
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
new file mode 100644
index 000000000000..6bdc885eff22
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
@@ -0,0 +1,57 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_H
+#define __CXGB4_TC_U32_H
+
+#include <net/pkt_cls.h>
+
+#define CXGB4_MAX_LINK_HANDLE 32
+
+static inline bool can_tc_u32_offload(struct net_device *dev)
+{
+ struct adapter *adap = netdev2adap(dev);
+
+ return (dev->features & NETIF_F_HW_TC) && adap->tc_u32;
+}
+
+int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
+ struct tc_cls_u32_offload *cls);
+
+void cxgb4_cleanup_tc_u32(struct adapter *adapter);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
+ unsigned int size);
+#endif /* __CXGB4_TC_U32_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
new file mode 100644
index 000000000000..a4b99edcc339
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -0,0 +1,294 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_TC_U32_PARSE_H
+#define __CXGB4_TC_U32_PARSE_H
+
+struct cxgb4_match_field {
+ int off; /* Offset from the beginning of the header to match */
+ /* Fill the value/mask pair in the spec if matched */
+ int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+};
+
+/* IPv4 match fields */
+static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ u32 mask_val;
+ u8 frag_val;
+
+ frag_val = (ntohl(val) >> 13) & 0x00000007;
+ mask_val = ntohl(mask) & 0x0000FFFF;
+
+ if (frag_val == 0x1 && mask_val != 0x3FFF) { /* MF set */
+ f->val.frag = 1;
+ f->mask.frag = 1;
+ } else if (frag_val == 0x2 && mask_val != 0x3FFF) { /* DF set */
+ f->val.frag = 0;
+ f->mask.frag = 1;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv4_tos },
+ { .off = 4, .val = cxgb4_fill_ipv4_frag },
+ { .off = 8, .val = cxgb4_fill_ipv4_proto },
+ { .off = 12, .val = cxgb4_fill_ipv4_src_ip },
+ { .off = 16, .val = cxgb4_fill_ipv4_dst_ip },
+ { .val = NULL }
+};
+
+/* IPv6 match fields */
+static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
+ f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
+ f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[0], &val, sizeof(u32));
+ memcpy(&f->mask.fip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[4], &val, sizeof(u32));
+ memcpy(&f->mask.fip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[8], &val, sizeof(u32));
+ memcpy(&f->mask.fip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.fip[12], &val, sizeof(u32));
+ memcpy(&f->mask.fip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[0], &val, sizeof(u32));
+ memcpy(&f->mask.lip[0], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[4], &val, sizeof(u32));
+ memcpy(&f->mask.lip[4], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[8], &val, sizeof(u32));
+ memcpy(&f->mask.lip[8], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ memcpy(&f->val.lip[12], &val, sizeof(u32));
+ memcpy(&f->mask.lip[12], &mask, sizeof(u32));
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
+ { .off = 0, .val = cxgb4_fill_ipv6_tos },
+ { .off = 4, .val = cxgb4_fill_ipv6_proto },
+ { .off = 8, .val = cxgb4_fill_ipv6_src_ip0 },
+ { .off = 12, .val = cxgb4_fill_ipv6_src_ip1 },
+ { .off = 16, .val = cxgb4_fill_ipv6_src_ip2 },
+ { .off = 20, .val = cxgb4_fill_ipv6_src_ip3 },
+ { .off = 24, .val = cxgb4_fill_ipv6_dst_ip0 },
+ { .off = 28, .val = cxgb4_fill_ipv6_dst_ip1 },
+ { .off = 32, .val = cxgb4_fill_ipv6_dst_ip2 },
+ { .off = 36, .val = cxgb4_fill_ipv6_dst_ip3 },
+ { .val = NULL }
+};
+
+/* TCP/UDP match */
+static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
+ u32 val, u32 mask)
+{
+ f->val.fport = ntohl(val) >> 16;
+ f->mask.fport = ntohl(mask) >> 16;
+ f->val.lport = ntohl(val) & 0x0000FFFF;
+ f->mask.lport = ntohl(mask) & 0x0000FFFF;
+
+ return 0;
+}
+
+static const struct cxgb4_match_field cxgb4_tcp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+static const struct cxgb4_match_field cxgb4_udp_fields[] = {
+ { .off = 0, .val = cxgb4_fill_l4_ports },
+ { .val = NULL }
+};
+
+struct cxgb4_next_header {
+ unsigned int offset; /* Offset to next header */
+ /* offset, shift, and mask used to read a header field
+ * whose value is added to the offset above to reach the
+ * next header. Useful when a field such as the IHL in an
+ * IPv4 header determines where the next header starts.
+ */
+ unsigned int offoff;
+ u32 shift;
+ u32 mask;
+ /* match criteria to make this jump */
+ unsigned int match_off;
+ u32 match_val;
+ u32 match_mask;
+ /* location of jump to make */
+ const struct cxgb4_match_field *jump;
+};
+
+/* Accept a rule with a jump to the transport-layer header based on the
+ * IHL field in the IPv4 header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
+ .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
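To make the IHL-based jump concrete, a worked example (illustrative only, assuming <linux/ip.h>; not part of the patch): the IPv4 IHL field counts 32-bit words, so for a plain header with no options (IHL = 5) the transport header starts 20 bytes in, which is the byte count the shift/mask pair above recovers from the first word of the header.

/* Illustrative only: byte offset of the L4 header for a given IPv4
 * header; IHL is in 32-bit words, so IHL = 5 gives 20 bytes.
 */
static inline unsigned int ipv4_l4_offset(const struct iphdr *iph)
{
	return iph->ihl * 4;
}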
+
+/* Accept a rule with a jump directly past the 40 bytes of the fixed IPv6
+ * header to reach the transport-layer header.
+ */
+static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
+ .jump = cxgb4_tcp_fields },
+ { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
+ .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
+ .jump = cxgb4_udp_fields },
+ { .jump = NULL }
+};
+
+struct cxgb4_link {
+ const struct cxgb4_match_field *match_field; /* Next header */
+ struct ch_filter_specification fs; /* Match spec associated with link */
+ u32 link_handle; /* Knode handle associated with the link */
+ unsigned long *tid_map; /* Bitmap for filter tids */
+};
+
+struct cxgb4_tc_u32_table {
+ unsigned int size; /* number of entries in table */
+ struct cxgb4_link table[0]; /* Jump table */
+};
+#endif /* __CXGB4_TC_U32_PARSE_H */
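The match-field tables above are NULL-terminated, so a consumer can walk a table until it finds the entry whose offset matches a u32 key and let the callback fill the value/mask pair in the filter spec. A minimal sketch of that walk (fill_match_field is a hypothetical helper, not a symbol from this patch):

static int fill_match_field(const struct cxgb4_match_field *fields,
			    struct ch_filter_specification *fs,
			    int off, u32 val, u32 mask)
{
	int i;

	for (i = 0; fields[i].val; i++)	/* .val == NULL terminates */
		if (fields[i].off == off)
			return fields[i].val(fs, val, mask);

	return -EOPNOTSUPP;	/* offset the hardware cannot match */
}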
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 5d402bace6c1..f13b593d7c8b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -82,6 +82,24 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
spin_unlock_irqrestore(&bmap->lock, flags);
}
+/* Flush the aggregated LRO sessions */
+static void uldrx_flush_handler(struct sge_rspq *q)
+{
+ struct adapter *adap = q->adap;
+
+ if (adap->uld[q->uld].lro_flush)
+ adap->uld[q->uld].lro_flush(&q->lro_mgr);
+}
+
+/**
+ * uldrx_handler - response queue handler for ULD queues
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the offload message
+ * @gl: the gather list of packet fragments
+ *
+ * Deliver an ingress offload packet to a ULD. All processing is done by
+ * the ULD; we just maintain statistics.
+ */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
@@ -124,8 +142,8 @@ static int alloc_uld_rxqs(struct adapter *adap,
struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
unsigned short *ids = rxq_info->rspq_id + offset;
unsigned int per_chan = nq / adap->params.nports;
- unsigned int msi_idx, bmap_idx;
- int i, err;
+ unsigned int bmap_idx = 0;
+ int i, err, msi_idx;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -135,14 +153,14 @@ static int alloc_uld_rxqs(struct adapter *adap,
for (i = 0; i < nq; i++, q++) {
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
- adap->msi_idx++;
+ msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
adap->port[i / per_chan],
- adap->msi_idx,
+ msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
- NULL,
+ lro ? uldrx_flush_handler : NULL,
0);
if (err)
goto freeout;
@@ -159,7 +177,6 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
/* We need to free rxq also in case of ciq allocation failure */
@@ -169,7 +186,6 @@ freeout:
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
}
return err;
@@ -178,17 +194,38 @@ freeout:
int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int i, ret = 0;
if (adap->flags & USING_MSIX) {
- rxq_info->msix_tbl = kzalloc(rxq_info->nrxq + rxq_info->nciq,
+ rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+ sizeof(unsigned short),
GFP_KERNEL);
if (!rxq_info->msix_tbl)
return -ENOMEM;
}
- return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
!alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
rxq_info->nrxq, lro));
+
+ /* Tell uP to route control queue completions to rdma rspq */
+ if (adap->flags & FULL_INIT_DONE &&
+ !ret && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ unsigned int cmplqid;
+ u32 param, cmdop;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ ret = t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+ return ret;
}
static void t4_free_uld_rxqs(struct adapter *adap, int n,
@@ -198,7 +235,6 @@ static void t4_free_uld_rxqs(struct adapter *adap, int n,
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
- adap->msi_idx--;
}
}
@@ -206,6 +242,21 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ u32 param, cmdop, cmplqid = 0;
+ int i;
+
+ cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
+ for_each_port(adap, i) {
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(cmdop) |
+ FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
+ t4_set_params(adap, adap->mbox, adap->pf,
+ 0, 1, &param, &cmplqid);
+ }
+ }
+
if (rxq_info->nciq)
t4_free_uld_rxqs(adap, rxq_info->nciq,
rxq_info->uldrxq + rxq_info->nrxq);
@@ -215,26 +266,38 @@ void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
}
int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
- const struct cxgb4_pci_uld_info *uld_info)
+ const struct cxgb4_uld_info *uld_info)
{
struct sge *s = &adap->sge;
struct sge_uld_rxq_info *rxq_info;
- int i, nrxq;
+ int i, nrxq, ciq_size;
rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
if (!rxq_info)
return -ENOMEM;
- if (uld_info->nrxq > s->nqs_per_uld)
- rxq_info->nrxq = s->nqs_per_uld;
- else
- rxq_info->nrxq = uld_info->nrxq;
- if (!uld_info->nciq)
+ if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ i = s->nqs_per_uld;
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ } else {
+ i = min_t(int, uld_info->nrxq,
+ num_online_cpus());
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ }
+ if (!uld_info->ciq) {
rxq_info->nciq = 0;
- else if (uld_info->nciq && uld_info->nciq > s->nqs_per_uld)
- rxq_info->nciq = s->nqs_per_uld;
- else
- rxq_info->nciq = uld_info->nciq;
+ } else {
+ if (adap->flags & USING_MSIX)
+ rxq_info->nciq = min_t(int, s->nqs_per_uld,
+ num_online_cpus());
+ else
+ rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
+ num_online_cpus());
+ rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
+ adap->params.nports);
+ rxq_info->nciq = max_t(int, rxq_info->nciq,
+ adap->params.nports);
+ }
nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
@@ -245,7 +308,7 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
}
rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
- if (!rxq_info->uldrxq) {
+ if (!rxq_info->rspq_id) {
kfree(rxq_info->uldrxq);
kfree(rxq_info);
return -ENOMEM;
@@ -259,12 +322,17 @@ int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
r->fl.size = 72;
}
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
for (i = rxq_info->nrxq; i < nrxq; i++) {
struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
- init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
r->rspq.uld = uld_type;
- r->fl.size = 72;
}
memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
@@ -285,7 +353,8 @@ void free_queues_uld(struct adapter *adap, unsigned int uld_type)
int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx, bmap_idx, err = 0;
+ int err = 0;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
bmap_idx = rxq_info->msix_tbl[idx];
@@ -298,7 +367,7 @@ int request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
}
return 0;
unwind:
- while (--idx >= 0) {
+ while (idx-- > 0) {
bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -310,10 +379,10 @@ unwind:
void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
- int idx;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+ bmap_idx = rxq_info->msix_tbl[idx];
free_msix_idx_in_bmap(adap, bmap_idx);
free_irq(adap->msix_info_ulds[bmap_idx].vec,
@@ -325,10 +394,10 @@ void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
int n = sizeof(adap->msix_info_ulds[0].desc);
- int idx;
+ unsigned int idx, bmap_idx;
for_each_uldrxq(rxq_info, idx) {
- unsigned int bmap_idx = rxq_info->msix_tbl[idx];
+ bmap_idx = rxq_info->msix_tbl[idx];
snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d",
adap->port[0]->name, rxq_info->name, idx);
@@ -390,15 +459,15 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
lli->nciq = rxq_info->nciq;
}
-int uld_mem_alloc(struct adapter *adap)
+int t4_uld_mem_alloc(struct adapter *adap)
{
struct sge *s = &adap->sge;
- adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL);
+ adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
if (!adap->uld)
return -ENOMEM;
- s->uld_rxq_info = kzalloc(adap->num_uld *
+ s->uld_rxq_info = kzalloc(CXGB4_ULD_MAX *
sizeof(struct sge_uld_rxq_info *),
GFP_KERNEL);
if (!s->uld_rxq_info)
@@ -410,7 +479,7 @@ err_uld:
return -ENOMEM;
}
-void uld_mem_free(struct adapter *adap)
+void t4_uld_mem_free(struct adapter *adap)
{
struct sge *s = &adap->sge;
@@ -418,6 +487,26 @@ void uld_mem_free(struct adapter *adap)
kfree(adap->uld);
}
+void t4_uld_clean_up(struct adapter *adap)
+{
+ struct sge_uld_rxq_info *rxq_info;
+ unsigned int i;
+
+ if (!adap->uld)
+ return;
+ for (i = 0; i < CXGB4_ULD_MAX; i++) {
+ if (!adap->uld[i].handle)
+ continue;
+ rxq_info = adap->sge.uld_rxq_info[i];
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, i);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, i);
+ free_sge_queues_uld(adap, i);
+ free_queues_uld(adap, i);
+ }
+}
+
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
int i;
@@ -429,10 +518,15 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.iscsiqsets;
+ lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
+ lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
+ lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
+ lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
+ lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
+ lld->iscsi_ppm = &adap->iscsi_ppm;
lld->adapter_type = adap->params.chip;
lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
lld->udb_density = 1 << adap->params.sge.eq_qpp;
@@ -472,23 +566,37 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
}
adap->uld[uld].handle = handle;
+ t4_register_netevent_notifier();
if (adap->flags & FULL_INIT_DONE)
adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
}
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
- struct cxgb4_pci_uld_info *p)
+/**
+ * cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
+ *
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type. Returns
+ * %-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type,
+ const struct cxgb4_uld_info *p)
{
int ret = 0;
+ unsigned int adap_idx = 0;
struct adapter *adap;
- if (type >= CXGB4_PCI_ULD_MAX)
+ if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
- if (!is_pci_uld(adap))
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
ret = cfg_queues_uld(adap, type, p);
if (ret)
@@ -510,11 +618,14 @@ int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
}
adap->uld[type] = *p;
uld_attach(adap, type);
+ adap_idx++;
}
mutex_unlock(&uld_mutex);
return 0;
free_irq:
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_rxq:
@@ -522,21 +633,49 @@ free_rxq:
free_queues:
free_queues_uld(adap, type);
out:
+
+ list_for_each_entry(adap, &adapter_list, list_node) {
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
+ continue;
+ if (!adap_idx)
+ break;
+ adap->uld[type].handle = NULL;
+ adap->uld[type].add = NULL;
+ if (adap->flags & FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+ if (adap->flags & USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_sge_queues_uld(adap, type);
+ free_queues_uld(adap, type);
+ adap_idx--;
+ }
mutex_unlock(&uld_mutex);
return ret;
}
-EXPORT_SYMBOL(cxgb4_register_pci_uld);
+EXPORT_SYMBOL(cxgb4_register_uld);
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
+/**
+ * cxgb4_unregister_uld - unregister an upper-layer driver
+ * @type: the ULD type
+ *
+ * Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
{
struct adapter *adap;
- if (type >= CXGB4_PCI_ULD_MAX)
+ if (type >= CXGB4_ULD_MAX)
return -EINVAL;
mutex_lock(&uld_mutex);
list_for_each_entry(adap, &adapter_list, list_node) {
- if (!is_pci_uld(adap))
+ if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
+ (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
+ continue;
+ if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
@@ -551,4 +690,4 @@ int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type)
return 0;
}
-EXPORT_SYMBOL(cxgb4_unregister_pci_uld);
+EXPORT_SYMBOL(cxgb4_unregister_uld);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index ab4037222f8d..47bd14f602db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -1,7 +1,7 @@
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -42,6 +42,8 @@
#include <linux/atomic.h>
#include "cxgb4.h"
+#define MAX_ULD_QSETS 16
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
@@ -104,6 +106,7 @@ struct tid_info {
unsigned int atid_base;
struct filter_entry *ftid_tab;
+ unsigned long *ftid_bmap;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -124,6 +127,8 @@ struct tid_info {
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
+ /* lock for setting/clearing filter bitmap */
+ spinlock_t ftid_lock;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -183,15 +188,38 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
+/* Filter operation context to allow callers of cxgb4_set_filter() and
+ * cxgb4_del_filter() to wait for an asynchronous completion.
+ */
+struct filter_ctx {
+ struct completion completion; /* completion rendezvous */
+ void *closure; /* caller's opaque information */
+ int result; /* result of operation */
+ u32 tid; /* to store tid */
+};
+
+struct ch_filter_specification;
+
+int __cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int __cxgb4_del_filter(struct net_device *dev, int filter_id,
+ struct filter_ctx *ctx);
+int cxgb4_set_filter(struct net_device *dev, int filter_id,
+ struct ch_filter_specification *fs);
+int cxgb4_del_filter(struct net_device *dev, int filter_id);
+
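A minimal sketch of how a caller might pair filter_ctx with the asynchronous API above, assuming the driver completes ctx.completion from its filter-reply path (dev, filter_id, and fs are elided setup; error handling omitted):

	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, filter_id, &fs, &ctx);
	if (!ret) {
		wait_for_completion(&ctx.completion);
		ret = ctx.result;	/* outcome reported in the reply */
	}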
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
}
enum cxgb4_uld {
+ CXGB4_ULD_INIT,
CXGB4_ULD_RDMA,
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
+ CXGB4_ULD_CRYPTO,
CXGB4_ULD_MAX
};
@@ -284,31 +312,11 @@ struct cxgb4_lld_info {
struct cxgb4_uld_info {
const char *name;
- void *(*add)(const struct cxgb4_lld_info *p);
- int (*rx_handler)(void *handle, const __be64 *rsp,
- const struct pkt_gl *gl);
- int (*state_change)(void *handle, enum cxgb4_state new_state);
- int (*control)(void *handle, enum cxgb4_control control, ...);
- int (*lro_rx_handler)(void *handle, const __be64 *rsp,
- const struct pkt_gl *gl,
- struct t4_lro_mgr *lro_mgr,
- struct napi_struct *napi);
- void (*lro_flush)(struct t4_lro_mgr *);
-};
-
-enum cxgb4_pci_uld {
- CXGB4_PCI_ULD1,
- CXGB4_PCI_ULD_MAX
-};
-
-struct cxgb4_pci_uld_info {
- const char *name;
- bool lro;
void *handle;
unsigned int nrxq;
- unsigned int nciq;
unsigned int rxq_size;
- unsigned int ciq_size;
+ bool ciq;
+ bool lro;
void *(*add)(const struct cxgb4_lld_info *p);
int (*rx_handler)(void *handle, const __be64 *rsp,
const struct pkt_gl *gl);
@@ -323,9 +331,6 @@ struct cxgb4_pci_uld_info {
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
int cxgb4_unregister_uld(enum cxgb4_uld type);
-int cxgb4_register_pci_uld(enum cxgb4_pci_uld type,
- struct cxgb4_pci_uld_info *p);
-int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type);
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo);
unsigned int cxgb4_port_chan(const struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 9a607dbc6ca8..1e74fd6085df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2860,6 +2860,18 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
return 0;
}
+int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
+ unsigned int cmplqid)
+{
+ u32 param, val;
+
+ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
+ FW_PARAMS_PARAM_YZ_V(eqid));
+ val = cmplqid;
+ return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
+}
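This helper wraps the same FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL parameter write that setup_sge_queues_uld() and free_sge_queues_uld() issue inline when binding (or unbinding) control-queue completions to the RDMA response queue.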
+
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
struct net_device *dev, unsigned int iqid)
{
@@ -3014,12 +3026,6 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- /* clean up RDMA and iSCSI Rx queues */
- t4_free_ofld_rxqs(adap, adap->sge.iscsiqsets, adap->sge.iscsirxq);
- t4_free_ofld_rxqs(adap, adap->sge.niscsitq, adap->sge.iscsitrxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
- t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
-
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 15be54324e3d..20dec85da63d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
lc = &pi->link_cfg;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ffe4bf4b96da..4b58b32105f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2267,6 +2267,12 @@ enum fw_port_cap {
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define FW_PORT_CAP_SPEED_S 0
+#define FW_PORT_CAP_SPEED_M 0x3f
+#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+ (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_UNCHANGED,
FW_PORT_CAP_MDI_AUTO,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index c8fd4f8fe1fa..f3ed9ce99e5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1648,14 +1648,15 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
if (csum_ok && !pkt->err_vec &&
(be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
- if (!pkt->ip_frag)
+ if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else {
+ rxq->stats.rx_cso++;
+ } else if (pkt->l2info & htonl(RXF_IP_F)) {
__sum16 c = (__force __sum16)pkt->csum;
skb->csum = csum_unfold(c);
skb->ip_summed = CHECKSUM_COMPLETE;
+ rxq->stats.rx_cso++;
}
- rxq->stats.rx_cso++;
} else
skb_checksum_none_assert(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8067424ad4a8..b3903fe411aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -108,8 +108,8 @@ struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
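For illustration (not from the patch), the new speed accessors make the high-speed test easy to trace by hand:

	/* Illustrative only: caps advertising 1G and 10G */
	unsigned int supported = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G;
	unsigned int speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(supported));
	unsigned int high = speeds & ~(FW_PORT_CAP_SPEED_100M |
				       FW_PORT_CAP_SPEED_1G);
	/* high == FW_PORT_CAP_SPEED_10G, so is_x_10g_port() returns true */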
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 879f4c52b3d5..e98248f00fef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+ FW_PORT_CAP_ANEG)
/**
* init_link_config - initialize a link's SW state
@@ -1716,8 +1717,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index 2362230ef4fe..2534e30a1560 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
-libcxgb-y := libcxgb_ppm.o
+libcxgb-y := libcxgb_ppm.o libcxgb_cm.o
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
new file mode 100644
index 000000000000..0f0de5b63622
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+
+#include "libcxgb_cm.h"
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type,
+ int *iptype, __u8 *local_ip, __u8 *peer_ip,
+ __be16 *local_port, __be16 *peer_port)
+{
+ int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+ ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+ T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ?
+ IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) :
+ T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ if (ip->version == 4) {
+ pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+ __func__, ntohl(ip->saddr), ntohl(ip->daddr),
+ ntohs(tcp->source), ntohs(tcp->dest));
+ *iptype = 4;
+ memcpy(peer_ip, &ip->saddr, 4);
+ memcpy(local_ip, &ip->daddr, 4);
+ } else {
+ pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+ __func__, ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+ ntohs(tcp->source), ntohs(tcp->dest));
+ *iptype = 6;
+ memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+ memcpy(local_ip, ip6->daddr.s6_addr, 16);
+ }
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+}
+EXPORT_SYMBOL(cxgb_get_4tuple);
+
+static bool
+cxgb_our_interface(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ struct net_device *egress_dev)
+{
+ int i;
+
+ egress_dev = get_real_dev(egress_dev);
+ for (i = 0; i < lldi->nports; i++)
+ if (lldi->ports[i] == egress_dev)
+ return true;
+ return false;
+}
+
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ __be32 local_ip, __be32 peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct neighbour *n;
+
+ rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
+ peer_port, local_port, IPPROTO_TCP,
+ tos, 0);
+ if (IS_ERR(rt))
+ return NULL;
+ n = dst_neigh_lookup(&rt->dst, &peer_ip);
+ if (!n)
+ return NULL;
+ if (!cxgb_our_interface(lldi, get_real_dev, n->dev) &&
+ !(n->dev->flags & IFF_LOOPBACK)) {
+ neigh_release(n);
+ dst_release(&rt->dst);
+ return NULL;
+ }
+ neigh_release(n);
+ return &rt->dst;
+}
+EXPORT_SYMBOL(cxgb_find_route);
+
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *lldi,
+ struct net_device *(*get_real_dev)(struct net_device *),
+ __u8 *local_ip, __u8 *peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos, __u32 sin6_scope_id)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, peer_ip, 16);
+ memcpy(&fl6.saddr, local_ip, 16);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst)
+ goto out;
+ if (!cxgb_our_interface(lldi, get_real_dev,
+ ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ }
+
+out:
+ return dst;
+}
+EXPORT_SYMBOL(cxgb_find_route6);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
new file mode 100644
index 000000000000..515b94ff9080
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIBCXGB_CM_H__
+#define __LIBCXGB_CM_H__
+
+#include <net/tcp.h>
+
+#include <cxgb4.h>
+#include <t4_msg.h>
+#include <l2t.h>
+
+void
+cxgb_get_4tuple(struct cpl_pass_accept_req *, enum chip_type,
+ int *, __u8 *, __u8 *, __be16 *, __be16 *);
+struct dst_entry *
+cxgb_find_route(struct cxgb4_lld_info *,
+ struct net_device *(*)(struct net_device *),
+ __be32, __be32, __be16, __be16, u8);
+struct dst_entry *
+cxgb_find_route6(struct cxgb4_lld_info *,
+ struct net_device *(*)(struct net_device *),
+ __u8 *, __u8 *, __be16, __be16, u8, __u32);
+
+/* Returns whether a CPL status conveys negative advice.
+ */
+static inline bool cxgb_is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
+static inline void
+cxgb_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx, int use_ts, int ipv6)
+{
+ unsigned short hdr_size = (ipv6 ?
+ sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct tcphdr) +
+ (use_ts ?
+ round_up(TCPOLEN_TIMESTAMP, 4) : 0);
+ unsigned short data_size = mtu - hdr_size;
+
+ cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
+static inline u32 cxgb_compute_wscale(u32 win)
+{
+ u32 wscale = 0;
+
+ while (wscale < 14 && (65535 << wscale) < win)
+ wscale++;
+ return wscale;
+}
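For example, a 256 KB receive window needs wscale = 3: 65535 << 2 = 262140 still falls short of 262144, while 65535 << 3 covers it, so the loop stops at 3.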
+
+static inline void
+cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+ struct cpl_tid_release *req;
+
+ req = (struct cpl_tid_release *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+}
+
+static inline void
+cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ void *handle, arp_err_handler_t handler)
+{
+ struct cpl_close_con_req *req;
+
+ req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+ t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ void *handle, arp_err_handler_t handler)
+{
+ struct cpl_abort_req *req;
+
+ req = (struct cpl_abort_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+ t4_set_arp_err_handler(skb, handle, handler);
+}
+
+static inline void
+cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan)
+{
+ struct cpl_abort_rpl *rpl;
+
+ rpl = (struct cpl_abort_rpl *)__skb_put(skb, len);
+ memset(rpl, 0, len);
+
+ INIT_TP_WR(rpl, tid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ set_wr_txq(skb, CPL_PRIORITY_DATA, chan);
+}
+
+static inline void
+cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan,
+ u32 credit_dack)
+{
+ struct cpl_rx_data_ack *req;
+
+ req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid));
+ req->credit_dack = cpu_to_be32(credit_dack);
+ set_wr_txq(skb, CPL_PRIORITY_ACK, chan);
+}
+#endif
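A sketch of how a ULD might use one of the CPL builders above together with cxgb4_ofld_send() (tid, chan, and dev are elided setup; error handling omitted):

	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_tid_release),
					GFP_KERNEL);

	if (skb) {
		cxgb_mk_tid_release(skb, sizeof(struct cpl_tid_release),
				    tid, chan);
		cxgb4_ofld_send(dev, skb);
	}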
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 58c6338a839e..79d80090eac8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -867,7 +867,7 @@ static int netdev_open(struct net_device *dev)
/* Initialize other registers. */
__set_mac_addr(dev);
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 86780b5c40ef..6cfa63a5e9b4 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "11.0.0.0"
+#define DRV_VER "11.1.0.0"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -399,13 +399,13 @@ enum vf_state {
#define BE_FLAGS_PHY_MISCONFIGURED BIT(10)
#define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11)
#define BE_FLAGS_OS2BMC BIT(12)
+#define BE_FLAGS_TRY_RECOVERY BIT(13)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
#define MAX_ERR_RECOVERY_RETRY_COUNT 3
#define ERR_DETECTION_DELAY 1000
-#define ERR_RECOVERY_RETRY_DELAY 30000
/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP 0x1
@@ -512,6 +512,66 @@ struct be_eth_addr {
unsigned char mac[ETH_ALEN];
};
+#define BE_SEC 1000 /* in msec */
+#define BE_MIN (60 * BE_SEC) /* in msec */
+#define BE_HOUR (60 * BE_MIN) /* in msec */
+
+#define ERR_RECOVERY_MAX_RETRY_COUNT 3
+#define ERR_RECOVERY_DETECTION_DELAY BE_SEC
+#define ERR_RECOVERY_RETRY_DELAY (30 * BE_SEC)
+
+/* UE-detection duration in BEx/Skyhawk:
+ * All PFs must wait for this duration after they detect a UE before
+ * reading the SLIPORT_SEMAPHORE register. At the end of this duration,
+ * the firmware guarantees that the register indicates whether the UE
+ * is recoverable.
+ */
+#define ERR_RECOVERY_UE_DETECT_DURATION BE_SEC
+
+/* Initial idle time (in msec) to elapse after driver load,
+ * before UE recovery is allowed.
+ */
+#define ERR_IDLE_HR 24
+#define ERR_RECOVERY_IDLE_TIME (ERR_IDLE_HR * BE_HOUR)
+
+/* Time interval (in msec) after which UE recovery can be repeated */
+#define ERR_INTERVAL_HR 72
+#define ERR_RECOVERY_INTERVAL (ERR_INTERVAL_HR * BE_HOUR)
+
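Taken together, these constants mean BEx/SH UE recovery is first permitted 24 hours after the function is probed and may repeat at most once every 72 hours thereafter.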
+/* BEx/SH UE recovery state machine */
+enum {
+ ERR_RECOVERY_ST_NONE = 0, /* No Recovery */
+ ERR_RECOVERY_ST_DETECT = 1, /* UE detection duration */
+ ERR_RECOVERY_ST_RESET = 2, /* Reset Phase (PF0 only) */
+ ERR_RECOVERY_ST_PRE_POLL = 3, /* Pre-Poll Phase (all PFs) */
+ ERR_RECOVERY_ST_REINIT = 4 /* Re-initialize Phase */
+};
+
+struct be_error_recovery {
+ /* Lancer error recovery variables */
+ u8 recovery_retries;
+
+ /* BEx/Skyhawk error recovery variables */
+ u8 recovery_state;
+ u16 ue_to_reset_time; /* Time after UE, to soft reset
+ * the chip - PF0 only
+ */
+ u16 ue_to_poll_time; /* Time after UE, to Restart Polling
+ * of SLIPORT_SEMAPHORE reg
+ */
+ u16 last_err_code;
+ bool recovery_supported;
+ unsigned long probe_time;
+ unsigned long last_recovery_time;
+
+ /* Common to both Lancer & BEx/SH error recovery */
+ u32 resched_delay;
+ struct delayed_work err_detection_work;
+};
+
+/* Ethtool priv_flags */
+#define BE_DISABLE_TPE_RECOVERY 0x1
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -560,7 +620,6 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
- struct delayed_work be_err_detection_work;
u8 recovery_retries;
u8 err_flags;
bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
@@ -634,6 +693,9 @@ struct be_adapter {
u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
+ u8 dev_mac[ETH_ALEN];
+ u32 priv_flags; /* ethtool get/set_priv_flags() */
+ struct be_error_recovery error_recovery;
};
/* Used for defered FW config cmds. Add fields to this struct as reqd */
@@ -867,6 +929,9 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
+#define be_error_recovering(adapter) \
+ (adapter->flags & BE_FLAGS_TRY_RECOVERY)
+
#define BE_ERROR_EEH 1
#define BE_ERROR_UE BIT(1)
#define BE_ERROR_FW BIT(2)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fa11a5a8c354..9cffe48be156 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -92,6 +92,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_COMMON,
BE_PRIV_DEVCFG | BE_PRIV_VHADM
},
+ {
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG
+ }
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -705,7 +710,7 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
return 0;
}
-static u16 be_POST_stage_get(struct be_adapter *adapter)
+u16 be_POST_stage_get(struct be_adapter *adapter)
{
u32 sem;
@@ -4127,6 +4132,10 @@ int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_get_ext_fat_caps *req;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -4138,7 +4147,7 @@ int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
+ OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES,
cmd->size, wrb, cmd);
req->parameter_type = cpu_to_le32(1);
@@ -4167,7 +4176,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
req = cmd->va;
memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
+ OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES,
cmd->size, wrb, cmd);
status = be_mcc_notify_wait(adapter);
@@ -4373,7 +4382,7 @@ err:
}
/* This routine returns a list of all the NIC PF_nums in the adapter */
-u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
{
struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
struct be_pcie_res_desc *pcie = NULL;
@@ -4525,7 +4534,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4898,8 +4907,9 @@ err:
return status;
}
-int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
- int link_state, int version, u8 domain)
+static int
+__be_cmd_set_logical_link_config(struct be_adapter *adapter,
+ int link_state, int version, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_ll_link *req;
@@ -4954,6 +4964,57 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
1, domain);
return status;
}
+
+int be_cmd_set_features(struct be_adapter *adapter)
+{
+ struct be_cmd_resp_set_features *resp;
+ struct be_cmd_req_set_features *req;
+ struct be_mcc_wrb *wrb;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mcc_lock))
+ return -1;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FEATURES,
+ sizeof(*req), wrb, NULL);
+
+ req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY);
+ req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery));
+ req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK);
+
+ status = be_mcc_notify_wait(adapter);
+ if (status)
+ goto err;
+
+ resp = embedded_payload(wrb);
+
+ adapter->error_recovery.ue_to_poll_time =
+ le16_to_cpu(resp->parameter.resp.ue2rp);
+ adapter->error_recovery.ue_to_reset_time =
+ le16_to_cpu(resp->parameter.resp.ue2sr);
+ adapter->error_recovery.recovery_supported = true;
+err:
+ /* Also check for MCC_STATUS_INVALID_LENGTH on SKH (Skyhawk), as
+ * older firmware versions return this error for this command
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_INVALID_LENGTH)
+ dev_info(&adapter->pdev->dev,
+ "Adapter does not support HW error recovery\n");
+
+ mutex_unlock(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0d6be224a787..1bd82bcb3be5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -58,7 +58,8 @@ enum mcc_base_status {
MCC_STATUS_INSUFFICIENT_BUFFER = 4,
MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
MCC_STATUS_NOT_SUPPORTED = 66,
- MCC_STATUS_FEATURE_NOT_SUPPORTED = 68
+ MCC_STATUS_FEATURE_NOT_SUPPORTED = 68,
+ MCC_STATUS_INVALID_LENGTH = 116
};
/* Additional status */
@@ -294,8 +295,8 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
-#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITES 125
-#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITES 126
+#define OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES 125
+#define OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES 126
#define OPCODE_COMMON_GET_MAC_LIST 147
#define OPCODE_COMMON_SET_MAC_LIST 148
#define OPCODE_COMMON_GET_HSW_CONFIG 152
@@ -308,6 +309,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_COMMON_DELETE_OBJECT 174
+#define OPCODE_COMMON_SET_FEATURES 191
#define OPCODE_COMMON_MANAGE_IFACE_FILTERS 193
#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
@@ -2315,6 +2317,41 @@ struct be_cmd_resp_get_iface_list {
struct be_if_desc if_desc;
};
+/************** Set Features *******************/
+#define BE_FEATURE_UE_RECOVERY 0x10
+#define BE_UE_RECOVERY_UER_MASK 0x1
+
+struct be_req_ue_recovery {
+ u32 uer;
+ u32 rsvd;
+};
+
+struct be_cmd_req_set_features {
+ struct be_cmd_req_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_req_ue_recovery req;
+ u32 rsvd[2];
+ } parameter;
+};
+
+struct be_resp_ue_recovery {
+ u32 uer;
+ u16 ue2rp;
+ u16 ue2sr;
+};
+
+struct be_cmd_resp_set_features {
+ struct be_cmd_resp_hdr hdr;
+ u32 features;
+ u32 parameter_len;
+ union {
+ struct be_resp_ue_recovery resp;
+ u32 rsvd[2];
+ } parameter;
+};
+
/*************** Set logical link ********************/
#define PLINK_ENABLE BIT(0)
#define PLINK_TRACK BIT(8)
@@ -2343,6 +2380,7 @@ struct be_cmd_req_manage_iface_filters {
u32 cap_control_flags;
} __packed;
+u16 be_POST_stage_get(struct be_adapter *adapter);
int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2470,3 +2508,4 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
struct be_resources *vft_res);
+int be_cmd_set_features(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 50e7be5da50c..0a48a31225e6 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -421,6 +421,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
}
}
+static const char be_priv_flags[][ETH_GSTRING_LEN] = {
+ "disable-tpe-recovery"
+};
+
static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
uint8_t *data)
{
@@ -454,6 +458,10 @@ static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
data += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(be_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, be_priv_flags[i]);
+ break;
}
}
@@ -468,6 +476,8 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
return ETHTOOL_STATS_NUM +
adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(be_priv_flags);
default:
return -EINVAL;
}
@@ -1360,6 +1370,34 @@ err:
return be_cmd_status(status);
}
+static u32 be_get_priv_flags(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->priv_flags;
+}
+
+static int be_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ bool tpe_old = !!(adapter->priv_flags & BE_DISABLE_TPE_RECOVERY);
+ bool tpe_new = !!(flags & BE_DISABLE_TPE_RECOVERY);
+
+ if (tpe_old != tpe_new) {
+ if (tpe_new) {
+ adapter->priv_flags |= BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is disabled\n");
+ } else {
+ adapter->priv_flags &= ~BE_DISABLE_TPE_RECOVERY;
+ dev_info(&adapter->pdev->dev,
+ "HW error recovery is enabled\n");
+ }
+ }
+
+ return 0;
+}
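With this in place the flag can be toggled from user space through the standard ethtool private-flags interface, e.g.:

	ethtool --set-priv-flags <iface> disable-tpe-recovery on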
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1373,6 +1411,8 @@ const struct ethtool_ops be_ethtool_ops = {
.get_ringparam = be_get_ringparam,
.get_pauseparam = be_get_pauseparam,
.set_pauseparam = be_set_pauseparam,
+ .set_priv_flags = be_set_priv_flags,
+ .get_priv_flags = be_get_priv_flags,
.get_strings = be_get_stat_strings,
.set_phys_id = be_set_phys_id,
.set_dump = be_set_dump,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index c684bb32b487..92942c84d329 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,18 +32,23 @@
#define MPU_EP_CONTROL 0
/********** MPU semaphore: used for SH & BE *************/
+#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
#define POST_STAGE_MASK 0x0000FFFF
#define POST_ERR_MASK 0x1
#define POST_ERR_SHIFT 31
+#define POST_ERR_RECOVERY_CODE_MASK 0xFFF
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-
+#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
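A reading aid, not part of the patch: the new masks combine as below when classifying a POST-stage word; be_err_is_recoverable() in be_main.c performs essentially this test.

/* Sketch: a stage of 0xE... flags a recoverable TPE error and the low
 * 12 bits carry its code; both conditions must hold.
 */
static bool post_stage_recoverable(u32 post, u16 *err_code)
{
	if ((post & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	*err_code = post & POST_ERR_RECOVERY_CODE_MASK;
	return *err_code != 0;
}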
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f7584d4139ff..ac513e6627d1 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -41,6 +41,11 @@ static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+/* Per-module error detection/recovery workq shared across all functions.
+ * Each function schedules its own work request on this shared workq.
+ */
+struct workqueue_struct *be_err_recovery_workq;
+
static const struct pci_device_id be_dev_ids[] = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -264,6 +269,38 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
+static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+{
+ int i;
+
+ /* Check if mac has already been added as part of uc-list */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
+ mac)) {
+ /* mac already added, skip addition */
+ adapter->pmac_id[0] = adapter->pmac_id[i + 1];
+ return 0;
+ }
+ }
+
+ return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+}
+
+static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ int i;
+
+ /* Skip deletion if the programmed mac is
+ * being used in uc-list
+ */
+ for (i = 0; i < adapter->uc_macs; i++) {
+ if (adapter->pmac_id[i + 1] == pmac_id)
+ return;
+ }
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+}
+
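The two helpers above encode a sharing rule: when the device MAC also sits in the uc-list, pmac_id[0] aliases that slot's hardware handle, so deletion must verify that no uc entry still references it. A condensed, hypothetical sketch of the reference check:

/* Hypothetical sketch: slot 0 is the device MAC, slots 1..uc_macs shadow
 * the uc-list; a pmac handle may only be freed once unreferenced.
 */
static bool pmac_still_referenced(const u32 *pmac_id, int uc_macs, u32 id)
{
	int i;

	for (i = 0; i < uc_macs; i++)
		if (pmac_id[i + 1] == id)
			return true;
	return false;
}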
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -271,7 +308,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int status;
u8 mac[ETH_ALEN];
- u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
+ u32 old_pmac_id = adapter->pmac_id[0];
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -279,7 +316,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Proceed further only if the user-provided MAC is different
* from the active MAC
*/
- if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
/* if device is not running, copy MAC to netdev->dev_addr */
@@ -292,23 +329,22 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
* FILTMGMT privilege. This failure is OK only if the PF programmed
* the MAC for the VF.
*/
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+ mutex_lock(&adapter->rx_filter_lock);
+ status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
if (!status) {
- curr_pmac_id = adapter->pmac_id[0];
/* Delete the old programmed MAC. This call may fail if the
* old MAC was already deleted by the PF driver.
*/
if (adapter->pmac_id[0] != old_pmac_id)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- old_pmac_id, 0);
+ be_dev_mac_del(adapter, old_pmac_id);
}
+ mutex_unlock(&adapter->rx_filter_lock);
/* Decide if the new MAC is successfully activated only after
* querying the FW
*/
- status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
+ status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
adapter->if_handle, true, 0);
if (status)
goto err;
@@ -321,6 +357,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
goto err;
}
done:
+ ether_addr_copy(adapter->dev_mac, addr->sa_data);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
@@ -1623,6 +1660,28 @@ static void be_clear_mc_list(struct be_adapter *adapter)
adapter->mc_count = 0;
}
+static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
+{
+ if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ adapter->dev_mac)) {
+ adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
+ return 0;
+ }
+
+ return be_cmd_pmac_add(adapter,
+ (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+ adapter->if_handle,
+ &adapter->pmac_id[uc_idx + 1], 0);
+}
+
+static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
+{
+ if (pmac_id == adapter->pmac_id[0])
+ return;
+
+ be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+}
+
static void be_set_uc_list(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1663,13 +1722,10 @@ static void be_set_uc_list(struct be_adapter *adapter)
be_clear_uc_promisc(adapter);
for (i = 0; i < adapter->uc_macs; i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i + 1], 0);
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
for (i = 0; i < curr_uc_macs; i++)
- be_cmd_pmac_add(adapter, adapter->uc_list[i].mac,
- adapter->if_handle,
- &adapter->pmac_id[i + 1], 0);
+ be_uc_mac_add(adapter, i);
adapter->uc_macs = curr_uc_macs;
adapter->update_uc_list = false;
}
@@ -1682,8 +1738,8 @@ static void be_clear_uc_list(struct be_adapter *adapter)
__dev_uc_unsync(netdev, NULL);
for (i = 0; i < adapter->uc_macs; i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i + 1], 0);
+ be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
+
adapter->uc_macs = 0;
}
@@ -1839,7 +1895,8 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1851,6 +1908,9 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
status = be_set_vf_tvt(adapter, vf, vlan);
@@ -3358,9 +3418,7 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (ue_lo || ue_hi) {
- dev_err(dev,
- "Unrecoverable Error detected in the adapter");
- dev_err(dev, "Please reboot server to recover");
+ dev_err(dev, "Error detected in the adapter");
if (skyhawk_chip(adapter))
be_set_error(adapter, BE_ERROR_UE);
@@ -3563,9 +3621,7 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[0], 0);
-
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3720,11 +3776,10 @@ static int be_enable_if_filters(struct be_adapter *adapter)
/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
- status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
- adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
+ ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
}
if (adapter->vlans_added)
@@ -3903,8 +3958,13 @@ static void be_cancel_worker(struct be_adapter *adapter)
static void be_cancel_err_detection(struct be_adapter *adapter)
{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
- cancel_delayed_work_sync(&adapter->be_err_detection_work);
+ cancel_delayed_work_sync(&err_rec->err_detection_work);
adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
}
@@ -4309,7 +4369,7 @@ static void be_setup_init(struct be_adapter *adapter)
* for distribution between the VFs. This self-imposed limit will determine the
* number of VFs for which RSS can be enabled.
*/
-void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
struct be_port_resources port_res = {0};
u8 rss_tables_on_port;
@@ -4503,10 +4563,25 @@ static void be_schedule_worker(struct be_adapter *adapter)
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
+static void be_destroy_err_recovery_workq(void)
+{
+ if (!be_err_recovery_workq)
+ return;
+
+ flush_workqueue(be_err_recovery_workq);
+ destroy_workqueue(be_err_recovery_workq);
+ be_err_recovery_workq = NULL;
+}
+
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
- schedule_delayed_work(&adapter->be_err_detection_work,
- msecs_to_jiffies(delay));
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+
+ if (!be_err_recovery_workq)
+ return;
+
+ queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
+ msecs_to_jiffies(delay));
adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
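The rescheduling change follows a common kernel pattern: a driver-private workqueue plus a self-re-arming delayed work item, keeping recovery work for all functions off the system workqueue. A minimal sketch with hypothetical names:

/* Minimal sketch (hypothetical names): delayed work re-armed on a
 * dedicated single-threaded workqueue, as the patch does for recovery.
 */
static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);

	/* ... detect errors, run one recovery step ... */
	queue_delayed_work(demo_wq, dw, msecs_to_jiffies(1000));
}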
@@ -4635,10 +4710,15 @@ static inline int fw_major_num(const char *fw_ver)
return fw_major;
}
-/* If any VFs are already enabled don't FLR the PF */
+/* If it is error recovery, FLR the PF
+ * Else if any VFs are already enabled don't FLR the PF
+ */
static bool be_reset_required(struct be_adapter *adapter)
{
- return pci_num_vf(adapter->pdev) ? false : true;
+ if (be_error_recovering(adapter))
+ return true;
+ else
+ return pci_num_vf(adapter->pdev) == 0;
}
/* Wait for the FW to be ready and perform the required initialization */
@@ -4650,6 +4730,9 @@ static int be_func_init(struct be_adapter *adapter)
if (status)
return status;
+ /* FW is now ready; clear errors to allow cmds/doorbell */
+ be_clear_error(adapter, BE_CLEAR_ALL);
+
if (be_reset_required(adapter)) {
status = be_cmd_reset_function(adapter);
if (status)
@@ -4657,9 +4740,6 @@ static int be_func_init(struct be_adapter *adapter)
/* Wait for interrupts to quiesce after an FLR */
msleep(100);
-
- /* We can clear all errors when function reset succeeds */
- be_clear_error(adapter, BE_CLEAR_ALL);
}
/* Tell FW we're ready to fire cmds */
@@ -4767,6 +4847,9 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
+ if (be_physfn(adapter) && !lancer_chip(adapter))
+ be_cmd_set_features(adapter);
+
be_schedule_worker(adapter);
adapter->flags |= BE_FLAGS_SETUP_DONE;
return 0;
@@ -5210,13 +5293,145 @@ static int be_resume(struct be_adapter *adapter)
return 0;
}
+static void be_soft_reset(struct be_adapter *adapter)
+{
+ u32 val;
+
+ dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
+ val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+ val |= SLIPORT_SOFTRESET_SR_MASK;
+ iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
+}
+
+static bool be_err_is_recoverable(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ unsigned long initial_idle_time =
+ msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
+ unsigned long recovery_interval =
+ msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
+ u16 ue_err_code;
+ u32 val;
+
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
+ return false;
+ ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
+ if (ue_err_code == 0)
+ return false;
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
+ ue_err_code);
+
+ if (jiffies - err_rec->probe_time <= initial_idle_time) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from driver load\n",
+ jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (err_rec->last_recovery_time &&
+ (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover within %lu sec from last recovery\n",
+ jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
+ return false;
+ }
+
+ if (ue_err_code == err_rec->last_err_code) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot recover from a consecutive TPE error\n");
+ return false;
+ }
+
+ err_rec->last_recovery_time = jiffies;
+ err_rec->last_err_code = ue_err_code;
+ return true;
+}
+
+static int be_tpe_recover(struct be_adapter *adapter)
+{
+ struct be_error_recovery *err_rec = &adapter->error_recovery;
+ int status = -EAGAIN;
+ u32 val;
+
+ switch (err_rec->recovery_state) {
+ case ERR_RECOVERY_ST_NONE:
+ err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
+ err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_DETECT:
+ val = be_POST_stage_get(adapter);
+ if ((val & POST_STAGE_RECOVERABLE_ERR) !=
+ POST_STAGE_RECOVERABLE_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable HW error detected: 0x%x\n", val);
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
+
+ /* Only PF0 initiates a chip soft reset, but it must wait UE2SR
+ * milliseconds before checking the final error status in
+ * SLIPORT_SEMAPHORE to determine whether the recovery criteria
+ * are met. If they are, PF0 initiates the soft reset.
+ */
+ if (adapter->pf_num == 0) {
+ err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
+ err_rec->resched_delay = err_rec->ue_to_reset_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+ }
+
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ ERR_RECOVERY_UE_DETECT_DURATION;
+ break;
+
+ case ERR_RECOVERY_ST_RESET:
+ if (!be_err_is_recoverable(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to meet recovery criteria\n");
+ status = -EIO;
+ err_rec->resched_delay = 0;
+ break;
+ }
+ be_soft_reset(adapter);
+ err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
+ err_rec->resched_delay = err_rec->ue_to_poll_time -
+ err_rec->ue_to_reset_time;
+ break;
+
+ case ERR_RECOVERY_ST_PRE_POLL:
+ err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
+ err_rec->resched_delay = 0;
+ status = 0; /* done */
+ break;
+
+ default:
+ status = -EINVAL;
+ err_rec->resched_delay = 0;
+ break;
+ }
+
+ return status;
+}
+
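As a reading aid, a hypothetical standalone model of the state machine above: each call advances one state and reports -EAGAIN (reschedule) until PRE_POLL completes; the hardware steps and the per-state delays are stubbed out.

#include <errno.h>

enum rec_state { ST_NONE, ST_DETECT, ST_RESET, ST_PRE_POLL, ST_REINIT };

static int recover_step(enum rec_state *st, int pf0)
{
	switch (*st) {
	case ST_NONE:
		*st = ST_DETECT;		/* wait the UE-detect window */
		return -EAGAIN;
	case ST_DETECT:				/* only PF0 performs the reset */
		*st = pf0 ? ST_RESET : ST_PRE_POLL;
		return -EAGAIN;
	case ST_RESET:				/* PF0: chip soft reset here */
		*st = ST_PRE_POLL;
		return -EAGAIN;
	case ST_PRE_POLL:
		*st = ST_REINIT;		/* caller re-initializes */
		return 0;
	default:
		return -EINVAL;
	}
}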
static int be_err_recover(struct be_adapter *adapter)
{
int status;
- /* Error recovery is supported only Lancer as of now */
- if (!lancer_chip(adapter))
- return -EIO;
+ if (!lancer_chip(adapter)) {
+ if (!adapter->error_recovery.recovery_supported ||
+ adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
+ return -EIO;
+ status = be_tpe_recover(adapter);
+ if (status)
+ goto err;
+ }
/* Wait for adapter to reach quiescent state before
* destroying queues
@@ -5225,59 +5440,74 @@ static int be_err_recover(struct be_adapter *adapter)
if (status)
goto err;
+ adapter->flags |= BE_FLAGS_TRY_RECOVERY;
+
be_cleanup(adapter);
status = be_resume(adapter);
if (status)
goto err;
- return 0;
+ adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
+
err:
return status;
}
static void be_err_detection_task(struct work_struct *work)
{
+ struct be_error_recovery *err_rec =
+ container_of(work, struct be_error_recovery,
+ err_detection_work.work);
struct be_adapter *adapter =
- container_of(work, struct be_adapter,
- be_err_detection_work.work);
+ container_of(err_rec, struct be_adapter,
+ error_recovery);
+ u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
struct device *dev = &adapter->pdev->dev;
int recovery_status;
- int delay = ERR_DETECTION_DELAY;
be_detect_error(adapter);
-
- if (be_check_error(adapter, BE_ERROR_HW))
- recovery_status = be_err_recover(adapter);
- else
+ if (!be_check_error(adapter, BE_ERROR_HW))
goto reschedule_task;
+ recovery_status = be_err_recover(adapter);
if (!recovery_status) {
- adapter->recovery_retries = 0;
+ err_rec->recovery_retries = 0;
+ err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
dev_info(dev, "Adapter recovery successful\n");
goto reschedule_task;
- } else if (be_virtfn(adapter)) {
+ } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
+ /* BEx/SH recovery state machine */
+ if (adapter->pf_num == 0 &&
+ err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
+ dev_err(&adapter->pdev->dev,
+ "Adapter recovery in progress\n");
+ resched_delay = err_rec->resched_delay;
+ goto reschedule_task;
+ } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
/* For VFs, check every second whether the PF has
* allocated resources.
*/
dev_err(dev, "Re-trying adapter recovery\n");
goto reschedule_task;
- } else if (adapter->recovery_retries++ <
- MAX_ERR_RECOVERY_RETRY_COUNT) {
+ } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
+ ERR_RECOVERY_MAX_RETRY_COUNT) {
/* In case of another error during recovery, it takes 30 sec
* for adapter to come out of error. Retry error recovery after
* this time interval.
*/
dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
- delay = ERR_RECOVERY_RETRY_DELAY;
+ resched_delay = ERR_RECOVERY_RETRY_DELAY;
goto reschedule_task;
} else {
dev_err(dev, "Adapter recovery failed\n");
+ dev_err(dev, "Please reboot server to recover\n");
}
return;
+
reschedule_task:
- be_schedule_err_detection(adapter, delay);
+ be_schedule_err_detection(adapter, resched_delay);
}
static void be_log_sfp_info(struct be_adapter *adapter)
@@ -5490,7 +5720,10 @@ static int be_drv_init(struct be_adapter *adapter)
pci_save_state(adapter->pdev);
INIT_DELAYED_WORK(&adapter->work, be_worker);
- INIT_DELAYED_WORK(&adapter->be_err_detection_work,
+
+ adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
+ adapter->error_recovery.resched_delay = 0;
+ INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
be_err_detection_task);
adapter->rx_fc = true;
@@ -5525,6 +5758,9 @@ static void be_remove(struct pci_dev *pdev)
be_clear(adapter);
+ if (!pci_vfs_assigned(adapter->pdev))
+ be_cmd_reset_function(adapter);
+
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
@@ -5681,6 +5917,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
be_roce_dev_add(adapter);
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
+ adapter->error_recovery.probe_time = jiffies;
/* On Die temperature not supported for VF. */
if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@ -5926,6 +6163,8 @@ static struct pci_driver be_driver = {
static int __init be_init_module(void)
{
+ int status;
+
if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
rx_frag_size != 2048) {
printk(KERN_WARNING DRV_NAME
@@ -5945,7 +6184,17 @@ static int __init be_init_module(void)
return -1;
}
- return pci_register_driver(&be_driver);
+ be_err_recovery_workq =
+ create_singlethread_workqueue("be_err_recover");
+ if (!be_err_recovery_workq)
+ pr_warn(DRV_NAME ": Could not create error recovery workqueue\n");
+
+ status = pci_register_driver(&be_driver);
+ if (status) {
+ destroy_workqueue(be_wq);
+ be_destroy_err_recovery_workq();
+ }
+ return status;
}
module_init(be_init_module);
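The init path above tolerates a missing recovery workqueue (recovery is then simply unavailable) but still unwinds both workqueues if driver registration fails. The general shape, as a hedged sketch with hypothetical names:

static int __init demo_init(void)
{
	int err;

	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		pr_warn("demo: running without recovery workqueue\n");

	err = pci_register_driver(&demo_driver);
	if (err && demo_wq)
		destroy_workqueue(demo_wq);	/* reverse-order unwind */
	return err;
}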
@@ -5953,6 +6202,8 @@ static void __exit be_exit_module(void)
{
pci_unregister_driver(&be_driver);
+ be_destroy_err_recovery_workq();
+
if (be_wq)
destroy_workqueue(be_wq);
}
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 36361f8bf894..90f9c5481290 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -60,6 +60,8 @@ struct ftgmac100 {
struct ftgmac100_descs *descs;
dma_addr_t descs_dma_addr;
+ struct page *rx_pages[RX_QUEUE_ENTRIES];
+
unsigned int rx_pointer;
unsigned int tx_clean_pointer;
unsigned int tx_pointer;
@@ -77,6 +79,9 @@ struct ftgmac100 {
int int_mask_all;
bool use_ncsi;
bool enabled;
+
+ u32 rxdes0_edorr_mask;
+ u32 txdes0_edotr_mask;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
@@ -257,10 +262,11 @@ static bool ftgmac100_rxdes_packet_ready(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY);
}
-static void ftgmac100_rxdes_set_dma_own(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_dma_own(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
/* clear status bits */
- rxdes->rxdes0 &= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 &= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static bool ftgmac100_rxdes_rx_error(struct ftgmac100_rxdes *rxdes)
@@ -298,9 +304,10 @@ static bool ftgmac100_rxdes_multicast(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_MULTICAST);
}
-static void ftgmac100_rxdes_set_end_of_ring(struct ftgmac100_rxdes *rxdes)
+static void ftgmac100_rxdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- rxdes->rxdes0 |= cpu_to_le32(FTGMAC100_RXDES0_EDORR);
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
}
static void ftgmac100_rxdes_set_dma_addr(struct ftgmac100_rxdes *rxdes,
@@ -341,18 +348,27 @@ static bool ftgmac100_rxdes_ipcs_err(struct ftgmac100_rxdes *rxdes)
return rxdes->rxdes1 & cpu_to_le32(FTGMAC100_RXDES1_IP_CHKSUM_ERR);
}
+static inline struct page **ftgmac100_rxdes_page_slot(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
+{
+ return &priv->rx_pages[rxdes - priv->descs->rxdes];
+}
+
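The slot helper above leans on C pointer arithmetic: subtracting the ring base from a descriptor pointer yields the element index, which then keys the parallel rx_pages[] array. In isolation:

#include <stddef.h>

struct demo_desc { unsigned int d0; };

/* Pointer difference is measured in elements, not bytes, so this is a
 * valid ring index for any parallel bookkeeping array.
 */
static size_t ring_index(const struct demo_desc *base,
			 const struct demo_desc *elem)
{
	return (size_t)(elem - base);
}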
/*
* rxdes2 is not used by hardware. The page backing each descriptor is
* now tracked in the separate priv->rx_pages[] array, which hardware
* never touches, so no cpu_to_le32()/le32_to_cpu() is needed.
*/
-static void ftgmac100_rxdes_set_page(struct ftgmac100_rxdes *rxdes, struct page *page)
+static void ftgmac100_rxdes_set_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes,
+ struct page *page)
{
- rxdes->rxdes2 = (unsigned int)page;
+ *ftgmac100_rxdes_page_slot(priv, rxdes) = page;
}
-static struct page *ftgmac100_rxdes_get_page(struct ftgmac100_rxdes *rxdes)
+static struct page *ftgmac100_rxdes_get_page(struct ftgmac100 *priv,
+ struct ftgmac100_rxdes *rxdes)
{
- return (struct page *)rxdes->rxdes2;
+ return *ftgmac100_rxdes_page_slot(priv, rxdes);
}
/******************************************************************************
@@ -382,7 +398,7 @@ ftgmac100_rx_locate_first_segment(struct ftgmac100 *priv)
if (ftgmac100_rxdes_first_segment(rxdes))
return rxdes;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
}
@@ -453,7 +469,7 @@ static void ftgmac100_rx_drop_packet(struct ftgmac100 *priv)
if (ftgmac100_rxdes_last_segment(rxdes))
done = true;
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
ftgmac100_rx_pointer_advance(priv);
rxdes = ftgmac100_current_rxdes(priv);
} while (!done && ftgmac100_rxdes_packet_ready(rxdes));
@@ -501,7 +517,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
do {
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
unsigned int size;
dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
@@ -545,10 +561,11 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
/******************************************************************************
* internal functions (transmit descriptor)
*****************************************************************************/
-static void ftgmac100_txdes_reset(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_reset(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
/* clear all except end of ring bit */
- txdes->txdes0 &= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 &= cpu_to_le32(priv->txdes0_edotr_mask);
txdes->txdes1 = 0;
txdes->txdes2 = 0;
txdes->txdes3 = 0;
@@ -569,9 +586,10 @@ static void ftgmac100_txdes_set_dma_own(struct ftgmac100_txdes *txdes)
txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_TXDMA_OWN);
}
-static void ftgmac100_txdes_set_end_of_ring(struct ftgmac100_txdes *txdes)
+static void ftgmac100_txdes_set_end_of_ring(const struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes)
{
- txdes->txdes0 |= cpu_to_le32(FTGMAC100_TXDES0_EDOTR);
+ txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
}
static void ftgmac100_txdes_set_first_segment(struct ftgmac100_txdes *txdes)
@@ -690,7 +708,7 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
dev_kfree_skb(skb);
- ftgmac100_txdes_reset(txdes);
+ ftgmac100_txdes_reset(priv, txdes);
ftgmac100_tx_clean_pointer_advance(priv);
@@ -779,9 +797,9 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
return -ENOMEM;
}
- ftgmac100_rxdes_set_page(rxdes, page);
+ ftgmac100_rxdes_set_page(priv, rxdes, page);
ftgmac100_rxdes_set_dma_addr(rxdes, map);
- ftgmac100_rxdes_set_dma_own(rxdes);
+ ftgmac100_rxdes_set_dma_own(priv, rxdes);
return 0;
}
@@ -791,7 +809,7 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
- struct page *page = ftgmac100_rxdes_get_page(rxdes);
+ struct page *page = ftgmac100_rxdes_get_page(priv, rxdes);
dma_addr_t map = ftgmac100_rxdes_get_dma_addr(rxdes);
if (!page)
@@ -828,7 +846,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
return -ENOMEM;
/* initialize RX ring */
- ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
+ ftgmac100_rxdes_set_end_of_ring(priv,
+ &priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
struct ftgmac100_rxdes *rxdes = &priv->descs->rxdes[i];
@@ -838,7 +857,8 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
}
/* initialize TX ring */
- ftgmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
+ ftgmac100_txdes_set_end_of_ring(priv,
+ &priv->descs->txdes[TX_QUEUE_ENTRIES - 1]);
return 0;
err:
@@ -1055,14 +1075,12 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
}
if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
- FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG)) {
+ FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR)) {
if (net_ratelimit())
- netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
+ netdev_info(netdev, "[ISR] = 0x%x: %s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
status & FTGMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "",
- status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "",
- status & FTGMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : "");
+ status & FTGMAC100_INT_AHB_ERR ? "AHB_ERR " : "");
if (status & FTGMAC100_INT_NO_RXBUF) {
/* RX buffer unavailable */
@@ -1092,6 +1110,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ unsigned int status;
int err;
err = ftgmac100_alloc_buffers(priv);
@@ -1117,6 +1136,11 @@ static int ftgmac100_open(struct net_device *netdev)
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+
+ /* Clear stale interrupts */
+ status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
+ iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
+
if (netdev->phydev)
phy_start(netdev->phydev);
else if (priv->use_ncsi)
@@ -1226,12 +1250,21 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
int i, err = 0;
+ u32 reg;
/* initialize mdio bus */
priv->mii_bus = mdiobus_alloc();
if (!priv->mii_bus)
return -EIO;
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ /* This driver supports the old MDIO interface */
+ reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
+ reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
+ }
+
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1355,9 +1388,18 @@ static int ftgmac100_probe(struct platform_device *pdev)
FTGMAC100_INT_XPKT_ETH |
FTGMAC100_INT_XPKT_LOST |
FTGMAC100_INT_AHB_ERR |
- FTGMAC100_INT_PHYSTS_CHG |
FTGMAC100_INT_RPKT_BUF |
FTGMAC100_INT_NO_RXBUF);
+
+ if (of_machine_is_compatible("aspeed,ast2400") ||
+ of_machine_is_compatible("aspeed,ast2500")) {
+ priv->rxdes0_edorr_mask = BIT(30);
+ priv->txdes0_edotr_mask = BIT(30);
+ } else {
+ priv->rxdes0_edorr_mask = BIT(15);
+ priv->txdes0_edotr_mask = BIT(15);
+ }
+
if (pdev->dev.of_node &&
of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
@@ -1367,7 +1409,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
- priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
index 13408d448b05..a7ce0ac8858a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.h
+++ b/drivers/net/ethernet/faraday/ftgmac100.h
@@ -134,6 +134,11 @@
#define FTGMAC100_DMAFIFOS_TXDMA_REQ (1 << 31)
/*
+ * Feature Register
+ */
+#define FTGMAC100_REVR_NEW_MDIO_INTERFACE BIT(31)
+
+/*
* Receive buffer size register
*/
#define FTGMAC100_RBSR_SIZE(x) ((x) & 0x3fff)
@@ -152,6 +157,7 @@
#define FTGMAC100_MACCR_FULLDUP (1 << 8)
#define FTGMAC100_MACCR_GIGA_MODE (1 << 9)
#define FTGMAC100_MACCR_CRC_APD (1 << 10)
+#define FTGMAC100_MACCR_PHY_LINK_LEVEL (1 << 11)
#define FTGMAC100_MACCR_RX_RUNT (1 << 12)
#define FTGMAC100_MACCR_JUMBO_LF (1 << 13)
#define FTGMAC100_MACCR_RX_ALL (1 << 14)
@@ -189,7 +195,6 @@ struct ftgmac100_txdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_TXDES0_TXBUF_SIZE(x) ((x) & 0x3fff)
-#define FTGMAC100_TXDES0_EDOTR (1 << 15)
#define FTGMAC100_TXDES0_CRC_ERR (1 << 19)
#define FTGMAC100_TXDES0_LTS (1 << 28)
#define FTGMAC100_TXDES0_FTS (1 << 29)
@@ -215,7 +220,6 @@ struct ftgmac100_rxdes {
} __attribute__ ((aligned(16)));
#define FTGMAC100_RXDES0_VDBC 0x3fff
-#define FTGMAC100_RXDES0_EDORR (1 << 15)
#define FTGMAC100_RXDES0_MULTICAST (1 << 16)
#define FTGMAC100_RXDES0_BROADCAST (1 << 17)
#define FTGMAC100_RXDES0_RX_ERR (1 << 18)
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 8ddeedbcef9c..ddf0260176c9 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -192,7 +192,7 @@ struct fman_mac_params {
/* A handle to the FM object this port related to */
void *fm;
/* MDIO exceptions interrupt source - not valid for all
- * MACs; MUST be set to 'NO_IRQ' for MACs that don't have
+ * MACs; MUST be set to 0 for MACs that don't have
* mdio-irq, or for polling
*/
void *dev_id; /* device cookie used by the exception cbs */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 61fd486c50bb..dc120c148d97 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -60,6 +60,9 @@ module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
"Freescale bitmapped debugging message enable value");
+#define RX_RING_SIZE 32
+#define TX_RING_SIZE 64
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif
@@ -79,8 +82,8 @@ static void skb_align(struct sk_buff *skb, int align)
skb_reserve(skb, align - off);
}
-/* NAPI receive function */
-static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
+/* NAPI function */
+static int fs_enet_napi(struct napi_struct *napi, int budget)
{
struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
struct net_device *dev = fep->ndev;
@@ -90,9 +93,102 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
int received = 0;
u16 pkt_len, sc;
int curidx;
+ int dirtyidx, do_wake, do_restart;
+ int tx_left = TX_RING_SIZE;
- if (budget <= 0)
- return received;
+ spin_lock(&fep->tx_lock);
+ bdp = fep->dirty_tx;
+
+ /* clear status bits for NAPI */
+ (*fep->ops->napi_clear_event)(dev);
+
+ do_wake = do_restart = 0;
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0 && tx_left) {
+ dirtyidx = bdp - fep->tx_bd_base;
+
+ if (fep->tx_free == fep->tx_ring)
+ break;
+
+ skb = fep->tx_skbuff[dirtyidx];
+
+ /*
+ * Check for errors.
+ */
+ if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+ if (sc & BD_ENET_TX_HB) /* No heartbeat */
+ fep->stats.tx_heartbeat_errors++;
+ if (sc & BD_ENET_TX_LC) /* Late collision */
+ fep->stats.tx_window_errors++;
+ if (sc & BD_ENET_TX_RL) /* Retrans limit */
+ fep->stats.tx_aborted_errors++;
+ if (sc & BD_ENET_TX_UN) /* Underrun */
+ fep->stats.tx_fifo_errors++;
+ if (sc & BD_ENET_TX_CSL) /* Carrier lost */
+ fep->stats.tx_carrier_errors++;
+
+ if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+ fep->stats.tx_errors++;
+ do_restart = 1;
+ }
+ } else
+ fep->stats.tx_packets++;
+
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
+
+ /*
+ * Deferred means some collisions occurred during transmit,
+ * but we eventually sent the packet OK.
+ */
+ if (sc & BD_ENET_TX_DEF)
+ fep->stats.collisions++;
+
+ /* unmap */
+ if (fep->mapped_as_page[dirtyidx])
+ dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+ else
+ dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+ CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+
+ /*
+ * Free the sk buffer associated with this last transmit.
+ */
+ if (skb) {
+ dev_kfree_skb(skb);
+ fep->tx_skbuff[dirtyidx] = NULL;
+ }
+
+ /*
+ * Update pointer to next buffer descriptor to be transmitted.
+ */
+ if ((sc & BD_ENET_TX_WRAP) == 0)
+ bdp++;
+ else
+ bdp = fep->tx_bd_base;
+
+ /*
+ * Since we have freed up a buffer, the ring is no longer
+ * full.
+ */
+ if (++fep->tx_free == MAX_SKB_FRAGS)
+ do_wake = 1;
+ tx_left--;
+ }
+
+ fep->dirty_tx = bdp;
+
+ if (do_restart)
+ (*fep->ops->tx_restart)(dev);
+
+ spin_unlock(&fep->tx_lock);
+
+ if (do_wake)
+ netif_wake_queue(dev);
/*
* First, grab all of the stats for the incoming packet.
@@ -100,10 +196,8 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
*/
bdp = fep->cur_rx;
- /* clear RX status bits for napi*/
- (*fep->ops->napi_clear_rx_event)(dev);
-
- while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+ while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0 &&
+ received < budget) {
curidx = bdp - fep->rx_bd_base;
/*
@@ -132,21 +226,10 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
if (sc & BD_ENET_RX_OV)
fep->stats.rx_crc_errors++;
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- skbn = skb;
-
+ skbn = fep->rx_skbuff[curidx];
} else {
skb = fep->rx_skbuff[curidx];
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
/*
* Process the incoming frame.
*/
@@ -162,12 +245,30 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
skb_copy_from_linear_data(skb,
skbn->data, pkt_len);
swap(skb, skbn);
+ dma_sync_single_for_cpu(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(pkt_len),
+ DMA_FROM_DEVICE);
}
} else {
skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skbn)
+ if (skbn) {
+ dma_addr_t dma;
+
skb_align(skbn, ENET_RX_ALIGN);
+
+ dma_unmap_single(fep->dev,
+ CBDR_BUFADDR(bdp),
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+
+ dma = dma_map_single(fep->dev,
+ skbn->data,
+ L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+ DMA_FROM_DEVICE);
+ CBDW_BUFADDR(bdp, dma);
+ }
}
if (skbn != NULL) {
@@ -182,9 +283,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
}
fep->rx_skbuff[curidx] = skbn;
- CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
CBDW_DATLEN(bdp, 0);
CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
@@ -197,134 +295,19 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
bdp = fep->rx_bd_base;
(*fep->ops->rx_bd_done)(dev);
-
- if (received >= budget)
- break;
}
fep->cur_rx = bdp;
- if (received < budget) {
+ if (received < budget && tx_left) {
/* done */
napi_complete(napi);
- (*fep->ops->napi_enable_rx)(dev);
- }
- return received;
-}
-
-static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
-{
- struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
- napi_tx);
- struct net_device *dev = fep->ndev;
- cbd_t __iomem *bdp;
- struct sk_buff *skb;
- int dirtyidx, do_wake, do_restart;
- u16 sc;
- int has_tx_work = 0;
-
- spin_lock(&fep->tx_lock);
- bdp = fep->dirty_tx;
-
- /* clear TX status bits for napi*/
- (*fep->ops->napi_clear_tx_event)(dev);
-
- do_wake = do_restart = 0;
- while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
- dirtyidx = bdp - fep->tx_bd_base;
-
- if (fep->tx_free == fep->tx_ring)
- break;
-
- skb = fep->tx_skbuff[dirtyidx];
-
- /*
- * Check for errors.
- */
- if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
- BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
-
- if (sc & BD_ENET_TX_HB) /* No heartbeat */
- fep->stats.tx_heartbeat_errors++;
- if (sc & BD_ENET_TX_LC) /* Late collision */
- fep->stats.tx_window_errors++;
- if (sc & BD_ENET_TX_RL) /* Retrans limit */
- fep->stats.tx_aborted_errors++;
- if (sc & BD_ENET_TX_UN) /* Underrun */
- fep->stats.tx_fifo_errors++;
- if (sc & BD_ENET_TX_CSL) /* Carrier lost */
- fep->stats.tx_carrier_errors++;
-
- if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
- fep->stats.tx_errors++;
- do_restart = 1;
- }
- } else
- fep->stats.tx_packets++;
-
- if (sc & BD_ENET_TX_READY) {
- dev_warn(fep->dev,
- "HEY! Enet xmit interrupt and TX_READY.\n");
- }
-
- /*
- * Deferred means some collisions occurred during transmit,
- * but we eventually sent the packet OK.
- */
- if (sc & BD_ENET_TX_DEF)
- fep->stats.collisions++;
-
- /* unmap */
- if (fep->mapped_as_page[dirtyidx])
- dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
- else
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- CBDR_DATLEN(bdp), DMA_TO_DEVICE);
-
- /*
- * Free the sk buffer associated with this last transmit.
- */
- if (skb) {
- dev_kfree_skb(skb);
- fep->tx_skbuff[dirtyidx] = NULL;
- }
-
- /*
- * Update pointer to next buffer descriptor to be transmitted.
- */
- if ((sc & BD_ENET_TX_WRAP) == 0)
- bdp++;
- else
- bdp = fep->tx_bd_base;
-
- /*
- * Since we have freed up a buffer, the ring is no longer
- * full.
- */
- if (++fep->tx_free >= MAX_SKB_FRAGS)
- do_wake = 1;
- has_tx_work = 1;
- }
-
- fep->dirty_tx = bdp;
-
- if (do_restart)
- (*fep->ops->tx_restart)(dev);
+ (*fep->ops->napi_enable)(dev);
- if (!has_tx_work) {
- napi_complete(napi);
- (*fep->ops->napi_enable_tx)(dev);
+ return received;
}
- spin_unlock(&fep->tx_lock);
-
- if (do_wake)
- netif_wake_queue(dev);
-
- if (has_tx_work)
- return budget;
- return 0;
+ return budget;
}
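The merged handler keeps the usual combined-NAPI contract: reap TX under the tx lock, then RX against the budget, and complete NAPI (re-enabling events) only when both directions ran dry. A hedged skeleton with hypothetical helpers:

static int demo_poll(struct napi_struct *napi, int budget)
{
	int tx_left = demo_clean_tx();		/* nonzero if TX drained early */
	int received = demo_clean_rx(budget);

	if (received < budget && tx_left) {
		napi_complete(napi);		/* both directions are idle */
		demo_enable_events();
		return received;
	}
	return budget;				/* stay on the poll list */
}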
/*
@@ -350,18 +333,18 @@ fs_enet_interrupt(int irq, void *dev_id)
nr++;
int_clr_events = int_events;
- int_clr_events &= ~fep->ev_napi_rx;
+ int_clr_events &= ~fep->ev_napi;
(*fep->ops->clear_int_events)(dev, int_clr_events);
if (int_events & fep->ev_err)
(*fep->ops->ev_error)(dev, int_events);
- if (int_events & fep->ev_rx) {
+ if (int_events & fep->ev) {
napi_ok = napi_schedule_prep(&fep->napi);
- (*fep->ops->napi_disable_rx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+ (*fep->ops->napi_disable)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi);
/* NOTE: it is possible for FCCs in NAPI mode */
/* to submit a spurious interrupt while in poll */
@@ -369,17 +352,6 @@ fs_enet_interrupt(int irq, void *dev_id)
__napi_schedule(&fep->napi);
}
- if (int_events & fep->ev_tx) {
- napi_ok = napi_schedule_prep(&fep->napi_tx);
-
- (*fep->ops->napi_disable_tx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
-
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
- if (napi_ok)
- __napi_schedule(&fep->napi_tx);
- }
}
handled = nr > 0;
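For contrast, the textbook hard-IRQ to NAPI handoff masks events only after claiming the instance; the handler above masks and acks unconditionally because, per its comment, FCCs can raise a spurious interrupt while already polling. With a hypothetical mask helper:

if (napi_schedule_prep(&fep->napi)) {
	demo_mask_events(dev);		/* hypothetical */
	__napi_schedule(&fep->napi);
}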
@@ -659,7 +631,8 @@ static void fs_timeout(struct net_device *dev)
}
phy_start(dev->phydev);
- wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+ wake = fep->tx_free >= MAX_SKB_FRAGS &&
+ !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
if (wake)
@@ -751,11 +724,10 @@ static int fs_enet_open(struct net_device *dev)
int err;
/* to initialize the fep->cur_rx,... */
- /* not doing this, will cause a crash in fs_enet_rx_napi */
+ /* not doing this will cause a crash in fs_enet_napi */
fs_init_bds(fep->ndev);
napi_enable(&fep->napi);
- napi_enable(&fep->napi_tx);
/* Install our interrupt handler. */
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
@@ -763,7 +735,6 @@ static int fs_enet_open(struct net_device *dev)
if (r != 0) {
dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
return -EINVAL;
}
@@ -771,7 +742,6 @@ static int fs_enet_open(struct net_device *dev)
if (err) {
free_irq(fep->interrupt, dev);
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
return err;
}
phy_start(dev->phydev);
@@ -789,7 +759,6 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
napi_disable(&fep->napi);
- napi_disable(&fep->napi_tx);
phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -861,6 +830,44 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
fep->msg_enable = value;
}
+static int fs_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fpi->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fs_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_platform_info *fpi = fep->fpi;
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fpi->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
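rx-copybreak is the stock ethtool tunable (ETHTOOL_RX_COPYBREAK) that the RX path earlier in this file already honors: frames no larger than the threshold are copied into a fresh skb so the DMA-mapped buffer can be recycled in place; user space can reach it via `ethtool --set-tunable <dev> rx-copybreak <n>` on an ethtool recent enough to know tunables. A sketch of the consuming side, mirroring that copy-vs-flip logic:

if (pkt_len <= fpi->rx_copybreak) {
	skbn = netdev_alloc_skb(dev, pkt_len + 2);
	if (skbn) {
		skb_reserve(skbn, 2);	/* align the IP header */
		skb_copy_from_linear_data(skb, skbn->data, pkt_len);
		swap(skb, skbn);	/* hand the copy up, recycle buffer */
	}
}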
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
@@ -872,6 +879,8 @@ static const struct ethtool_ops fs_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_tunable = fs_get_tunable,
+ .set_tunable = fs_set_tunable,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -939,8 +948,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->cp_command = *data;
}
- fpi->rx_ring = 32;
- fpi->tx_ring = 64;
+ fpi->rx_ring = RX_RING_SIZE;
+ fpi->tx_ring = TX_RING_SIZE;
fpi->rx_copybreak = 240;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -1024,8 +1033,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
- netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
- netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
+ netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index e29f54a35210..fee24c822fad 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -81,12 +81,9 @@ struct fs_ops {
void (*adjust_link)(struct net_device *dev);
void (*restart)(struct net_device *dev);
void (*stop)(struct net_device *dev);
- void (*napi_clear_rx_event)(struct net_device *dev);
- void (*napi_enable_rx)(struct net_device *dev);
- void (*napi_disable_rx)(struct net_device *dev);
- void (*napi_clear_tx_event)(struct net_device *dev);
- void (*napi_enable_tx)(struct net_device *dev);
- void (*napi_disable_tx)(struct net_device *dev);
+ void (*napi_clear_event)(struct net_device *dev);
+ void (*napi_enable)(struct net_device *dev);
+ void (*napi_disable)(struct net_device *dev);
void (*rx_bd_done)(struct net_device *dev);
void (*tx_kickstart)(struct net_device *dev);
u32 (*get_int_events)(struct net_device *dev);
@@ -122,7 +119,6 @@ struct phy_info {
struct fs_enet_private {
struct napi_struct napi;
- struct napi_struct napi_tx;
struct device *dev; /* pointer back to the device (must be initialized first) */
struct net_device *ndev;
spinlock_t lock; /* during all ops except TX pckt processing */
@@ -152,10 +148,8 @@ struct fs_enet_private {
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
- u32 ev_napi_rx; /* mask of NAPI rx events */
- u32 ev_napi_tx; /* mask of NAPI rx events */
- u32 ev_rx; /* rx event mask */
- u32 ev_tx; /* tx event mask */
+ u32 ev_napi; /* mask of NAPI events */
+ u32 ev; /* event mask */
u32 ev_err; /* error event mask */
u16 bd_rx_empty; /* mask of BD rx empty */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index d71761a34022..120c758f5d01 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -90,7 +90,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
int ret = -EINVAL;
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
goto out;
fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0);
@@ -124,10 +124,8 @@ out:
return ret;
}
-#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
-#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
-#define FCC_RX_EVENT (FCC_ENET_RXF)
-#define FCC_TX_EVENT (FCC_ENET_TXB)
+#define FCC_NAPI_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB | FCC_ENET_TXB)
+#define FCC_EVENT (FCC_ENET_RXF | FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
static int setup_data(struct net_device *dev)
@@ -137,10 +135,8 @@ static int setup_data(struct net_device *dev)
if (do_pd_setup(fep) != 0)
return -EINVAL;
- fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = FCC_RX_EVENT;
- fep->ev_tx = FCC_TX_EVENT;
+ fep->ev_napi = FCC_NAPI_EVENT_MSK;
+ fep->ev = FCC_EVENT;
fep->ev_err = FCC_ERR_EVENT_MSK;
return 0;
@@ -424,52 +420,28 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
+ W16(fccp, fcc_fcce, FCC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
+ S16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
fcc_t __iomem *fccp = fep->fcc.fccp;
- C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- fcc_t __iomem *fccp = fep->fcc.fccp;
-
- C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+ C16(fccp, fcc_fccm, FCC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -595,12 +567,9 @@ const struct fs_ops fs_fcc_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 35a318ed3a62..777beffa1e1e 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
@@ -109,10 +109,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
return 0;
}
-#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
-#define FEC_RX_EVENT (FEC_ENET_RXF)
-#define FEC_TX_EVENT (FEC_ENET_TXF)
+#define FEC_NAPI_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_TXF)
+#define FEC_EVENT (FEC_ENET_RXF | FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
FEC_ENET_BABT | FEC_ENET_EBERR)
@@ -126,10 +124,8 @@ static int setup_data(struct net_device *dev)
fep->fec.hthi = 0;
fep->fec.htlo = 0;
- fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = FEC_RX_EVENT;
- fep->ev_tx = FEC_TX_EVENT;
+ fep->ev_napi = FEC_NAPI_EVENT_MSK;
+ fep->ev = FEC_EVENT;
fep->ev_err = FEC_ERR_EVENT_MSK;
return 0;
@@ -396,52 +392,28 @@ static void stop(struct net_device *dev)
}
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
+ FW(fecp, ievent, FEC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
+ FS(fecp, imask, FEC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
struct fec __iomem *fecp = fep->fec.fecp;
- FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- struct fec __iomem *fecp = fep->fec.fecp;
-
- FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+ FC(fecp, imask, FEC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -513,12 +485,9 @@ const struct fs_ops fs_fec_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index e8b9c33d35b4..15abd37d70e3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -99,7 +99,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
struct platform_device *ofdev = to_platform_device(fep->dev);
fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (fep->interrupt == NO_IRQ)
+ if (!fep->interrupt)
return -EINVAL;
fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0);
@@ -115,10 +115,8 @@ static int do_pd_setup(struct fs_enet_private *fep)
return 0;
}
-#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
-#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
-#define SCC_RX_EVENT (SCCE_ENET_RXF)
-#define SCC_TX_EVENT (SCCE_ENET_TXB)
+#define SCC_NAPI_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB | SCCE_ENET_TXB)
+#define SCC_EVENT (SCCE_ENET_RXF | SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
static int setup_data(struct net_device *dev)
@@ -130,10 +128,8 @@ static int setup_data(struct net_device *dev)
fep->scc.hthi = 0;
fep->scc.htlo = 0;
- fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
- fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
- fep->ev_rx = SCC_RX_EVENT;
- fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
+ fep->ev_napi = SCC_NAPI_EVENT_MSK;
+ fep->ev = SCC_EVENT | SCCE_ENET_TXE;
fep->ev_err = SCC_ERR_EVENT_MSK;
return 0;
@@ -379,52 +375,28 @@ static void stop(struct net_device *dev)
fs_cleanup_bds(dev);
}
-static void napi_clear_rx_event(struct net_device *dev)
+static void napi_clear_event_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
+ W16(sccp, scc_scce, SCC_NAPI_EVENT_MSK);
}
-static void napi_enable_rx(struct net_device *dev)
+static void napi_enable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
+ S16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}
-static void napi_disable_rx(struct net_device *dev)
+static void napi_disable_fs(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
scc_t __iomem *sccp = fep->scc.sccp;
- C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
-}
-
-static void napi_clear_tx_event(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_enable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
-}
-
-static void napi_disable_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
- scc_t __iomem *sccp = fep->scc.sccp;
-
- C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+ C16(sccp, scc_sccm, SCC_NAPI_EVENT_MSK);
}
static void rx_bd_done(struct net_device *dev)
@@ -497,12 +469,9 @@ const struct fs_ops fs_scc_ops = {
.set_multicast_list = set_multicast_list,
.restart = restart,
.stop = stop,
- .napi_clear_rx_event = napi_clear_rx_event,
- .napi_enable_rx = napi_enable_rx,
- .napi_disable_rx = napi_disable_rx,
- .napi_clear_tx_event = napi_clear_tx_event,
- .napi_enable_tx = napi_enable_tx,
- .napi_disable_tx = napi_disable_tx,
+ .napi_clear_event = napi_clear_event_fs,
+ .napi_enable = napi_enable_fs,
+ .napi_disable = napi_disable_fs,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f3c63dce1e30..446c7b374ff5 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -195,7 +195,7 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
return 0;
}
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
/*
* Return the TBIPA address, starting from the address
* of the mapped GFAR MDIO registers (struct gfar)
@@ -228,7 +228,7 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
}
#endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
/*
* Return the TBIPAR address for a QE MDIO node, starting from the address
* of the mapped MII registers (struct fsl_pq_mii)
@@ -306,7 +306,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end)
#endif
static const struct of_device_id fsl_pq_mdio_match[] = {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+#if IS_ENABLED(CONFIG_GIANFAR)
{
.compatible = "fsl,gianfar-tbi",
.data = &(struct fsl_pq_mdio_data) {
@@ -344,7 +344,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
},
},
#endif
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+#if IS_ENABLED(CONFIG_UCC_GETH)
{
.compatible = "fsl,ucc-mdio",
.data = &(struct fsl_pq_mdio_data) {
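
IS_ENABLED() comes from <linux/kconfig.h> and evaluates true when a tristate option is built in (=y) or built as a module (=m), which is exactly what the replaced two-clause preprocessor test spelled out by hand. A minimal before/after sketch using CONFIG_GIANFAR as the example symbol:

	#include <linux/kconfig.h>

	/* old spelling: built-in or module, written out explicitly */
	#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
	/* gianfar-specific code */
	#endif

	/* new spelling: same truth table, one macro */
	#if IS_ENABLED(CONFIG_GIANFAR)
	/* gianfar-specific code */
	#endif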
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index a90ab40e55af..415ffa1509d5 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -761,7 +761,7 @@ static const struct ethtool_ops hip04_ethtool_ops = {
.get_drvinfo = hip04_get_drvinfo,
};
-static struct net_device_ops hip04_netdev_ops = {
+static const struct net_device_ops hip04_netdev_ops = {
.ndo_open = hip04_mac_open,
.ndo_stop = hip04_mac_stop,
.ndo_get_stats = hip04_get_stats,
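
Constifying the ops table is safe because struct net_device already stores it through a pointer-to-const, so a table that is never written at runtime can live in read-only memory:

	/* from include/linux/netdevice.h: the field is const-qualified */
	struct net_device {
		/* ... */
		const struct net_device_ops *netdev_ops;
		/* ... */
	};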
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d7e1f8c7ae92..059aaeda46b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -994,10 +994,10 @@ static void hns_nic_adjust_link(struct net_device *ndev)
struct hnae_handle *h = priv->ae_handle;
int state = 1;
- if (priv->phy) {
+ if (ndev->phydev) {
h->dev->ops->adjust_link(h, ndev->phydev->speed,
ndev->phydev->duplex);
- state = priv->phy->link;
+ state = ndev->phydev->link;
}
state = state && h->dev->ops->get_status(h);
@@ -1022,7 +1022,6 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
- struct hns_nic_priv *priv = netdev_priv(ndev);
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1046,8 +1045,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
- priv->phy = phy_dev;
-
return 0;
}
@@ -1224,8 +1221,8 @@ static int hns_nic_net_up(struct net_device *ndev)
if (ret)
goto out_start_err;
- if (priv->phy)
- phy_start(priv->phy);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
clear_bit(NIC_STATE_DOWN, &priv->state);
(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
@@ -1259,8 +1256,8 @@ static void hns_nic_net_down(struct net_device *ndev)
netif_tx_disable(ndev);
priv->link = 0;
- if (priv->phy)
- phy_stop(priv->phy);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
ops = priv->ae_handle->dev->ops;
@@ -1359,8 +1356,7 @@ static void hns_nic_net_timeout(struct net_device *ndev)
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
if (!netif_running(netdev))
return -EINVAL;
@@ -2017,9 +2013,8 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
hns_nic_uninit_ring_data(priv);
priv->ring_data = NULL;
- if (priv->phy)
- phy_disconnect(priv->phy);
- priv->phy = NULL;
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
if (!IS_ERR_OR_NULL(priv->ae_handle))
hnae_put_handle(priv->ae_handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 44bb3015eed3..5b412de350aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -59,7 +59,6 @@ struct hns_nic_priv {
u32 port_id;
int phy_mode;
int phy_led_val;
- struct phy_device *phy;
struct net_device *netdev;
struct device *dev;
struct hnae_handle *ae_handle;
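
Dropping the driver-private priv->phy works because the PHY attach path already records the device on the net_device itself: phy_connect_direct() sets ndev->phydev on success, so every later access can go through the generic pointer. A hedged sketch of the resulting pattern:

	/* sketch: phy_connect_direct() fills in ndev->phydev on success */
	ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link, h->phy_if);
	if (ret)
		return ret;

	/* ...anywhere else in the driver, no private copy needed: */
	if (ndev->phydev)
		phy_start(ndev->phydev);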
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 5eb3245bdf86..47e59bbfd061 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -48,9 +48,9 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
h = priv->ae_handle;
- if (priv->phy) {
- if (!genphy_read_status(priv->phy))
- link_stat = priv->phy->link;
+ if (net_dev->phydev) {
+ if (!genphy_read_status(net_dev->phydev))
+ link_stat = net_dev->phydev->link;
else
link_stat = 0;
}
@@ -64,15 +64,14 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
}
static void hns_get_mdix_mode(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
int mdix_ctrl, mdix, retval, is_resolved;
- struct hns_nic_priv *priv = netdev_priv(net_dev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = net_dev->phydev;
if (!phy_dev || !phy_dev->mdio.bus) {
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
return;
}
@@ -89,35 +88,35 @@ static void hns_get_mdix_mode(struct net_device *net_dev,
switch (mdix_ctrl) {
case 0x0:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
break;
case 0x1:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
break;
case 0x3:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
break;
default:
- cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
break;
}
if (!is_resolved)
- cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
else if (mdix)
- cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
else
- cmd->eth_tp_mdix = ETH_TP_MDI;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
}
/**
- *hns_nic_get_settings - implement ethtool get settings
+ *hns_nic_get_link_ksettings - implement ethtool get link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
*/
-static int hns_nic_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -125,6 +124,7 @@ static int hns_nic_get_settings(struct net_device *net_dev,
int ret;
u8 duplex;
u16 speed;
+ u32 supported, advertising;
if (!priv || !priv->ae_handle)
return -ESRCH;
@@ -139,38 +139,43 @@ static int hns_nic_get_settings(struct net_device *net_dev,
return -EINVAL;
}
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
/* When there is no phy, autoneg is off. */
- cmd->autoneg = false;
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.autoneg = false;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
- if (priv->phy)
- (void)phy_ethtool_gset(priv->phy, cmd);
+ if (net_dev->phydev)
+ (void)phy_ethtool_ksettings_get(net_dev->phydev, cmd);
link_stat = hns_nic_get_link(net_dev);
if (!link_stat) {
- ethtool_cmd_speed_set(cmd, (u32)SPEED_UNKNOWN);
- cmd->duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = (u32)SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
}
- if (cmd->autoneg)
- cmd->advertising |= ADVERTISED_Autoneg;
+ if (cmd->base.autoneg)
+ advertising |= ADVERTISED_Autoneg;
- cmd->supported |= h->if_support;
+ supported |= h->if_support;
if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_1000baseT_Full;
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_1000baseT_Full;
} else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_10000baseKR_Full;
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_10000baseKR_Full;
}
switch (h->media_type) {
case HNAE_MEDIA_TYPE_FIBER:
- cmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
break;
case HNAE_MEDIA_TYPE_COPPER:
- cmd->port = PORT_TP;
+ cmd->base.port = PORT_TP;
break;
case HNAE_MEDIA_TYPE_UNKNOWN:
default:
@@ -178,23 +183,27 @@ static int hns_nic_get_settings(struct net_device *net_dev,
}
if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
- cmd->supported |= SUPPORTED_Pause;
+ supported |= SUPPORTED_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22;
hns_get_mdix_mode(net_dev, cmd);
return 0;
}
/**
- *hns_nic_set_settings - implement ethtool set settings
+ *hns_nic_set_link_ksettings - implement ethtool set link ksettings
*@net_dev: net_device
- *@cmd: ethtool_cmd
+ *@cmd: ethtool_link_ksettings
*return 0 - success, negative - fail
*/
-static int hns_nic_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static int hns_nic_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_handle *h;
@@ -208,24 +217,25 @@ static int hns_nic_set_settings(struct net_device *net_dev,
return -ENODEV;
h = priv->ae_handle;
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (cmd->autoneg == AUTONEG_ENABLE || speed != SPEED_10000 ||
- cmd->duplex != DUPLEX_FULL)
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ speed != SPEED_10000 ||
+ cmd->base.duplex != DUPLEX_FULL)
return -EINVAL;
} else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
- if (!priv->phy && cmd->autoneg == AUTONEG_ENABLE)
+ if (!net_dev->phydev && cmd->base.autoneg == AUTONEG_ENABLE)
return -EINVAL;
- if (speed == SPEED_1000 && cmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
- if (priv->phy)
- return phy_ethtool_sset(priv->phy, cmd);
+ if (net_dev->phydev)
+ return phy_ethtool_ksettings_set(net_dev->phydev, cmd);
if ((speed != SPEED_10 && speed != SPEED_100 &&
- speed != SPEED_1000) || (cmd->duplex != DUPLEX_HALF &&
- cmd->duplex != DUPLEX_FULL))
+ speed != SPEED_1000) || (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL))
return -EINVAL;
} else {
netdev_err(net_dev, "Not supported!");
@@ -233,7 +243,7 @@ static int hns_nic_set_settings(struct net_device *net_dev,
}
if (h->dev->ops->adjust_link) {
- h->dev->ops->adjust_link(h, (int)speed, cmd->duplex);
+ h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
return 0;
}
@@ -305,7 +315,7 @@ static int __lb_setup(struct net_device *ndev,
{
int ret = 0;
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = ndev->phydev;
struct hnae_handle *h = priv->ae_handle;
switch (loop) {
@@ -910,7 +920,7 @@ void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
ETH_GSTRING_LEN);
buff += ETH_GSTRING_LEN;
- if ((priv->phy) && (!priv->phy->is_c45))
+ if ((netdev->phydev) && (!netdev->phydev->is_c45))
memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
ETH_GSTRING_LEN);
@@ -996,7 +1006,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
cnt--;
- if ((!priv->phy) || (priv->phy->is_c45))
+ if ((!netdev->phydev) || (netdev->phydev->is_c45))
cnt--;
return cnt;
@@ -1015,8 +1025,7 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
int hns_phy_led_set(struct net_device *netdev, int value)
{
int retval;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
@@ -1039,7 +1048,7 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- struct phy_device *phy_dev = priv->phy;
+ struct phy_device *phy_dev = netdev->phydev;
int ret;
if (phy_dev)
@@ -1159,8 +1168,7 @@ static int hns_get_regs_len(struct net_device *net_dev)
static int hns_nic_nway_reset(struct net_device *netdev)
{
int ret = 0;
- struct hns_nic_priv *priv = netdev_priv(netdev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = netdev->phydev;
if (netif_running(netdev)) {
if (phy)
@@ -1267,8 +1275,6 @@ static int hns_get_rxnfc(struct net_device *netdev,
static const struct ethtool_ops hns_ethtool_ops = {
.get_drvinfo = hns_nic_get_drvinfo,
.get_link = hns_nic_get_link,
- .get_settings = hns_nic_get_settings,
- .set_settings = hns_nic_set_settings,
.get_ringparam = hns_get_ringparam,
.get_pauseparam = hns_get_pauseparam,
.set_pauseparam = hns_set_pauseparam,
@@ -1288,6 +1294,8 @@ static const struct ethtool_ops hns_ethtool_ops = {
.get_rxfh = hns_get_rss,
.set_rxfh = hns_set_rss,
.get_rxnfc = hns_get_rxnfc,
+ .get_link_ksettings = hns_nic_get_link_ksettings,
+ .set_link_ksettings = hns_nic_set_link_ksettings,
};
void hns_ethtool_set_ops(struct net_device *ndev)
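
The ksettings API keeps link modes in long bitmaps rather than u32 masks, so code that still reasons in legacy SUPPORTED_*/ADVERTISED_* bits converts at the boundary, exactly as the hunks above do. The round-trip in isolation, using the real helpers:

	u32 supported = 0, advertising = 0;

	/* bitmap -> legacy u32 */
	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	/* adjust using the old-style bit masks */
	supported |= SUPPORTED_TP | SUPPORTED_Pause;
	advertising |= ADVERTISED_1000baseT_Full;

	/* legacy u32 -> bitmap */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);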
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index befb4ac3e2b0..ce235b776793 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -89,10 +89,10 @@ static char version[] __initdata =
#define DEB(x,y) if (i596_debug & (x)) y
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
+#if IS_ENABLED(CONFIG_MVME16x_NET)
#define ENABLE_MVME16x_NET
#endif
-#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
+#if IS_ENABLED(CONFIG_BVME6000_NET)
#define ENABLE_BVME6000_NET
#endif
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4c9771d57d6e..8f139197f1aa 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
dev->mcast_pending = 1;
return;
}
+
+ mutex_lock(&dev->link_lock);
__emac_set_multicast_list(dev);
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct sockaddr *addr = sa;
+ struct emac_regs __iomem *p = dev->emacp;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mutex_lock(&dev->link_lock);
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
.ndo_change_mtu = eth_change_mtu,
};
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit_sg,
.ndo_change_mtu = emac_change_mtu,
};
@@ -2750,7 +2780,7 @@ static int emac_probe(struct platform_device *ofdev)
/* Get interrupts. EMAC irq is mandatory, WOL irq is optional */
dev->emac_irq = irq_of_parse_and_map(np, 0);
dev->wol_irq = irq_of_parse_and_map(np, 1);
- if (dev->emac_irq == NO_IRQ) {
+ if (!dev->emac_irq) {
printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
goto err_free;
}
@@ -2913,9 +2943,9 @@ static int emac_probe(struct platform_device *ofdev)
err_reg_unmap:
iounmap(dev->emacp);
err_irq_unmap:
- if (dev->wol_irq != NO_IRQ)
+ if (dev->wol_irq)
irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
+ if (dev->emac_irq)
irq_dispose_mapping(dev->emac_irq);
err_free:
free_netdev(ndev);
@@ -2957,9 +2987,9 @@ static int emac_remove(struct platform_device *ofdev)
emac_dbg_unregister(dev);
iounmap(dev->emacp);
- if (dev->wol_irq != NO_IRQ)
+ if (dev->wol_irq)
irq_dispose_mapping(dev->wol_irq);
- if (dev->emac_irq != NO_IRQ)
+ if (dev->emac_irq)
irq_dispose_mapping(dev->emac_irq);
free_netdev(dev->ndev);
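
The NO_IRQ removals rely on a cross-arch invariant: irq_of_parse_and_map() returns 0 when no mapping exists, and 0 is never a valid Linux virq, so !irq is the portable test (NO_IRQ is 0 on powerpc but was -1 on some older ARM platforms). The idiom in isolation, with a hypothetical error path:

	unsigned int irq = irq_of_parse_and_map(np, 0);

	if (!irq) {	/* 0 means "no mapping"; never compare against NO_IRQ */
		dev_err(dev, "failed to map interrupt\n");
		return -ENODEV;
	}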
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fdb5cdb3cd15..aaf6fec566b5 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -597,9 +597,8 @@ static int mal_probe(struct platform_device *ofdev)
mal->rxde_irq = irq_of_parse_and_map(ofdev->dev.of_node, 4);
}
- if (mal->txeob_irq == NO_IRQ || mal->rxeob_irq == NO_IRQ ||
- mal->serr_irq == NO_IRQ || mal->txde_irq == NO_IRQ ||
- mal->rxde_irq == NO_IRQ) {
+ if (!mal->txeob_irq || !mal->rxeob_irq || !mal->serr_irq ||
+ !mal->txde_irq || !mal->rxde_irq) {
printk(KERN_ERR
"mal%d: failed to map interrupts !\n", index);
err = -ENODEV;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 62454d7a062a..bfe17d9c022d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1424,7 +1424,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
scrq = adapter->tx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
+ if (!scrq->irq) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_tx_irq_failed;
@@ -1444,7 +1444,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
scrq = adapter->rx_scrq[i];
scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
+ if (!scrq->irq) {
rc = -EINVAL;
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 2e1b17ad52a3..ad03763e009a 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -334,7 +334,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
e_err("ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
e_info("registered PHC clock\n");
}
}
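
ptp_clock_register() has a three-state result: an ERR_PTR on a real failure, NULL when CONFIG_PTP_1588_CLOCK is compiled out, and a valid pointer otherwise; the plain else branch wrongly logged success in the compiled-out case. The checking pattern in isolation (info and dev are the caller's):

	struct ptp_clock *clock = ptp_clock_register(&info, dev);

	if (IS_ERR(clock)) {
		clock = NULL;		/* real registration failure */
		pr_err("ptp_clock_register failed\n");
	} else if (clock) {
		pr_info("registered PHC clock\n");
	}
	/* clock == NULL here simply means PTP support is not built in */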
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 67ff01aeb11a..4d19e46f7c55 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -507,7 +507,7 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
- int vf_idx, u16 vid, u8 qos);
+ int vf_idx, u16 vid, u8 qos, __be16 vlan_proto);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index d9dec81f6b6d..5f4dac0d36ef 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -445,7 +445,7 @@ int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
}
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_iov_data *iov_data = interface->iov_data;
@@ -460,6 +460,10 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
if (qos || (vid > (VLAN_VID_MASK - 1)))
return -EINVAL;
+ /* VLAN protocols other than 802.1Q (the default) are unsupported */
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
vf_info = &iov_data->vf_info[vf_idx];
/* exit if there is nothing to do */
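
ndo_set_vf_vlan grew a __be16 vlan_proto argument so userspace can request 802.1ad (S-tag) VF VLANs; drivers that only understand 802.1Q must reject everything else, as fm10k now does. The minimal shape of a converted handler (foo_* names are placeholders):

	static int foo_ndo_set_vf_vlan(struct net_device *dev, int vf_idx,
				       u16 vid, u8 qos, __be16 vlan_proto)
	{
		if (vlan_proto != htons(ETH_P_8021Q))
			return -EPROTONOSUPPORT;	/* only C-tag VLANs */

		/* ...program the VF VLAN exactly as before... */
		return 0;
	}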
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 19103a6a7dcc..2030d7c1dc94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -74,7 +74,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
-#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
+#define I40E_MIN_VSI_ALLOC 83 /* LAN, ATR, FCOE, 64 VF */
/* max 16 qps */
#define i40e_default_queues_per_vmdq(pf) \
(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
@@ -701,6 +701,8 @@ void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
@@ -708,8 +710,6 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
bool printconfig);
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add);
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add);
void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 05cf9a719bab..0c1875b5b16d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1054,6 +1054,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
struct i40e_dcbx_config *r_cfg =
&pf->hw.remote_dcbx_config;
int i, ret;
+ u32 switch_id;
bw_data = kzalloc(sizeof(
struct i40e_aqc_query_port_ets_config_resp),
@@ -1063,8 +1064,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
+ vsi = pf->vsi[pf->lan_vsi];
+ switch_id =
+ vsi->info.switch_id & I40E_AQ_VSI_SW_ID_MASK;
+
ret = i40e_aq_query_port_ets_config(&pf->hw,
- pf->mac_seid,
+ switch_id,
bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
@@ -1425,84 +1430,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
kfree(desc);
desc = NULL;
- } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
- (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
- struct i40e_fdir_filter fd_data;
- u16 packet_len, i, j = 0;
- char *asc_packet;
- u8 *raw_packet;
- bool add = false;
- int ret;
-
- if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- if (strncmp(cmd_buf, "add", 3) == 0)
- add = true;
-
- if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
- goto command_write_done;
-
- asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
- if (!asc_packet)
- goto command_write_done;
-
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
- GFP_KERNEL);
-
- if (!raw_packet) {
- kfree(asc_packet);
- asc_packet = NULL;
- goto command_write_done;
- }
-
- cnt = sscanf(&cmd_buf[13],
- "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
- &fd_data.q_index,
- &fd_data.flex_off, &fd_data.pctype,
- &fd_data.dest_vsi, &fd_data.dest_ctl,
- &fd_data.fd_status, &fd_data.cnt_index,
- &fd_data.fd_id, &packet_len, asc_packet);
- if (cnt != 10) {
- dev_info(&pf->pdev->dev,
- "program fd_filter: bad command string, cnt=%d\n",
- cnt);
- kfree(asc_packet);
- asc_packet = NULL;
- kfree(raw_packet);
- goto command_write_done;
- }
-
- /* fix packet length if user entered 0 */
- if (packet_len == 0)
- packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
-
- /* make sure to check the max as well */
- packet_len = min_t(u16,
- packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
-
- for (i = 0; i < packet_len; i++) {
- cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
- if (!cnt)
- break;
- j += 3;
- }
- dev_info(&pf->pdev->dev, "FD raw packet dump\n");
- print_hex_dump(KERN_INFO, "FD raw packet: ",
- DUMP_PREFIX_OFFSET, 16, 1,
- raw_packet, packet_len, true);
- ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
- if (!ret) {
- dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
- } else {
- dev_info(&pf->pdev->dev,
- "Filter command send failed %d\n", ret);
- }
- kfree(raw_packet);
- raw_packet = NULL;
- kfree(asc_packet);
- asc_packet = NULL;
} else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
@@ -1727,8 +1654,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " globr\n");
dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n");
dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n");
- dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
- dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd current cnt");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1835186b62c9..92bc8846f1ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1970,11 +1970,22 @@ static int i40e_set_phys_id(struct net_device *netdev,
* 125us (8000 interrupts per second) == ITR(62)
*/
+/**
+ * __i40e_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
+ *
+ * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
+ * are per queue. If queue is <0 then we default to queue 0 as the
+ * representative value.
+ **/
static int __i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *rx_ring, *tx_ring;
struct i40e_vsi *vsi = np->vsi;
ec->tx_max_coalesced_frames_irq = vsi->work_limit;
@@ -1989,14 +2000,18 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting))
+ rx_ring = vsi->rx_rings[queue];
+ tx_ring = vsi->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
@@ -2010,18 +2025,44 @@ static int __i40e_get_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_get_coalesce - get a netdev's coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ *
+ * Gets the coalesce settings for a particular netdev. Note that if user has
+ * modified per-queue settings, this only guarantees to represent queue 0. See
+ * __i40e_get_coalesce for more details.
+ **/
static int i40e_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
+ * @netdev: netdev structure
+ * @ec: ethtool's coalesce settings
+ * @queue: the particular queue to read
+ *
+ * Will read a specific queue's coalesce settings
+ **/
static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
return __i40e_get_coalesce(netdev, ec, queue);
}
+/**
+ * i40e_set_itr_per_queue - set ITR values for specific queue
+ * @vsi: the VSI to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct ethtool_coalesce *ec,
int queue)
@@ -2060,6 +2101,14 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
i40e_flush(hw);
}
+/**
+ * __i40e_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue.
+ **/
static int __i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
int queue)
@@ -2120,12 +2169,27 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return 0;
}
+/**
+ * i40e_set_coalesce - set coalesce settings for every queue on the netdev
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ *
+ * This will set each queue to the same coalesce settings.
+ **/
static int i40e_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
return __i40e_set_coalesce(netdev, ec, -1);
}
+/**
+ * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @ec: ethtool's coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the specified queue's coalesce settings.
+ **/
static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
struct ethtool_coalesce *ec)
{
@@ -2922,15 +2986,13 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
u8 *seed = NULL;
u16 i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
- return 0;
-
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
@@ -2948,8 +3010,12 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
+ if (indir)
+ for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
+ vsi->rss_lut_user[i] = (u8)(indir[i]);
+ else
+ i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE,
+ vsi->rss_size);
return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
I40E_HLUT_ARRAY_SIZE);
@@ -3019,6 +3085,9 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
} else {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+
+ /* flush current ATR settings */
+ set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
}
if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
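
With the per-queue callbacks wired into the ethtool_ops, userspace can address one queue at a time (ethtool --per-queue <dev> queue_mask 0x1 --show-coalesce), while the whole-device entry points simply delegate with queue == -1. The registration, shown as an illustrative subset:

	static const struct ethtool_ops example_ethtool_ops = {
		/* ... existing callbacks ... */
		.get_coalesce		= i40e_get_coalesce,
		.set_coalesce		= i40e_set_coalesce,
		.get_per_queue_coalesce	= i40e_get_per_queue_coalesce,
		.set_per_queue_coalesce	= i40e_set_per_queue_coalesce,
	};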
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fcdea29be4ee..8176596932be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 12
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -57,8 +57,6 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
@@ -1317,7 +1315,7 @@ static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
element.vlan_tag = 0;
/* ...and some firmware does it this way. */
element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
- I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
@@ -1910,7 +1908,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
if (f->vlan == I40E_VLAN_ANY) {
del_list[num_del].vlan_tag = 0;
- cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
} else {
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan));
@@ -5112,9 +5110,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_CAPABLE;
- /* Enable DCB tagging only when more than one TC */
+ /* Enable DCB tagging only when more than one TC
+ * or explicitly disable if only one TC
+ */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
@@ -5240,7 +5242,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
@@ -5715,7 +5717,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -5937,13 +5939,17 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
- /* Wait for some more space to be available to turn on ATR */
+
+ /* Wait for some more space to be available to turn on ATR. We also
+ * must check that no existing ntuple rules for TCP are in effect
+ */
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ (pf->fd_tcp_rule == 0)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
}
}
@@ -5974,9 +5980,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
int fd_room;
int reg;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
@@ -5996,7 +5999,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
}
pf->fd_flush_timestamp = jiffies;
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6016,7 +6019,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6050,9 +6053,6 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
if (test_bit(__I40E_DOWN, &pf->state))
return;
- if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
- return;
-
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
i40e_fdir_flush_and_replay(pf);
@@ -7152,9 +7152,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
pf->pending_udp_bitmap &= ~BIT_ULL(i);
port = pf->udp_ports[i].index;
if (port)
- ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
- pf->udp_ports[i].type,
- NULL, NULL);
+ ret = i40e_aq_add_udp_tunnel(hw, port,
+ pf->udp_ports[i].type,
+ NULL, NULL);
else
ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -7895,6 +7895,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -8239,8 +8240,8 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
* @rss_table_size: Lookup table size
* @rss_size: Range of queue number for hashing
*/
-static void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
- u16 rss_table_size, u16 rss_size)
+void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
+ u16 rss_table_size, u16 rss_size)
{
u16 i;
@@ -8281,6 +8282,8 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
if (!vsi->rss_size)
vsi->rss_size = min_t(int, pf->alloc_rss_size,
vsi->num_queue_pairs);
+ if (!vsi->rss_size)
+ return -EINVAL;
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -8605,7 +8608,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_WB_ON_ITR_CAPABLE |
I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
I40E_FLAG_NO_PCI_LINK_CHECK |
- I40E_FLAG_100M_SGMII_CAPABLE |
I40E_FLAG_USE_SET_LLDP_MIB |
I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
} else if ((pf->hw.aq.api_maj_ver > 1) ||
@@ -8680,13 +8682,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* reset fd counters */
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
- (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+ (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ }
}
return need_reset;
}
@@ -10531,6 +10533,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10554,7 +10557,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10951,7 +10955,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -11331,11 +11335,7 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown the adminq */
- ret_code = i40e_shutdown_adminq(hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the Admin Queue resources: %d\n",
- ret_code);
+ i40e_shutdown_adminq(hw);
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
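
i40e_fill_rss_lut() is promoted from a file-local helper so that i40e_set_rxfh() can rebuild the default indirection table when ethtool passes indir == NULL instead of bailing out. Its effect is a round-robin spread of LUT entries over the active queues; a sketch of that behavior, assumed from how it is called here:

	/* fill each LUT slot with a queue index, cycling over rss_size queues */
	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;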
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ed39cbad24bd..f1feceab758a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -669,7 +669,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
pf->ptp_clock = NULL;
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
- } else {
+ } else if (pf->ptp_clock) {
struct timespec64 ts;
u32 regval;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index f8d66236fcbf..6287bf63c43c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -40,6 +40,69 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
}
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_fdir - Generate a Flow Director descriptor based on fdata
+ * @tx_ring: Tx ring to send buffer on
+ * @fdata: Flow director filter data
+ * @add: Indicate if we are adding a rule or deleting one
+ *
+ **/
+static void i40e_fdir(struct i40e_ring *tx_ring,
+ struct i40e_fdir_filter *fdata, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ u32 flex_ptype, dtype_cmd;
+ u16 i;
+
+ /* grab the next descriptor */
+ i = tx_ring->next_to_use;
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+ flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
+ (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
+ (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
+
+ flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
+ (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ /* Use LAN VSI Id if not programmed by user */
+ flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
+ ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= add ?
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT :
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT;
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
+ (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
+
+ dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
+ (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
+
+ if (fdata->cnt_index) {
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
+ ((u32)fdata->cnt_index <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
+ }
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->rsvd = cpu_to_le32(0);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+ fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
+}
+
#define I40E_FD_CLEAN_DELAY 10
/**
* i40e_program_fdir_filter - Program a Flow Director filter
@@ -48,14 +111,13 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
* @pf: The PF pointer
* @add: True for add/update, False for remove
**/
-int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
- struct i40e_pf *pf, bool add)
+static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
+ u8 *raw_packet, struct i40e_pf *pf,
+ bool add)
{
- struct i40e_filter_program_desc *fdir_desc;
struct i40e_tx_buffer *tx_buf, *first;
struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring;
- unsigned int fpt, dcc;
struct i40e_vsi *vsi;
struct device *dev;
dma_addr_t dma;
@@ -92,56 +154,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* grab the next descriptor */
i = tx_ring->next_to_use;
- fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
first = &tx_ring->tx_bi[i];
- memset(first, 0, sizeof(struct i40e_tx_buffer));
-
- tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
-
- fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
-
- fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
- I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
-
- fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
- I40E_TXD_FLTR_QW0_PCTYPE_MASK;
-
- /* Use LAN VSI Id if not programmed by user */
- if (fdir_data->dest_vsi == 0)
- fpt |= (pf->vsi[pf->lan_vsi]->id) <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
- else
- fpt |= ((u32)fdir_data->dest_vsi <<
- I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
- I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
-
- dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- if (add)
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
- else
- dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT;
-
- dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
- I40E_TXD_FLTR_QW1_DEST_MASK;
-
- dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
- I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
-
- if (fdir_data->cnt_index != 0) {
- dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dcc |= ((u32)fdir_data->cnt_index <<
- I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- }
-
- fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
- fdir_desc->rsvd = cpu_to_le32(0);
- fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
- fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+ i40e_fdir(tx_ring, fdir_data, add);
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
@@ -282,18 +296,18 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
- if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
- pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- }
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
} else {
pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ?
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
- pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+ I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
}
@@ -532,7 +546,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -545,9 +562,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
@@ -584,8 +598,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -754,8 +767,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1864,6 +1877,15 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->rx_rings[idx]->rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ return !!(vsi->tx_rings[idx]->tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1879,6 +1901,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
u32 rxval, txval;
int vector;
int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1887,18 +1910,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -2621,9 +2647,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -2654,8 +2678,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2787,9 +2810,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2814,13 +2835,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index b78c810d1835..508840585645 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
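
txring_txq() packages a pairing that was previously open-coded at every call site, which is what shrinks the netdev_tx_*_queue() calls in i40e_txrx.c above. Before and after at one call site:

	/* before */
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						   tx_ring->queue_index));

	/* after */
	netdev_tx_reset_queue(txring_txq(tx_ring));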
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index c92a3bdee229..f861d3109d1a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -163,6 +163,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index da3423561b3a..54b8ee2583f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -502,8 +502,16 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
u32 qtx_ctl;
int ret = 0;
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ ret = -ENOENT;
+ goto error_context;
+ }
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
vsi = i40e_find_vsi_from_id(pf, vsi_id);
+ if (!vsi) {
+ ret = -ENOENT;
+ goto error_context;
+ }
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
@@ -1476,7 +1484,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ !vsi) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2217,8 +2226,8 @@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP :
- I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
+ config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+ I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
aq_ret);
}
@@ -2747,11 +2756,12 @@ error_param:
* @vf_id: VF identifier
* @vlan_id: VLAN identifier
* @qos: priority setting
+ * @vlan_proto: vlan protocol
*
* program VF vlan id and/or qos
**/
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos)
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto)
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -2774,6 +2784,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
+ if (vlan_proto != htons(ETH_P_8021Q)) {
+ dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
+ ret = -EPROTONOSUPPORT;
+ goto error_pvid;
+ }
+
vf = &(pf->vf[vf_id]);
vsi = pf->vsi[vf->lan_vsi_idx];
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 875174141451..4012d069939a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -129,8 +129,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
/* VF configuration related iplink handlers */
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
- int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
+ u16 vlan_id, u8 qos, __be16 vlan_proto);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 4db0c0326185..7953c13451b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -302,7 +302,6 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = le16_to_cpu(aq_desc->datalen);
u8 *buf = (u8 *)buffer;
u16 i = 0;
@@ -326,6 +325,8 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
le32_to_cpu(aq_desc->params.external.addr_low));
if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ u16 len = le16_to_cpu(aq_desc->datalen);
+
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 0130458264e5..75f2a2cdd738 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -51,7 +51,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
struct i40e_tx_buffer *tx_buffer)
{
if (tx_buffer->skb) {
- dev_kfree_skb_any(tx_buffer->skb);
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+ kfree(tx_buffer->raw_buf);
+ else
+ dev_kfree_skb_any(tx_buffer->skb);
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(ring->dev,
dma_unmap_addr(tx_buffer, dma),
@@ -64,9 +67,6 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
DMA_TO_DEVICE);
}
- if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
- kfree(tx_buffer->raw_buf);
-
tx_buffer->next_to_watch = NULL;
tx_buffer->skb = NULL;
dma_unmap_len_set(tx_buffer, len, 0);
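[Editor's note] The reorder matters because skb and raw_buf share storage in the Tx buffer bookkeeping, so the FD_SB flag must be checked before the pointer is treated as an skb. Roughly the layout involved (paraphrased from the driver's headers):

	struct i40e_tx_buffer {
		struct i40e_tx_desc *next_to_watch;
		union {
			struct sk_buff *skb;	/* normal transmit */
			void *raw_buf;		/* flow-director raw buffer */
		};
		/* DMA unmap address/length, tx_flags, ... */
	};

With the old order, a raw_buf entry still passed the if (tx_buffer->skb) test and was handed to dev_kfree_skb_any() instead of kfree().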
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
return;
/* cleanup Tx queue statistics */
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index));
+ netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
tx_ring->arm_wb = true;
}
- netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
+ /* notify netdev of completed buffers */
+ netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -1312,6 +1311,19 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
+static inline int get_rx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->rx_rings[idx].rx_itr_setting);
+}
+
+static inline int get_tx_itr_enabled(struct i40e_vsi *vsi, int idx)
+{
+ struct i40evf_adapter *adapter = vsi->back;
+
+ return !!(adapter->tx_rings[idx].tx_itr_setting);
+}
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1326,6 +1338,8 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
bool rx = false, tx = false;
u32 rxval, txval;
int vector;
+ int idx = q_vector->v_idx;
+ int rx_itr_setting, tx_itr_setting;
vector = (q_vector->v_idx + vsi->base_vector);
@@ -1334,18 +1348,21 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*/
rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ rx_itr_setting = get_rx_itr_enabled(vsi, idx);
+ tx_itr_setting = get_tx_itr_enabled(vsi, idx);
+
if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) &&
- !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) {
+ (!ITR_IS_DYNAMIC(rx_itr_setting) &&
+ !ITR_IS_DYNAMIC(tx_itr_setting))) {
goto enable_int;
}
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(tx_itr_setting)) {
tx = i40e_set_new_dynamic_itr(&q_vector->tx);
txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
}
@@ -1832,9 +1849,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
return false;
/* We need to walk through the list and validate that each group
- * of 6 fragments totals at least gso_size. However we don't need
- * to perform such validation on the last 6 since the last 6 cannot
- * inherit any data from a descriptor after them.
+ * of 6 fragments totals at least gso_size.
*/
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
@@ -1865,8 +1880,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (sum < 0)
return true;
- /* use pre-decrement to avoid processing last fragment */
- if (!--nr_frags)
+ if (!nr_frags--)
break;
sum -= skb_frag_size(stale++);
@@ -2015,9 +2029,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
- netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index),
- first->bytecount);
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting:
@@ -2042,13 +2054,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
* trigger a force WB.
*/
if (skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index))) {
+ !netif_xmit_stopped(txring_txq(tx_ring))) {
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
tail_bump = false;
} else if (!skb->xmit_more &&
- !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->queue_index)) &&
+ !netif_xmit_stopped(txring_txq(tx_ring)) &&
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
(tx_ring->packet_stride < WB_STRIDE) &&
(desc_count < WB_STRIDE)) {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0112277e5882..abcdecabbc56 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -287,6 +287,14 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
u16 rx_buf_len;
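[Editor's note] The rx_itr_setting/tx_itr_setting fields pack two things into one u16: the adaptive ("dynamic") flag in the high bit and the user's microsecond value in the low bits; I40E_ITR_DYNAMIC is 0x8000 in these headers. A minimal illustration of the encoding, under that assumption:

	#define ITR_DYNAMIC	0x8000	/* stands in for I40E_ITR_DYNAMIC */

	static bool itr_is_dynamic(u16 setting)
	{
		return (setting & ITR_DYNAMIC) != 0;	/* adaptive moderation on */
	}

	static u16 itr_usecs(u16 setting)
	{
		return setting & ~ITR_DYNAMIC;		/* the value ethtool reports */
	}

This is why the ethtool code below masks with ~I40E_ITR_DYNAMIC before filling in rx/tx_coalesce_usecs.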
@@ -445,4 +453,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
(ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
}
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
#endif /* _I40E_TXRX_H_ */
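[Editor's note] txring_txq() is pure shorthand; both statements below resolve to the same netdev queue, which is what lets the call sites above collapse to one-liners:

	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
	txq = txring_txq(tx_ring);	/* equivalent after this patch */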
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index f04ce6cb70dc..bd691ad86673 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -160,6 +160,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00100000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index dc00aaf94687..c5fd724313c7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -59,13 +59,6 @@ struct i40e_vsi {
unsigned long state;
int base_vector;
u16 work_limit;
- /* high bit set means dynamic, use accessor routines to read/write.
- * hardware only supports 2us resolution for the ITR registers.
- * these values always store the USER setting, and must be converted
- * before programming to a register.
- */
- u16 rx_itr_setting;
- u16 tx_itr_setting;
u16 qs_handle;
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index e17a15456266..a9940154eead 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -296,93 +296,207 @@ static int i40evf_set_ringparam(struct net_device *netdev,
}
/**
- * i40evf_get_coalesce - Get interrupt coalescing settings
- * @netdev: network interface device structure
- * @ec: ethtool coalesce structure
+ * __i40evf_get_coalesce - get per-queue coalesce settings
+ * @netdev: the netdev to check
+ * @ec: ethtool coalesce data structure
+ * @queue: which queue to pick
*
- * Returns current coalescing settings. This is referred to elsewhere in the
- * driver as Interrupt Throttle Rate, as this is how the hardware describes
- * this functionality.
+ * Gets the per-queue coalescing settings; the Rx and Tx usecs values are
+ * per queue. If queue is < 0, queue 0's settings are returned as the
+ * representative value.
**/
-static int i40evf_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+static int __i40evf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_ring *rx_ring, *tx_ring;
ec->tx_max_coalesced_frames = vsi->work_limit;
ec->rx_max_coalesced_frames = vsi->work_limit;
- if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ /* Rx and Tx usecs are per-queue values. If the user doesn't specify
+ * a queue, return queue 0's values as representative.
+ */
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= adapter->num_active_queues)
+ return -EINVAL;
+
+ rx_ring = &adapter->rx_rings[queue];
+ tx_ring = &adapter->tx_rings[queue];
+
+ if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
/**
- * i40evf_set_coalesce - Set interrupt coalescing settings
+ * i40evf_get_coalesce - Get interrupt coalescing settings
* @netdev: network interface device structure
* @ec: ethtool coalesce structure
*
- * Change current coalescing settings.
+ * Returns current coalescing settings. This is referred to elsewhere in the
+ * driver as Interrupt Throttle Rate, as this is how the hardware describes
+ * this functionality. Note that if per-queue settings have been modified this
+ * only represents the settings of queue 0.
**/
-static int i40evf_set_coalesce(struct net_device *netdev,
+static int i40evf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_hw *hw = &adapter->hw;
+ return __i40evf_get_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_get_per_queue_coalesce - get coalesce values for specific queue
+ * @netdev: netdev to read
+ * @queue: the queue to read
+ * @ec: coalesce settings from ethtool
+ *
+ * Read specific queue's coalesce settings.
+ **/
+static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_get_coalesce(netdev, ec, queue);
+}
+
+/**
+ * i40evf_set_itr_per_queue - set ITR values for specific queue
+ * @adapter: the VF adapter to set values for
+ * @ec: coalesce settings from ethtool
+ * @queue: the queue to modify
+ *
+ * Change the ITR settings for a specific queue.
+ **/
+static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
struct i40e_vsi *vsi = &adapter->vsi;
+ struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
+ u16 vector;
+
+ adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
+ adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;
+
+ if (ec->use_adaptive_rx_coalesce)
+ adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ q_vector = adapter->rx_rings[queue].q_vector;
+ q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+
+ q_vector = adapter->tx_rings[queue].q_vector;
+ q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
+ vector = vsi->base_vector + q_vector->v_idx;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
+
+/**
+ * __i40evf_set_coalesce - set coalesce settings for particular queue
+ * @netdev: the netdev to change
+ * @ec: ethtool coalesce settings
+ * @queue: the queue to change
+ *
+ * Sets the coalesce settings for a particular queue, or for every queue when
+ * queue is < 0.
+ **/
+static int __i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-
- else
+ if (ec->rx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_rx_coalesce)
+ netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
+ } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
- (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- else if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
else
+ if (ec->tx_coalesce_usecs == 0) {
+ if (ec->use_adaptive_tx_coalesce)
+ netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
+ } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
+ }
- if (ec->use_adaptive_rx_coalesce)
- vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- if (ec->use_adaptive_tx_coalesce)
- vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
- else
- vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
-
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = &adapter->q_vectors[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ /* Rx and Tx usecs have per-queue values. If the user doesn't specify
+ * a queue, apply to all queues.
+ */
+ if (queue < 0) {
+ for (i = 0; i < adapter->num_active_queues; i++)
+ i40evf_set_itr_per_queue(adapter, ec, i);
+ } else if (queue < adapter->num_active_queues) {
+ i40evf_set_itr_per_queue(adapter, ec, queue);
+ } else {
+ netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
+ adapter->num_active_queues - 1);
+ return -EINVAL;
}
return 0;
}
/**
+ * i40evf_set_coalesce - Set interrupt coalescing settings
+ * @netdev: network interface device structure
+ * @ec: ethtool coalesce structure
+ *
+ * Change current coalescing settings for every queue.
+ **/
+static int i40evf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, -1);
+}
+
+/**
+ * i40evf_set_per_queue_coalesce - set specific queue's coalesce settings
+ * @netdev: the netdev to change
+ * @queue: the queue to modify
+ * @ec: ethtool's coalesce settings
+ *
+ * Modifies a specific queue's coalesce settings.
+ **/
+static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __i40evf_set_coalesce(netdev, ec, queue);
+}
+
+/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -533,6 +647,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
+ .set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
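[Editor's note] With get_per_queue_coalesce/set_per_queue_coalesce wired into the ethtool_ops, the generic per-queue ethtool interface applies to the VF driver. With a userspace ethtool recent enough to know the per-queue sub-commands, usage looks roughly like `ethtool --per-queue <dev> queue_mask 0x1 --coalesce rx-usecs 50` to touch queue 0 only, and `ethtool --per-queue <dev> queue_mask 0x1 --show-coalesce` to read it back; queue_mask is a hex bitmap of queue indices, and the device name and values here are illustrative.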
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f751f7bc0d81..14372810fc27 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 12
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -370,6 +370,7 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
+ struct i40e_hw *hw = &adapter->hw;
rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring;
@@ -377,7 +378,10 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
+ q_vector->ring_mask |= BIT(r_idx);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}
/**
@@ -391,6 +395,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
+ struct i40e_hw *hw = &adapter->hw;
tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring;
@@ -398,9 +403,10 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
q_vector->itr_countdown = ITR_COUNTDOWN_START;
q_vector->num_ringpairs++;
- q_vector->ring_mask |= BIT(t_idx);
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}
/**
@@ -1007,7 +1013,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
* i40evf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
**/
-static int i40evf_up_complete(struct i40evf_adapter *adapter)
+static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
adapter->state = __I40EVF_RUNNING;
clear_bit(__I40E_DOWN, &adapter->vsi.state);
@@ -1016,7 +1022,6 @@ static int i40evf_up_complete(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- return 0;
}
/**
@@ -1037,6 +1042,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
@@ -1154,6 +1160,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
+ tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1162,6 +1169,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
+ rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
}
return 0;
@@ -1731,6 +1739,7 @@ static void i40evf_reset_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
i40evf_irq_disable(adapter);
i40evf_free_traffic_irqs(adapter);
@@ -1769,6 +1778,7 @@ continue_reset:
if (netif_running(adapter->netdev)) {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
+ adapter->link_up = false;
i40evf_napi_disable_all(adapter);
}
i40evf_irq_disable(adapter);
@@ -1783,8 +1793,7 @@ continue_reset:
i40evf_free_all_tx_resources(adapter);
/* kill and reinit the admin queue */
- if (i40evf_shutdown_adminq(hw))
- dev_warn(&adapter->pdev->dev, "Failed to shut down adminq\n");
+ i40evf_shutdown_adminq(hw);
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
err = i40evf_init_adminq(hw);
if (err)
@@ -1824,9 +1833,7 @@ continue_reset:
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto reset_err;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
} else {
@@ -2056,9 +2063,7 @@ static int i40evf_open(struct net_device *netdev)
i40evf_add_filter(adapter, adapter->hw.mac.addr);
i40evf_configure(adapter);
- err = i40evf_up_complete(adapter);
- if (err)
- goto err_req_irq;
+ i40evf_up_complete(adapter);
i40evf_irq_enable(adapter, true);
@@ -2272,10 +2277,6 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
vsi->netdev = adapter->netdev;
vsi->qs_handle = adapter->vsi_res->qset_handle;
if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2457,6 +2458,7 @@ static void i40evf_init_task(struct work_struct *work)
goto err_sw_init;
netif_carrier_off(netdev);
+ adapter->link_up = false;
if (!adapter->netdev_registered) {
err = register_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index cc6cb30c1667..ddf478d6322b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -898,8 +898,14 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
vpe->event_data.link_event.link_status) {
adapter->link_up =
vpe->event_data.link_event.link_status;
+ if (adapter->link_up) {
+ netif_tx_start_all_queues(netdev);
+ netif_carrier_on(netdev);
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
i40evf_print_link_message(adapter);
- netif_tx_stop_all_queues(netdev);
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
@@ -974,8 +980,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
- netif_tx_start_all_queues(adapter->netdev);
- netif_carrier_on(adapter->netdev);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
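[Editor's note] Carrier and queue state now follow the PF's VIRTCHNL link event instead of being switched on unconditionally when ENABLE_QUEUES completes: queues start (and carrier asserts) only once the PF actually reports link up, and both are torn down again on link loss. That is why the netif_tx_start_all_queues()/netif_carrier_on() pair moved out of the ENABLE_QUEUES case and into the link-event handler above, paired with the new adapter->link_up bookkeeping in the main driver.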
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index af75eac5fa16..a83aa13a5bf4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -169,7 +169,7 @@ static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos);
+ int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
@@ -6222,14 +6222,17 @@ static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
return 0;
}
-static int igb_ndo_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct igb_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
igb_disable_port_vlan(adapter, vf);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 66dfa2085cc7..1dd14e166dc8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -1159,7 +1159,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
if (IS_ERR(adapter->ptp_clock)) {
adapter->ptp_clock = NULL;
dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
- } else {
+ } else if (adapter->ptp_clock) {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
adapter->ptp_flags |= IGB_PTP_ENABLED;
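[Editor's note] The extra else if (adapter->ptp_clock) distinguishes the third possible outcome of ptp_clock_register(): an ERR_PTR() on failure, a valid clock on success, or NULL from the stub used when CONFIG_PTP_1588_CLOCK is not enabled. A caller therefore has three cases to tell apart, roughly (info and parent are placeholders for the driver's real arguments):

	struct ptp_clock *clk = ptp_clock_register(&info, parent);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* real registration failure */
	if (!clk)
		return 0;		/* PTP core compiled out; run without a PHC */
	/* clk is usable from here on */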
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 33c025055011..b06e32d0d22a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -45,10 +45,10 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif /* IS_ENABLED(CONFIG_FCOE) */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif
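[Editor's note] IS_ENABLED(CONFIG_FOO) is the kconfig.h helper that evaluates to 1 when the option is built in (=y) or modular (=m), so it replaces the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) pair without changing behavior; the same conversion appears for mvneta_bm.h further down. Unlike a plain #ifdef it also works in ordinary C conditions, e.g. (ixgbe_setup_fcoe() is hypothetical, for illustration only):

	if (IS_ENABLED(CONFIG_FCOE))
		ixgbe_setup_fcoe(adapter);	/* branch compiled out when FCoE is off */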
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9547191e26c9..f49f80380aa5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -313,6 +313,25 @@ static int ixgbe_get_settings(struct net_device *netdev,
break;
}
+ /* Indicate pause support */
+ ecmd->supported |= SUPPORTED_Pause;
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ ecmd->advertising |= ADVERTISED_Pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ ecmd->advertising |= ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ break;
+ case ixgbe_fc_tx_pause:
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ break;
+ default:
+ ecmd->advertising &= ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ }
+
if (netif_carrier_ok(netdev)) {
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
@@ -2928,9 +2947,13 @@ static u32 ixgbe_rss_indir_size(struct net_device *netdev)
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
+ u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;
for (i = 0; i < reta_size; i++)
- indir[i] = adapter->rss_indir_tbl[i];
+ indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -3041,8 +3064,8 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
/* We only support one q_vector without MSI-X */
max_combined = 1;
} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- /* SR-IOV currently only allows one queue on the PF */
- max_combined = 1;
+ /* Limit value based on the queue mask */
+ max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
} else if (tcs > 1) {
/* For DCB report channels per traffic class */
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index bcdc88444ceb..15ab337fd7ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -515,15 +515,16 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
/* 64 pool mode with 2 queues per pool */
- if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) {
+ if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
rss_m = IXGBE_RSS_2Q_MASK;
rss_i = min_t(u16, rss_i, 2);
- /* 32 pool mode with 4 queues per pool */
+ /* 32 pool mode with up to 4 queues per pool */
} else {
vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
rss_m = IXGBE_RSS_4Q_MASK;
- rss_i = 4;
+ /* We can support 4, 2, or 1 queues */
+ rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
}
#ifdef IXGBE_FCOE
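[Editor's note] The ternary rounds the requested RSS count down to a pool size the hardware supports: 4 or more is capped at 4, 2 or 3 becomes 2, and 1 stays 1.

	rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	/* e.g. 8 -> 4, 3 -> 2, 1 -> 1 */

Together with dropping the rss_i < 4 test above, a small RSS request no longer forces the 2-queue pool mask; the 4-queue mask is kept and rss_i is simply rounded down.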
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d76bc1a313ea..a244d9a67264 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3248,7 +3248,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
else if (tcs > 1)
mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mtqc |= IXGBE_MTQC_32VF;
else
mtqc |= IXGBE_MTQC_64VF;
@@ -3475,12 +3476,12 @@ static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- /* Program table for at least 2 queues w/ SR-IOV so that VFs can
+ /* Program table for at least 4 queues w/ SR-IOV so that VFs can
* make full use of any rings they may have. We will use the
* PSRTYPE register to control how many rings we use within the PF.
*/
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
- rss_i = 2;
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
+ rss_i = 4;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -3544,7 +3545,8 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
else if (tcs > 1)
mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
- else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ else if (adapter->ring_feature[RING_F_VMDQ].mask ==
+ IXGBE_82599_VMDQ_4Q_MASK)
mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
mrqc = IXGBE_MRQC_VMDQRSS64EN;
@@ -4105,23 +4107,20 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
- /* legacy case, we can just disable VLAN filtering */
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ } else {
vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
+ /* Nothing to do for 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
/* We are already in VLAN promisc, nothing to do */
if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
return;
@@ -4129,10 +4128,6 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
- /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4204,19 +4199,9 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
vlnctrl |= IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
- switch (hw->mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- case ixgbe_mac_x550em_a:
- default:
- if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
- break;
- /* fall through */
- case ixgbe_mac_82598EB:
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
+ hw->mac.type == ixgbe_mac_82598EB)
return;
- }
/* We are not in VLAN promisc, nothing to do */
if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index db0731e05401..021ab9b89c71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -346,8 +346,8 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
return 0;
}
}
- /* clear value if nothing found */
- hw->phy.mdio.prtad = 0;
+ /* indicate no PHY found */
+ hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
return IXGBE_ERR_PHY_ADDR_INVALID;
}
return 0;
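[Editor's note] Address 0 is a legal MDIO PHY address, so using it as the "nothing found" marker was ambiguous; MDIO_PRTAD_NONE is the out-of-band sentinel the MDIO layer already defines for exactly this. From the uapi mdio header, for reference:

	#define MDIO_PRTAD_NONE		(-1)

The x550em SFP path later in this diff is updated to test against the same sentinel instead of the old 0/0xFFFF checks.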
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index e5431bfe3339..a92277683a64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1254,7 +1254,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
return err;
- } else
+ } else if (adapter->ptp_clock)
e_dev_info("registered PHC device on %s\n", netdev->name);
/* set default timestamp mode to disabled here. We do this in
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8618599dfd6f..7e5d9850e4b2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -329,13 +329,15 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
for (i = 0; i < adapter->num_vfs; i++)
ixgbe_vf_configuration(dev, (i | 0x10000000));
+ /* reset before enabling SRIOV to avoid mailbox issues */
+ ixgbe_sriov_reinit(adapter);
+
err = pci_enable_sriov(dev, num_vfs);
if (err) {
e_dev_warn("Failed to enable PCI sriov: %d\n", err);
return err;
}
ixgbe_get_vfs(adapter);
- ixgbe_sriov_reinit(adapter);
return num_vfs;
#else
@@ -1354,13 +1356,16 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
return err;
}
-int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
+int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 vlan_proto)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
if (vlan || qos) {
/* Check if there is already a port VLAN set, if so
* we have to delete the old one first before we
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 47e65e2f886a..0c7977d27b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -43,7 +43,7 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e092a8929413..7e6b9267ca9d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1459,7 +1459,7 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
/* Configure internal PHY for KR/KX. */
ixgbe_setup_kr_speed_x550em(hw, speed);
- if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
return IXGBE_ERR_PHY_ADDR_INVALID;
/* Get external PHY device id */
@@ -2125,7 +2125,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure
* @led_idx: led number to turn on
**/
-s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -2147,7 +2147,7 @@ s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
* @hw: pointer to hardware structure
* @led_idx: led number to turn off
**/
-s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
+static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
{
u16 phy_data;
@@ -3049,6 +3049,8 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
.identify = &ixgbe_identify_phy_x550em,
.read_reg = &ixgbe_read_phy_reg_x550a,
.write_reg = &ixgbe_write_phy_reg_x550a,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
};
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4044608083cd..7eaac3234049 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1810,8 +1810,10 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
dev_err(&adapter->pdev->dev,
"Failed to set MTU at %d\n", netdev->mtu);
@@ -3758,8 +3760,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
+ spin_lock_bh(&adapter->mbx_lock);
/* notify the PF of our intent to use this size of frame */
ret = hw->mac.ops.set_rlpml(hw, max_frame);
+ spin_unlock_bh(&adapter->mbx_lock);
if (ret)
return -EINVAL;
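[Editor's note] Both set_rlpml call sites now bracket the mailbox transaction with mbx_lock: the VF mailbox is a single shared channel to the PF, and unserialized users (Rx configuration, MTU changes, the mailbox watchdog) could otherwise interleave messages. The pattern this change establishes:

	spin_lock_bh(&adapter->mbx_lock);
	ret = hw->mac.ops.set_rlpml(hw, max_frame);
	spin_unlock_bh(&adapter->mbx_lock);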
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index e74fd44a92f7..a32de432800c 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -133,7 +133,7 @@ struct mvneta_bm_pool {
void *mvneta_frag_alloc(unsigned int frag_size);
void mvneta_frag_free(unsigned int frag_size, void *data);
-#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE)
+#if IS_ENABLED(CONFIG_MVNETA_BM)
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
struct mvneta_bm_pool *bm_pool, u8 port_map);
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 473977d6e9e8..4cc50c03c12b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
@@ -50,6 +51,10 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
+static const char * const mtk_clks_source_name[] = {
+ "ethif", "esw", "gp1", "gp2", "trgpll"
+};
+
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
__raw_writel(val, eth->base + reg);
@@ -130,6 +135,33 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+{
+ u32 val;
+ int ret;
+
+ val = (speed == SPEED_1000) ?
+ INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
+ mtk_w32(eth, val, INTF_MODE);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_CLK_SEL362_5,
+ ETHSYS_TRGMII_CLK_SEL362_5);
+
+ val = (speed == SPEED_1000) ? 250000000 : 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+
+ val = (speed == SPEED_1000) ?
+ RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_RCK_CTRL);
+
+ val = (speed == SPEED_1000) ?
+ TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
+ mtk_w32(eth, val, TRGMII_TCK_CTRL);
+}
+
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -140,7 +172,10 @@ static void mtk_phy_link_adjust(struct net_device *dev)
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
MAC_MCR_BACKPR_EN;
- switch (mac->phy_dev->speed) {
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
+ switch (dev->phydev->speed) {
case SPEED_1000:
mcr |= MAC_MCR_SPEED_1000;
break;
@@ -149,20 +184,23 @@ static void mtk_phy_link_adjust(struct net_device *dev)
break;
};
- if (mac->phy_dev->link)
+ if (mac->id == 0 && !mac->trgmii)
+ mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);
+
+ if (dev->phydev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex) {
+ if (dev->phydev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
+ if (dev->phydev->pause)
rmt_adv = LPA_PAUSE_CAP;
- if (mac->phy_dev->asym_pause)
+ if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Pause)
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
lcl_adv |= ADVERTISE_PAUSE_ASYM;
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
@@ -179,7 +217,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
- if (mac->phy_dev->link)
+ if (dev->phydev->link)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
@@ -188,17 +226,9 @@ static void mtk_phy_link_adjust(struct net_device *dev)
static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
struct device_node *phy_node)
{
- const __be32 *_addr = NULL;
struct phy_device *phydev;
- int phy_mode, addr;
-
- _addr = of_get_property(phy_node, "reg", NULL);
+ int phy_mode;
- if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
- pr_err("%s: invalid phy address\n", phy_node->name);
- return -EINVAL;
- }
- addr = be32_to_cpu(*_addr);
phy_mode = of_get_phy_mode(phy_node);
if (phy_mode < 0) {
dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
@@ -217,17 +247,17 @@ static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
mac->id, phydev_name(phydev), phydev->phy_id,
phydev->drv->name);
- mac->phy_dev = phydev;
-
return 0;
}
-static int mtk_phy_connect(struct mtk_mac *mac)
+static int mtk_phy_connect(struct net_device *dev)
{
- struct mtk_eth *eth = mac->hw;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth;
struct device_node *np;
- u32 val, ge_mode;
+ u32 val;
+ eth = mac->hw;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
if (!np && of_phy_is_fixed_link(mac->of_node))
if (!of_phy_register_fixed_link(mac->of_node))
@@ -236,22 +266,24 @@ static int mtk_phy_connect(struct mtk_mac *mac)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ mac->trgmii = true;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
- ge_mode = 0;
+ mac->ge_mode = 0;
break;
case PHY_INTERFACE_MODE_MII:
- ge_mode = 1;
+ mac->ge_mode = 1;
break;
case PHY_INTERFACE_MODE_REVMII:
- ge_mode = 2;
+ mac->ge_mode = 2;
break;
case PHY_INTERFACE_MODE_RMII:
if (!mac->id)
goto err_phy;
- ge_mode = 3;
+ mac->ge_mode = 3;
break;
default:
goto err_phy;
@@ -260,23 +292,26 @@ static int mtk_phy_connect(struct mtk_mac *mac)
/* put the gmac into the right mode */
regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
- val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
- mtk_phy_connect_node(eth, mac, np);
- mac->phy_dev->autoneg = AUTONEG_ENABLE;
- mac->phy_dev->speed = 0;
- mac->phy_dev->duplex = 0;
+ /* couple phydev to net_device */
+ if (mtk_phy_connect_node(eth, mac, np))
+ goto err_phy;
+
+ dev->phydev->autoneg = AUTONEG_ENABLE;
+ dev->phydev->speed = 0;
+ dev->phydev->duplex = 0;
if (of_phy_is_fixed_link(mac->of_node))
- mac->phy_dev->supported |=
+ dev->phydev->supported |=
SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
- mac->phy_dev->advertising = mac->phy_dev->supported |
+ dev->phydev->advertising = dev->phydev->supported |
ADVERTISED_Autoneg;
- phy_start_aneg(mac->phy_dev);
+ phy_start_aneg(dev->phydev);
of_node_put(np);
@@ -284,14 +319,14 @@ static int mtk_phy_connect(struct mtk_mac *mac)
err_phy:
of_node_put(np);
- dev_err(eth->dev, "invalid phy_mode\n");
+ dev_err(eth->dev, "%s: invalid phy\n", __func__);
return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
- int err;
+ int ret;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@@ -300,13 +335,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
if (!of_device_is_available(mii_np)) {
- err = 0;
+ ret = -ENODEV;
goto err_put_node;
}
- eth->mii_bus = mdiobus_alloc();
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_put_node;
}
@@ -317,19 +352,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- mdiobus_free(eth->mii_bus);
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
+ return ret;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -338,8 +365,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
return;
mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth,
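[Editor's note] Switching to devm_mdiobus_alloc() ties the bus allocation to eth->dev's lifetime, which is what allows the err_free_bus unwind label and the explicit mdiobus_free() in mtk_mdio_cleanup() to go away; only mdiobus_unregister() still needs to be called by hand. The resulting init shape is the usual devm pattern:

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus)
		return -ENOMEM;		/* bus memory is released on driver detach */
	ret = of_mdiobus_register(eth->mii_bus, mii_np);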
@@ -375,6 +400,9 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
if (ret)
return ret;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
spin_lock_bh(&mac->hw->page_lock);
mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
MTK_GDMA_MAC_ADRH(mac->id));
@@ -589,14 +617,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0;
+ u32 txd4 = 0, fport;
itxd = ring->next_free;
if (itxd == ring->last_free)
return -ENOMEM;
/* set the forward port */
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ txd4 |= fport;
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
memset(tx_buf, 0, sizeof(*tx_buf));
@@ -654,7 +683,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, 0);
+ WRITE_ONCE(txd->txd4, fport);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -774,6 +803,9 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock(&eth->page_lock);
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto drop;
+
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
mtk_stop_queue(eth);
@@ -815,11 +847,51 @@ drop:
return NETDEV_TX_OK;
}
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+ int i;
+ struct mtk_rx_ring *ring;
+ int idx;
+
+ if (!eth->hwlro)
+ return &eth->rx_ring[0];
+
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+ }
+ }
+
+ return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring;
+ int i;
+
+ if (!eth->hwlro) {
+ ring = &eth->rx_ring[0];
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ } else {
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ if (ring->calc_idx_update) {
+ ring->calc_idx_update = false;
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ }
+ }
+ }
+}
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
- int idx = ring->calc_idx;
+ struct mtk_rx_ring *ring;
+ int idx;
struct sk_buff *skb;
u8 *data, *new_data;
struct mtk_rx_dma *rxd, trxd;
@@ -831,7 +903,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
dma_addr_t dma_addr;
int mac = 0;
- idx = NEXT_RX_DESP_IDX(idx);
+ ring = mtk_get_rx_ring(eth);
+ if (unlikely(!ring))
+ goto rx_done;
+
+ idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -846,6 +922,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev = eth->netdev[mac];
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto release_desc;
+
/* alloc new buffer */
new_data = napi_alloc_frag(ring->frag_size);
if (unlikely(!new_data)) {
@@ -865,7 +944,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
+ skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
@@ -899,12 +978,13 @@ release_desc:
done++;
}
+rx_done:
if (done) {
/* make sure that all changes to the dma ring are flushed before
* we continue
*/
wmb();
- mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0);
+ mtk_update_rx_cpu_idx(eth);
}
return done;
@@ -1127,32 +1207,41 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
}
-static int mtk_rx_alloc(struct mtk_eth *eth)
+static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+ int rx_data_len, rx_dma_size;
int i;
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
+ if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+ rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+ rx_dma_size = MTK_DMA_SIZE;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
ring->buf_size = mtk_max_buf_size(ring->frag_size);
- ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
+ ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
GFP_KERNEL);
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
ring->data[i] = netdev_alloc_frag(ring->frag_size);
if (!ring->data[i])
return -ENOMEM;
}
ring->dma = dma_alloc_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ rx_dma_size * sizeof(*ring->dma),
&ring->phys,
GFP_ATOMIC | __GFP_ZERO);
if (!ring->dma)
return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
ring->data[i] + NET_SKB_PAD,
ring->buf_size,
@@ -1163,27 +1252,30 @@ static int mtk_rx_alloc(struct mtk_eth *eth)
ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
- ring->calc_idx = MTK_DMA_SIZE - 1;
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+ ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0);
- mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0);
- mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+ mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
return 0;
}
-static void mtk_rx_clean(struct mtk_eth *eth)
+static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
{
- struct mtk_rx_ring *ring = &eth->rx_ring;
+ struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
int i;
if (ring->data && ring->dma) {
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring->dma_size; i++) {
if (!ring->data[i])
continue;
if (!ring->dma[i].rxd1)
@@ -1200,13 +1292,274 @@ static void mtk_rx_clean(struct mtk_eth *eth)
if (ring->dma) {
dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
+ ring->dma_size * sizeof(*ring->dma),
ring->dma,
ring->phys);
ring->dma = NULL;
}
}
+static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+{
+ int i;
+ u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
+ u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
+
+ /* set LRO rings to auto-learn modes */
+ ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
+
+ /* validate LRO ring */
+ ring_ctrl_dw2 |= MTK_RING_VLD;
+
+ /* set AGE timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
+ ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
+
+ /* set max AGG timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
+
+ /* set max LRO AGG count */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
+ ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
+
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
+ }
+
+ /* IPv4 checksum update enable */
+ lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
+
+ /* switch priority comparison to packet count mode */
+ lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
+
+ /* bandwidth threshold setting */
+ mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
+
+ /* auto-learn score delta setting */
+ mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
+
+ /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
+ mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
+ MTK_PDMA_LRO_ALT_REFRESH_TIMER);
+
+ /* set HW LRO mode & the max aggregation count for rx packets */
+ lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
+
+ /* the minimal remaining room of SDL0 in RXD for lro aggregation */
+ lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
+
+ /* enable HW LRO */
+ lro_ctrl_dw0 |= MTK_LRO_EN;
+
+ mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
+ mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
+
+ return 0;
+}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ /* relinquish lro rings, flush aggregated packets */
+ mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+ /* wait for relinquishments done */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+ if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+ msleep(20);
+ continue;
+ }
+ }
+
+ /* invalidate lro rings */
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+ /* disable HW LRO */
+ mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+ /* validate the IP setting */
+ mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if ((fsp->flow_type != TCP_V4_FLOW) ||
+ (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+ (fsp->location > 1))
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+ return 0;
+}
+
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if (fsp->location > 1)
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+ return 0;
+}
+
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, hwlro_idx;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ mac->hwlro_ip[i] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+ }
+
+ mac->hwlro_ip_cnt = 0;
+}
+
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ /* only the destination IPv4 address of a TCP flow is used; all other fields are ignored */
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+ fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+ fsp->h_u.tcp_ip4_spec.ip4src = 0;
+ fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+ fsp->h_u.tcp_ip4_spec.psrc = 0;
+ fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = 0;
+ fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+ fsp->h_u.tcp_ip4_spec.tos = 0;
+ fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+ return 0;
+}
+
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i]) {
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_LRO)) {
+ struct mtk_mac *mac = netdev_priv(dev);
+ int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ if (ip_cnt) {
+ netdev_info(dev, "RX flow rules are programmed, LRO must stay enabled\n");
+
+ features |= NETIF_F_LRO;
+ }
+ }
+
+ return features;
+}
+
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+ int err = 0;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if (!(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ return err;
+}
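
These two hooks rely on the core feature-update ordering: ndo_fix_features runs first and may massage the requested feature set (here it re-asserts NETIF_F_LRO while flow rules are still programmed, so LRO cannot be switched off underneath an active rule), after which ndo_set_features reacts to whatever actually changed. A heavily simplified sketch of that ordering, not the actual net/core implementation:

    netdev_features_t features = mtk_fix_features(dev, dev->wanted_features);

    if ((dev->features ^ features) & NETIF_F_LRO)
        mtk_set_features(dev, features);  /* tears down the LRO rules */
    dev->features = features;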
+
/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
@@ -1227,6 +1580,7 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
static int mtk_dma_init(struct mtk_eth *eth)
{
int err;
+ u32 i;
if (mtk_dma_busy_wait(eth))
return -EBUSY;
@@ -1242,10 +1596,21 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (err)
return err;
- err = mtk_rx_alloc(eth);
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
return err;
+ if (eth->hwlro) {
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+ if (err)
+ return err;
+ }
+ err = mtk_hwlro_rx_init(eth);
+ if (err)
+ return err;
+ }
+
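Ring 0 keeps servicing ordinary receive traffic while the remaining PDMA rings are handed to the hardware LRO engine, which is why both this allocation loop and the register setup in mtk_hwlro_rx_init() start at index 1:

    /* RX ring roles with MTK_MAX_RX_RING_NUM == 4:
     *   ring 0:    normal RX     (MTK_RX_FLAGS_NORMAL)
     *   rings 1-3: HW LRO rings  (MTK_RX_FLAGS_HWLRO)
     */
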
/* Enable random early drop and set drop threshold automatically */
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
MTK_QDMA_FC_THRES);
@@ -1270,7 +1635,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
eth->phy_scratch_ring = 0;
}
mtk_tx_clean(eth);
- mtk_rx_clean(eth);
+ mtk_rx_clean(eth, 0);
+
+ if (eth->hwlro) {
+ mtk_hwlro_rx_uninit(eth);
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_rx_clean(eth, i);
+ }
+
kfree(eth->scratch_head);
}
@@ -1365,7 +1737,7 @@ static int mtk_open(struct net_device *dev)
}
atomic_inc(&eth->dma_refcnt);
- phy_start(mac->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -1400,7 +1772,7 @@ static int mtk_stop(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
netif_tx_disable(dev);
- phy_stop(mac->phy_dev);
+ phy_stop(dev->phydev);
/* only shutdown DMA if this is the last user */
if (!atomic_dec_and_test(&eth->dma_refcnt))
@@ -1418,15 +1790,44 @@ static int mtk_stop(struct net_device *dev)
return 0;
}
-static int __init mtk_hw_init(struct mtk_eth *eth)
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
- int err, i;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ reset_bits);
+
+ usleep_range(1000, 1100);
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ ~reset_bits);
+ mdelay(10);
+}
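
ethsys_reset() pulses reset bits through the ethsys syscon. regmap_update_bits(map, reg, mask, val) read-modify-writes only the bits selected by mask, so passing ~reset_bits (equivalently, 0) as the value clears exactly the bits that were asserted. Illustrated for the frame engine reset used below:

    /* assert RSTCTRL_FE, hold ~1 ms, deassert, let the block settle */
    regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, RSTCTRL_FE);
    usleep_range(1000, 1100);
    regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, 0);
    mdelay(10);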
+
+static int mtk_hw_init(struct mtk_eth *eth)
+{
+ int i, val;
- /* reset the frame engine */
- reset_control_assert(eth->rstc);
- usleep_range(10, 20);
- reset_control_deassert(eth->rstc);
- usleep_range(10, 20);
+ if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
+
+ clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
+ ethsys_reset(eth, RSTCTRL_FE);
+ ethsys_reset(eth, RSTCTRL_PPE);
+
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i])
+ continue;
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
+ val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
+ }
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
/* Set GE2 driving and slew rate */
regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
@@ -1446,19 +1847,6 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLAN Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
- err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
- dev_name(eth->dev), eth);
- if (err)
- return err;
-
- err = mtk_mdio_init(eth);
- if (err)
- return err;
-
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
@@ -1490,6 +1878,22 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
return 0;
}
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+ if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+
+ return 0;
+}
+
static int __init mtk_init(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -1508,7 +1912,7 @@ static int __init mtk_init(struct net_device *dev)
dev->addr_assign_type = NET_ADDR_RANDOM;
}
- return mtk_phy_connect(mac);
+ return mtk_phy_connect(dev);
}
static void mtk_uninit(struct net_device *dev)
@@ -1516,23 +1920,18 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- phy_disconnect(mac->phy_dev);
- mtk_mdio_cleanup(eth);
+ phy_disconnect(dev->phydev);
mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
- free_irq(eth->irq[1], dev);
- free_irq(eth->irq[2], dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
-
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
default:
break;
}
@@ -1548,6 +1947,12 @@ static void mtk_pending_work(struct work_struct *work)
rtnl_lock();
+ dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
+
+ while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
+ cpu_relax();
+
+ dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
@@ -1555,6 +1960,27 @@ static void mtk_pending_work(struct work_struct *work)
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
+ dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
+
+ /* restart underlying hardware such as power, clock, pin mux
+ * and the connected phy
+ */
+ mtk_hw_deinit(eth);
+
+ if (eth->dev->pins)
+ pinctrl_select_state(eth->dev->pins->p,
+ eth->dev->pins->default_state);
+ mtk_hw_init(eth);
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->mac[i] ||
+ of_phy_is_fixed_link(eth->mac[i]->of_node))
+ continue;
+ err = phy_init_hw(eth->netdev[i]->phydev);
+ if (err)
+ dev_err(eth->dev, "%s: PHY init failed.\n",
+ eth->netdev[i]->name);
+ }
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
@@ -1567,51 +1993,69 @@ static void mtk_pending_work(struct work_struct *work)
dev_close(eth->netdev[i]);
}
}
+
+ dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+
+ clear_bit_unlock(MTK_RESETTING, &eth->state);
+
rtnl_unlock();
}
-static int mtk_cleanup(struct mtk_eth *eth)
+static int mtk_free_dev(struct mtk_eth *eth)
{
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
+ free_netdev(eth->netdev[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+ int i;
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
unregister_netdev(eth->netdev[i]);
- free_netdev(eth->netdev[i]);
}
+
+ return 0;
+}
+
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+ mtk_unreg_dev(eth);
+ mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
return 0;
}
-static int mtk_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+int mtk_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
+ struct mtk_mac *mac = netdev_priv(ndev);
- err = phy_read_status(mac->phy_dev);
- if (err)
- return -ENODEV;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_gset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_get(ndev->phydev, cmd);
}
-static int mtk_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+int mtk_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
{
- struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_mac *mac = netdev_priv(ndev);
- if (cmd->phy_address != mac->phy_dev->mdio.addr) {
- mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
- cmd->phy_address);
- if (!mac->phy_dev)
- return -ENODEV;
- }
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
- return phy_ethtool_sset(mac->phy_dev, cmd);
+ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
@@ -1642,7 +2086,10 @@ static int mtk_nway_reset(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
- return genphy_restart_aneg(mac->phy_dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ return genphy_restart_aneg(dev->phydev);
}
static u32 mtk_get_link(struct net_device *dev)
@@ -1650,11 +2097,14 @@ static u32 mtk_get_link(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
int err;
- err = genphy_update_link(mac->phy_dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ err = genphy_update_link(dev->phydev);
if (err)
return ethtool_op_get_link(dev);
- return mac->phy_dev->link;
+ return dev->phydev->link;
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -1690,6 +2140,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
unsigned int start;
int i;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
if (netif_running(dev) && netif_device_present(dev)) {
if (spin_trylock(&hwstats->stats_lock)) {
mtk_stats_update_mac(mac);
@@ -1697,8 +2150,9 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
}
}
+ data_src = (u64 *)hwstats;
+
do {
- data_src = (u64 *)hwstats;
data_dst = data;
start = u64_stats_fetch_begin_irq(&hwstats->syncp);
@@ -1707,9 +2161,65 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (dev->features & NETIF_F_LRO) {
+ cmd->data = MTK_MAX_RX_RING_NUM;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (dev->features & NETIF_F_LRO) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ cmd->rule_cnt = mac->hwlro_ip_cnt;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_all(dev, cmd,
+ rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_add_ipaddr(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (dev->features & NETIF_F_LRO)
+ ret = mtk_hwlro_del_ipaddr(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct ethtool_ops mtk_ethtool_ops = {
- .get_settings = mtk_get_settings,
- .set_settings = mtk_set_settings,
+ .get_link_ksettings = mtk_get_link_ksettings,
+ .set_link_ksettings = mtk_set_link_ksettings,
.get_drvinfo = mtk_get_drvinfo,
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
@@ -1718,6 +2228,8 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_rxnfc = mtk_get_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -1732,6 +2244,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
+ .ndo_fix_features = mtk_fix_features,
+ .ndo_set_features = mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mtk_poll_controller,
#endif
@@ -1770,6 +2284,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw = eth;
mac->of_node = np;
+ memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+ mac->hwlro_ip_cnt = 0;
+
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
GFP_KERNEL);
@@ -1786,21 +2303,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+ eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ if (eth->hwlro)
+ eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
eth->netdev[id]->features |= MTK_HW_FEATURES;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- err = register_netdev(eth->netdev[id]);
- if (err) {
- dev_err(eth->dev, "error bringing up device\n");
- goto free_netdev;
- }
eth->netdev[id]->irq = eth->irq[0];
- netif_info(eth, probe, eth->netdev[id],
- "mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->irq[0]);
-
return 0;
free_netdev:
@@ -1825,6 +2338,7 @@ static int mtk_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
+ eth->dev = &pdev->dev;
eth->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
@@ -1846,11 +2360,7 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->pctl);
}
- eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
- if (IS_ERR(eth->rstc)) {
- dev_err(&pdev->dev, "no eth reset found\n");
- return PTR_ERR(eth->rstc);
- }
+ eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
for (i = 0; i < 3; i++) {
eth->irq[i] = platform_get_irq(pdev, i);
@@ -1859,21 +2369,16 @@ static int mtk_probe(struct platform_device *pdev)
return -ENXIO;
}
}
+ for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+ eth->clks[i] = devm_clk_get(eth->dev,
+ mtk_clks_source_name[i]);
+ if (IS_ERR(eth->clks[i])) {
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ return -ENODEV;
+ }
+ }
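
This loop assumes a clock-name table indexed by enum mtk_clks_map (added to the header further down). A sketch of the expected table; the names are inferred from the per-clock devm_clk_get() calls removed just below, plus the new TRGPLL entry:

    static const char * const mtk_clks_source_name[] = {
        "ethif", "esw", "gp1", "gp2", "trgpll"
    };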
- eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
- eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
- eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
- eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
- if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
- IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
- return -ENODEV;
-
- clk_prepare_enable(eth->clk_ethif);
- clk_prepare_enable(eth->clk_esw);
- clk_prepare_enable(eth->clk_gp1);
- clk_prepare_enable(eth->clk_gp2);
-
- eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
@@ -1891,7 +2396,35 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_add_mac(eth, mac_np);
if (err)
- goto err_free_dev;
+ goto err_deinit_hw;
+ }
+
+ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ err = register_netdev(eth->netdev[i]);
+ if (err) {
+ dev_err(eth->dev, "error bringing up device\n");
+ goto err_deinit_mdio;
+ }
+ netif_info(eth, probe, eth->netdev[i],
+ "mediatek frame engine at 0x%08lx, irq %d\n",
+ eth->netdev[i]->base_addr, eth->irq[0]);
}
/* we run 2 devices on the same DMA ring so we need a dummy device
@@ -1907,23 +2440,34 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
+err_deinit_mdio:
+ mtk_mdio_cleanup(eth);
err_free_dev:
- mtk_cleanup(eth);
+ mtk_free_dev(eth);
+err_deinit_hw:
+ mtk_hw_deinit(eth);
+
return err;
}
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
+ int i;
+
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ }
- clk_disable_unprepare(eth->clk_ethif);
- clk_disable_unprepare(eth->clk_esw);
- clk_disable_unprepare(eth->clk_gp1);
- clk_disable_unprepare(eth->clk_gp2);
+ mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ mtk_mdio_cleanup(eth);
return 0;
}
@@ -1932,6 +2476,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7623-eth" },
{},
};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 7c1f3f2e11d4..7e194f795c9a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -39,7 +39,21 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1))
+#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
@@ -50,6 +64,9 @@
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
/* Frame Engine Interrupt Grouping Register */
#define MTK_FE_INT_GRP 0x20
@@ -70,12 +87,29 @@
/* PDMA RX Base Pointer Register */
#define MTK_PRX_BASE_PTR0 0x900
+#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
/* PDMA RX Maximum Count Register */
#define MTK_PRX_MAX_CNT0 0x904
+#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
/* PDMA RX CPU Pointer Register */
#define MTK_PRX_CRX_IDX0 0x908
+#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 0x980
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x3 << 26)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x3 << 29)
+
+#define MTK_PDMA_LRO_CTRL_DW1 0x984
+#define MTK_PDMA_LRO_CTRL_DW2 0x988
+#define MTK_PDMA_LRO_CTRL_DW3 0x98c
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
/* PDMA Global Configuration Register */
#define MTK_PDMA_GLO_CFG 0xa04
@@ -84,6 +118,7 @@
/* PDMA Reset Index Register */
#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
#define MTK_PDMA_DELAY_INT 0xa0c
@@ -94,10 +129,33 @@
/* PDMA Interrupt Mask Register */
#define MTK_PDMA_INT_MASK 0xa28
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
+
/* PDMA Interrupt grouping registers */
#define MTK_PDMA_INT_GRP1 0xa50
#define MTK_PDMA_INT_GRP2 0xa54
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
+#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
+#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
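The 8-bit maximum aggregation count is split across two ring control words: the low six bits sit in DW2 bits 31:26 and the top two bits in DW3 bits 1:0. Worked through for the value used by this patch:

    /* MTK_HW_LRO_MAX_AGG_CNT = 64 = 0x40
     *   MTK_RING_MAX_AGG_CNT_L = (64 & 0x3f) << 26 = 0   (into DW2)
     *   MTK_RING_MAX_AGG_CNT_H = (64 >> 6) & 0x3   = 1   (into DW3)
     */
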
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -132,7 +190,6 @@
/* QDMA Reset Index Register */
#define MTK_QDMA_RST_IDX 0x1A08
-#define MTK_PST_DRX_IDX0 BIT(16)
/* QDMA Delay Interrupt Register */
#define MTK_QDMA_DELAY_INT 0x1A0C
@@ -256,6 +313,30 @@
MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
/* GPIO port control registers for GMAC 2 */
#define GPIO_OD33_CTRL8 0x4c0
#define GPIO_BIAS_CTRL 0xed0
@@ -266,6 +347,15 @@
#define SYSCFG0_GE_MASK 0x3
#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE BIT(31)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -319,6 +409,23 @@ enum mtk_tx_flags {
MTK_TX_FLAGS_PAGE0 = 0x02,
};
+/* This enum maps each clock required by the ethernet subsystem to its
+ * index in the clks array of struct mtk_eth
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_ESW,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_TRGPLL,
+ MTK_CLK_MAX
+};
+
+enum mtk_dev_state {
+ MTK_HW_INIT,
+ MTK_RESETTING
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -356,6 +463,12 @@ struct mtk_tx_ring {
atomic_t free_count;
};
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0,
+ MTK_RX_FLAGS_HWLRO,
+};
+
/* struct mtk_rx_ring - This struct holds info describing a RX ring
* @dma: The descriptor ring
* @data: The memory pointed at by the ring
@@ -370,7 +483,10 @@ struct mtk_rx_ring {
dma_addr_t phys;
u16 frag_size;
u16 buf_size;
+ u16 dma_size;
+ bool calc_idx_update;
u16 calc_idx;
+ u32 crx_idx_reg;
};
/* currently no SoC has more than 2 macs */
@@ -399,18 +515,15 @@ struct mtk_rx_ring {
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
- * @clk_ethif: The ethif clock
- * @clk_esw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
+ * @clks: clock array for all clocks required
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
+ * @state: Initialization and runtime state of the device
*/
struct mtk_eth {
struct device *dev;
void __iomem *base;
- struct reset_control *rstc;
spinlock_t page_lock;
spinlock_t irq_lock;
struct net_device dummy_dev;
@@ -421,36 +534,41 @@ struct mtk_eth {
unsigned long sysclk;
struct regmap *ethsys;
struct regmap *pctl;
+ bool hwlro;
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
- struct mtk_rx_ring rx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
- struct clk *clk_ethif;
- struct clk *clk_esw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
+ struct clk *clks[MTK_CLK_MAX];
+
struct mii_bus *mii_bus;
struct work_struct pending_work;
+ unsigned long state;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
* SoC
* @id: The number of the MAC
+ * @ge_mode: Interface mode, kept for restoring the setup after a reset
* @of_node: Our devicetree node
* @hw: Backpointer to our main datastruture
* @hw_stats: Packet statistics counter
- * @phy_dev: The attached PHY if available
+ * @trgmii: Indicates whether the MAC uses TRGMII to connect to the
+ * internal switch
*/
struct mtk_mac {
int id;
+ int ge_mode;
struct device_node *of_node;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
- struct phy_device *phy_dev;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+ bool trgmii;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f04a423ff79d..b1cef7a0f7ca 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -785,17 +785,23 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ int ret;
+
if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
return mlx4_internal_err_ret_value(dev, op,
op_modifier);
+ down_read(&mlx4_priv(dev)->cmd.switch_sem);
if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
else
- return mlx4_cmd_poll(dev, in_param, out_param,
- out_is_imm, in_modifier,
- op_modifier, op, timeout);
+ ret = mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+
+ up_read(&mlx4_priv(dev)->cmd.switch_sem);
+ return ret;
}
return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
in_modifier, op_modifier, op, timeout);
@@ -1845,6 +1851,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
vp_oper->state.default_qos == vp_admin->default_qos &&
+ vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
vp_oper->state.link_state == vp_admin->link_state &&
vp_oper->state.qos_vport == vp_admin->qos_vport)
return 0;
@@ -1903,6 +1910,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
vp_oper->state.default_vlan = vp_admin->default_vlan;
vp_oper->state.default_qos = vp_admin->default_qos;
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
vp_oper->state.link_state = vp_admin->link_state;
vp_oper->state.qos_vport = vp_admin->qos_vport;
@@ -1916,6 +1924,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
work->qos_vport = vp_oper->state.qos_vport;
work->vlan_id = vp_oper->state.default_vlan;
work->vlan_ix = vp_oper->vlan_idx;
+ work->vlan_proto = vp_oper->state.vlan_proto;
work->priv = priv;
INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
queue_work(priv->mfunc.master.comm_wq, &work->work);
@@ -1986,6 +1995,8 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
int port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
int min_port = find_first_bit(actv_ports.ports,
@@ -2000,12 +2011,26 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
- vp_oper->state = *vp_admin;
+ if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
+ slave_state->vst_qinq_supported) {
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+ }
+ vp_oper->state.link_state = vp_admin->link_state;
+ vp_oper->state.mac = vp_admin->mac;
+ vp_oper->state.spoofchk = vp_admin->spoofchk;
+ vp_oper->state.tx_rate = vp_admin->tx_rate;
+ vp_oper->state.qos_vport = vp_admin->qos_vport;
+ vp_oper->state.guid = vp_admin->guid;
+
if (MLX4_VGT != vp_admin->default_vlan) {
err = __mlx4_register_vlan(&priv->dev, port,
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
+ vp_oper->state.default_vlan = MLX4_VGT;
+ vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
@@ -2086,6 +2111,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mlx4_warn(dev, "Received reset from slave:%d\n", slave);
slave_state[slave].active = false;
slave_state[slave].old_vlan_api = false;
+ slave_state[slave].vst_qinq_supported = false;
mlx4_master_deactivate_admin_state(priv, slave);
for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
slave_state[slave].event_eq[i].eqn = -1;
@@ -2353,6 +2379,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
vf_oper = &priv->mfunc.master.vf_oper[i];
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ s_state->vst_qinq_supported = false;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
@@ -2382,6 +2409,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
admin_vport->qos_vport =
MLX4_VPP_DEFAULT_VPORT;
oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
+ admin_vport->vlan_proto = htons(ETH_P_8021Q);
+ oper_vport->vlan_proto = htons(ETH_P_8021Q);
vf_oper->vport[port].vlan_idx = NO_INDX;
vf_oper->vport[port].mac_idx = NO_INDX;
mlx4_set_random_admin_guid(dev, i, port);
@@ -2454,6 +2483,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
int flags = 0;
if (!priv->cmd.initialized) {
+ init_rwsem(&priv->cmd.switch_sem);
mutex_init(&priv->cmd.slave_cmd_mutex);
sema_init(&priv->cmd.poll_sem, 1);
priv->cmd.use_events = 0;
@@ -2583,6 +2613,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
if (!priv->cmd.context)
return -ENOMEM;
+ down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
priv->cmd.context[i].next = i + 1;
@@ -2606,6 +2637,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
+ up_write(&priv->cmd.switch_sem);
return err;
}
@@ -2618,6 +2650,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
for (i = 0; i < priv->cmd.max_cmds; ++i)
@@ -2626,6 +2659,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
kfree(priv->cmd.context);
up(&priv->cmd.poll_sem);
+ up_write(&priv->cmd.switch_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
@@ -2937,10 +2971,13 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
+ __be16 proto)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_vport_state *vf_admin;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_oper_state *vf_oper;
int slave;
if ((!mlx4_is_master(dev)) ||
@@ -2950,12 +2987,31 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
if ((vlan > 4095) || (qos > 7))
return -EINVAL;
+ if (proto == htons(ETH_P_8021AD) &&
+ !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
+ return -EPROTONOSUPPORT;
+
+ if (proto != htons(ETH_P_8021Q) &&
+ proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ if ((proto == htons(ETH_P_8021AD)) &&
+ ((vlan == 0) || (vlan == MLX4_VGT)))
+ return -EINVAL;
+
slave = mlx4_get_slave_indx(dev, vf);
if (slave < 0)
return -EINVAL;
+ slave_state = &priv->mfunc.master.slave_state[slave];
+ if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
+ (!slave_state->vst_qinq_supported)) {
+ mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
+ return -EPROTONOSUPPORT;
+ }
port = mlx4_slaves_closest_port(dev, slave, port);
vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
return -EPERM;
@@ -2965,6 +3021,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
else
vf_admin->default_vlan = vlan;
vf_admin->default_qos = qos;
+ vf_admin->vlan_proto = proto;
/* If rate was configured prior to VST, we saved the configured rate
* in vf_admin->rate and now, if priority supported we enforce the QoS
@@ -2973,7 +3030,12 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
vf_admin->tx_rate)
vf_admin->qos_vport = slave;
- if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
+ /* Try to activate new vf state without restart,
+ * this option is not supported while moving to VST QinQ mode.
+ */
+ if ((proto == htons(ETH_P_8021AD) &&
+ vf_oper->state.vlan_proto != proto) ||
+ mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
mlx4_info(dev,
"updating vf %d port %d config will take effect on next VF restart\n",
vf, port);
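
Summarizing the admissibility rules now enforced here: the protocol must be 802.1Q or 802.1ad, and 802.1ad additionally requires the SVLAN_BY_QP device capability, a slave that advertised VST QinQ support, and a real VLAN id (neither 0 nor the VGT sentinel). From userspace this corresponds to something like "ip link set <pf> vf <n> vlan 100 proto 802.1ad". A condensed restatement of the protocol checks (sketch, not a drop-in replacement):

    static bool mlx4_vst_proto_ok(struct mlx4_dev *dev, __be16 proto, u16 vlan)
    {
        if (proto == htons(ETH_P_8021Q))
            return true;
        if (proto != htons(ETH_P_8021AD))
            return false;               /* unknown protocol */
        if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
            return false;               /* no QinQ VST support */
        return vlan != 0 && vlan != MLX4_VGT;
    }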
@@ -3117,6 +3179,7 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->vlan = s_info->default_vlan;
ivf->qos = s_info->default_qos;
+ ivf->vlan_proto = s_info->vlan_proto;
if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
ivf->max_tx_rate = s_info->tx_rate;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 1494997c4f7e..08fc5fc56d43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -298,7 +298,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
if (IS_ERR(mdev->ptp_clock)) {
mdev->ptp_clock = NULL;
mlx4_err(mdev, "ptp_clock_register failed\n");
- } else {
+ } else if (mdev->ptp_clock) {
mlx4_info(mdev, "registered PHC clock\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 99c6bbdff501..b04760a5034b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
*cap = true;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = priv->cee_params.dcbx_cap;
+ *cap = priv->dcbx_cap;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 1 << mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- return priv->cee_params.dcb_cfg.pfc_state;
+ return priv->cee_config.pfc_state;
}
static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- priv->cee_params.dcb_cfg.pfc_state = state;
+ priv->cee_config.pfc_state = state;
}
static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+ *setting = priv->cee_config.dcb_pfc[priority];
}
static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
- priv->cee_params.dcb_cfg.pfc_state = true;
+ priv->cee_config.dcb_pfc[priority] = setting;
+ priv->cee_config.pfc_state = true;
}
static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
- int err = 0;
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
- return -EINVAL;
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
- if (dcb_cfg->pfc_state) {
+ if (priv->cee_config.pfc_state) {
int tc;
priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
- switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+ switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
priv->prof->tx_ppp &= ~tc_mask;
priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
en_dbg(DRV, priv, "Set pfc off\n");
}
- err = mlx4_SET_PORT_general(mdev->dev, priv->port,
- priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
+ if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
- return err;
+ return 1;
+ }
+
+ return 0;
}
static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
struct mlx4_en_priv *priv = netdev_priv(dev);
int num_tcs = 0;
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
}
- return mlx4_en_setup_tc(dev, num_tcs);
+ if (mlx4_en_setup_tc(dev, num_tcs))
+ return 1;
+
+ return 0;
}
/* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
.selector = idtype,
.protocol = id,
};
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 0;
return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct dcb_app app;
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return -EINVAL;
memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->cee_params.dcbx_cap;
+ return priv->dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
- if (mode == priv->cee_params.dcbx_cap)
+ if (mode == priv->dcbx_cap)
return 0;
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
!(mode & DCB_CAP_DCBX_HOST))
goto err;
- priv->cee_params.dcbx_cap = mode;
+ priv->dcbx_cap = mode;
ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31a41add5b4c..7e703bed7b82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (up) {
- priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ if (priv->dcbx_cap)
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
} else {
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
- priv->cee_params.dcb_cfg.pfc_state = false;
+ priv->cee_config.pfc_state = false;
}
}
#endif /* CONFIG_MLX4_EN_DCB */
@@ -2399,12 +2400,14 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}
-static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
+ return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
+ vlan_proto);
}
static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
@@ -3055,9 +3058,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
-#ifdef CONFIG_MLX4_EN_DCB
- struct tc_configuration *tc;
-#endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3124,16 +3124,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
- priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_HOST |
- DCB_CAP_DCBX_VER_IEEE;
+ priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
- priv->cee_params.dcb_cfg.pfc_state = false;
+ priv->cee_config.pfc_state = false;
- for (i = 0; i < MLX4_EN_NUM_UP; i++) {
- tc = &priv->cee_params.dcb_cfg.tc_config[i];
- tc->dcb_pfc = pfc_disabled;
- }
+ for (i = 0; i < MLX4_EN_NUM_UP; i++)
+ priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
@@ -3229,6 +3226,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mlx4_is_slave(mdev->dev)) {
+ bool vlan_offload_disabled;
int phv;
err = get_phv_bit(mdev->dev, port, &phv);
@@ -3236,6 +3234,18 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
}
+ err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
+ &vlan_offload_disabled);
+ if (!err && vlan_offload_disabled) {
+ dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
} else {
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
!(mdev->dev->caps.flags2 &
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6758292311f4..f2e8beddcf44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -72,7 +72,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
}
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
frag_info->dma_dir);
- if (dma_mapping_error(priv->ddev, dma)) {
+ if (unlikely(dma_mapping_error(priv->ddev, dma))) {
put_page(page);
return -ENOMEM;
}
@@ -108,7 +108,8 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
ring_alloc[i].page_size)
continue;
- if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
+ if (unlikely(mlx4_alloc_pages(priv, &page_alloc[i],
+ frag_info, gfp)))
goto out;
}
@@ -585,7 +586,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
- if (!frags[nr].page)
+ if (unlikely(!frags[nr].page))
goto fail;
dma = be64_to_cpu(rx_desc->data[nr].addr);
@@ -625,7 +626,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
dma_addr_t dma;
skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
- if (!skb) {
+ if (unlikely(!skb)) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
@@ -736,7 +737,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
{
__wsum csum_pseudo_hdr = 0;
- if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT ||
+ ipv6h->nexthdr == IPPROTO_HOPOPTS))
return -1;
hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
@@ -769,7 +771,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
get_fixed_ipv4_csum(hw_checksum, skb, hdr);
#if IS_ENABLED(CONFIG_IPV6)
else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
- if (get_fixed_ipv6_csum(hw_checksum, skb, hdr))
+ if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr)))
return -1;
#endif
return 0;
@@ -796,10 +798,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
u64 timestamp;
bool l2_tunnel;
- if (!priv->port_up)
+ if (unlikely(!priv->port_up))
return 0;
- if (budget <= 0)
+ if (unlikely(budget <= 0))
return polled;
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
@@ -902,16 +904,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
case XDP_PASS:
break;
case XDP_TX:
- if (!mlx4_en_xmit_frame(frags, dev,
+ if (likely(!mlx4_en_xmit_frame(frags, dev,
length, tx_index,
- &doorbell_pending))
+ &doorbell_pending)))
goto consumed;
- break;
+ goto xdp_drop; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
- if (mlx4_en_rx_recycle(ring, frags))
+xdp_drop:
+ if (likely(mlx4_en_rx_recycle(ring, frags)))
goto consumed;
goto next;
}
@@ -1015,12 +1018,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* GRO not possible, complete processing here */
skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
- if (!skb) {
+ if (unlikely(!skb)) {
ring->dropped++;
goto next;
}
- if (unlikely(priv->validate_loopback)) {
+ if (unlikely(priv->validate_loopback)) {
validate_loopback(priv, skb);
goto next;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9df87ca0515a..e2509bba3e7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
if (unlikely(!real_size))
- goto tx_drop;
+ goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop;
+ goto tx_drop_count;
}
bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
PCI_DMA_TODEVICE);
}
+tx_drop_count:
+ ring->tx_dropped++;
tx_drop:
dev_kfree_skb_any(skb);
- ring->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
goto tx_drop;
if (mlx4_en_is_tx_ring_full(ring))
- goto tx_drop;
+ goto tx_drop_count;
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
return NETDEV_TX_OK;
-tx_drop:
+tx_drop_count:
ring->tx_dropped++;
+tx_drop:
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index f613977455e0..cf8f8a72a801 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
return 0;
err_out_unmap:
- while (i >= 0)
- mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+ while (i > 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index d728704d0c7b..090bf81076e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -158,7 +158,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[31] = "Modifying loopback source checks using UPDATE_QP support",
[32] = "Loopback source checks support",
[33] = "RoCEv2 support",
- [34] = "DMFS Sniffer support (UC & MC)"
+ [34] = "DMFS Sniffer support (UC & MC)",
+ [35] = "QinQ VST mode support",
};
int i;
@@ -248,6 +249,72 @@ out:
return err;
}
+static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+
+ if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
+ err = __mlx4_register_vlan(&priv->dev, port,
+ vp_admin->default_vlan,
+ &vp_oper->vlan_idx);
+ if (err) {
+ vp_oper->vlan_idx = NO_INDX;
+ mlx4_warn(&priv->dev,
+ "No vlan resources slave %d, port %d\n",
+ slave, port);
+ return err;
+ }
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
+ (int)(vp_oper->state.default_vlan),
+ vp_oper->vlan_idx, slave, port);
+ }
+ vp_oper->state.vlan_proto = vp_admin->vlan_proto;
+ vp_oper->state.default_vlan = vp_admin->default_vlan;
+ vp_oper->state.default_qos = vp_admin->default_qos;
+
+ return 0;
+}
+
+static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
+{
+ struct mlx4_vport_oper_state *vp_oper;
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_vport_state *vp_admin;
+ int err;
+
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
+ slave_state = &priv->mfunc.master.slave_state[slave];
+
+ if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
+ (!slave_state->active))
+ return 0;
+
+ if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
+ vp_oper->state.default_vlan == vp_admin->default_vlan &&
+ vp_oper->state.default_qos == vp_admin->default_qos)
+ return 0;
+
+ if (!slave_state->vst_qinq_supported) {
+ /* Warn and revert the request to set vst QinQ mode */
+ vp_admin->vlan_proto = vp_oper->state.vlan_proto;
+ vp_admin->default_vlan = vp_oper->state.default_vlan;
+ vp_admin->default_qos = vp_oper->state.default_qos;
+
+ mlx4_warn(&priv->dev,
+ "Slave %d does not support VST QinQ mode\n", slave);
+ return 0;
+ }
+
+ err = mlx4_activate_vst_qinq(priv, slave, port);
+ return err;
+}
+
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -311,14 +378,18 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08
#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
-#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
#define QUERY_FUNC_CAP_PHV_BIT 0x40
+#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE 0x20
+
+#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ BIT(30)
+#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS BIT(31)
if (vhcr->op_modifier == 1) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
int converted_port = mlx4_slave_convert_port(
dev, slave, vhcr->in_modifier);
+ struct mlx4_vport_oper_state *vp_oper;
if (converted_port < 0)
return -EINVAL;
@@ -357,15 +428,24 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
QUERY_FUNC_CAP_PHYS_PORT_ID);
- if (dev->caps.phv_bit[port]) {
- field = QUERY_FUNC_CAP_PHV_BIT;
- MLX4_PUT(outbox->buf, field,
- QUERY_FUNC_CAP_FLAGS0_OFFSET);
- }
+ vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+ err = mlx4_handle_vst_qinq(priv, slave, port);
+ if (err)
+ return err;
+
+ field = 0;
+ if (dev->caps.phv_bit[port])
+ field |= QUERY_FUNC_CAP_PHV_BIT;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);
} else if (vhcr->op_modifier == 0) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, slave);
+ struct mlx4_slave_state *slave_state =
+ &priv->mfunc.master.slave_state[slave];
+
/* enable rdma and ethernet interfaces, new quota locations,
* and reserved lkey
*/
@@ -439,6 +519,10 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
+
+ if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
+ slave_state->vst_qinq_supported = true;
+
} else
err = -EINVAL;
@@ -454,10 +538,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
u32 size, qkey;
int err = 0, quotas = 0;
u32 in_modifier;
+ u32 slave_caps;
op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
- in_modifier = op_modifier ? gen_or_port :
+ slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
+ in_modifier = op_modifier ? gen_or_port : slave_caps;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -612,8 +698,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(func_cap->phys_port_id, outbox,
QUERY_FUNC_CAP_PHYS_PORT_ID);
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
- func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+ MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
/* All other resources are allocated by the master, but we still report
* 'num' and 'reserved' capabilities as follows:
@@ -690,6 +775,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET 0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET 0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET 0x56
+#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET 0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET 0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET 0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
@@ -857,6 +943,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
dev_cap->max_sq_desc_sz = size;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
+ if (field & 0x1)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
dev_cap->max_qp_per_mcg = 1 << field;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
@@ -2914,7 +3003,7 @@ int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
if (!err)
- *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+ *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
return err;
}
EXPORT_SYMBOL(get_phv_bit);
@@ -2938,6 +3027,22 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
}
EXPORT_SYMBOL(set_phv_bit);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled)
+{
+ struct mlx4_func_cap func_cap;
+ int err;
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+ if (!err)
+ *vlan_offload_disabled =
+ !!(func_cap.flags0 &
+ QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);
+
void mlx4_replace_zero_macs(struct mlx4_dev *dev)
{
int i;
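
mlx4_get_is_vlan_offload_disabled() lets a VF driver learn that the PF put its port into VST QinQ mode (flags0 bit 0x20). A hedged caller sketch; the feature-bit handling is illustrative, not part of this hunk:

bool disabled = false;

if (!mlx4_get_is_vlan_offload_disabled(dev, port, &disabled) && disabled)
	/* e.g. stop advertising HW CTAG offloads on this port */
	netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);
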
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index cdbd76f10ced..f11614f12517 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -152,7 +152,7 @@ struct mlx4_func_cap {
u32 qp1_proxy_qpn;
u32 reserved_lkey;
u8 physical_port;
- u8 port_flags;
+ u8 flags0;
u8 flags1;
u64 phys_port_id;
u32 extra_flags;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75dd2e3d3059..7183ac4135d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_unregister(&info->devlink_port);
+
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index c9d7fc5159f2..e4878f31e45d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,6 +46,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <net/devlink.h>
+#include <linux/rwsem.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
@@ -482,6 +483,7 @@ struct mlx4_slave_state {
u8 init_port_mask;
bool active;
bool old_vlan_api;
+ bool vst_qinq_supported;
u8 function;
dma_addr_t vhcr_dma;
u16 mtu[MLX4_MAX_PORTS + 1];
@@ -507,6 +509,7 @@ struct mlx4_vport_state {
u64 mac;
u16 default_vlan;
u8 default_qos;
+ __be16 vlan_proto;
u32 tx_rate;
bool spoofchk;
u32 link_state;
@@ -627,6 +630,7 @@ struct mlx4_cmd {
struct mutex slave_cmd_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct rw_semaphore switch_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -655,6 +659,7 @@ struct mlx4_vf_immed_vlan_work {
u8 qos_vport;
u16 vlan_id;
u16 orig_vlan_id;
+ __be16 vlan_proto;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 47867c49f91c..a3528dd1e72e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
pfc_enabled_rx
};
-struct tc_configuration {
- enum dcb_pfc_type dcb_pfc;
-};
-
struct mlx4_en_cee_config {
bool pfc_state;
- struct tc_configuration tc_config[MLX4_EN_NUM_UP];
+ enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
};
-
-struct mlx4_en_cee_params {
- u8 dcbx_cap;
- struct mlx4_en_cee_config dcb_cfg;
-};
-
#endif
struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
- struct mlx4_en_cee_params cee_params;
+ struct mlx4_en_cee_config cee_config;
+ u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 3d2095e5c61c..c5b2064297a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,7 +52,7 @@
#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
#define MLX4_IGNORE_FCS_MASK 0x1
-#define MLNX4_TX_MAX_NUMBER 8
+#define MLX4_TC_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
u8 num_tc = dev->caps.max_tc_eth;
if (!num_tc)
- num_tc = MLNX4_TX_MAX_NUMBER;
+ num_tc = MLX4_TC_MAX_NUMBER;
return num_tc;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8b81114bdc72..84d7857ccc27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -790,10 +790,22 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
- qpc->pri_path.vlan_control |=
- MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
- MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
+ /* VST QinQ should block untagged traffic on TX,
+ * but the cvlan is in the payload and phv is set, so
+ * hw sees it as untagged. Block tagged instead.
+ */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ } else { /* vst 802.1Q */
+ qpc->pri_path.vlan_control |=
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ }
} else { /* priority tagged */
qpc->pri_path.vlan_control |=
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
@@ -802,7 +814,11 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
- qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
+ qpc->pri_path.fl |= MLX4_FL_SV;
+ else
+ qpc->pri_path.fl |= MLX4_FL_CV;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
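
The two hunks above encode a small decision table: which tags to block and whether to force CV (802.1Q) or SV (802.1ad) insertion, keyed on vlan_proto. A hypothetical helper distilling the block-bit selection, with names as in the mlx4 enums:

static u8 vst_vlan_control(__be16 vlan_proto)
{
	if (vlan_proto == htons(ETH_P_8021AD))
		/* QinQ: hw sees the inner cvlan as untagged (phv set),
		 * so block tagged instead of untagged on TX
		 */
		return MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
		       MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
		       MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
		       MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	return MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |	/* plain 802.1Q VST */
	       MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
	       MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
}
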
@@ -5238,6 +5254,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
+ (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
@@ -5266,7 +5283,12 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
else if (!work->vlan_id)
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
- else
+ else if (work->vlan_proto == htons(ETH_P_8021AD))
+ vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
+ else /* vst 802.1Q */
vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
@@ -5311,7 +5333,11 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
upd_context->qp_context.pri_path.fvl_rx =
qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.fl =
- qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
+ if (work->vlan_proto == htons(ETH_P_8021AD))
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
+ else
+ upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
upd_context->qp_context.pri_path.feup =
qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
upd_context->qp_context.pri_path.sched_queue =
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 67146624eb58..f44d089e2ca6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -45,15 +45,12 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- spin_lock(&srq_table->lock);
-
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1));
+ rcu_read_unlock();
if (srq)
atomic_inc(&srq->refcount);
-
- spin_unlock(&srq_table->lock);
-
- if (!srq) {
+ else {
mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
return;
}
@@ -301,12 +298,11 @@ struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
struct mlx4_srq *srq;
- unsigned long flags;
- spin_lock_irqsave(&srq_table->lock, flags);
+ rcu_read_lock();
srq = radix_tree_lookup(&srq_table->tree,
srqn & (dev->caps.num_srqs - 1));
- spin_unlock_irqrestore(&srq_table->lock, flags);
+ rcu_read_unlock();
return srq;
}
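
With lookups now under rcu_read_lock(), the update side must keep serializing on srq_table->lock and defer frees past a grace period. A sketch of the assumed pattern (the actual insert/remove paths are outside this hunk; mlx4 additionally uses a refcount on the SRQ itself):

spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);

/* ... on teardown ... */
spin_lock_irq(&srq_table->lock);
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
synchronize_rcu();	/* let in-flight readers drain before reuse */
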
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index dad326ccd4dd..0343725d7f44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o
+ fs_counters.o rl.o lag.o dev.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
new file mode 100644
index 000000000000..a9dbc28f6b97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(mlx5_dev_list);
+/* protects intf_list and mlx5_dev_list */
+static DEFINE_MUTEX(mlx5_intf_mutex);
+
+struct mlx5_device_context {
+ struct list_head list;
+ struct mlx5_interface *intf;
+ void *context;
+ unsigned long state;
+};
+
+enum {
+ MLX5_INTERFACE_ADDED,
+ MLX5_INTERFACE_ATTACHED,
+};
+
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ if (!mlx5_lag_intf_add(intf, priv))
+ return;
+
+ dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+ if (!dev_ctx)
+ return;
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ if (intf->attach)
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
+ struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf)
+ return dev_ctx;
+ return NULL;
+}
+
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ intf->remove(dev, dev_ctx->context);
+
+ kfree(dev_ctx);
+}
+
+static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ if (intf->attach) {
+ if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+ return;
+ intf->attach(dev, dev_ctx->context);
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ } else {
+ if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ return;
+ dev_ctx->context = intf->add(dev);
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ }
+}
+
+void mlx5_attach_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_attach_interface(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = mlx5_get_device(intf, priv);
+ if (!dev_ctx)
+ return;
+
+ if (intf->detach) {
+ if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+ return;
+ intf->detach(dev, dev_ctx->context);
+ clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+ } else {
+ if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+ return;
+ intf->remove(dev, dev_ctx->context);
+ clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ }
+}
+
+void mlx5_detach_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_detach_interface(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+bool mlx5_device_registered(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv;
+ bool found = false;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ if (priv == &dev->priv)
+ found = true;
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return found;
+}
+
+int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_add_tail(&priv->dev_list, &mlx5_dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return 0;
+}
+
+void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&mlx5_intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&mlx5_intf_mutex);
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+/* Must be called with intf_mutex held */
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+ struct mlx5_interface *intf;
+
+ list_for_each_entry(intf, &intf_list, list)
+ if (intf->protocol == protocol) {
+ mlx5_add_device(intf, &dev->priv);
+ break;
+ }
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+ struct mlx5_interface *intf;
+
+ list_for_each_entry(intf, &intf_list, list)
+ if (intf->protocol == protocol) {
+ mlx5_remove_device(intf, &dev->priv);
+ break;
+ }
+}
+
+static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+{
+ return (u16)((dev->pdev->bus->number << 8) |
+ PCI_SLOT(dev->pdev->devfn));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+{
+ u16 pci_id = mlx5_gen_pci_id(dev);
+ struct mlx5_core_dev *res = NULL;
+ struct mlx5_core_dev *tmp_dev;
+ struct mlx5_priv *priv;
+
+ list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
+ tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+ if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
+ res = tmp_dev;
+ break;
+ }
+ }
+
+ return res;
+}
+
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->event)
+ dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+void mlx5_dev_list_lock(void)
+{
+ mutex_lock(&mlx5_intf_mutex);
+}
+
+void mlx5_dev_list_unlock(void)
+{
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_dev_list_trylock(void)
+{
+ return mutex_trylock(&mlx5_intf_mutex);
+}
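
A hypothetical consumer of the interface API now consolidated in dev.c (all my_* names are illustrative); a NULL return from .add makes mlx5_add_device() discard the context, as the code above shows:

struct my_ctx { struct mlx5_core_dev *mdev; };	/* hypothetical per-device state */

static void *my_add(struct mlx5_core_dev *dev)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx)
		ctx->mdev = dev;
	return ctx;
}

static void my_remove(struct mlx5_core_dev *dev, void *context)
{
	kfree(context);
}

static struct mlx5_interface my_intf = {
	.add      = my_add,
	.remove   = my_remove,
	/* optional .attach/.detach keep state alive across resets */
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
};

/* module init/exit */
mlx5_register_interface(&my_intf);
mlx5_unregister_interface(&my_intf);
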
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 96995609f205..460363b66cb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -62,12 +62,14 @@
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x3
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+#define MLX5_RX_HEADROOM NET_SKB_PAD
+
#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_WQE_SZ 17
+#define MLX5_MPWRQ_LOG_WQE_SZ 18
#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
@@ -99,6 +101,18 @@
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_ICOSQ_MAX_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_IHS_DS_COUNT \
+ DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT \
+ (MLX5E_XDP_IHS_DS_COUNT + \
+ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_WQEBBS \
+ DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
+
#define MLX5E_NUM_MAIN_GROUPS 9
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
@@ -287,29 +301,53 @@ struct mlx5e_rx_am { /* Adaptive Moderation */
u8 tired;
};
+/* A single cache unit can serve one napi call (for a non-striding rq)
+ * or one MPWQE (for a striding rq).
+ */
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_SIZE (2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
+struct mlx5e_page_cache {
+ u32 head;
+ u32 tail;
+ struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
- u32 wqe_sz;
- struct sk_buff **skb;
- struct mlx5e_mpw_info *wqe_info;
+
+ union {
+ struct mlx5e_dma_info *dma_info;
+ struct {
+ struct mlx5e_mpw_info *info;
+ void *mtt_no_align;
+ u32 mtt_offset;
+ } mpwqe;
+ };
+ struct {
+ u8 page_order;
+ u32 wqe_sz; /* wqe data buffer size */
+ u8 map_dir; /* dma map direction */
+ } buff;
__be32 mkey_be;
- __be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
+ struct mlx5e_page_cache page_cache;
+
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_alloc_wqe alloc_wqe;
mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
- u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
+ struct bpf_prog *xdp_prog;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
@@ -323,32 +361,15 @@ struct mlx5e_rq {
struct mlx5e_umr_dma_info {
__be64 *mtt;
- __be64 *mtt_no_align;
dma_addr_t mtt_addr;
- struct mlx5e_dma_info *dma_info;
+ struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+ struct mlx5e_umr_wqe wqe;
};
struct mlx5e_mpw_info {
- union {
- struct mlx5e_dma_info dma_info;
- struct mlx5e_umr_dma_info umr;
- };
+ struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-
- void (*dma_pre_sync)(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len);
- void (*add_skb_frag)(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset, u32 len);
- void (*copy_skb_header)(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen);
- void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
};
struct mlx5e_tx_wqe_info {
@@ -373,11 +394,17 @@ enum {
MLX5E_SQ_STATE_BF_ENABLE,
};
-struct mlx5e_ico_wqe_info {
+struct mlx5e_sq_wqe_info {
u8 opcode;
u8 num_wqebbs;
};
+enum mlx5e_sq_type {
+ MLX5E_SQ_TXQ,
+ MLX5E_SQ_ICO,
+ MLX5E_SQ_XDP
+};
+
struct mlx5e_sq {
/* data path */
@@ -395,10 +422,20 @@ struct mlx5e_sq {
struct mlx5e_cq cq;
- /* pointers to per packet info: write@xmit, read@completion */
- struct sk_buff **skb;
- struct mlx5e_sq_dma *dma_fifo;
- struct mlx5e_tx_wqe_info *wqe_info;
+ /* pointers to per tx element info: write@xmit, read@completion */
+ union {
+ struct {
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+ struct mlx5e_tx_wqe_info *wqe_info;
+ } txq;
+ struct mlx5e_sq_wqe_info *ico_wqe;
+ struct {
+ struct mlx5e_sq_wqe_info *wqe_info;
+ struct mlx5e_dma_info *di;
+ bool doorbell;
+ } xdp;
+ } db;
/* read only */
struct mlx5_wq_cyc wq;
@@ -420,8 +457,8 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
- struct mlx5e_ico_wqe_info *ico_wqe_info;
u32 rate_limit;
+ u8 type;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -437,8 +474,10 @@ enum channel_flags {
struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
+ struct mlx5e_sq xdp_sq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_sq icosq; /* internal control operations */
+ bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -620,6 +659,7 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_sq **txq_to_sq_map;
int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+ struct bpf_prog *xdp_prog;
/* priv data path fields - end */
unsigned long state;
@@ -666,30 +706,19 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
-void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- u16 byte_cnt,
- struct mlx5e_mpw_info *wi,
- struct sk_buff *skb);
-void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe,
- u16 byte_cnt,
- struct mlx5e_mpw_info *wi,
- struct sk_buff *skb);
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi);
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi);
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_rx_am(struct mlx5e_rq *rq);
@@ -776,6 +805,12 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
}
+static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+ return rq->mpwqe.mtt_offset +
+ wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return min_t(int, mdev->priv.eq_table.num_comp_vectors,
@@ -834,6 +869,7 @@ void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
@@ -844,9 +880,12 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
void mlx5e_update_stats_work(struct work_struct *work);
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile, void *ppriv);
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
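
A worked pass through the new XDP SQ sizing macros, assuming the usual mlx5 constants (MLX5_SEND_WQE_DS = 16 bytes, MLX5_SEND_WQE_BB = 64 bytes so MLX5_SEND_WQEBB_NUM_DS = 4, and a 32-byte struct mlx5e_tx_wqe of ctrl + eth segments):

/* MLX5E_XDP_MIN_INLINE   = ETH_HLEN + VLAN_HLEN     = 18 bytes
 * MLX5E_XDP_IHS_DS_COUNT = DIV_ROUND_UP(18 - 2, 16) = 1
 *   (the first 2 inline bytes live in the eth segment itself)
 * MLX5E_XDP_TX_DS_COUNT  = 1 + 32/16 + 1 (SG DS)    = 4
 * MLX5E_XDP_TX_WQEBBS    = DIV_ROUND_UP(4, 4)       = 1
 * i.e. each XDP TX WQE fits in a single 64-byte WQEBB.
 */
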
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 847a8f3ac2b2..13dc388667b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -273,7 +273,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
- if (IS_ERR_OR_NULL(tstamp->ptp)) {
+ if (IS_ERR(tstamp->ptp)) {
mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
PTR_ERR(tstamp->ptp));
tstamp->ptp = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d1cd1564e9b9..27ff401cec20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, 0);
+ pport_per_prio_pfc_stats_desc, i);
}
}
@@ -659,9 +659,10 @@ out:
static void ptys2ethtool_supported_link(unsigned long *supported_modes,
u32 eth_proto_cap)
{
+ unsigned long proto_cap = eth_proto_cap;
int proto;
- for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
bitmap_or(supported_modes, supported_modes,
ptys2ethtool_table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -670,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
u32 eth_proto_cap)
{
+ unsigned long proto_cap = eth_proto_cap;
int proto;
- for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
bitmap_or(advertising_modes, advertising_modes,
ptys2ethtool_table[proto].advertised,
__ETHTOOL_LINK_MODE_MASK_NBITS);
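
The two ethtool hunks fix the same latent bug: casting a u32 * to unsigned long * makes for_each_set_bit() scan 8 bytes on 64-bit targets, 4 of them garbage, and on big-endian the wrong 4 come first. Copying into a real unsigned long is the portable idiom; a sketch (collect_modes is a hypothetical stand-in for the two fixed functions):

static void collect_modes(unsigned long *modes, u32 eth_proto_cap)
{
	unsigned long bits = eth_proto_cap;	/* widen before taking the address */
	int proto;

	for_each_set_bit(proto, &bits, MLX5E_LINK_MODES_NUMBER)
		__set_bit(proto, modes);	/* placeholder for the bitmap_or() calls */
}
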
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1587a9fd5724..36fbc6b21a33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -294,6 +294,36 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
return 0;
}
+static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ }
+
+ if (priv->fs.vlan.filter_disabled &&
+ !(priv->netdev->flags & IFF_PROMISC))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ }
+
+ if (priv->fs.vlan.filter_disabled &&
+ !(priv->netdev->flags & IFF_PROMISC))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -1024,14 +1054,10 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
if (err)
goto err_free_g;
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- goto err_destroy_vlan_flow_groups;
+ mlx5e_add_vlan_rules(priv);
return 0;
-err_destroy_vlan_flow_groups:
- mlx5e_destroy_groups(ft);
err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
@@ -1043,6 +1069,7 @@ err_destroy_vlan_table:
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
+ mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
@@ -1100,7 +1127,6 @@ err_destroy_arfs_tables:
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 03586ee68fc4..b58cfe37dead 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <linux/bpf.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -50,7 +51,7 @@ struct mlx5e_sq_param {
struct mlx5_wq_param wq;
u16 max_inline;
u8 min_inline_mode;
- bool icosq;
+ enum mlx5e_sq_type type;
};
struct mlx5e_cq_param {
@@ -63,12 +64,55 @@ struct mlx5e_cq_param {
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param xdp_sq;
struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
struct mlx5e_cq_param icosq_cq;
};
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+{
+ priv->params.rq_wq_type = rq_type;
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz = priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ mlx5_core_info(priv->mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+}
+
+static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+{
+ u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
+ !priv->xdp_prog ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+ mlx5e_set_rq_type_params(priv, rq_type);
+}
+
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -136,12 +180,18 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+ s->rx_xdp_drop += rq_stats->xdp_drop;
+ s->rx_xdp_tx += rq_stats->xdp_tx;
+ s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
- s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+ s->rx_cache_reuse += rq_stats->cache_reuse;
+ s->rx_cache_full += rq_stats->cache_full;
+ s->rx_cache_empty += rq_stats->cache_empty;
+ s->rx_cache_busy += rq_stats->cache_busy;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
@@ -295,6 +345,117 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the mtt array, we allocate
+ * a little more.
+ */
+ return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+ MLX5_UMR_MTT_ALIGNMENT);
+}
+
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
+ struct mlx5e_umr_wqe *wqe, u16 ix)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_data_seg *dseg = &wqe->data;
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+ u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->imm = rq->mkey_be;
+
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->klm_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ ucseg->bsf_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+ dseg->lkey = sq->mkey_be;
+ dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
+ struct mlx5e_channel *c)
+{
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
+ int i;
+
+ rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->mpwqe.info)
+ goto err_out;
+
+ /* We allocate more than mtt_sz as we will align the pointer */
+ rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (unlikely(!rq->mpwqe.mtt_no_align))
+ goto err_free_wqe_info;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
+ MLX5_UMR_ALIGN);
+ wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
+ goto err_unmap_mtts;
+
+ mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
+ }
+
+ return 0;
+
+err_unmap_mtts:
+ while (--i >= 0) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
+ PCI_DMA_TODEVICE);
+ }
+ kfree(rq->mpwqe.mtt_no_align);
+err_free_wqe_info:
+ kfree(rq->mpwqe.info);
+
+err_out:
+ return -ENOMEM;
+}
+
+static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
+{
+ int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int i;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
+ PCI_DMA_TODEVICE);
+ }
+ kfree(rq->mpwqe.mtt_no_align);
+ kfree(rq->mpwqe.info);
+}
+
+static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+ if (rep && rep->vport != FDB_UPLINK_VPORT)
+ return true;
+
+ return false;
+}
+
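
Worked numbers for the MTT sizing above, assuming 4K pages and the usual mlx5 constants (MLX5_MPWRQ_LOG_WQE_SZ = 18 as set earlier in this patch, MLX5_UMR_MTT_ALIGNMENT = 0x40, MLX5_UMR_ALIGN = 2048):

/* MLX5_MPWRQ_WQE_PAGE_ORDER = 18 - 12             = 6
 * MLX5_MPWRQ_PAGES_PER_WQE  = BIT(6)              = 64 pages
 * mlx5e_get_wqe_mtt_sz()    = ALIGN(64 * 8, 0x40) = 512 bytes
 * mtt_alloc                 = 512 + 2048 - 1
 *   (over-allocated so PTR_ALIGN() can find a 2K-aligned start)
 */
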
static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -304,6 +465,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
u32 byte_count;
+ u32 frag_sz;
+ int npages;
int wq_sz;
int err;
int i;
@@ -319,63 +482,92 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->wq_type = priv->params.rq_wq_type;
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->tstamp = &priv->tstamp;
+ rq->channel = c;
+ rq->ix = c->ix;
+ rq->priv = c->priv;
+ rq->xdp_prog = priv->xdp_prog;
+
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ if (rq->xdp_prog)
+ rq->buff.map_dir = DMA_BIDIRECTIONAL;
+
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
- GFP_KERNEL, cpu_to_node(c->cpu));
- if (!rq->wqe_info) {
- err = -ENOMEM;
+ if (mlx5e_is_vf_vport_rep(priv)) {
+ err = -EINVAL;
goto err_rq_wq_destroy;
}
+
rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->mpwqe_mtt_offset = c->ix *
+ rq->mpwqe.mtt_offset = c->ix *
MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
- rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
- byte_count = rq->wqe_sz;
+
+ rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->buff.wqe_sz;
+ rq->mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+ err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+ if (err)
+ goto err_rq_wq_destroy;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
+ rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->dma_info) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
+ if (mlx5e_is_vf_vport_rep(priv))
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
+ else
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
rq->alloc_wqe = mlx5e_alloc_rx_wqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
- rq->wqe_sz = (priv->params.lro_en) ?
+ rq->buff.wqe_sz = (priv->params.lro_en) ?
priv->params.lro_wqe_sz :
MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
- byte_count = rq->wqe_sz;
+ byte_count = rq->buff.wqe_sz;
+
+ /* calc the required page order */
+ frag_sz = MLX5_RX_HEADROOM +
+ byte_count /* packet data */ +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frag_sz = SKB_DATA_ALIGN(frag_sz);
+
+ npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+ rq->buff.page_order = order_base_2(npages);
+
byte_count |= MLX5_HW_START_PADDING;
+ rq->mkey_be = c->mkey_be;
}
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
wqe->data.byte_count = cpu_to_be32(byte_count);
+ wqe->data.lkey = rq->mkey_be;
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
rq->am.mode = priv->params.rx_cq_period_mode;
- rq->wq_type = priv->params.rq_wq_type;
- rq->pdev = c->pdev;
- rq->netdev = c->netdev;
- rq->tstamp = &priv->tstamp;
- rq->channel = c;
- rq->ix = c->ix;
- rq->priv = c->priv;
- rq->mkey_be = c->mkey_be;
- rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
+ rq->page_cache.head = 0;
+ rq->page_cache.tail = 0;
+
+ if (rq->xdp_prog)
+ bpf_prog_add(rq->xdp_prog, 1);
return 0;
@@ -387,14 +579,25 @@ err_rq_wq_destroy:
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
+ int i;
+
+ if (rq->xdp_prog)
+ bpf_prog_put(rq->xdp_prog);
+
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- kfree(rq->wqe_info);
+ mlx5e_rq_free_mpwqe_info(rq);
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- kfree(rq->skb);
+ kfree(rq->dma_info);
}
+ for (i = rq->page_cache.head; i != rq->page_cache.tail;
+ i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
+ struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
+
+ mlx5e_page_release(rq, dma_info, false);
+ }
mlx5_wq_destroy(&rq->wq_ctrl);
}
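
The release loop above walks the page cache as a power-of-two ring; a sketch of the index discipline it relies on (cache_next is hypothetical):

/* MLX5E_CACHE_SIZE is a power of two, so wrap-around is a mask */
static inline u32 cache_next(u32 i)
{
	return (i + 1) & (MLX5E_CACHE_SIZE - 1);
}
/* head == tail means empty; live entries occupy [head, tail) */
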
@@ -528,7 +731,7 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
/* UMR WQE (if in progress) is always at wq->head */
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
- mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
while (!mlx5_wq_ll_is_empty(wq)) {
wqe_ix_be = *wq->tail_next;
@@ -563,8 +766,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -588,26 +791,65 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
mlx5e_destroy_rq(rq);
}
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
{
- kfree(sq->wqe_info);
- kfree(sq->dma_fifo);
- kfree(sq->skb);
+ kfree(sq->db.xdp.di);
+ kfree(sq->db.xdp.wqe_info);
}
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
- sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
- sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
- numa);
- sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
- numa);
+ sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+ GFP_KERNEL, numa);
+ sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
+ mlx5e_free_sq_xdp_db(sq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.ico_wqe);
+}
+
+static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+{
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
- if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
- mlx5e_free_sq_db(sq);
+ sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.ico_wqe)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->db.txq.wqe_info);
+ kfree(sq->db.txq.dma_fifo);
+ kfree(sq->db.txq.skb);
+}
+
+static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
+ GFP_KERNEL, numa);
+ sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
+ GFP_KERNEL, numa);
+ sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
+ GFP_KERNEL, numa);
+ if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
+ mlx5e_free_sq_txq_db(sq);
return -ENOMEM;
}
@@ -616,6 +858,46 @@ static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
return 0;
}
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_sq_txq_db(sq);
+ break;
+ case MLX5E_SQ_ICO:
+ mlx5e_free_sq_ico_db(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_sq_xdp_db(sq);
+ break;
+ }
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ return mlx5e_alloc_sq_txq_db(sq, numa);
+ case MLX5E_SQ_ICO:
+ return mlx5e_alloc_sq_ico_db(sq, numa);
+ case MLX5E_SQ_XDP:
+ return mlx5e_alloc_sq_xdp_db(sq, numa);
+ }
+
+ return 0;
+}
+
+static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+{
+ switch (sq_type) {
+ case MLX5E_SQ_ICO:
+ return MLX5E_ICOSQ_MAX_WQEBBS;
+ case MLX5E_SQ_XDP:
+ return MLX5E_XDP_TX_WQEBBS;
+ }
+ return MLX5_SEND_WQE_MAX_WQEBBS;
+}
+
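
sq->edge, computed below from mlx5e_sq_get_max_wqebbs(), is the last producer index from which the largest WQE of that SQ type still fits without wrapping. A sketch of how a producer uses it, modeled on the driver's ICO SQ NOP-fill pattern (not part of this hunk):

u16 pi;

/* pad the SQ tail with NOPs so a maximal WQE never wraps the ring */
while ((pi = (sq->pc & sq->wq.sz_m1)) > sq->edge) {
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	mlx5e_send_nop(sq, true);
}
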
static int mlx5e_create_sq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
@@ -628,6 +910,13 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int err;
+ sq->type = param->type;
+ sq->pdev = c->pdev;
+ sq->tstamp = &priv->tstamp;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+
err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -656,18 +945,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- if (param->icosq) {
- u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-
- sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
- wq_sz,
- GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!sq->ico_wqe_info) {
- err = -ENOMEM;
- goto err_free_sq_db;
- }
- } else {
+ if (sq->type == MLX5E_SQ_TXQ) {
int txq_ix;
txq_ix = c->ix + tc * priv->params.num_channels;
@@ -675,19 +953,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
priv->txq_to_sq_map[txq_ix] = sq;
}
- sq->pdev = c->pdev;
- sq->tstamp = &priv->tstamp;
- sq->mkey_be = c->mkey_be;
- sq->channel = c;
- sq->tc = tc;
- sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+ sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
return 0;
-err_free_sq_db:
- mlx5e_free_sq_db(sq);
-
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -702,7 +972,6 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
- kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -731,11 +1000,12 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
+ 0 : priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -850,12 +1120,14 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
netif_tx_disable_queue(sq->txq);
/* last doorbell out, godspeed .. */
- if (mlx5e_sq_has_room_for(sq, 1))
+ if (mlx5e_sq_has_room_for(sq, 1)) {
+ sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
mlx5e_send_nop(sq, true);
+ }
}
mlx5e_disable_sq(sq);
- mlx5e_free_tx_descs(sq);
+ mlx5e_free_sq_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -1216,14 +1488,31 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
}
}
+ if (priv->xdp_prog) {
+ /* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+ priv->params.tx_cq_moderation);
+ if (err)
+ goto err_close_sqs;
+
+ err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq);
+ if (err) {
+ mlx5e_close_cq(&c->xdp_sq.cq);
+ goto err_close_sqs;
+ }
+ }
+
+ c->xdp = !!priv->xdp_prog;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
- goto err_close_sqs;
+ goto err_close_xdp_sq;
netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
+err_close_xdp_sq:
+ mlx5e_close_sq(&c->xdp_sq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -1252,9 +1541,13 @@ err_napi_del:
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
+ if (c->xdp)
+ mlx5e_close_sq(&c->xdp_sq);
mlx5e_close_sqs(c);
mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
+ if (c->xdp)
+ mlx5e_close_cq(&c->xdp_sq.cq);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
mlx5e_close_cq(&c->icosq.cq);
@@ -1328,6 +1621,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
param->max_inline = priv->params.tx_max_inline;
param->min_inline_mode = priv->params.tx_min_inline_mode;
+ param->type = MLX5E_SQ_TXQ;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1401,7 +1695,22 @@ static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
- param->icosq = true;
+ param->type = MLX5E_SQ_ICO;
+}
+
+static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
+ param->max_inline = priv->params.tx_max_inline;
+ /* for now, XDP SQs support only L2 inline mode */
+ param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+ param->type = MLX5E_SQ_XDP;
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
@@ -1410,6 +1719,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_chan
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
@@ -1883,6 +2193,9 @@ int mlx5e_close(struct net_device *netdev)
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ if (!netif_device_present(netdev))
+ return -ENODEV;
+
mutex_lock(&priv->state_lock);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -2604,11 +2917,15 @@ static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}
-static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
+static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
vlan, qos);
}
@@ -2785,6 +3102,92 @@ static void mlx5e_tx_timeout(struct net_device *dev)
schedule_work(&priv->tx_timeout_work);
}
+static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct bpf_prog *old_prog;
+ int err = 0;
+ bool reset, was_opened;
+ int i;
+
+ mutex_lock(&priv->state_lock);
+
+ if ((netdev->features & NETIF_F_LRO) && prog) {
+ netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ /* no need for full reset when exchanging programs */
+ reset = (!priv->xdp_prog || !prog);
+
+ if (was_opened && reset)
+ mlx5e_close_locked(netdev);
+
+ /* exchange programs */
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (prog)
+ bpf_prog_add(prog, 1);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (reset) /* change RQ type according to priv->xdp_prog */
+ mlx5e_set_rq_priv_params(priv);
+
+ if (was_opened && reset)
+ mlx5e_open_locked(netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
+ goto unlock;
+
+ /* when exchanging programs without a reset, we update the ref
+ * counts on behalf of the channels' RQs here.
+ */
+ bpf_prog_add(prog, priv->params.num_channels);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ struct mlx5e_channel *c = priv->channel[i];
+
+ set_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ napi_synchronize(&c->napi);
+ /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
+
+ old_prog = xchg(&c->rq.xdp_prog, prog);
+
+ clear_bit(MLX5E_RQ_STATE_FLUSH, &c->rq.state);
+ /* napi_schedule in case we have missed anything */
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+ napi_schedule(&c->napi);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static bool mlx5e_xdp_attached(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return !!priv->xdp_prog;
+}
+
+static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mlx5e_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = mlx5e_xdp_attached(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
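For reference, a minimal XDP program of the kind this new ndo_xdp hook attaches could look like the sketch below (restricted C built with clang -O2 -target bpf; the program and section names are illustrative assumptions, not part of this patch):

#include <linux/bpf.h>

/* Toy XDP program: drop every packet. Attached via XDP_SETUP_PROG. */
__attribute__((section("xdp"), used))
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

__attribute__((section("license"), used))
char _license[] = "GPL";
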
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -2804,6 +3207,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2835,6 +3239,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
.ndo_tx_timeout = mlx5e_tx_timeout,
+ .ndo_xdp = mlx5e_xdp,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2909,13 +3314,6 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
indirection_rqt[i] = i % num_channels;
}
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
-}
-
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
enum pcie_link_width width;
@@ -2995,11 +3393,13 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- priv->params.log_sq_size =
- MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
- MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
- MLX5_WQ_TYPE_LINKED_LIST;
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
priv->params.rx_cqe_compress_admin = false;
@@ -3012,33 +3412,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.rx_cqe_compress_admin =
cqe_compress_heuristic(link_speed, pci_bw);
}
-
priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
- switch (priv->params.rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
- priv->params.mpwqe_log_stride_sz =
- priv->params.rx_cqe_compress ?
- MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
- MLX5_MPWRQ_LOG_STRIDE_SIZE;
- priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
- priv->params.mpwqe_log_stride_sz;
+ mlx5e_set_rq_priv_params(priv);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
priv->params.lro_en = true;
- break;
- default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- }
-
- mlx5_core_info(mdev,
- "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
- priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
- BIT(priv->params.log_rq_size),
- BIT(priv->params.mpwqe_log_stride_sz),
- priv->params.rx_cqe_compress_admin);
-
- priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
- BIT(priv->params.log_rq_size));
priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
@@ -3058,19 +3436,16 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
- priv->params.lro_wqe_sz =
- MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+ /* Extra room needed for build_skb */
+ MLX5_RX_HEADROOM -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/* Initialize pflags */
MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
- priv->mdev = mdev;
- priv->netdev = netdev;
- priv->params.num_channels = profile->max_nch(mdev);
- priv->profile = profile;
- priv->ppriv = ppriv;
-
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
#endif
@@ -3374,9 +3749,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
- rep.vport = 0;
+ rep.vport = FDB_UPLINK_VPORT;
rep.priv_data = priv;
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep);
}
}
@@ -3401,13 +3776,13 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.max_tc = MLX5E_MAX_NUM_TC,
};
-void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile, void *ppriv)
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
+ int nch = profile->max_nch(mdev);
struct net_device *netdev;
struct mlx5e_priv *priv;
- int nch = profile->max_nch(mdev);
- int err;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
@@ -3425,12 +3800,31 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_netdev;
+ goto err_cleanup_nic;
+
+ return netdev;
+
+err_cleanup_nic:
+ profile->cleanup(priv);
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ const struct mlx5e_profile *profile;
+ struct mlx5e_priv *priv;
+ int err;
+
+ priv = netdev_priv(netdev);
+ profile = priv->profile;
+ clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
err = mlx5e_create_umr_mkey(priv);
if (err) {
mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
- goto err_destroy_wq;
+ goto out;
}
err = profile->init_tx(priv);
@@ -3453,20 +3847,16 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
mlx5e_set_dev_port_mtu(netdev);
- err = register_netdev(netdev);
- if (err) {
- mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_dealloc_q_counters;
- }
-
if (profile->enable)
profile->enable(priv);
- return priv;
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_open(netdev);
+ netif_device_attach(netdev);
+ rtnl_unlock();
-err_dealloc_q_counters:
- mlx5e_destroy_q_counter(priv);
- profile->cleanup_rx(priv);
+ return 0;
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
@@ -3477,13 +3867,8 @@ err_cleanup_tx:
err_destroy_umr_mkey:
mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
-err_destroy_wq:
- destroy_workqueue(priv->wq);
-
-err_free_netdev:
- free_netdev(netdev);
-
- return NULL;
+out:
+ return err;
}
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
@@ -3505,20 +3890,84 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
rep.unload = mlx5e_vport_rep_unload;
rep.vport = vport;
ether_addr_copy(rep.hw_id, mac);
- mlx5_eswitch_register_vport_rep(esw, &rep);
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep);
}
}
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ const struct mlx5e_profile *profile = priv->profile;
+
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (profile->disable)
+ profile->disable(priv);
+
+ flush_workqueue(priv->wq);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ mlx5e_close(netdev);
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
+ mlx5e_destroy_q_counter(priv);
+ profile->cleanup_rx(priv);
+ mlx5e_close_drop_rq(priv);
+ profile->cleanup_tx(priv);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
+ cancel_delayed_work_sync(&priv->update_stats_work);
+}
+
+/* The scope of mlx5e_attach and mlx5e_detach is limited to creating and
+ * destroying hardware contexts and connecting them to the current netdev.
+ */
+static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+ int err;
+
+ if (netif_device_present(netdev))
+ return 0;
+
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ return err;
+
+ err = mlx5e_attach_netdev(mdev, netdev);
+ if (err) {
+ mlx5e_destroy_mdev_resources(mdev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ mlx5e_detach_netdev(mdev, netdev);
+ mlx5e_destroy_mdev_resources(mdev);
+}
+
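Both entry points guard on netif_device_present(), making the pair idempotent so the core may call them unconditionally. A standalone model of that contract (toy code, not driver API):

#include <assert.h>
#include <stdbool.h>

static bool device_present;

static int toy_attach(void)
{
	if (device_present)
		return 0;		/* already attached: no-op */
	/* ... create hardware contexts here ... */
	device_present = true;
	return 0;
}

static void toy_detach(void)
{
	if (!device_present)
		return;			/* already detached: no-op */
	/* ... destroy hardware contexts here ... */
	device_present = false;
}

int main(void)
{
	toy_attach();
	toy_attach();			/* second call is a no-op */
	toy_detach();
	toy_detach();			/* second call is a no-op */
	assert(!device_present);
	return 0;
}
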
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
void *ppriv = NULL;
- void *ret;
-
- if (mlx5e_check_required_hca_cap(mdev))
- return NULL;
+ void *priv;
+ int vport;
+ int err;
+ struct net_device *netdev;
- if (mlx5e_create_mdev_resources(mdev))
+ err = mlx5e_check_required_hca_cap(mdev);
+ if (err)
return NULL;
mlx5e_register_vport_rep(mdev);
@@ -3526,12 +3975,39 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
if (MLX5_CAP_GEN(mdev, vport_group_manager))
ppriv = &esw->offloads.vport_reps[0];
- ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
- if (!ret) {
- mlx5e_destroy_mdev_resources(mdev);
- return NULL;
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
+ goto err_unregister_reps;
+ }
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5e_attach(mdev, priv);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+ goto err_detach;
}
- return ret;
+
+ return priv;
+
+err_detach:
+ mlx5e_detach(mdev, priv);
+
+err_destroy_netdev:
+ mlx5e_destroy_netdev(mdev, priv);
+
+err_unregister_reps:
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ return NULL;
}
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
@@ -3539,30 +4015,11 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- set_bit(MLX5E_STATE_DESTROYING, &priv->state);
- if (profile->disable)
- profile->disable(priv);
-
- flush_workqueue(priv->wq);
- if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
- netif_device_detach(netdev);
- mlx5e_close(netdev);
- } else {
- unregister_netdev(netdev);
- }
-
- mlx5e_destroy_q_counter(priv);
- profile->cleanup_rx(priv);
- mlx5e_close_drop_rq(priv);
- profile->cleanup_tx(priv);
- mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
- cancel_delayed_work_sync(&priv->update_stats_work);
+ unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
-
- if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
- free_netdev(netdev);
+ free_netdev(netdev);
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
@@ -3572,12 +4029,11 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
int vport;
- mlx5e_destroy_netdev(mdev, priv);
-
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
- mlx5e_destroy_mdev_resources(mdev);
+ mlx5e_detach(mdev, vpriv);
+ mlx5e_destroy_netdev(mdev, priv);
}
static void *mlx5e_get_netdev(void *vpriv)
@@ -3590,6 +4046,8 @@ static void *mlx5e_get_netdev(void *vpriv)
static struct mlx5_interface mlx5e_interface = {
.add = mlx5e_add,
.remove = mlx5e_remove,
+ .attach = mlx5e_attach,
+ .detach = mlx5e_detach,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 29db4735182a..3c97da103d30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -413,19 +413,50 @@ static struct mlx5e_profile mlx5e_rep_profile = {
int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
- rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
- if (!rep->priv_data) {
- mlx5_core_warn(esw->dev, "Failed to create representor for vport %d\n",
- rep->vport);
+ struct net_device *netdev;
+ int err;
+
+ netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ if (!netdev) {
+ pr_warn("Failed to create representor netdev for vport %d\n",
+ rep->vport);
return -EINVAL;
}
+
+ rep->priv_data = netdev_priv(netdev);
+
+ err = mlx5e_attach_netdev(esw->dev, netdev);
+ if (err) {
+ pr_warn("Failed to attach representor netdev for vport %d\n",
+ rep->vport);
+ goto err_destroy_netdev;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ pr_warn("Failed to register representor netdev for vport %d\n",
+ rep->vport);
+ goto err_detach_netdev;
+ }
+
return 0;
+
+err_detach_netdev:
+ mlx5e_detach_netdev(esw->dev, netdev);
+
+err_destroy_netdev:
+ mlx5e_destroy_netdev(esw->dev, rep->priv_data);
+
+ return err;
}
void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
struct mlx5e_priv *priv = rep->priv_data;
+ struct net_device *netdev = priv->netdev;
+
+ mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index b6f8ebbdb487..c6de6fba5843 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
+#include "eswitch.h"
static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
@@ -179,96 +180,111 @@ unlock:
mutex_unlock(&priv->state_lock);
}
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
-{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
- skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
- if (unlikely(!skb))
- return -ENOMEM;
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
+ u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
- dma_addr = dma_map_single(rq->pdev,
- /* hw start padding */
- skb->data,
- /* hw end padding */
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ if (tail_next == cache->head) {
+ rq->stats.cache_full++;
+ return false;
+ }
- if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
- goto err_free_skb;
+ cache->page_cache[cache->tail] = *dma_info;
+ cache->tail = tail_next;
+ return true;
+}
- *((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr);
- wqe->data.lkey = rq->mkey_be;
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
+{
+ struct mlx5e_page_cache *cache = &rq->page_cache;
- rq->skb[ix] = skb;
+ if (unlikely(cache->head == cache->tail)) {
+ rq->stats.cache_empty++;
+ return false;
+ }
- return 0;
+ if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ rq->stats.cache_busy++;
+ return false;
+ }
-err_free_skb:
- dev_kfree_skb(skb);
+ *dma_info = cache->page_cache[cache->head];
+ cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
+ rq->stats.cache_reuse++;
- return -ENOMEM;
+ dma_sync_single_for_device(rq->pdev, dma_info->addr,
+ RQ_PAGE_SIZE(rq),
+ DMA_FROM_DEVICE);
+ return true;
}
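The page cache above is a fixed-size ring over a power-of-two array, so head/tail wrap with a mask and one slot is sacrificed to tell "full" (tail + 1 == head) from "empty" (head == tail). The same arithmetic in a standalone sketch (CACHE_SIZE stands in for MLX5E_CACHE_SIZE and must remain a power of two):

#include <stdbool.h>

#define CACHE_SIZE 128			/* power of two, as in the driver */

struct ring {
	unsigned int head;		/* consumer index */
	unsigned int tail;		/* producer index */
	int slot[CACHE_SIZE];
};

static bool ring_put(struct ring *r, int v)
{
	unsigned int tail_next = (r->tail + 1) & (CACHE_SIZE - 1);

	if (tail_next == r->head)	/* full: one slot kept free */
		return false;
	r->slot[r->tail] = v;
	r->tail = tail_next;
	return true;
}

static bool ring_get(struct ring *r, int *v)
{
	if (r->head == r->tail)		/* empty */
		return false;
	*v = r->slot[r->head];
	r->head = (r->head + 1) & (CACHE_SIZE - 1);
	return true;
}
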
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *dma_info)
{
- struct sk_buff *skb = rq->skb[ix];
+ struct page *page;
+
+ if (mlx5e_rx_cache_get(rq, dma_info))
+ return 0;
+
+ page = dev_alloc_pages(rq->buff.page_order);
+ if (unlikely(!page))
+ return -ENOMEM;
- if (skb) {
- rq->skb[ix] = NULL;
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
+ dma_info->page = page;
+ dma_info->addr = dma_map_page(rq->pdev, page, 0,
+ RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+ if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+ put_page(page);
+ return -ENOMEM;
}
+
+ return 0;
}
-static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+ bool recycle)
{
- return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+ if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
+ return;
+
+ dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+ rq->buff.map_dir);
+ put_page(dma_info->page);
}
-static inline void
-mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len)
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
- dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
- len, DMA_FROM_DEVICE);
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
+ return -ENOMEM;
+
+ wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+ return 0;
}
-static inline void
-mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
- struct mlx5e_mpw_info *wi,
- u32 wqe_offset, u32 len)
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
- /* No dma pre sync for fragmented MPWQE */
+ struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+ mlx5e_page_release(rq, di, true);
}
-static inline void
-mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
- unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
-
- wi->skbs_frags[page_idx]++;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- &wi->dma_info.page[page_idx], frag_offset,
- len, truesize);
+ return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
}
-static inline void
-mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 frag_offset,
- u32 len)
+static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
{
unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
@@ -282,24 +298,11 @@ mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
}
static inline void
-mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
-{
- struct page *page = &wi->dma_info.page[page_idx];
-
- skb_copy_to_linear_data(skb, page_address(page) + offset,
- ALIGN(headlen, sizeof(long)));
-}
-
-static inline void
-mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
- struct sk_buff *skb,
- struct mlx5e_mpw_info *wi,
- u32 page_idx, u32 offset,
- u32 headlen)
+mlx5e_copy_skb_header_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
{
u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
@@ -324,46 +327,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
-static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
-{
- return rq->mpwqe_mtt_offset +
- wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
-static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
- struct mlx5e_sq *sq,
- struct mlx5e_umr_wqe *wqe,
- u16 ix)
-{
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
- struct mlx5_wqe_data_seg *dseg = &wqe->data;
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
-
- memset(wqe, 0, sizeof(*wqe));
- cseg->opmod_idx_opcode =
- cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
- ds_cnt);
- cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- cseg->imm = rq->umr_mkey_be;
-
- ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
- ucseg->klm_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
- ucseg->bsf_octowords =
- cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
- ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
- dseg->lkey = sq->mkey_be;
- dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
-}
-
-static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
struct mlx5e_sq *sq = &rq->channel->icosq;
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *wqe;
@@ -372,135 +338,74 @@ static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
/* fill sq edge with nops to avoid wqe wrap around */
while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
- sq->ico_wqe_info[pi].num_wqebbs = 1;
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ sq->db.ico_wqe[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true);
}
wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- mlx5e_build_umr_wqe(rq, sq, wqe, ix);
- sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
- sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+ memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+ wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+ sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
sq->pc += num_wqebbs;
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
- /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
- * To avoid copying garbage after the mtt array, we allocate
- * a little more.
- */
- return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
- MLX5_UMR_MTT_ALIGNMENT);
-}
-
-static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi,
- int i)
+static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
{
- struct page *page;
-
- page = dev_alloc_page();
- if (unlikely(!page))
- return -ENOMEM;
-
- wi->umr.dma_info[i].page = page;
- wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
- put_page(page);
- return -ENOMEM;
- }
- wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
-
- return 0;
-}
-
-static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe,
- u16 ix)
-{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
+ int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+ int err;
int i;
- wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
- MLX5_MPWRQ_PAGES_PER_WQE,
- GFP_ATOMIC);
- if (unlikely(!wi->umr.dma_info))
- goto err_out;
-
- /* We allocate more than mtt_sz as we will align the pointer */
- wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
- GFP_ATOMIC);
- if (unlikely(!wi->umr.mtt_no_align))
- goto err_free_umr;
-
- wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
- wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
- PCI_DMA_TODEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
- goto err_free_mtt;
-
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+ err = mlx5e_page_alloc_mapped(rq, dma_info);
+ if (unlikely(err))
goto err_unmap;
- page_ref_add(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq));
+ wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+ page_ref_add(dma_info->page, pg_strides);
wi->skbs_frags[i] = 0;
}
wi->consumed_strides = 0;
- wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
- wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
- wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
- wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
- wqe->data.lkey = rq->umr_mkey_be;
wqe->data.addr = cpu_to_be64(dma_offset);
return 0;
err_unmap:
while (--i >= 0) {
- dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- page_ref_sub(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq));
- put_page(wi->umr.dma_info[i].page);
- }
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
-
-err_free_mtt:
- kfree(wi->umr.mtt_no_align);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-err_free_umr:
- kfree(wi->umr.dma_info);
+ page_ref_sub(dma_info->page, pg_strides);
+ mlx5e_page_release(rq, dma_info, true);
+ }
-err_out:
- return -ENOMEM;
+ return err;
}
-void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi)
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
{
- int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
int i;
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- page_ref_sub(wi->umr.dma_info[i].page,
- mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
- put_page(wi->umr.dma_info[i].page);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+ page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+ mlx5e_page_release(rq, dma_info, true);
}
- dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
- kfree(wi->umr.mtt_no_align);
- kfree(wi->umr.dma_info);
}
-void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
@@ -508,12 +413,11 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
- mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
return;
}
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
- rq->stats.mpwqe_frag++;
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
@@ -521,84 +425,23 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
mlx5_wq_ll_update_db_record(wq);
}
-static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe,
- u16 ix)
-{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
- gfp_t gfp_mask;
- int i;
-
- gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
- wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
- MLX5_MPWRQ_WQE_PAGE_ORDER);
- if (unlikely(!wi->dma_info.page))
- return -ENOMEM;
-
- wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
- rq->wqe_sz, PCI_DMA_FROMDEVICE);
- if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
- put_page(wi->dma_info.page);
- return -ENOMEM;
- }
-
- /* We split the high-order page into order-0 ones and manage their
- * reference counter to minimize the memory held by small skb fragments
- */
- split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- page_ref_add(&wi->dma_info.page[i],
- mlx5e_mpwqe_strides_per_page(rq));
- wi->skbs_frags[i] = 0;
- }
-
- wi->consumed_strides = 0;
- wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
- wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
- wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
- wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
- wqe->data.lkey = rq->mkey_be;
- wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
-
- return 0;
-}
-
-void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
- struct mlx5e_mpw_info *wi)
-{
- int i;
-
- dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
- PCI_DMA_FROMDEVICE);
- for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
- page_ref_sub(&wi->dma_info.page[i],
- mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
- put_page(&wi->dma_info.page[i]);
- }
-}
-
int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
int err;
- err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
- if (unlikely(err)) {
- err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
- if (unlikely(err))
- return err;
- set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
- mlx5e_post_umr_wqe(rq, ix);
- return -EBUSY;
- }
-
- return 0;
+ err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
+ if (unlikely(err))
+ return err;
+ set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5e_post_umr_wqe(rq, ix);
+ return -EBUSY;
}
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
- struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
- wi->free_wqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi);
}
#define RQ_CANNOT_POST(rq) \
@@ -617,9 +460,10 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
int err;
err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (err == -EBUSY)
+ return true;
if (unlikely(err)) {
- if (err != -EBUSY)
- rq->stats.buff_alloc_err++;
+ rq->stats.buff_alloc_err++;
break;
}
@@ -637,24 +481,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
- struct ethhdr *eth = (struct ethhdr *)(skb->data);
- struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
- struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
struct tcphdr *tcp;
+ int network_depth = 0;
+ __be16 proto;
+ u16 tot_len;
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
- u16 tot_len = cqe_bcnt - ETH_HLEN;
+ skb->mac_len = ETH_HLEN;
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
- if (eth->h_proto == htons(ETH_P_IP)) {
- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ ipv4 = (struct iphdr *)(skb->data + network_depth);
+ ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+ tot_len = cqe_bcnt - network_depth;
+
+ if (proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct iphdr));
ipv6 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct ipv6hdr));
ipv4 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
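Replacing the hard-coded ETH_HLEN with the depth returned by __vlan_get_protocol() makes the LRO header rewrite correct for VLAN-tagged frames. The offsets work out as follows (a sketch; constants from <linux/if_ether.h> and <linux/if_vlan.h>):

/* untagged frame:      network_depth = ETH_HLEN             = 14
 * single-tagged frame: network_depth = ETH_HLEN + VLAN_HLEN = 18
 *
 * tot_len = cqe_bcnt - network_depth is then the size of the IP packet
 * itself, which is presumably what the header rewrite further down this
 * function (outside this hunk) stores back into the IP length field.
 */
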
@@ -778,40 +630,207 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
rq->stats.packets++;
rq->stats.bytes += cqe_bcnt;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
- napi_gro_receive(rq->cq.napi, skb);
+}
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
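The doorbell is deliberately not rung per packet: mlx5e_xmit_xdp_frame() below only sets sq->db.xdp.doorbell, and the flush added at the end of mlx5e_poll_rx_cq() rings it once per NAPI poll, so a burst of XDP_TX frames costs a single MMIO write. Reduced to its essentials (sketch, names symbolic):

/* deferred-doorbell pattern (sketch):
 *
 *   per packet:     post_wqe(sq); sq->doorbell_pending = true;
 *   per NAPI poll:  if (sq->doorbell_pending) {
 *                           ring_doorbell(sq);      one MMIO per burst
 *                           sq->doorbell_pending = false;
 *                   }
 */
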
+static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ unsigned int data_offset,
+ int len)
+{
+ struct mlx5e_sq *sq = &rq->channel->xdp_sq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ struct mlx5e_sq_wqe_info *wi = &sq->db.xdp.wqe_info[pi];
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
+ unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
+ void *data = page_address(di->page) + data_offset;
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
+ if (sq->db.xdp.doorbell) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ sq->db.xdp.doorbell = false;
+ }
+ rq->stats.xdp_tx_full++;
+ mlx5e_page_release(rq, di, true);
+ return;
+ }
+
+ dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
+ PCI_DMA_TODEVICE);
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* copy the inline part */
+ memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+ eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+
+ /* write the dma part */
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+
+ sq->db.xdp.di[pi] = *di;
+ wi->opcode = MLX5_OPCODE_SEND;
+ wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
+ sq->pc += MLX5E_XDP_TX_WQEBBS;
+
+ sq->db.xdp.doorbell = true;
+ rq->stats.xdp_tx++;
+}
+
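The transmit path above splits each XDP_TX packet in two: the first MLX5E_XDP_MIN_INLINE bytes are copied into the WQE's Ethernet segment (the device requires a minimum inline header), and only the remainder is referenced by DMA. The address arithmetic, spelled out (symbols as in the patch):

/* packet at page offset data_offset, length len:
 *
 *   inline part: data      = page_address(page) + data_offset
 *                inline_sz = MLX5E_XDP_MIN_INLINE
 *   DMA part:    dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE
 *                dma_len   = len - MLX5E_XDP_MIN_INLINE
 *
 * inline_sz + dma_len == len, and the two regions are contiguous in the
 * original receive buffer.
 */
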
+/* returns true if the packet was consumed by XDP */
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ const struct bpf_prog *prog,
+ struct mlx5e_dma_info *di,
+ void *data, u16 len)
+{
+ struct xdp_buff xdp;
+ u32 act;
+
+ if (!prog)
+ return false;
+
+ xdp.data = data;
+ xdp.data_end = xdp.data + len;
+ act = bpf_prog_run_xdp(prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ rq->stats.xdp_drop++;
+ mlx5e_page_release(rq, di, true);
+ return true;
+ }
+}
+
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ u16 wqe_counter, u32 cqe_bcnt)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rq->xdp_prog);
+ struct mlx5e_dma_info *di;
+ struct sk_buff *skb;
+ void *va, *data;
+
+ di = &rq->dma_info[wqe_counter];
+ va = page_address(di->page);
+ data = va + MLX5_RX_HEADROOM;
+
+ dma_sync_single_range_for_cpu(rq->pdev,
+ di->addr,
+ MLX5_RX_HEADROOM,
+ rq->buff.wqe_sz,
+ DMA_FROM_DEVICE);
+ prefetch(data);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ if (mlx5e_xdp_handle(rq, xdp_prog, di, data, cqe_bcnt))
+ return NULL; /* page/packet was consumed by XDP */
+
+ skb = build_skb(va, RQ_PAGE_SIZE(rq));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ mlx5e_page_release(rq, di, true);
+ return NULL;
+ }
+
+ /* queue up for recycling ... */
+ page_ref_inc(di->page);
+ mlx5e_page_release(rq, di, true);
+
+ skb_reserve(skb, MLX5_RX_HEADROOM);
+ skb_put(skb, cqe_bcnt);
+
+ return skb;
}
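skb_from_cqe() hands the whole RX page to build_skb(), which is why the earlier lro_wqe_sz hunk subtracts the headroom and the shared-info tail: the device may only fill the middle region. The page_ref_inc()/mlx5e_page_release() pair right after build_skb() parks the page back in the cache while the skb still holds its own reference. The buffer budget, assuming an order-0 page (a sketch):

/* page layout assumed by build_skb() here:
 *
 *   [ MLX5_RX_HEADROOM | packet data (<= wqe_sz) | struct skb_shared_info ]
 *
 *   max packet bytes = PAGE_SIZE
 *                    - MLX5_RX_HEADROOM
 *                    - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 */
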
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5e_rx_wqe *wqe;
- struct sk_buff *skb;
__be16 wqe_counter_be;
+ struct sk_buff *skb;
u16 wqe_counter;
u32 cqe_bcnt;
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
-
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
goto wq_ll_pop;
- }
- cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
- skb_put(skb, cqe_bcnt);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct net_device *netdev = rq->netdev;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+ if (!skb)
+ goto wq_ll_pop;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (rep->vlan && skb_vlan_tag_present(skb))
+ skb_vlan_pop(skb);
+
+ napi_gro_receive(rq->cq.napi, skb);
+
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
@@ -823,7 +842,6 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
@@ -837,21 +855,20 @@ static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
page_idx++;
frag_offset -= PAGE_SIZE;
}
- wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
while (byte_cnt) {
u32 pg_consumed_bytes =
min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
- pg_consumed_bytes);
+ mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+ pg_consumed_bytes);
byte_cnt -= pg_consumed_bytes;
frag_offset = 0;
page_idx++;
}
/* copy header */
- wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
- headlen);
+ mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+ head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -861,7 +878,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
- struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
struct sk_buff *skb;
u16 cqe_bcnt;
@@ -891,18 +908,20 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
return;
- wi->free_wqe(rq, wi);
+ mlx5e_free_rx_mpwqe(rq, wi);
mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+ struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
int work_done = 0;
if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
@@ -929,6 +948,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
}
+ if (xdp_sq->db.xdp.doorbell) {
+ mlx5e_xmit_xdp_doorbell(xdp_sq);
+ xdp_sq->db.xdp.doorbell = false;
+ }
+
mlx5_cqwq_update_db_record(&cq->wq);
/* ensure cq space is freed before enabling more cqes */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 499487ce3b53..57452fdc5154 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -65,6 +65,9 @@ struct mlx5e_sw_stats {
u64 rx_csum_none;
u64 rx_csum_complete;
u64 rx_csum_unnecessary_inner;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_full;
u64 tx_csum_partial;
u64 tx_csum_partial_inner;
u64 tx_queue_stopped;
@@ -73,10 +76,13 @@ struct mlx5e_sw_stats {
u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
- u64 rx_mpwqe_frag;
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
+ u64 rx_cache_reuse;
+ u64 rx_cache_full;
+ u64 rx_cache_empty;
+ u64 rx_cache_busy;
/* Special handling counters */
u64 link_down_events_phy;
@@ -97,6 +103,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -105,10 +114,13 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};
@@ -272,12 +284,18 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 xdp_drop;
+ u64 xdp_tx;
+ u64 xdp_tx_full;
u64 wqe_err;
u64 mpwqe_filler;
- u64 mpwqe_frag;
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
+ u64 cache_reuse;
+ u64 cache_full;
+ u64 cache_empty;
+ u64 cache_busy;
};
static const struct counter_desc rq_stats_desc[] = {
@@ -286,14 +304,20 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
};
struct mlx5e_sq_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 22cfc4ac1837..a350b7171e3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -47,6 +48,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
struct mlx5_flow_rule *rule;
+ struct mlx5_esw_flow_attr *attr;
};
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
@@ -114,27 +116,30 @@ err_create_ft:
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- u32 action, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_eswitch_rep *rep = priv->ppriv;
- u32 src_vport;
+ int err;
- if (rep->vport) /* set source vport for the flow */
- src_vport = rep->vport;
- else
- src_vport = FDB_UPLINK_VPORT;
+ err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (err)
+ return ERR_PTR(err);
- return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5_flow_rule *rule)
+ struct mlx5_flow_rule *rule,
+ struct mlx5_esw_flow_attr *attr)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
counter = mlx5_flow_rule_counter(rule);
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ mlx5_eswitch_del_vlan_action(esw, attr);
+
mlx5_del_flow_rule(rule);
mlx5_fc_destroy(priv->mdev, counter);
@@ -159,6 +164,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS))) {
@@ -222,6 +228,24 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
key->src);
}
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+ if (mask->vlan_id) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+ }
+ }
+
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
@@ -361,7 +385,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *dest_vport)
+ struct mlx5_esw_flow_attr *attr)
{
const struct tc_action *a;
LIST_HEAD(actions);
@@ -369,17 +393,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tc_no_actions(exts))
return -EINVAL;
- *action = 0;
+ memset(attr, 0, sizeof(*attr));
+ attr->in_rep = priv->ppriv;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
- /* Only support a single action per rule */
- if (*action)
- return -EINVAL;
-
if (is_tcf_gact_shot(a)) {
- *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -387,7 +408,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int ifindex = tcf_mirred_ifindex(a);
struct net_device *out_dev;
struct mlx5e_priv *out_priv;
- struct mlx5_eswitch_rep *out_rep;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -397,13 +417,22 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EINVAL;
}
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
out_priv = netdev_priv(out_dev);
- out_rep = out_priv->ppriv;
- if (out_rep->vport == 0)
- *dest_vport = FDB_UPLINK_VPORT;
- else
- *dest_vport = out_rep->vport;
- *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ attr->out_rep = out_priv->ppriv;
+ continue;
+ }
+
+ if (is_tcf_vlan(a)) {
+ if (tcf_vlan_action(a) == VLAN_F_POP) {
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+ attr->vlan = tcf_vlan_push_vid(a);
+ }
continue;
}
@@ -417,18 +446,29 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
- u32 flow_tag, action, dest_vport = 0;
+ bool fdb_flow = false;
+ u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
+ struct mlx5_esw_flow_attr *old_attr;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ if (esw && esw->mode == SRIOV_OFFLOADS)
+ fdb_flow = true;
+
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
- if (flow)
+ if (flow) {
old = flow->rule;
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ old_attr = flow->attr;
+ } else {
+ if (fdb_flow)
+ flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
+ GFP_KERNEL);
+ else
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ }
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
@@ -442,11 +482,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (err < 0)
goto err_free;
- if (esw && esw->mode == SRIOV_OFFLOADS) {
- err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+ if (fdb_flow) {
+ flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
+ err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
if (err < 0)
goto err_free;
- flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
} else {
err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
if (err < 0)
@@ -465,7 +506,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto err_del_rule;
if (old)
- mlx5e_tc_del_flow(priv, old);
+ mlx5e_tc_del_flow(priv, old, old_attr);
goto out;
@@ -493,7 +534,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
@@ -550,7 +591,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
struct mlx5e_tc_flow *flow = ptr;
struct mlx5e_priv *priv = arg;
- mlx5e_tc_del_flow(priv, flow->rule);
+ mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
kfree(flow);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 988eca99ee0f..70a717382357 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -52,7 +52,6 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
- sq->skb[pi] = NULL;
sq->pc++;
sq->stats.nop++;
@@ -82,15 +81,17 @@ static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
u32 size,
enum mlx5e_dma_map_type map_type)
{
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
- sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
+ u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+
+ sq->db.txq.dma_fifo[i].addr = addr;
+ sq->db.txq.dma_fifo[i].size = size;
+ sq->db.txq.dma_fifo[i].type = map_type;
sq->dma_fifo_pc++;
}
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
- return &sq->dma_fifo[i & sq->dma_fifo_mask];
+ return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
}
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
@@ -221,7 +222,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
u16 pi = sq->pc & wq->sz_m1;
struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];
+ struct mlx5e_tx_wqe_info *wi = &sq->db.txq.wqe_info[pi];
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
@@ -341,7 +342,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- sq->skb[pi] = skb;
+ sq->db.txq.skb[pi] = skb;
wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
sq->pc += wi->num_wqebbs;
@@ -356,6 +357,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.stopped++;
}
+ sq->stats.xmit_more += skb->xmit_more;
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
@@ -367,15 +369,16 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
}
/* fill sq edge with nops to avoid wqe wrap around */
- while ((sq->pc & wq->sz_m1) > sq->edge)
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->db.txq.skb[pi] = NULL;
mlx5e_send_nop(sq, false);
+ }
if (bf)
sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
- sq->stats.xmit_more += skb->xmit_more;
return NETDEV_TX_OK;
dma_unmap_wqe_err:
@@ -442,8 +445,8 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
last_wqe = (sqcc == wqe_counter);
ci = sqcc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (unlikely(!skb)) { /* nop */
sqcc++;
@@ -492,7 +495,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
{
struct mlx5e_tx_wqe_info *wi;
struct sk_buff *skb;
@@ -501,8 +504,8 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
while (sq->cc != sq->pc) {
ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
+ skb = sq->db.txq.skb[ci];
+ wi = &sq->db.txq.wqe_info[ci];
if (!skb) { /* nop */
sq->cc++;
@@ -520,3 +523,37 @@ void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
sq->cc += wi->num_wqebbs;
}
}
+
+static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (wi->opcode == MLX5_OPCODE_NOP) {
+ sq->cc++;
+ continue;
+ }
+
+ sq->cc += wi->num_wqebbs;
+
+ mlx5e_page_release(&sq->channel->rq, di, false);
+ }
+}
+
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
+{
+ switch (sq->type) {
+ case MLX5E_SQ_TXQ:
+ mlx5e_free_txq_sq_descs(sq);
+ break;
+ case MLX5E_SQ_XDP:
+ mlx5e_free_xdp_sq_descs(sq);
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bf33bb69210..5703f19a6a24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -72,7 +72,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
do {
u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
- struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+ struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
mlx5_cqwq_pop(&cq->wq);
sqcc += icowi->num_wqebbs;
@@ -87,7 +87,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
case MLX5_OPCODE_NOP:
break;
case MLX5_OPCODE_UMR:
- mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+ mlx5e_post_rx_mpwqe(&sq->channel->rq);
break;
default:
WARN_ONCE(true,
@@ -105,6 +105,66 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
}
+static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u16 sqcc;
+ int i;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return false;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ u16 wqe_counter;
+ bool last_wqe;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ do {
+ struct mlx5e_sq_wqe_info *wi;
+ struct mlx5e_dma_info *di;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = sqcc & sq->wq.sz_m1;
+ di = &sq->db.xdp.di[ci];
+ wi = &sq->db.xdp.wqe_info[ci];
+
+ if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+ sqcc++;
+ continue;
+ }
+
+ sqcc += wi->num_wqebbs;
+ /* Recycle RX page */
+ mlx5e_page_release(&sq->channel->rq, di, true);
+ } while (!last_wqe);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+ return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -121,6 +181,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+ if (c->xdp)
+ busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+
mlx5e_poll_ico_cq(&c->icosq.cq);
busy |= mlx5e_post_rx_wqes(&c->rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 101430571d6d..abbf2c369923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -81,9 +81,6 @@ enum {
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
{
@@ -116,57 +113,6 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
}
/* E-Switch vport context HW commands */
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
- u32 *out, int outlen)
-{
- u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};
-
- MLX5_SET(query_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
- MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 *vlan, u8 *qos)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
- int err;
- bool cvlan_strip;
- bool cvlan_insert;
-
- *vlan = 0;
- *qos = 0;
-
- if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
- !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
- return -ENOTSUPP;
-
- err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
- if (err)
- goto out;
-
- cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.vport_cvlan_strip);
-
- cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.vport_cvlan_insert);
-
- if (cvlan_strip || cvlan_insert) {
- *vlan = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.cvlan_id);
- *qos = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.cvlan_pcp);
- }
-
- esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
- vport, *vlan, *qos);
-out:
- return err;
-}
-
static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
void *in, int inlen)
{
@@ -181,7 +127,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
- u16 vlan, u8 qos, bool set)
+ u16 vlan, u8 qos, u8 set_flags)
{
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
@@ -189,14 +135,18 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
return -ENOTSUPP;
- esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
- vport, vlan, qos, set);
- if (set) {
+ esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
+ vport, vlan, qos, set_flags);
+
+ if (set_flags & SET_VLAN_STRIP)
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_strip, 1);
+
+ if (set_flags & SET_VLAN_INSERT) {
/* insert only if no vlan in packet */
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.vport_cvlan_insert, 1);
+
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.cvlan_pcp, qos);
MLX5_SET(modify_esw_vport_context_in, in,
@@ -921,7 +871,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
vport_num, promisc_all, promisc_mc);
- if (!vport->trusted || !vport->enabled) {
+ if (!vport->info.trusted || !vport->enabled) {
promisc_uc = 0;
promisc_mc = 0;
promisc_all = 0;
@@ -1257,30 +1207,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_spec *spec;
- u8 smac[ETH_ALEN];
int err = 0;
u8 *smac_v;
- if (vport->spoofchk) {
- err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
- if (err) {
- esw_warn(esw->dev,
- "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
- vport->vport, err);
- return err;
- }
+ if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
- if (!is_valid_ether_addr(smac)) {
- mlx5_core_warn(esw->dev,
- "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
- vport->vport);
- return -EPERM;
- }
}
esw_vport_cleanup_ingress_rules(esw, vport);
- if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
esw_vport_disable_ingress_acl(esw, vport);
return 0;
}
@@ -1289,7 +1229,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->vlan, vport->qos);
+ vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
@@ -1299,16 +1239,16 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
goto out;
}
- if (vport->vlan || vport->qos)
+ if (vport->info.vlan || vport->info.qos)
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- if (vport->spoofchk) {
+ if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
smac_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
outer_headers.smac_47_16);
- ether_addr_copy(smac_v, smac);
+ ether_addr_copy(smac_v, vport->info.mac);
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
@@ -1354,7 +1294,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw_vport_cleanup_egress_rules(esw, vport);
- if (!vport->vlan && !vport->qos) {
+ if (!vport->info.vlan && !vport->info.qos) {
esw_vport_disable_egress_acl(esw, vport);
return 0;
}
@@ -1363,7 +1303,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
- vport->vport, vport->vlan, vport->qos);
+ vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec) {
@@ -1377,7 +1317,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan =
@@ -1411,6 +1351,41 @@ out:
return err;
}
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
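
node_guid_gen_from_mac() builds an EUI-64-style node GUID by splitting the 48-bit MAC around a 0xFF,0xFE infix, the same expansion used for IPv6 interface identifiers, writing the bytes into the u64 from the top down. A standalone sketch that can be compiled to inspect the result (the printed value is how the u64 reads on a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    /* expand a 48-bit MAC into a 64-bit EUI-64-style identifier by
     * inserting 0xFF,0xFE between the OUI and the device-specific bytes
     */
    static void node_guid_from_mac(uint64_t *node_guid, const uint8_t mac[6])
    {
            uint8_t *g = (uint8_t *)node_guid;

            g[7] = mac[0];
            g[6] = mac[1];
            g[5] = mac[2];
            g[4] = 0xff;
            g[3] = 0xfe;
            g[2] = mac[3];
            g[1] = mac[4];
            g[0] = mac[5];
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x25, 0x8b, 0x12, 0x34, 0x56 };
            uint64_t guid = 0;

            node_guid_from_mac(&guid, mac);
            /* 00:25:8b:12:34:56 -> 00:25:8b:ff:fe:12:34:56 */
            printf("node guid: 0x%016llx\n", (unsigned long long)guid);
            return 0;
    }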
+
+static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int vport_num = vport->vport;
+
+ if (!vport_num)
+ return;
+
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport_num,
+ vport->info.link_state);
+ mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
+ mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
+ modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
+ (vport->info.vlan || vport->info.qos));
+
+ /* Only legacy mode needs ACLs */
+ if (esw->mode == SRIOV_LEGACY) {
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+}
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
@@ -1421,23 +1396,17 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Only VFs need ACLs for VST and spoofchk filtering */
- if (vport_num && esw->mode == SRIOV_LEGACY) {
- esw_vport_ingress_config(esw, vport);
- esw_vport_egress_config(esw, vport);
- }
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO);
+ /* Restore old vport configuration */
+ esw_apply_vport_conf(esw, vport);
/* Sync with current vport context */
vport->enabled_events = enable_events;
vport->enabled = true;
/* only PF is trusted by default */
- vport->trusted = (vport_num) ? false : true;
+ if (!vport_num)
+ vport->info.trusted = true;
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1457,11 +1426,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
vport->enabled = false;
synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
-
- mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport_num,
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
/* Wait for current already scheduled events to complete */
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
@@ -1473,7 +1437,12 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+
if (vport_num && esw->mode == SRIOV_LEGACY) {
+ mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport_num,
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1524,6 +1493,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ esw->mode = SRIOV_NONE;
return err;
}
@@ -1559,6 +1529,25 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
+{
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ /* VF Vports will be enabled when SRIOV is enabled */
+}
+
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
+{
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+ MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
+ esw_disable_vport(esw, 0);
+}
+
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
@@ -1626,6 +1615,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num;
+ vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
@@ -1636,8 +1626,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw;
- esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
- /* VF Vports will be enabled when SRIOV is enabled */
return 0;
abort:
if (esw->work_queue)
@@ -1656,7 +1644,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
return;
esw_info(esw->dev, "cleanup\n");
- esw_disable_vport(esw, 0);
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
@@ -1689,18 +1676,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
-{
- ((u8 *)node_guid)[7] = mac[0];
- ((u8 *)node_guid)[6] = mac[1];
- ((u8 *)node_guid)[5] = mac[2];
- ((u8 *)node_guid)[4] = 0xff;
- ((u8 *)node_guid)[3] = 0xfe;
- ((u8 *)node_guid)[2] = mac[3];
- ((u8 *)node_guid)[1] = mac[4];
- ((u8 *)node_guid)[0] = mac[5];
-}
-
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
@@ -1713,13 +1688,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
mlx5_core_warn(esw->dev,
"MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
vport);
- return -EPERM;
+ err = -EPERM;
+ goto unlock;
}
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
@@ -1727,7 +1704,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
mlx5_core_warn(esw->dev,
"Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
vport, err);
- return err;
+ goto unlock;
}
node_guid_gen_from_mac(&node_guid, mac);
@@ -1737,9 +1714,12 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
"Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
vport, err);
- mutex_lock(&esw->state_lock);
+ ether_addr_copy(evport->info.mac, mac);
+ evport->info.node_guid = node_guid;
if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
+
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
@@ -1747,22 +1727,38 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state)
{
+ struct mlx5_vport *evport;
+ int err = 0;
+
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- return mlx5_modify_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport, link_state);
+ mutex_lock(&esw->state_lock);
+ evport = &esw->vports[vport];
+
+ err = mlx5_modify_vport_admin_state(esw->dev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+ vport, link_state);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d link state, err = %d",
+ vport, err);
+ goto unlock;
+ }
+
+ evport->info.link_state = link_state;
+
+unlock:
+ mutex_unlock(&esw->state_lock);
+ return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
struct mlx5_vport *evport;
- u16 vlan;
- u8 qos;
if (!ESW_ALLOWED(esw))
return -EPERM;
@@ -1774,54 +1770,61 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
- mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
- ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
- vport);
- query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
- ivi->vlan = vlan;
- ivi->qos = qos;
- ivi->spoofchk = evport->spoofchk;
+ mutex_lock(&esw->state_lock);
+ ether_addr_copy(ivi->mac, evport->info.mac);
+ ivi->linkstate = evport->info.link_state;
+ ivi->vlan = evport->info.vlan;
+ ivi->qos = evport->info.qos;
+ ivi->spoofchk = evport->info.spoofchk;
+ ivi->trusted = evport->info.trusted;
+ mutex_unlock(&esw->state_lock);
return 0;
}
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
- int vport, u16 vlan, u8 qos)
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags)
{
struct mlx5_vport *evport;
int err = 0;
- int set = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
return -EINVAL;
- if (vlan || qos)
- set = 1;
-
+ mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
- return err;
+ goto unlock;
- mutex_lock(&esw->state_lock);
- evport->vlan = vlan;
- evport->qos = qos;
+ evport->info.vlan = vlan;
+ evport->info.qos = qos;
if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
- goto out;
+ goto unlock;
err = esw_vport_egress_config(esw, evport);
}
-out:
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos)
+{
+ u8 set_flags = 0;
+
+ if (vlan || qos)
+ set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+ return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+}
+
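The wrapper above preserves the legacy semantics: configuring any VLAN or QoS via the ndo path requests both strip and insert, while __mlx5_eswitch_set_vport_vlan() lets the offloads code ask for each action independently. A toy sketch of the flag mapping (flag values mirror the SET_VLAN_* enum added to eswitch.h in this patch):

    #include <stdio.h>

    #define SET_VLAN_STRIP  (1 << 0)
    #define SET_VLAN_INSERT (1 << 1)

    /* legacy VST behavior: any vlan/qos configured implies both actions */
    static unsigned char legacy_flags(unsigned short vlan, unsigned char qos)
    {
            return (vlan || qos) ? (SET_VLAN_STRIP | SET_VLAN_INSERT) : 0;
    }

    int main(void)
    {
            printf("vst 100 -> flags 0x%x\n", legacy_flags(100, 0)); /* 0x3 */
            printf("cleared -> flags 0x%x\n", legacy_flags(0, 0));   /* 0x0 */
            return 0;
    }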
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
@@ -1834,16 +1837,14 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- evport = &esw->vports[vport];
-
mutex_lock(&esw->state_lock);
- pschk = evport->spoofchk;
- evport->spoofchk = spoofchk;
- if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+ evport = &esw->vports[vport];
+ pschk = evport->info.spoofchk;
+ evport->info.spoofchk = spoofchk;
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
- if (err)
- evport->spoofchk = pschk;
- }
+ if (err)
+ evport->info.spoofchk = pschk;
mutex_unlock(&esw->state_lock);
return err;
@@ -1859,10 +1860,9 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
- evport = &esw->vports[vport];
-
mutex_lock(&esw->state_lock);
- evport->trusted = setting;
+ evport = &esw->vports[vport];
+ evport->info.trusted = setting;
if (evport->enabled)
esw_vport_change_handle_locked(evport);
mutex_unlock(&esw->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a96140971d77..2e2938e08cda 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -109,6 +109,16 @@ struct vport_egress {
struct mlx5_flow_rule *drop_rule;
};
+struct mlx5_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ u8 qos;
+ u64 node_guid;
+ int link_state;
+ bool spoofchk;
+ bool trusted;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -121,10 +131,8 @@ struct mlx5_vport {
struct vport_ingress ingress;
struct vport_egress egress;
- u16 vlan;
- u8 qos;
- bool spoofchk;
- bool trusted;
+ struct mlx5_vport_info info;
+
bool enabled;
u16 enabled_events;
};
@@ -149,6 +157,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_rule *miss_rule;
+ int vlan_push_pop_refcount;
} offloads;
};
};
@@ -170,11 +179,14 @@ struct mlx5_eswitch_rep {
void (*unload)(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep);
u16 vport;
- struct mlx5_flow_rule *vport_rx_rule;
+ u8 hw_id[ETH_ALEN];
void *priv_data;
+
+ struct mlx5_flow_rule *vport_rx_rule;
struct list_head vport_sqs_list;
+ u16 vlan;
+ u32 vlan_refcount;
bool valid;
- u8 hw_id[ETH_ALEN];
};
struct mlx5_esw_offload {
@@ -201,9 +213,14 @@ struct mlx5_eswitch {
int mode;
};
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw);
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
@@ -224,14 +241,32 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct ifla_vf_stats *vf_stats);
struct mlx5_flow_spec;
+struct mlx5_esw_flow_attr;
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport);
+ struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_rule *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+enum {
+ SET_VLAN_STRIP = BIT(0),
+ SET_VLAN_INSERT = BIT(1)
+};
+
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+
+struct mlx5_esw_flow_attr {
+ struct mlx5_eswitch_rep *in_rep;
+ struct mlx5_eswitch_rep *out_rep;
+
+ int action;
+ u16 vlan;
+ bool vlan_handled;
+};
+
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num);
@@ -241,9 +276,17 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index,
struct mlx5_eswitch_rep *rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport);
+ int vport_index);
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr);
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+ int vport, u16 vlan, u8 qos, u8 set_flags);
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3dc83a9459a4..c55ad8d00c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -46,19 +46,22 @@ enum {
struct mlx5_flow_rule *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- u32 action, u32 src_vport, u32 dst_vport)
+ struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
void *misc;
+ int action;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ action = attr->action;
+
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest.vport_num = dst_vport;
+ dest.vport_num = attr->out_rep->vport;
action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
@@ -69,7 +72,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -86,6 +89,186 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
}
+static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vf_vport, err = 0;
+
+ esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+ for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+ rep = &esw->offloads.vport_reps[vf_vport];
+ if (!rep->valid)
+ continue;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static struct mlx5_eswitch_rep *
+esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push)
+ vport = in_rep;
+ else if (pop)
+ vport = out_rep;
+ else
+ vport = in_rep;
+
+ return vport;
+}
+
+static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
+ bool push, bool pop, bool fwd)
+{
+ struct mlx5_eswitch_rep *in_rep, *out_rep;
+
+ if ((push || pop) && !fwd)
+ goto out_notsupp;
+
+ in_rep = attr->in_rep;
+ out_rep = attr->out_rep;
+
+ if (push && in_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* vport has vlan push configured, can't offload VF --> wire rules without it */
+ if (!push && !pop && fwd)
+ if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+ goto out_notsupp;
+
+ /* protects against (1) setting rules with different vlans to push and
+ * (2) setting rules without vlans (attr->vlan = 0) together with rules that push vlans (!= 0)
+ */
+ if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+ goto out_notsupp;
+
+ return 0;
+
+out_notsupp:
+ return -ENOTSUPP;
+}
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ if (err)
+ return err;
+
+ attr->vlan_handled = false;
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+ vport->vlan_refcount++;
+ attr->vlan_handled = true;
+ }
+
+ return 0;
+ }
+
+ if (!push && !pop)
+ return 0;
+
+ if (!(offloads->vlan_push_pop_refcount)) {
+ /* it's the 1st vlan rule, apply global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+ offloads->vlan_push_pop_refcount++;
+
+ if (push) {
+ if (vport->vlan_refcount)
+ goto skip_set_push;
+
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+ SET_VLAN_INSERT | SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ vport->vlan = attr->vlan;
+skip_set_push:
+ vport->vlan_refcount++;
+ }
+out:
+ if (!err)
+ attr->vlan_handled = true;
+ return err;
+}
+
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_eswitch_rep *vport = NULL;
+ bool push, pop, fwd;
+ int err = 0;
+
+ if (!attr->vlan_handled)
+ return 0;
+
+ push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+ pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+ fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+ vport = esw_vlan_action_get_vport(attr, push, pop);
+
+ if (!push && !pop && fwd) {
+ /* tracks VF --> wire rules without vlan push action */
+ if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+ vport->vlan_refcount--;
+
+ return 0;
+ }
+
+ if (push) {
+ vport->vlan_refcount--;
+ if (vport->vlan_refcount)
+ goto skip_unset_push;
+
+ vport->vlan = 0;
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
+ 0, 0, SET_VLAN_STRIP);
+ if (err)
+ goto out;
+ }
+
+skip_unset_push:
+ offloads->vlan_push_pop_refcount--;
+ if (offloads->vlan_push_pop_refcount)
+ return 0;
+
+ /* no more vlan rules, stop global vlan pop policy */
+ err = esw_set_global_vlan_pop(esw, 0);
+
+out:
+ return err;
+}
+
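The add/del vlan action handlers above keep two reference counts in step: the per-representor vlan_refcount guards the vport's push configuration, and the global vlan_push_pop_refcount turns the eswitch-wide pop (strip) policy on with the first vlan rule and off with the last. A userspace sketch of that paired refcounting (names are illustrative):

    #include <stdio.h>

    static int global_refs; /* stands in for vlan_push_pop_refcount */
    static int push_refs;   /* stands in for a rep's vlan_refcount */

    static void add_rule(int push)
    {
            if (!global_refs++)
                    printf("apply global pop policy\n");
            if (push && !push_refs++)
                    printf("configure vlan push on vport\n");
    }

    static void del_rule(int push)
    {
            if (push && !--push_refs)
                    printf("clear vlan push on vport\n");
            if (!--global_refs)
                    printf("remove global pop policy\n");
    }

    int main(void)
    {
            add_rule(1);
            add_rule(1); /* second rule: nothing to reconfigure */
            del_rule(1);
            del_rule(1); /* last rule: both policies torn down */
            return 0;
    }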
static struct mlx5_flow_rule *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
@@ -144,16 +327,12 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
{
struct mlx5_flow_rule *flow_rule;
struct mlx5_esw_sq *esw_sq;
- int vport;
int err;
int i;
if (esw->mode != SRIOV_OFFLOADS)
return 0;
- vport = rep->vport == 0 ?
- FDB_UPLINK_VPORT : rep->vport;
-
for (i = 0; i < sqns_num; i++) {
esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
if (!esw_sq) {
@@ -163,7 +342,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
/* Add re-inject rule to the PF/representor sqs */
flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
- vport,
+ rep->vport,
sqns_array[i]);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
@@ -446,7 +625,7 @@ out:
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
- int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@@ -455,8 +634,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
- if (err)
- esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ }
return err;
}
@@ -508,12 +691,16 @@ create_ft_err:
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
- int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
- if (err)
- esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ }
return err;
}
@@ -612,27 +799,36 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
}
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep)
+ int vport_index,
+ struct mlx5_eswitch_rep *__rep)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport_index];
+
+ memset(rep, 0, sizeof(*rep));
- memcpy(&offloads->vport_reps[rep->vport], rep,
- sizeof(struct mlx5_eswitch_rep));
+ rep->load = __rep->load;
+ rep->unload = __rep->unload;
+ rep->vport = __rep->vport;
+ rep->priv_data = __rep->priv_data;
+ ether_addr_copy(rep->hw_id, __rep->hw_id);
- INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
- offloads->vport_reps[rep->vport].valid = true;
+ INIT_LIST_HEAD(&rep->vport_sqs_list);
+ rep->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport)
+ int vport_index)
{
struct mlx5_esw_offload *offloads = &esw->offloads;
struct mlx5_eswitch_rep *rep;
- rep = &offloads->vport_reps[vport];
+ rep = &offloads->vport_reps[vport_index];
- if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
rep->unload(esw, rep);
- offloads->vport_reps[vport].valid = false;
+ rep->valid = false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7a0415e6d339..113c32326333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -401,11 +401,11 @@ struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
{
struct mlx5_cmd_fc_bulk *b;
- int outlen = sizeof(*b) +
+ int outlen =
MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * num;
- b = kzalloc(outlen, GFP_KERNEL);
+ b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
if (!b)
return NULL;
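
The fix above stops counting sizeof(*b) as part of outlen: the struct header is host bookkeeping allocated in front of the flexible command-output buffer, and only the payload length should describe the firmware output. A minimal sketch of the corrected flexible-array allocation, with stand-in types:

    #include <stdlib.h>

    struct fc_bulk {
            int num;
            unsigned char out[]; /* flexible array holding the command output */
    };

    static struct fc_bulk *fc_bulk_alloc(int outlen)
    {
            /* allocate header + payload; only outlen is handed to the device */
            return calloc(1, sizeof(struct fc_bulk) + outlen);
    }

    int main(void)
    {
            struct fc_bulk *b = fc_bulk_alloc(64);

            if (b)
                    b->num = 4;
            free(b);
            return 0;
    }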
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 92c3e0dbcbdc..55957246c0e8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -277,7 +277,7 @@ static void mlx5_do_bond_work(struct work_struct *work)
bond_work);
int status;
- status = mutex_trylock(&mlx5_intf_mutex);
+ status = mlx5_dev_list_trylock();
if (!status) {
/* 1 sec delay. */
mlx5_queue_bond_work(ldev, HZ);
@@ -285,7 +285,7 @@ static void mlx5_do_bond_work(struct work_struct *work)
}
mlx5_do_bond(ldev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_dev_list_unlock();
}
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
@@ -466,35 +466,21 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
mutex_unlock(&lag_mutex);
}
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
-{
- return (u16)((dev->pdev->bus->number << 8) |
- PCI_SLOT(dev->pdev->devfn));
-}
/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- struct mlx5_priv *priv;
- u16 pci_id;
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
(MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
return;
- pci_id = mlx5_gen_pci_id(dev);
-
- mlx5_core_for_each_priv(priv) {
- tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
- if ((dev != tmp_dev) &&
- (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
- ldev = tmp_dev->priv.lag;
- break;
- }
- }
+ tmp_dev = mlx5_get_next_phys_dev(dev);
+ if (tmp_dev)
+ ldev = tmp_dev->priv.lag;
if (!ldev) {
ldev = mlx5_lag_dev_alloc();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c132ef1faefe..d9c3c70b29e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -72,17 +72,6 @@ static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-static LIST_HEAD(intf_list);
-
-LIST_HEAD(mlx5_dev_list);
-DEFINE_MUTEX(mlx5_intf_mutex);
-
-struct mlx5_device_context {
- struct list_head list;
- struct mlx5_interface *intf;
- void *context;
-};
-
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
@@ -778,147 +767,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -ENOTSUPP;
}
-static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- if (!mlx5_lag_intf_add(intf, priv))
- return;
-
- dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
- if (!dev_ctx)
- return;
-
- dev_ctx->intf = intf;
- dev_ctx->context = intf->add(dev);
-
- if (dev_ctx->context) {
- spin_lock_irq(&priv->ctx_lock);
- list_add_tail(&dev_ctx->list, &priv->ctx_list);
- spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- }
-}
-
-static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
- struct mlx5_device_context *dev_ctx;
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf == intf) {
- spin_lock_irq(&priv->ctx_lock);
- list_del(&dev_ctx->list);
- spin_unlock_irq(&priv->ctx_lock);
-
- intf->remove(dev, dev_ctx->context);
- kfree(dev_ctx);
- return;
- }
-}
-
-static int mlx5_register_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&mlx5_intf_mutex);
- list_add_tail(&priv->dev_list, &mlx5_dev_list);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&mlx5_intf_mutex);
-
- return 0;
-}
-
-static void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_interface *intf;
-
- mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
- mlx5_remove_device(intf, priv);
- list_del(&priv->dev_list);
- mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- if (!intf->add || !intf->remove)
- return -EINVAL;
-
- mutex_lock(&mlx5_intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &mlx5_dev_list, dev_list)
- mlx5_add_device(intf, priv);
- mutex_unlock(&mlx5_intf_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
- struct mlx5_priv *priv;
-
- mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(priv, &mlx5_dev_list, dev_list)
- mlx5_remove_device(intf, priv);
- list_del(&intf->list);
- mutex_unlock(&mlx5_intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
-/* Must be called with intf_mutex held */
-void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
-{
- struct mlx5_interface *intf;
-
- list_for_each_entry(intf, &intf_list, list)
- if (intf->protocol == protocol) {
- mlx5_add_device(intf, &dev->priv);
- break;
- }
-}
-
-/* Must be called with intf_mutex held */
-void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
-{
- struct mlx5_interface *intf;
-
- list_for_each_entry(intf, &intf_list, list)
- if (intf->protocol == protocol) {
- mlx5_remove_device(intf, &dev->priv);
- break;
- }
-}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
@@ -991,8 +839,102 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
debugfs_remove(priv->dbg_root);
}
-#define MLX5_IB_MOD "mlx5_ib"
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int err;
+
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto out;
+ }
+
+ err = mlx5_query_board_id(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query board id failed\n");
+ goto out;
+ }
+
+ err = mlx5_eq_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize eq\n");
+ goto out;
+ }
+
+ MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
+
+ err = mlx5_init_cq_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize cq table\n");
+ goto err_eq_cleanup;
+ }
+
+ mlx5_init_qp_table(dev);
+
+ mlx5_init_srq_table(dev);
+
+ mlx5_init_mkey_table(dev);
+
+ err = mlx5_init_rl_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ goto err_tables_cleanup;
+ }
+
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+ goto err_rl_cleanup;
+ }
+#endif
+
+ err = mlx5_sriov_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+ goto err_eswitch_cleanup;
+ }
+
+ return 0;
+
+err_eswitch_cleanup:
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+
+err_rl_cleanup:
+#endif
+ mlx5_cleanup_rl_table(dev);
+
+err_tables_cleanup:
+ mlx5_cleanup_mkey_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+
+err_eq_cleanup:
+ mlx5_eq_cleanup(dev);
+
+out:
+ return err;
+}
+
+static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+{
+ mlx5_sriov_cleanup(dev);
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_cleanup(dev->priv.eswitch);
+#endif
+ mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_mkey_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ mlx5_eq_cleanup(dev);
+}
+
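mlx5_init_once()/mlx5_cleanup_once() split one-time software state (EQ/CQ/QP/SRQ/mkey tables, rate limiting, eswitch and SRIOV structures) away from the per-load hardware bring-up, so mlx5_load_one()/mlx5_unload_one() can cycle, for example across PCI error recovery, without destroying it. A sketch of the boot/cleanup flag pattern:

    #include <stdbool.h>
    #include <stdio.h>

    static int load_one(bool boot)
    {
            if (boot)
                    printf("init_once: tables, eswitch, sriov state\n");
            printf("load: enable hca, msix, eqs, attach\n");
            return 0;
    }

    static void unload_one(bool cleanup)
    {
            printf("unload: detach, free eqs, disable msix\n");
            if (cleanup)
                    printf("cleanup_once: destroy software state\n");
    }

    int main(void)
    {
            load_one(true);    /* probe */
            unload_one(false); /* PCI error: keep software state */
            load_one(false);   /* resume */
            unload_one(true);  /* remove */
            return 0;
    }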
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ bool boot)
{
struct pci_dev *pdev = dev->pdev;
int err;
@@ -1025,12 +967,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto out_err;
}
- mlx5_pagealloc_init(dev);
-
err = mlx5_core_enable_hca(dev, 0);
if (err) {
dev_err(&pdev->dev, "enable hca failed\n");
- goto err_pagealloc_cleanup;
+ goto err_cmd_cleanup;
}
err = mlx5_core_set_issi(dev);
@@ -1083,34 +1023,21 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_start_health_poll(dev);
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto err_stop_poll;
- }
-
- err = mlx5_query_board_id(dev);
- if (err) {
- dev_err(&pdev->dev, "query board id failed\n");
+ if (boot && mlx5_init_once(dev, priv)) {
+ dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
}
err = mlx5_enable_msix(dev);
if (err) {
dev_err(&pdev->dev, "enable msix failed\n");
- goto err_stop_poll;
- }
-
- err = mlx5_eq_init(dev);
- if (err) {
- dev_err(&pdev->dev, "failed to initialize eq\n");
- goto disable_msix;
+ goto err_cleanup_once;
}
err = mlx5_alloc_uuars(dev, &priv->uuari);
if (err) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
- goto err_eq_cleanup;
+ goto err_disable_msix;
}
err = mlx5_start_eqs(dev);
@@ -1126,15 +1053,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
}
err = mlx5_irq_set_affinity_hints(dev);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-
- MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
- mlx5_init_cq_table(dev);
- mlx5_init_qp_table(dev);
- mlx5_init_srq_table(dev);
- mlx5_init_mkey_table(dev);
+ goto err_affinity_hints;
+ }
err = mlx5_init_fs(dev);
if (err) {
@@ -1142,36 +1064,26 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto err_fs;
}
- err = mlx5_init_rl_table(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init rate limiting\n");
- goto err_rl;
- }
-
#ifdef CONFIG_MLX5_CORE_EN
- err = mlx5_eswitch_init(dev);
- if (err) {
- dev_err(&pdev->dev, "eswitch init failed %d\n", err);
- goto err_reg_dev;
- }
+ mlx5_eswitch_attach(dev->priv.eswitch);
#endif
- err = mlx5_sriov_init(dev);
+ err = mlx5_sriov_attach(dev);
if (err) {
dev_err(&pdev->dev, "sriov init failed %d\n", err);
goto err_sriov;
}
- err = mlx5_register_device(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto err_reg_dev;
+ if (mlx5_device_registered(dev)) {
+ mlx5_attach_device(dev);
+ } else {
+ err = mlx5_register_device(dev);
+ if (err) {
+ dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+ goto err_reg_dev;
+ }
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
-
clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
@@ -1179,23 +1091,19 @@ out:
return 0;
-err_sriov:
- if (mlx5_sriov_cleanup(dev))
- dev_err(&dev->pdev->dev, "sriov cleanup failed\n");
+err_reg_dev:
+ mlx5_sriov_detach(dev);
+err_sriov:
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_eswitch_detach(dev->priv.eswitch);
#endif
-err_reg_dev:
- mlx5_cleanup_rl_table(dev);
-err_rl:
mlx5_cleanup_fs(dev);
+
err_fs:
- mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
- mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
free_comp_eqs(dev);
err_stop_eqs:
@@ -1204,12 +1112,13 @@ err_stop_eqs:
err_free_uar:
mlx5_free_uuars(dev, &priv->uuari);
-err_eq_cleanup:
- mlx5_eq_cleanup(dev);
-
-disable_msix:
+err_disable_msix:
mlx5_disable_msix(dev);
+err_cleanup_once:
+ if (boot)
+ mlx5_cleanup_once(dev);
+
err_stop_poll:
mlx5_stop_health_poll(dev);
if (mlx5_cmd_teardown_hca(dev)) {
@@ -1226,8 +1135,7 @@ reclaim_boot_pages:
err_disable_hca:
mlx5_core_disable_hca(dev, 0);
-err_pagealloc_cleanup:
- mlx5_pagealloc_cleanup(dev);
+err_cmd_cleanup:
mlx5_cmd_cleanup(dev);
out_err:
@@ -1237,40 +1145,35 @@ out_err:
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+ bool cleanup)
{
int err = 0;
- err = mlx5_sriov_cleanup(dev);
- if (err) {
- dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n",
- __func__);
- return err;
- }
-
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
+ if (cleanup)
+ mlx5_cleanup_once(dev);
goto out;
}
- mlx5_unregister_device(dev);
+
+ if (mlx5_device_registered(dev))
+ mlx5_detach_device(dev);
+
+ mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_cleanup(dev->priv.eswitch);
+ mlx5_eswitch_detach(dev->priv.eswitch);
#endif
-
- mlx5_cleanup_rl_table(dev);
mlx5_cleanup_fs(dev);
- mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
- mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
- mlx5_eq_cleanup(dev);
mlx5_disable_msix(dev);
+ if (cleanup)
+ mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
@@ -1280,7 +1183,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
- mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
out:
@@ -1290,22 +1192,6 @@ out:
return err;
}
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event)
- dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
struct mlx5_core_event_handler {
void (*event)(struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
@@ -1319,6 +1205,7 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
};
+#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -1365,12 +1252,18 @@ static int init_one(struct pci_dev *pdev,
goto close_pci;
}
- err = mlx5_load_one(dev, priv);
+ mlx5_pagealloc_init(dev);
+
+ err = mlx5_load_one(dev, priv, true);
if (err) {
dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
goto clean_health;
}
+ err = request_module_nowait(MLX5_IB_MOD);
+ if (err)
+ pr_info("failed request module on %s\n", MLX5_IB_MOD);
+
err = devlink_register(devlink, &pdev->dev);
if (err)
goto clean_load;
@@ -1378,8 +1271,9 @@ static int init_one(struct pci_dev *pdev,
return 0;
clean_load:
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, true);
clean_health:
+ mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
@@ -1397,11 +1291,15 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_priv *priv = &dev->priv;
devlink_unregister(devlink);
- if (mlx5_unload_one(dev, priv)) {
+ mlx5_unregister_device(dev);
+
+ if (mlx5_unload_one(dev, priv, true)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
return;
}
+
+ mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
pci_set_drvdata(pdev, NULL);
@@ -1416,7 +1314,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev);
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, false);
pci_save_state(pdev);
mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
@@ -1488,7 +1386,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, priv);
+ err = mlx5_load_one(dev, priv, false);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
, __func__, err);
@@ -1510,7 +1408,7 @@ static void shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
/* Notify mlx5 clients that the kernel is being shut down */
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
- mlx5_unload_one(dev, priv);
+ mlx5_unload_one(dev, priv, false);
mlx5_pci_disable_device(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 714b71bed2be..3d0cfb9f18f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -46,9 +46,6 @@
extern int mlx5_core_debug_mask;
-extern struct list_head mlx5_dev_list;
-extern struct mutex mlx5_intf_mutex;
-
#define mlx5_core_dbg(__dev, format, ...) \
dev_dbg(&(__dev)->pdev->dev, "%s:%s:%d:(pid %d): " format, \
(__dev)->priv.name, __func__, __LINE__, current->pid, \
@@ -73,9 +70,6 @@ do { \
#define mlx5_core_info(__dev, format, ...) \
dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
-#define mlx5_core_for_each_priv(__priv) \
- list_for_each_entry(__priv, &mlx5_dev_list, dev_list)
-
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */
@@ -89,6 +83,10 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
+int mlx5_sriov_attach(struct mlx5_core_dev *dev);
+void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
@@ -102,8 +100,19 @@ void mlx5_cq_tasklet_cb(unsigned long data);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_attach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev);
+bool mlx5_device_registered(struct mlx5_core_dev *dev);
+int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+void mlx5_dev_list_lock(void);
+void mlx5_dev_list_unlock(void);
+int mlx5_dev_list_trylock(void);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 673a7c96479a..d4585154151d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -326,6 +326,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
{
struct fw_page *fwp;
struct rb_node *p;
+ u32 func_id;
u32 npages;
u32 i = 0;
@@ -334,12 +335,16 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
/* No hard feelings, we want our pages back! */
npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+ func_id = MLX5_GET(manage_pages_in, in, function_id);
p = rb_first(&dev->priv.page_root);
while (p && i < npages) {
fwp = rb_entry(p, struct fw_page, rb_node);
- MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
p = rb_next(p);
+ if (fwp->func_id != func_id)
+ continue;
+
+ MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
i++;
}
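The hunk above fixes reclaim_pages_cmd() to return only pages owned by the function id named in the command; previously the walk took the first npages tree nodes regardless of owner, which could hand one function's pages back on behalf of another. A userspace sketch of the owner filter:

    #include <stdio.h>

    struct fw_page { unsigned addr; unsigned func_id; };

    /* collect up to max page addresses belonging to func_id, skipping
     * pages owned by other functions
     */
    static int collect(const struct fw_page *pages, int n,
                       unsigned func_id, unsigned *out, int max)
    {
            int i = 0, j;

            for (j = 0; j < n && i < max; j++) {
                    if (pages[j].func_id != func_id)
                            continue; /* another function's page, keep it */
                    out[i++] = pages[j].addr;
            }
            return i;
    }

    int main(void)
    {
            const struct fw_page pages[] = {
                    { 0x1000, 0 }, { 0x2000, 1 }, { 0x3000, 1 }, { 0x4000, 0 },
            };
            unsigned out[4];
            int n = collect(pages, 4, 1, out, 4);

            printf("reclaimed %d pages for func 1\n", n); /* 2 */
            return 0;
    }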
@@ -540,6 +545,12 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
int prev_vfs_pages = dev->priv.vfs_pages;
+ /* In case of internal error we will free the pages manually later */
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_core_warn(dev, "Skipping wait for vf pages stage");
+ return 0;
+ }
+
mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
dev->priv.name);
while (dev->priv.vfs_pages) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 78e789245183..e08627785590 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -44,108 +44,132 @@ bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
return !!sriov->num_vfs;
}
-static void enable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
int vf;
- for (vf = 1; vf <= num_vfs; vf++) {
- err = mlx5_core_enable_hca(dev, vf);
+ if (sriov->enabled_vfs) {
+ mlx5_core_warn(dev,
+ "failed to enable SRIOV on device, already enabled with %d vfs\n",
+ sriov->enabled_vfs);
+ return -EBUSY;
+ }
+
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+ if (err) {
+ mlx5_core_warn(dev,
+ "failed to enable eswitch SRIOV (%d)\n", err);
+ return err;
+ }
+#endif
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
- mlx5_core_warn(dev, "failed to enable VF %d\n", vf - 1);
- } else {
- sriov->vfs_ctx[vf - 1].enabled = 1;
- mlx5_core_dbg(dev, "successfully enabled VF %d\n", vf - 1);
+ mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
+ continue;
}
+ sriov->vfs_ctx[vf].enabled = 1;
+ sriov->enabled_vfs++;
+ mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
+
}
+
+ return 0;
}
-static void disable_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+ int err;
int vf;
- for (vf = 1; vf <= num_vfs; vf++) {
- if (sriov->vfs_ctx[vf - 1].enabled) {
- if (mlx5_core_disable_hca(dev, vf))
- mlx5_core_warn(dev, "failed to disable VF %d\n", vf - 1);
- else
- sriov->vfs_ctx[vf - 1].enabled = 0;
+ if (!sriov->enabled_vfs)
+ return;
+
+ for (vf = 0; vf < sriov->num_vfs; vf++) {
+ if (!sriov->vfs_ctx[vf].enabled)
+ continue;
+ err = mlx5_core_disable_hca(dev, vf + 1);
+ if (err) {
+ mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
+ continue;
}
+ sriov->vfs_ctx[vf].enabled = 0;
+ sriov->enabled_vfs--;
}
+
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+#endif
+
+ if (mlx5_wait_for_vf_pages(dev))
+ mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
-static int mlx5_core_create_vfs(struct pci_dev *pdev, int num_vfs)
+static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err;
-
- if (pci_num_vf(pdev))
- pci_disable_sriov(pdev);
-
- enable_vfs(dev, num_vfs);
+ int err = 0;
- err = pci_enable_sriov(pdev, num_vfs);
- if (err) {
- dev_warn(&pdev->dev, "enable sriov failed %d\n", err);
- goto ex;
+ if (pci_num_vf(pdev)) {
+ mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
+ return -EBUSY;
}
- return 0;
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err)
+ mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-ex:
- disable_vfs(dev, num_vfs);
return err;
}
-static int mlx5_core_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
+{
+ pci_disable_sriov(pdev);
+}
+
+static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
+ int err = 0;
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = kcalloc(num_vfs, sizeof(*sriov->vfs_ctx), GFP_ATOMIC);
- if (!sriov->vfs_ctx)
- return -ENOMEM;
+ err = mlx5_device_enable_sriov(dev, num_vfs);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
+ return err;
+ }
- sriov->enabled_vfs = num_vfs;
- err = mlx5_core_create_vfs(pdev, num_vfs);
+ err = mlx5_pci_enable_sriov(pdev, num_vfs);
if (err) {
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = NULL;
+ mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+ mlx5_device_disable_sriov(dev);
return err;
}
+ sriov->num_vfs = num_vfs;
+
return 0;
}
-static void mlx5_core_init_vfs(struct mlx5_core_dev *dev, int num_vfs)
+static void mlx5_sriov_disable(struct pci_dev *pdev)
{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- sriov->num_vfs = num_vfs;
-}
-
-static void mlx5_core_cleanup_vfs(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_sriov *sriov;
-
- sriov = &dev->priv.sriov;
- disable_vfs(dev, sriov->num_vfs);
-
- if (mlx5_wait_for_vf_pages(dev))
- mlx5_core_warn(dev, "timeout claiming VFs pages\n");
-
+ mlx5_pci_disable_sriov(pdev);
+ mlx5_device_disable_sriov(dev);
sriov->num_vfs = 0;
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err;
+ int err = 0;
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
@@ -156,92 +180,57 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
return -EINVAL;
}
- mlx5_core_cleanup_vfs(dev);
+ if (num_vfs)
+ err = mlx5_sriov_enable(pdev, num_vfs);
+ else
+ mlx5_sriov_disable(pdev);
- if (!num_vfs) {
-#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_disable_sriov(dev->priv.eswitch);
-#endif
- kfree(sriov->vfs_ctx);
- sriov->vfs_ctx = NULL;
- if (!pci_vfs_assigned(pdev))
- pci_disable_sriov(pdev);
- else
- mlx5_core_info(dev, "unloading PF driver while leaving orphan VFs\n");
- return 0;
- }
+ return err ? err : num_vfs;
+}
- err = mlx5_core_sriov_enable(pdev, num_vfs);
- if (err) {
- mlx5_core_warn(dev, "mlx5_core_sriov_enable failed %d\n", err);
- return err;
- }
+int mlx5_sriov_attach(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- mlx5_core_init_vfs(dev, num_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
-#endif
+ if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+ return 0;
- return num_vfs;
+ /* If SR-IOV VFs exist at the PCI level, enable them at the device level */
+ return mlx5_device_enable_sriov(dev, sriov->num_vfs);
}
-static int sync_required(struct pci_dev *pdev)
+void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int cur_vfs = pci_num_vf(pdev);
-
- if (cur_vfs != sriov->num_vfs) {
- mlx5_core_warn(dev, "current VFs %d, registered %d - sync needed\n",
- cur_vfs, sriov->num_vfs);
- return 1;
- }
+ if (!mlx5_core_is_pf(dev))
+ return;
- return 0;
+ mlx5_device_disable_sriov(dev);
}
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct pci_dev *pdev = dev->pdev;
- int cur_vfs;
+ int total_vfs;
if (!mlx5_core_is_pf(dev))
return 0;
- if (!sync_required(dev->pdev))
- return 0;
-
- cur_vfs = pci_num_vf(pdev);
- sriov->vfs_ctx = kcalloc(cur_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ sriov->num_vfs = pci_num_vf(pdev);
+ sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
if (!sriov->vfs_ctx)
return -ENOMEM;
- sriov->enabled_vfs = cur_vfs;
-
- mlx5_core_init_vfs(dev, cur_vfs);
-#ifdef CONFIG_MLX5_CORE_EN
- if (cur_vfs)
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
- SRIOV_LEGACY);
-#endif
-
- enable_vfs(dev, cur_vfs);
-
return 0;
}
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
- int err;
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
if (!mlx5_core_is_pf(dev))
- return 0;
+ return;
- err = mlx5_core_sriov_configure(pdev, 0);
- if (err)
- return err;
-
- return 0;
+ kfree(sriov->vfs_ctx);
}
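
The refactor above separates the two stages of SR-IOV bring-up: VF HCA contexts are enabled in the device before PCI SR-IOV is turned on, and torn down in the opposite order. A minimal sketch of the resulting flow, condensing mlx5_sriov_enable()/mlx5_sriov_disable() from this file with error handling trimmed to the one rollback that matters:

/* Sketch only -- restates the ordering invariant enforced by the
 * helpers above; not part of the patch.
 */
static int sriov_enable_sketch(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	/* Device level first: per-VF HCA contexts. */
	err = mlx5_device_enable_sriov(dev, num_vfs);
	if (err)
		return err;

	/* PCI level second: actually instantiate the VFs. */
	err = mlx5_pci_enable_sriov(pdev, num_vfs);
	if (err) {
		mlx5_device_disable_sriov(dev);	/* roll back */
		return err;
	}
	return 0;
}

Disable runs the same two stages in reverse (mlx5_sriov_disable() above), and keeping the device-level stage self-contained is what lets mlx5_sriov_attach()/mlx5_sriov_detach() re-enter it alone when the PF driver reloads while PCI-level VFs already exist.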
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 068ee65a960b..aa33d58b9f81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1100,10 +1100,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats;
}
- if (mlxsw_driver->profile->used_max_lag &&
- mlxsw_driver->profile->used_max_port_per_lag) {
- alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
- mlxsw_driver->profile->max_port_per_lag;
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
+ if (err)
+ goto err_bus_init;
+
+ if (mlxsw_core->resources.max_lag_valid &&
+ mlxsw_core->resources.max_ports_in_lag_valid) {
+ alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
+ mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1111,11 +1116,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
- &mlxsw_core->resources);
- if (err)
- goto err_bus_init;
-
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -1146,10 +1146,10 @@ err_hwmon_init:
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
- mlxsw_bus->fini(bus_priv);
-err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
+ mlxsw_bus->fini(bus_priv);
+err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
@@ -1615,7 +1615,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
- return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
+ return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
@@ -1644,7 +1644,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
- for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
+ for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index d2e32979319c..c4f550b6f783 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -179,8 +179,6 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
- used_max_lag:1,
- used_max_port_per_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -192,10 +190,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
- used_kvd_sizes:1;
+ used_kvd_split_data:1; /* indicates the KVD split data sizes are used */
+
u8 max_vepa_channels;
- u16 max_lag;
- u16 max_port_per_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -214,8 +211,9 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
- u32 kvd_hash_single_size;
- u32 kvd_hash_double_size;
+ u16 kvd_hash_granularity;
+ u8 kvd_hash_single_parts;
+ u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -269,8 +267,35 @@ struct mlxsw_driver {
};
struct mlxsw_resources {
- u8 max_span_valid:1;
+ u32 max_span_valid:1,
+ max_lag_valid:1,
+ max_ports_in_lag_valid:1,
+ kvd_size_valid:1,
+ kvd_single_min_size_valid:1,
+ kvd_double_min_size_valid:1,
+ max_virtual_routers_valid:1,
+ max_system_ports_valid:1,
+ max_vlan_groups_valid:1,
+ max_regions_valid:1,
+ max_rif_valid:1;
u8 max_span;
+ u8 max_lag;
+ u8 max_ports_in_lag;
+ u32 kvd_size;
+ u32 kvd_single_min_size;
+ u32 kvd_double_min_size;
+ u16 max_virtual_routers;
+ u16 max_system_ports;
+ u16 max_vlan_groups;
+ u16 max_regions;
+ u16 max_rif;
+
+ /* Internal resources.
+ * Determined by the SW, not queried from the HW.
+ */
+ u32 kvd_single_size;
+ u32 kvd_double_size;
+ u32 kvd_linear_size;
};
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
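
Every value in mlxsw_resources is paired with a _valid bit, because the resource query may not report every ID on every device; consumers check the bit first and fail with -EIO when a required resource is missing (the pattern mlxsw_sp_lag_init() and mlxsw_sp_vrs_init() follow later in this patch). A minimal consumer sketch, with hypothetical example_* naming:

/* Hypothetical consumer of the queried resources; the helper is the
 * one declared above, the function name is illustrative only.
 */
static int example_lag_table_size(struct mlxsw_core *mlxsw_core,
				  size_t *p_size)
{
	struct mlxsw_resources *res = mlxsw_core_resources_get(mlxsw_core);

	/* Never read a value whose _valid bit is clear. */
	if (!res->max_lag_valid || !res->max_ports_in_lag_valid)
		return -EIO;

	*p_size = (size_t)res->max_lag * res->max_ports_in_lag;
	return 0;
}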
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1d1360c178bb..e742bd4e8894 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1156,6 +1156,16 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_MAX_LAG_ID 0x2520
+#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
+#define MLXSW_KVD_SIZE_ID 0x1001
+#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
+#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
+#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
+#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
+#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
+#define MLXSW_MAX_REGIONS_ID 0x2901
+#define MLXSW_MAX_RIF_ID 0x2C02
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
#define MLXSW_RESOURCES_PER_QUERY 32
@@ -1167,6 +1177,46 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val,
resources->max_span = val;
resources->max_span_valid = 1;
break;
+ case MLXSW_MAX_LAG_ID:
+ resources->max_lag = val;
+ resources->max_lag_valid = 1;
+ break;
+ case MLXSW_MAX_PORTS_IN_LAG_ID:
+ resources->max_ports_in_lag = val;
+ resources->max_ports_in_lag_valid = 1;
+ break;
+ case MLXSW_KVD_SIZE_ID:
+ resources->kvd_size = val;
+ resources->kvd_size_valid = 1;
+ break;
+ case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
+ resources->kvd_single_min_size = val;
+ resources->kvd_single_min_size_valid = 1;
+ break;
+ case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
+ resources->kvd_double_min_size = val;
+ resources->kvd_double_min_size_valid = 1;
+ break;
+ case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
+ resources->max_virtual_routers = val;
+ resources->max_virtual_routers_valid = 1;
+ break;
+ case MLXSW_MAX_SYSTEM_PORT_ID:
+ resources->max_system_ports = val;
+ resources->max_system_ports_valid = 1;
+ break;
+ case MLXSW_MAX_VLAN_GROUPS_ID:
+ resources->max_vlan_groups = val;
+ resources->max_vlan_groups_valid = 1;
+ break;
+ case MLXSW_MAX_REGIONS_ID:
+ resources->max_regions = val;
+ resources->max_regions_valid = 1;
+ break;
+ case MLXSW_MAX_RIF_ID:
+ resources->max_rif = val;
+ resources->max_rif_valid = 1;
+ break;
default:
break;
}
@@ -1209,10 +1259,52 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
+static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
+{
+ u32 singles_size, doubles_size, linear_size;
+
+ if (!resources->kvd_single_min_size_valid ||
+ !resources->kvd_double_min_size_valid ||
+ !profile->used_kvd_split_data)
+ return -EIO;
+
+ linear_size = profile->kvd_linear_size;
+
+ /* The hash part is what is left of the KVD after the
+ * linear part is taken out. It is split into single and
+ * double sizes according to the parts ratio from the
+ * profile. Both sizes must be multiples of the
+ * granularity from the profile.
+ */
+ doubles_size = (resources->kvd_size - linear_size);
+ doubles_size *= profile->kvd_hash_double_parts;
+ doubles_size /= (profile->kvd_hash_double_parts +
+ profile->kvd_hash_single_parts);
+ doubles_size /= profile->kvd_hash_granularity;
+ doubles_size *= profile->kvd_hash_granularity;
+ singles_size = resources->kvd_size - doubles_size -
+ linear_size;
+
+ /* Check results are legal. */
+ if (singles_size < resources->kvd_single_min_size ||
+ doubles_size < resources->kvd_double_min_size ||
+ resources->kvd_size < linear_size)
+ return -EIO;
+
+ resources->kvd_single_size = singles_size;
+ resources->kvd_double_size = doubles_size;
+ resources->kvd_linear_size = linear_size;
+
+ return 0;
+}
+
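
To make the rounding concrete: with the Spectrum profile values set later in this patch (kvd_linear_size = 65536, kvd_hash_granularity = 128, single:double parts = 2:1) and a hypothetical firmware-reported kvd_size of 262144 entries, the function above computes:

	hash part    = 262144 - 65536          = 196608
	doubles_size = 196608 * 1 / (1 + 2)    = 65536
	               (rounding down to a multiple of 128 is a no-op here)
	singles_size = 262144 - 65536 - 65536  = 131072

The divide-then-multiply by kvd_hash_granularity only changes the result when the ratio split leaves a remainder; any entries shaved off doubles_size are absorbed into singles_size by the final subtraction.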
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
int i;
+ int err;
mlxsw_cmd_mbox_zero(mbox);
@@ -1222,18 +1314,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
- if (profile->used_max_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_lag_set(
- mbox, profile->max_lag);
- }
- if (profile->used_max_port_per_lag) {
- mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
- mbox, profile->max_port_per_lag);
- }
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1310,19 +1390,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
- if (profile->used_kvd_sizes) {
- mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
- mbox, profile->kvd_linear_size);
- mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
- mbox, profile->kvd_hash_single_size);
+ if (resources->kvd_size_valid) {
+ err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
+ if (err)
+ return err;
+
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
+ resources->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
+ 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
+ resources->kvd_single_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
- mbox, 1);
- mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
- mbox, profile->kvd_hash_double_size);
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
+ resources->kvd_double_size);
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1524,7 +1607,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
- err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+ err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
if (err)
goto err_config_profile;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index b83d0a7a0b49..6460c7256f2b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1392,7 +1392,7 @@ static inline void mlxsw_reg_slcr_pack(char *payload, u16 lag_hash)
{
MLXSW_REG_ZERO(slcr, payload);
mlxsw_reg_slcr_pp_set(payload, MLXSW_REG_SLCR_PP_GLOBAL);
- mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_XOR);
+ mlxsw_reg_slcr_type_set(payload, MLXSW_REG_SLCR_TYPE_CRC);
mlxsw_reg_slcr_lag_hash_set(payload, lag_hash);
}
@@ -2138,6 +2138,18 @@ MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+enum {
+ MLXSW_REG_PTYS_AN_STATUS_NA,
+ MLXSW_REG_PTYS_AN_STATUS_OK,
+ MLXSW_REG_PTYS_AN_STATUS_FAIL,
+};
+
+/* reg_ptys_an_status
+ * Autonegotiation status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
+
#define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0)
#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
@@ -2152,6 +2164,7 @@ MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 BIT(15)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4 BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2 BIT(18)
#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 BIT(19)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
@@ -2184,6 +2197,13 @@ MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
*/
MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+/* reg_ptys_eth_proto_lp_advertise
+ * The protocols that were advertised by the link partner during
+ * autonegotiation.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32);
+
static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
u32 proto_admin)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6c6b726c4897..fd74d1064ff3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -56,6 +56,7 @@
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
#include "spectrum.h"
#include "core.h"
@@ -247,7 +248,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
span_entry->used = false;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
{
struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
int i;
@@ -261,7 +263,8 @@ struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
return NULL;
}
-struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
{
struct mlxsw_sp_span_entry *span_entry;
@@ -363,7 +366,8 @@ mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
}
/* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
if (err)
goto err_mpar_reg_write;
@@ -404,7 +408,8 @@ mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
return;
/* remove the inspected port */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
/* remove the SBIB buffer if it was egress SPAN */
@@ -818,9 +823,9 @@ err_span_port_mtu_update:
return err;
}
-static struct rtnl_link_stats64 *
-mlxsw_sp_port_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static int
+mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port_pcpu_stats *p;
@@ -847,6 +852,107 @@ mlxsw_sp_port_get_stats64(struct net_device *dev,
tx_dropped += p->tx_dropped;
}
stats->tx_dropped = tx_dropped;
+ return 0;
+}
+
+static bool mlxsw_sp_port_has_offload_stats(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return mlxsw_sp_port_get_sw_stats64(dev, sp);
+ }
+
+ return -EINVAL;
+}
+
+static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+ int prio, char *ppcnt_pl)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
+ return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+}
+
+static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
+ 0, ppcnt_pl);
+ if (err)
+ goto out;
+
+ stats->tx_packets =
+ mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ stats->rx_packets =
+ mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
+ stats->tx_bytes =
+ mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
+ stats->rx_bytes =
+ mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
+ stats->multicast =
+ mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
+
+ stats->rx_crc_errors =
+ mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
+ stats->rx_frame_errors =
+ mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
+
+ stats->rx_length_errors = (
+ mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
+ mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
+
+ stats->rx_errors = (stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_length_errors);
+
+out:
+ return err;
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ container_of(work, struct mlxsw_sp_port,
+ hw_stats.update_dw.work);
+
+ if (!netif_carrier_ok(mlxsw_sp_port->dev))
+ goto out;
+
+ mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
+ mlxsw_sp_port->hw_stats.cache);
+
+out:
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
+ MLXSW_HW_STATS_UPDATE_TIME);
+}
+
+/* Return the stats from a cache that is updated periodically,
+ * as this function might get called in an atomic context.
+ */
+static struct rtnl_link_stats64 *
+mlxsw_sp_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
+
return stats;
}
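
The cache plus delayed work introduced here exists because ndo_get_stats64 can be called in atomic context, while the PPCNT query behind mlxsw_sp_port_get_hw_stats() sleeps. Reduced to its generic shape, with example_* names standing in for the driver specifics (the real code re-arms via mlxsw_core_schedule_dw() rather than schedule_delayed_work()):

/* Generic form of the pattern above: a delayed work refreshes a
 * preallocated cache in sleeping context; the atomic-context reader
 * only copies it. example_query_hw() is a stand-in for the PPCNT read.
 */
struct example_stats {
	struct rtnl_link_stats64 *cache;	/* allocated at port init */
	struct delayed_work update_dw;
};

static void example_update_worker(struct work_struct *work)
{
	struct example_stats *s =
		container_of(work, struct example_stats, update_dw.work);

	example_query_hw(s->cache);			/* may sleep */
	schedule_delayed_work(&s->update_dw, HZ);	/* re-arm */
}

static void example_get_stats64(struct example_stats *s,
				struct rtnl_link_stats64 *stats)
{
	memcpy(stats, s->cache, sizeof(*stats));	/* atomic-safe */
}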
@@ -1208,6 +1314,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
+ .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
+ .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
.ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
@@ -1546,8 +1654,6 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
enum mlxsw_reg_ppcnt_grp grp, int prio,
u64 *data, int data_index)
{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
int i, len;
@@ -1556,10 +1662,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
for (i = 0; i < len; i++)
- data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+ data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
}
static void mlxsw_sp_port_get_stats(struct net_device *dev,
@@ -1598,112 +1703,149 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
}
struct mlxsw_sp_port_link_mode {
+ enum ethtool_link_mode_bit_indices mask_ethtool;
u32 mask;
- u32 supported;
- u32 advertised;
u32 speed;
};
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = SPEED_100,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = SPEED_1000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = SPEED_20000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = SPEED_40000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
- MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
- .speed = 25000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = SPEED_25000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
- MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
- .speed = 50000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+ .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = SPEED_25000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = SPEED_50000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+ .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = SPEED_50000,
},
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ .speed = SPEED_56000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .speed = 100000,
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ .speed = SPEED_56000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = SPEED_100000,
+ },
+ {
+ .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ .speed = SPEED_100000,
},
};
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
-static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
+static void
+mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
@@ -1711,43 +1853,29 @@ static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_SGMII))
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
- return SUPPORTED_Backplane;
- return 0;
-}
-
-static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
-{
- u32 modes = 0;
- int i;
-
- for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].supported;
- }
- return modes;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
}
-static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
+static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
{
- u32 modes = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
- modes |= mlxsw_sp_port_link_mode[i].advertised;
+ __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ mode);
}
- return modes;
}
static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *cmd)
{
u32 speed = SPEED_UNKNOWN;
u8 duplex = DUPLEX_UNKNOWN;
@@ -1764,8 +1892,8 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
}
static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
@@ -1790,49 +1918,15 @@ static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
return PORT_OTHER;
}
-static int mlxsw_sp_port_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 eth_proto_cap;
- u32 eth_proto_admin;
- u32 eth_proto_oper;
- int err;
-
- mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
- return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
-
- cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
- mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause |
- SUPPORTED_Autoneg;
- cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
- mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
- eth_proto_oper, cmd);
-
- eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
- cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
-
- cmd->transceiver = XCVR_INTERNAL;
- return 0;
-}
-
-static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+static u32
+mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
- if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+ if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
+ cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
}
return ptys_proto;
@@ -1862,61 +1956,113 @@ static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
return ptys_proto;
}
-static int mlxsw_sp_port_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *cmd)
{
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+ mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
+ mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
+}
+
+static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (!autoneg)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
+}
+
+static void
+mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
+ struct ethtool_link_ksettings *cmd)
+{
+ if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
+ return;
+
+ ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
+ mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
+}
+
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char ptys_pl[MLXSW_REG_PTYS_LEN];
- u32 speed;
- u32 eth_proto_new;
- u32 eth_proto_cap;
- u32 eth_proto_admin;
+ u8 autoneg_status;
+ bool autoneg;
int err;
- speed = ethtool_cmd_speed(cmd);
+ autoneg = mlxsw_sp_port->link.autoneg;
+ mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper);
+
+ mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
+
+ mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
+
+ eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
+ autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
+ mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
- eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
- mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
- mlxsw_sp_to_ptys_speed(speed);
+ cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
+ mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
+ cmd);
+
+ return 0;
+}
+
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap, eth_proto_new;
+ bool autoneg;
+ int err;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to get proto");
+ if (err)
return err;
- }
- mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+ mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+
+ autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+ eth_proto_new = autoneg ?
+ mlxsw_sp_to_ptys_advert_link(cmd) :
+ mlxsw_sp_to_ptys_speed(cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
- netdev_err(dev, "Not supported proto admin requested");
+ netdev_err(dev, "No supported speed requested\n");
return -EINVAL;
}
- if (eth_proto_new == eth_proto_admin)
- return 0;
mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
- if (err) {
- netdev_err(dev, "Failed to set proto admin");
+ if (err)
return err;
- }
if (!netif_running(dev))
return 0;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port->link.autoneg = autoneg;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
- if (err) {
- netdev_err(dev, "Failed to set admin status");
- return err;
- }
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
return 0;
}
@@ -1930,8 +2076,8 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
.get_sset_count = mlxsw_sp_port_get_sset_count,
- .get_settings = mlxsw_sp_port_get_settings,
- .set_settings = mlxsw_sp_port_set_settings,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
};
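
The conversion above replaces the legacy get_settings/set_settings u32 flag words with the link_ksettings bitmaps, which is why the mode table now stores ETHTOOL_LINK_MODE_* bit indices (mask_ethtool) rather than SUPPORTED_/ADVERTISED_ flags. The bitmap side of the API in isolation, as a sketch; the helpers are the standard ones from <linux/ethtool.h>:

/* Named modes go through the add_link_mode helper; table-driven
 * modes are set by raw bit index, as in mlxsw_sp_from_ptys_link().
 */
static void example_fill(struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	__set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
		  cmd->link_modes.supported);
}

static bool example_advertises_100g_cr4(
			const struct ethtool_link_ksettings *cmd)
{
	return test_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
			cmd->link_modes.advertising);
}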
static int
@@ -2081,6 +2227,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
mlxsw_sp_port->mapping.lane = lane;
+ mlxsw_sp_port->link.autoneg = 1;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -2102,9 +2249,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->hw_stats.cache =
+ kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
+
+ if (!mlxsw_sp_port->hw_stats.cache) {
+ err = -ENOMEM;
+ goto err_alloc_hw_stats;
+ }
+ INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
+ &update_stats_cache);
+
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2130,13 +2294,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2202,6 +2359,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
+ mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
return 0;
err_core_port_init:
@@ -2218,10 +2376,12 @@ err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+ kfree(mlxsw_sp_port->hw_stats.cache);
+err_alloc_hw_stats:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2238,6 +2398,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
+ cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
@@ -2247,6 +2408,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
+ kfree(mlxsw_sp_port->hw_stats.cache);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2725,7 +2887,9 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
+ int err;
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
@@ -2736,7 +2900,26 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+ if (err)
+ return err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
+ return -EIO;
+
+ mlxsw_sp->lags = kcalloc(resources->max_lag,
+ sizeof(struct mlxsw_sp_upper),
+ GFP_KERNEL);
+ if (!mlxsw_sp->lags)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -2820,6 +3003,7 @@ err_span_init:
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
@@ -2833,38 +3017,26 @@ err_rx_listener_register:
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- int i;
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
- WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = MLXSW_SP_LAG_MAX,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
.max_pgt = 0,
- .used_max_system_port = 1,
- .max_system_port = 64,
- .used_max_vlan_groups = 1,
- .max_vlan_groups = 127,
- .used_max_regions = 1,
- .max_regions = 400,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -2876,10 +3048,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
- .used_kvd_sizes = 1,
+ .used_kvd_split_data = 1,
+ .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
+ .kvd_hash_single_parts = 2,
+ .kvd_hash_double_parts = 1,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
- .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
- .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -2996,13 +3169,15 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
if (!mlxsw_sp->rifs[i])
return i;
- return MLXSW_SP_RIF_MAX;
+ return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
@@ -3082,7 +3257,7 @@ mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
@@ -3314,7 +3489,7 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
- if (rif == MLXSW_SP_RIF_MAX)
+ if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
@@ -3521,12 +3696,14 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
+ struct mlxsw_resources *resources;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3732,11 +3909,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
int i;
- for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3770,9 +3949,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
@@ -4465,18 +4646,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
.priority = 10, /* Must be called before FIB notifier block */
};
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+ register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
return 0;
err_core_driver_register:
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
return err;
}
@@ -4484,6 +4673,7 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 01537d3a1c48..73cae211a5ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -54,10 +54,7 @@
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
-#define MLXSW_SP_RIF_MAX 800
-
-#define MLXSW_SP_LAG_MAX 64
-#define MLXSW_SP_PORT_PER_LAG_MAX 16
+#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -67,8 +64,6 @@
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
-#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
-
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -77,8 +72,7 @@
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
-#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
-#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+#define MLXSW_SP_KVD_GRANULARITY 128
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
@@ -253,7 +247,7 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
- struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct {
struct delayed_work dw;
@@ -275,7 +269,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
struct list_head fids; /* VLAN-aware bridge FIDs */
- struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
+ struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -290,7 +284,7 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
- struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
+ struct mlxsw_sp_upper *lags;
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
@@ -341,7 +335,8 @@ struct mlxsw_sp_port {
} vport;
struct {
u8 tx_pause:1,
- rx_pause:1;
+ rx_pause:1,
+ autoneg:1;
} link;
struct {
struct ieee_ets *ets;
@@ -360,6 +355,11 @@ struct mlxsw_sp_port {
struct list_head vports_list;
/* TC handles */
struct list_head mall_tc_list;
+ struct {
+ #define MLXSW_HW_STATS_UPDATE_TIME HZ
+ struct rtnl_link_stats64 *cache;
+ struct delayed_work update_dw;
+ } hw_stats;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
@@ -477,9 +477,12 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+
+ for (i = 0; i < resources->max_rif; i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
@@ -590,6 +593,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 953b214f38d0..bcaed8a38037 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -595,9 +595,9 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
- pool_info->pool_type = dir;
+ pool_info->pool_type = (enum devlink_sb_pool_type) dir;
pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
- pool_info->threshold_type = pr->mode;
+ pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
return 0;
}
@@ -608,9 +608,10 @@ int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u8 pool = pool_get(pool_index);
enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
- enum mlxsw_reg_sbpr_mode mode = threshold_type;
u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+ enum mlxsw_reg_sbpr_mode mode;
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
@@ -696,13 +697,13 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
cm->max_buff);
- *p_pool_index = pool_index_get(cm->pool, pool_type);
+ *p_pool_index = pool_index_get(cm->pool, dir);
return 0;
}
@@ -716,7 +717,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
@@ -943,7 +944,7 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
- enum mlxsw_reg_sbxx_dir dir = pool_type;
+ enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
pg_buff, dir);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 917ddd1e422f..cc653ac2d7d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
}
struct mlxsw_sp_fib_key {
+ struct net_device *dev;
unsigned char addr[sizeof(struct in6_addr)];
unsigned char prefix_len;
};
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
- u8 added:1;
+ unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
- size_t addr_len, unsigned char prefix_len)
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
{
struct mlxsw_sp_fib_entry *fib_entry;
fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
if (!fib_entry)
return NULL;
+ fib_entry->key.dev = dev;
memcpy(fib_entry->key.addr, addr, addr_len);
fib_entry->key.prefix_len = prefix_len;
return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
- size_t addr_len, unsigned char prefix_len)
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
{
- struct mlxsw_sp_fib_key key = {{ 0 } };
+ struct mlxsw_sp_fib_key key;
+ memset(&key, 0, sizeof(key));
+ key.dev = dev;
memcpy(key.addr, addr, addr_len);
key.prefix_len = prefix_len;
return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -246,7 +252,9 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -255,7 +263,9 @@ static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
{
char ralta_pl[MLXSW_REG_RALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ mlxsw_reg_ralta_pack(ralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
@@ -362,10 +372,12 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -378,7 +390,9 @@ static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
{
char raltb_pl[MLXSW_REG_RALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto,
+ vr->lpm_tree->id);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -388,7 +402,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
char raltb_pl[MLXSW_REG_RALTB_LEN];
/* Bind to tree 0, which is the default */
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, 0);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
@@ -404,11 +419,14 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -542,15 +560,33 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
&vr->fib->prefix_usage);
}
-static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_virtual_routers_valid)
+ return -EIO;
+
+ mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
+ sizeof(struct mlxsw_sp_vr),
+ GFP_KERNEL);
+ if (!mlxsw_sp->router.vrs)
+ return -ENOMEM;
+
+ for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
+
+ return 0;
+}
+
+static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ kfree(mlxsw_sp->router.vrs);
}
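
The vrs_init()/vrs_fini() pair above switches from a compile-time MLXSW_SP_VIRTUAL_ROUTER_MAX array to one sized by the limit the device reports, refusing to initialize when that limit is not valid. A minimal userspace sketch of the validate-then-allocate shape (struct resources, vrs_init() and friends here are stand-ins, not the driver's API):

	#include <errno.h>
	#include <stdlib.h>

	struct resources {
		unsigned int max_virtual_routers;
		int max_virtual_routers_valid;
	};

	struct vr {
		unsigned int id;
		int used;
	};

	static int vrs_init(const struct resources *res, struct vr **vrs_out)
	{
		struct vr *vrs;
		unsigned int i;

		if (!res->max_virtual_routers_valid)	/* device did not report a limit */
			return -EIO;

		vrs = calloc(res->max_virtual_routers, sizeof(*vrs));
		if (!vrs)
			return -ENOMEM;

		for (i = 0; i < res->max_virtual_routers; i++)
			vrs[i].id = i;

		*vrs_out = vrs;
		return 0;
	}

	static void vrs_fini(struct vr *vrs)
	{
		free(vrs);
	}

	int main(void)
	{
		struct resources res = { .max_virtual_routers = 8,
					 .max_virtual_routers_valid = 1 };
		struct vr *vrs;

		if (vrs_init(&res, &vrs) == 0)
			vrs_fini(vrs);
		return 0;
	}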
struct mlxsw_sp_neigh_key {
@@ -938,8 +974,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
mlxsw_sp_port_dev_put(mlxsw_sp_port);
}
-static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1045,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
- .notifier_call = mlxsw_sp_router_netevent_event,
-};
-
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
@@ -1027,10 +1059,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
*/
mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
- err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
/* Create the delayed works for the activity_update */
INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1067,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
return 0;
-
-err_register_netevent_notifier:
- rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
- return err;
}
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
- unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
@@ -1088,9 +1111,10 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
{
char raleu_pl[MLXSW_REG_RALEU_LEN];
- mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
- adj_index, ecmp_size,
- new_adj_index, new_ecmp_size);
+ mlxsw_reg_raleu_pack(raleu_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, vr->id,
+ adj_index, ecmp_size, new_adj_index,
+ new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -1498,19 +1522,46 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int err;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_rif_valid)
+ return -EIO;
+
+ mlxsw_sp->rifs = kcalloc(resources->max_rif,
+ sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
+ if (!mlxsw_sp->rifs)
+ return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
- mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+ if (err)
+ goto err_rgcr_fail;
+
+ return 0;
+
+err_rgcr_fail:
+ kfree(mlxsw_sp->rifs);
+ return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_resources *resources;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
+ int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_rif; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
+
+ kfree(mlxsw_sp->rifs);
}
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -1522,14 +1573,29 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
return err;
+
mlxsw_sp_lpm_init(mlxsw_sp);
- mlxsw_sp_vrs_init(mlxsw_sp);
- return mlxsw_sp_neigh_init(mlxsw_sp);
+ err = mlxsw_sp_vrs_init(mlxsw_sp);
+ if (err)
+ goto err_vrs_init;
+
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+
+ return 0;
+
+err_neigh_init:
+ mlxsw_sp_vrs_fini(mlxsw_sp);
+err_vrs_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
}
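
mlxsw_sp_router_init() now unwinds in reverse order through the usual kernel goto ladder: a failure at step N jumps to the label that undoes steps N-1 back to 1. A compact standalone illustration (the step/undo functions are fabricated, with step_b() failing on purpose):

	#include <errno.h>
	#include <stdio.h>

	static int step_a(void) { return 0; }
	static void undo_a(void) { }
	static int step_b(void) { return -ENOMEM; }	/* force the unwind path */

	static int init_all(void)
	{
		int err;

		err = step_a();
		if (err)
			return err;		/* nothing to undo yet */

		err = step_b();
		if (err)
			goto err_step_b;	/* undo step_a before returning */

		return 0;

	err_step_b:
		undo_a();
		return err;
	}

	int main(void)
	{
		printf("init_all() = %d\n", init_all());	/* -12 (ENOMEM) */
		return 0;
	}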
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_vrs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
}
@@ -1558,8 +1624,9 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
adjacency_index, ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,8 +1640,9 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_local_pack(ralue_pl,
MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
fib_entry->rif);
@@ -1589,8 +1657,9 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
u32 *p_dip = (u32 *) fib_entry->key.addr;
struct mlxsw_sp_vr *vr = fib_entry->vr;
- mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
- fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl,
+ (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
+ vr->id, fib_entry->key.prefix_len, *p_dip);
mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
@@ -1626,11 +1695,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- enum mlxsw_reg_ralue_op op;
-
- op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
- MLXSW_REG_RALUE_OP_WRITE_UPDATE;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1695,34 +1761,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
+ struct fib_info *fi = fib4->fi;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
- return PTR_ERR(vr);
+ return ERR_CAST(vr);
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
+ if (fib_entry) {
+ /* Already exists, just take a reference */
+ fib_entry->ref_count++;
+ return fib_entry;
+ }
fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len);
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
+ fib_entry->ref_count = 1;
err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
if (err)
goto err_fib4_entry_init;
+ return fib_entry;
+
+err_fib4_entry_init:
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ if (!vr)
+ return NULL;
+
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst), fib4->dst_len,
+ fib4->fi->fib_dev);
+}
+
+static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ if (--fib_entry->ref_count == 0) {
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ }
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
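
mlxsw_sp_fib_entry_get()/_put() above introduce reference counting so that several routes sharing the same key reuse one hardware entry: get() returns an existing entry with its ref_count bumped, or creates a fresh one at 1, and put() only destroys the entry on the last reference. A toy userspace sketch of the same pattern, assuming a simple linked-list table in place of the rhashtable (all names illustrative):

	#include <stdlib.h>

	struct entry {
		int key;
		unsigned int ref_count;
		struct entry *next;
	};

	static struct entry *table;	/* toy replacement for the rhashtable */

	static struct entry *entry_get(int key)
	{
		struct entry *e;

		for (e = table; e; e = e->next)
			if (e->key == key) {
				e->ref_count++;	/* already exists, take a reference */
				return e;
			}

		e = calloc(1, sizeof(*e));
		if (!e)
			return NULL;
		e->key = key;
		e->ref_count = 1;
		e->next = table;
		table = e;
		return e;
	}

	static void entry_put(struct entry *e)
	{
		struct entry **p;

		if (--e->ref_count)
			return;
		for (p = &table; *p; p = &(*p)->next)	/* unlink on last put */
			if (*p == e) {
				*p = e->next;
				break;
			}
		free(e);
	}

	int main(void)
	{
		struct entry *a = entry_get(42);
		struct entry *b = entry_get(42);	/* same key: ref_count becomes 2 */

		entry_put(b);	/* entry survives */
		entry_put(a);	/* last reference: entry is freed */
		return 0;
	}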
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+ if (IS_ERR(fib_entry))
+ return PTR_ERR(fib_entry);
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
err = -ENOMEM;
@@ -1736,11 +1861,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
err_alloc_info:
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-err_fib4_entry_init:
- mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return err;
}
@@ -1759,11 +1880,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
fib_entry = info->fib_entry;
kfree(info);
+ if (fib_entry->ref_count != 1)
+ return 0;
+
vr = fib_entry->vr;
- err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+ err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
if (err)
goto err_fib_entry_insert;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@@ -1771,9 +1895,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
err_fib_entry_add:
mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return err;
}
@@ -1793,23 +1915,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
- if (!vr) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
- return -ENOENT;
- }
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len);
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
}
- mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ if (fib_entry->ref_count == 1) {
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+ }
+
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0c3fbbc6b537..2b04b76b503e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool set,
- bool only_uc)
+ u16 idx_begin, u16 idx_end, bool uc_set,
+ bool bm_set)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, uc_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto buffer_out;
- /* Flooding control allows one to decide whether a given port will
- * flood unicast traffic for which there is no FDB entry.
- */
- if (only_uc)
- goto buffer_out;
-
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, bm_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto err_flood_bm_set;
- else
- goto buffer_out;
+
+ goto buffer_out;
err_flood_bm_set:
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, !set);
+ table_type, range, local_port, !uc_set);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
kfree(sftr_pl);
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
* the start of the vFIDs range.
*/
vfid = mlxsw_sp_fid_to_vfid(fid);
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
- false);
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
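
__mlxsw_sp_port_flood_set() now programs the UC and BM flood tables as a pair and, when the second SFTR write fails, rewrites the first table with the inverted setting so hardware is not left half-configured. A schematic of that write-then-roll-back shape (write_table() is a placeholder, not a real register op):

	#include <stdbool.h>

	static int write_table(int table, bool set)
	{
		(void)table; (void)set;
		return 0;	/* pretend the SFTR write succeeded */
	}

	static int flood_set(bool uc_set, bool bm_set)
	{
		int err;

		err = write_table(0 /* UC */, uc_set);
		if (err)
			return err;

		err = write_table(1 /* BM */, bm_set);
		if (err)
			write_table(0 /* UC */, !uc_set);	/* roll back first write */

		return err;
	}

	int main(void)
	{
		return flood_set(true, true);
	}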
static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -501,6 +494,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_fid *f;
+ if (test_bit(fid, mlxsw_sp_port->active_vlans))
+ return 0;
+
f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
if (!f) {
f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -558,7 +554,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
}
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
- true, false);
+ mlxsw_sp_port->uc_flood, true);
if (err)
goto err_port_flood_set;
@@ -1209,9 +1205,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_resources *resources;
int i;
- for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ for (i = 0; i < resources->max_ports_in_lag; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 377daa4d509c..8b15bf0c744d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1512,10 +1512,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
- .used_max_lag = 1,
- .max_lag = 64,
- .used_max_port_per_lag = 1,
- .max_port_per_lag = 16,
.used_max_mid = 1,
.max_mid = 7000,
.used_max_pgt = 1,
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index eb807b0dc72a..569ade6cf85c 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -134,7 +134,7 @@ static int lnksts = 0; /* CFG_LNKSTS bit polarity */
/* tunables */
#define RX_BUF_SIZE 1500 /* 8192 */
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define NS83820_VLAN_ACCEL_SUPPORT
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 68178819ff12..0efb2ba9a558 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -3,6 +3,13 @@ obj-$(CONFIG_NFP_NETVF) += nfp_netvf.o
nfp_netvf-objs := \
nfp_net_common.o \
nfp_net_ethtool.o \
+ nfp_net_offload.o \
nfp_netvf_main.o
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+nfp_netvf-objs += \
+ nfp_bpf_verifier.o \
+ nfp_bpf_jit.o
+endif
+
nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
new file mode 100644
index 000000000000..22484b6fd3e8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_ASM_H__
+#define __NFP_ASM_H__ 1
+
+#include "nfp_bpf.h"
+
+#define REG_NONE 0
+
+#define RE_REG_NO_DST 0x020
+#define RE_REG_IMM 0x020
+#define RE_REG_IMM_encode(x) \
+ (RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
+#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_XFR 0x080
+
+#define UR_REG_XFR 0x180
+#define UR_REG_NN 0x280
+#define UR_REG_NO_DST 0x300
+#define UR_REG_IMM UR_REG_NO_DST
+#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
+#define UR_REG_IMM_MAX 0x0ffULL
+
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
+
+#define nfp_is_br(_insn) \
+ (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
+
+enum br_mask {
+ BR_BEQ = 0x00,
+ BR_BNE = 0x01,
+ BR_BHS = 0x04,
+ BR_BLO = 0x05,
+ BR_BGE = 0x08,
+ BR_UNC = 0x18,
+};
+
+enum br_ev_pip {
+ BR_EV_PIP_UNCOND = 0,
+ BR_EV_PIP_COND = 1,
+};
+
+enum br_ctx_signal_state {
+ BR_CSS_NONE = 2,
+};
+
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+
+enum immed_width {
+ IMMED_WIDTH_ALL = 0,
+ IMMED_WIDTH_BYTE = 1,
+ IMMED_WIDTH_WORD = 2,
+};
+
+enum immed_shift {
+ IMMED_SHIFT_0B = 0,
+ IMMED_SHIFT_1B = 1,
+ IMMED_SHIFT_2B = 2,
+};
+
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+
+enum shf_op {
+ SHF_OP_NONE = 0,
+ SHF_OP_AND = 2,
+ SHF_OP_OR = 5,
+};
+
+enum shf_sc {
+ SHF_SC_R_ROT = 0,
+ SHF_SC_R_SHF = 1,
+ SHF_SC_L_SHF = 2,
+ SHF_SC_R_DSHF = 3,
+};
+
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+
+enum alu_op {
+ ALU_OP_NONE = 0x00,
+ ALU_OP_ADD = 0x01,
+ ALU_OP_NEG = 0x04,
+ ALU_OP_AND = 0x08,
+ ALU_OP_SUB_C = 0x0d,
+ ALU_OP_ADD_C = 0x11,
+ ALU_OP_OR = 0x14,
+ ALU_OP_SUB = 0x15,
+ ALU_OP_XOR = 0x18,
+};
+
+enum alu_dst_ab {
+ ALU_DST_A = 0,
+ ALU_DST_B = 1,
+};
+
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
+
+struct cmd_tgt_act {
+ u8 token;
+ u8 tgt_cmd;
+};
+
+enum cmd_tgt_map {
+ CMD_TGT_READ8,
+ CMD_TGT_WRITE8,
+ CMD_TGT_READ_LE,
+ CMD_TGT_READ_SWAP_LE,
+ __CMD_TGT_MAP_SIZE,
+};
+
+enum cmd_mode {
+ CMD_MODE_40b_AB = 0,
+ CMD_MODE_40b_BA = 1,
+ CMD_MODE_32b = 4,
+};
+
+enum cmd_ctx_swap {
+ CMD_CTX_SWAP = 0,
+ CMD_CTX_NO_SWAP = 3,
+};
+
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+
+enum lcsr_wr_src {
+ LCSR_WR_AREG,
+ LCSR_WR_BREG,
+ LCSR_WR_IMM,
+};
+
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf.h b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
new file mode 100644
index 000000000000..fc220cd04115
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NFP_BPF_H__
+#define __NFP_BPF_H__ 1
+
+#include <linux/bitfield.h>
+#include <linux/bpf.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#define FIELD_FIT(mask, val) (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
+
+/* For branch fixup logic use up-most byte of branch instruction as scratch
+ * area. Remember to clear this before sending instructions to HW!
+ */
+#define OP_BR_SPECIAL 0xff00000000000000ULL
+
+enum br_special {
+ OP_BR_NORMAL = 0,
+ OP_BR_GO_OUT,
+ OP_BR_GO_ABORT,
+};
+
+enum static_regs {
+ STATIC_REG_PKT = 1,
+#define REG_PKT_BANK ALU_DST_A
+ STATIC_REG_IMM = 2, /* Bank AB */
+};
+
+enum nfp_bpf_action_type {
+ NN_ACT_TC_DROP,
+ NN_ACT_TC_REDIR,
+ NN_ACT_DIRECT,
+};
+
+/* Software register representation, hardware encoding in asm.h */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+};
+
+#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
+
+#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
+#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
+#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
+#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
+#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
+#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
+#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+
+#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
+#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
+#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+
+#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAG_MARK 1
+#define NFP_BPF_ABI_MARK reg_nnr(1)
+#define NFP_BPF_ABI_PKT reg_nnr(2)
+#define NFP_BPF_ABI_LEN reg_nnr(3)
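
The reg_a()/reg_b()/reg_imm() helpers above pack a register type (GENMASK(31, 24)) and index (GENMASK(7, 0)) into one u32 via FIELD_PREP(). A tiny userspace equivalent, with hand-rolled FIELD_PREP()/FIELD_GET() stand-ins built on the GCC/Clang __builtin_ctz():

	#include <stdint.h>
	#include <stdio.h>

	#define NN_REG_TYPE_MASK  0xff000000u	/* GENMASK(31, 24) */
	#define NN_REG_VAL_MASK   0x000000ffu	/* GENMASK(7, 0) */

	static uint32_t field_prep(uint32_t mask, uint32_t val)
	{
		return (val << __builtin_ctz(mask)) & mask;	/* like FIELD_PREP() */
	}

	static uint32_t field_get(uint32_t mask, uint32_t reg)
	{
		return (reg & mask) >> __builtin_ctz(mask);	/* like FIELD_GET() */
	}

	int main(void)
	{
		/* reg_a(7): type NN_REG_GPR_A (BIT(0)), register index 7 */
		uint32_t swreg = 7 | field_prep(NN_REG_TYPE_MASK, 1);

		printf("type=%u val=%u\n",
		       field_get(NN_REG_TYPE_MASK, swreg),
		       field_get(NN_REG_VAL_MASK, swreg));	/* type=1 val=7 */
		return 0;
	}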
+
+struct nfp_prog;
+struct nfp_insn_meta;
+typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
+
+#define nfp_prog_first_meta(nfp_prog) \
+ list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_prog_last_meta(nfp_prog) \
+ list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
+#define nfp_meta_next(meta) list_next_entry(meta, l)
+#define nfp_meta_prev(meta) list_prev_entry(meta, l)
+
+/**
+ * struct nfp_insn_meta - BPF instruction wrapper
+ * @insn: BPF instruction
+ * @off: index of first generated machine instruction (in nfp_prog.prog)
+ * @n: eBPF instruction number
+ * @skip: skip this instruction (optimized out)
+ * @double_cb: callback for second part of the instruction
+ * @l: link on nfp_prog->insns list
+ */
+struct nfp_insn_meta {
+ struct bpf_insn insn;
+ unsigned int off;
+ unsigned short n;
+ bool skip;
+ instr_cb_t double_cb;
+
+ struct list_head l;
+};
+
+#define BPF_SIZE_MASK 0x18
+
+static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
+{
+ return BPF_CLASS(meta->insn.code);
+}
+
+static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
+{
+ return BPF_SRC(meta->insn.code);
+}
+
+static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
+{
+ return BPF_OP(meta->insn.code);
+}
+
+static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
+{
+ return BPF_MODE(meta->insn.code);
+}
+
+/**
+ * struct nfp_prog - nfp BPF program
+ * @prog: machine code
+ * @prog_len: number of valid instructions in @prog array
+ * @__prog_alloc_len: alloc size of @prog array
+ * @act: BPF program/action type (TC DA, TC with action, XDP etc.)
+ * @num_regs: number of registers used by this program
+ * @regs_per_thread: number of basic registers allocated per thread
+ * @start_off: address of the first instruction in the memory
+ * @tgt_out: jump target for normal exit
+ * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
+ * @tgt_done: jump target to get the next packet
+ * @n_translated: number of successfully translated instructions (for errors)
+ * @error: error code if something went wrong
+ * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
+ */
+struct nfp_prog {
+ u64 *prog;
+ unsigned int prog_len;
+ unsigned int __prog_alloc_len;
+
+ enum nfp_bpf_action_type act;
+
+ unsigned int num_regs;
+ unsigned int regs_per_thread;
+
+ unsigned int start_off;
+ unsigned int tgt_out;
+ unsigned int tgt_abort;
+ unsigned int tgt_done;
+
+ unsigned int n_translated;
+ int error;
+
+ struct list_head insns;
+};
+
+struct nfp_bpf_result {
+ unsigned int n_instr;
+ bool dense_mode;
+};
+
+#ifdef CONFIG_BPF_SYSCALL
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res);
+#else
+static inline int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
+
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
new file mode 100644
index 000000000000..3de819acd68c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_jit.c
@@ -0,0 +1,1811 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/kernel.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/pkt_cls.h>
+#include <linux/unistd.h>
+
+#include "nfp_asm.h"
+#include "nfp_bpf.h"
+
+/* --- NFP prog --- */
+/* Foreach "multiple" entries macros provide pos and next<n> pointers.
+ * It's safe to modify the next pointers (but not pos).
+ */
+#define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos))
+
+#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
+ for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
+ next = list_next_entry(pos, l), \
+ next2 = list_next_entry(next, l); \
+ &(nfp_prog)->insns != &pos->l && \
+ &(nfp_prog)->insns != &next->l && \
+ &(nfp_prog)->insns != &next2->l; \
+ pos = nfp_meta_next(pos), \
+ next = nfp_meta_next(pos), \
+ next2 = nfp_meta_next(next))
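
nfp_for_each_insn_walk2()/walk3() slide a window of two or three adjacent list entries, which later passes can use to pattern-match instruction sequences. A standalone array-based illustration of the same two-pointer window (the duplicate check is just for demonstration):

	#include <stdio.h>

	int main(void)
	{
		int insns[] = { 1, 2, 2, 3, 3, 3 };
		int n = sizeof(insns) / sizeof(insns[0]);
		int *pos, *next;

		/* pos/next always reference adjacent entries, as in the macro */
		for (pos = insns, next = insns + 1; next < insns + n;
		     pos++, next++)
			if (*pos == *next)
				printf("adjacent duplicate at offset %td\n",
				       pos - insns);
		return 0;
	}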
+
+static bool
+nfp_meta_has_next(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.next != &nfp_prog->insns;
+}
+
+static bool
+nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return meta->l.prev != &nfp_prog->insns;
+}
+
+static void nfp_prog_free(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *tmp;
+
+ list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
+ list_del(&meta->l);
+ kfree(meta);
+ }
+ kfree(nfp_prog);
+}
+
+static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
+{
+ if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) {
+ nfp_prog->error = -ENOSPC;
+ return;
+ }
+
+ nfp_prog->prog[nfp_prog->prog_len] = insn;
+ nfp_prog->prog_len++;
+}
+
+static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
+{
+ return nfp_prog->start_off + nfp_prog->prog_len;
+}
+
+static unsigned int
+nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
+{
+ return offset - nfp_prog->start_off;
+}
+
+/* --- SW reg --- */
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+};
+
+static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding %08x\n", swreg);
+ return 0;
+ }
+}
+
+static int
+swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ return 0;
+}
+
+static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = FIELD_GET(NN_REG_VAL, swreg);
+
+ switch (FIELD_GET(NN_REG_TYPE, swreg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ default:
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+ }
+}
+
+static int
+swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
+ bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
+ return -EFAULT;
+
+ if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
+ FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ return 0;
+}
+
+/* --- Emitters --- */
+static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8] = { 0x00, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static void
+__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
+{
+ enum cmd_ctx_swap ctx;
+ u64 insn;
+
+ if (sync)
+ ctx = CMD_CTX_SWAP;
+ else
+ ctx = CMD_CTX_NO_SWAP;
+
+ insn = FIELD_PREP(OP_CMD_A_SRC, areg) |
+ FIELD_PREP(OP_CMD_CTX, ctx) |
+ FIELD_PREP(OP_CMD_B_SRC, breg) |
+ FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
+ FIELD_PREP(OP_CMD_XFER, xfer) |
+ FIELD_PREP(OP_CMD_CNT, size) |
+ FIELD_PREP(OP_CMD_SIG, sync) |
+ FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
+ FIELD_PREP(OP_CMD_MODE, mode);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
+ u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+ if (reg.swap) {
+ pr_err("cmd can't swap arguments\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
+}
+
+static void
+__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
+ enum br_ctx_signal_state css, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BR_BASE |
+ FIELD_PREP(OP_BR_MASK, mask) |
+ FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
+ FIELD_PREP(OP_BR_CSS, css) |
+ FIELD_PREP(OP_BR_DEFBR, defer) |
+ FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void emit_br_def(struct nfp_prog *nfp_prog, u16 addr, u8 defer)
+{
+ if (defer > 2) {
+ pr_err("BUG: branch defer out of bounds %d\n", defer);
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+ __emit_br(nfp_prog, BR_UNC, BR_EV_PIP_UNCOND, BR_CSS_NONE, addr, defer);
+}
+
+static void
+emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
+{
+ __emit_br(nfp_prog, mask,
+ mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
+ BR_CSS_NONE, addr, defer);
+}
+
+static void
+__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
+ u8 byte, bool equal, u16 addr, u8 defer)
+{
+ u16 addr_lo, addr_hi;
+ u64 insn;
+
+ addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO));
+ addr_hi = addr != addr_lo;
+
+ insn = OP_BBYTE_BASE |
+ FIELD_PREP(OP_BB_A_SRC, areg) |
+ FIELD_PREP(OP_BB_BYTE, byte) |
+ FIELD_PREP(OP_BB_B_SRC, breg) |
+ FIELD_PREP(OP_BB_I8, imm8) |
+ FIELD_PREP(OP_BB_EQ, equal) |
+ FIELD_PREP(OP_BB_DEFBR, defer) |
+ FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
+ FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_br_byte_neq(struct nfp_prog *nfp_prog,
+ u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
+ defer);
+}
+
+static void
+__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
+ enum immed_width width, bool invert,
+ enum immed_shift shift, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_IMMED_BASE |
+ FIELD_PREP(OP_IMMED_A_SRC, areg) |
+ FIELD_PREP(OP_IMMED_B_SRC, breg) |
+ FIELD_PREP(OP_IMMED_IMM, imm_hi) |
+ FIELD_PREP(OP_IMMED_WIDTH, width) |
+ FIELD_PREP(OP_IMMED_INV, invert) |
+ FIELD_PREP(OP_IMMED_SHIFT, shift) |
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+ enum immed_width width, bool invert, enum immed_shift shift)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
+ invert, shift, reg.wr_both);
+}
+
+static void
+__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ enum shf_sc sc, u8 shift,
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+{
+ u64 insn;
+
+ if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
+ nfp_prog->error = -EFAULT;
+ return;
+ }
+
+ if (sc == SHF_SC_L_SHF)
+ shift = 32 - shift;
+
+ insn = OP_SHF_BASE |
+ FIELD_PREP(OP_SHF_A_SRC, areg) |
+ FIELD_PREP(OP_SHF_SC, sc) |
+ FIELD_PREP(OP_SHF_B_SRC, breg) |
+ FIELD_PREP(OP_SHF_I8, i8) |
+ FIELD_PREP(OP_SHF_SW, sw) |
+ FIELD_PREP(OP_SHF_DST, dst) |
+ FIELD_PREP(OP_SHF_SHIFT, shift) |
+ FIELD_PREP(OP_SHF_OP, op) |
+ FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
+ FIELD_PREP(OP_SHF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
+ enum shf_sc sc, u8 shift)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_ALU_BASE |
+ FIELD_PREP(OP_ALU_A_SRC, areg) |
+ FIELD_PREP(OP_ALU_B_SRC, breg) |
+ FIELD_PREP(OP_ALU_DST, dst) |
+ FIELD_PREP(OP_ALU_SW, swap) |
+ FIELD_PREP(OP_ALU_OP, op) |
+ FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
+ FIELD_PREP(OP_ALU_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+{
+ struct nfp_insn_ur_regs reg;
+ int err;
+
+ err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+}
+
+static void
+__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
+ u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
+ bool zero, bool swap, bool wr_both)
+{
+ u64 insn;
+
+ insn = OP_LDF_BASE |
+ FIELD_PREP(OP_LDF_A_SRC, areg) |
+ FIELD_PREP(OP_LDF_SC, sc) |
+ FIELD_PREP(OP_LDF_B_SRC, breg) |
+ FIELD_PREP(OP_LDF_I8, imm8) |
+ FIELD_PREP(OP_LDF_SW, swap) |
+ FIELD_PREP(OP_LDF_ZF, zero) |
+ FIELD_PREP(OP_LDF_BMASK, bmask) |
+ FIELD_PREP(OP_LDF_SHF, shift) |
+ FIELD_PREP(OP_LDF_WR_AB, wr_both);
+
+ nfp_prog_push(nfp_prog, insn);
+}
+
+static void
+emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
+ u32 dst, u8 bmask, u32 src, bool zero)
+{
+ struct nfp_insn_re_regs reg;
+ int err;
+
+ err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ if (err) {
+ nfp_prog->error = err;
+ return;
+ }
+
+ __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
+ reg.i8, zero, reg.swap, reg.wr_both);
+}
+
+static void
+emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+ enum shf_sc sc, u8 shift)
+{
+ emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
+}
+
+/* --- Wrappers --- */
+static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
+{
+ if (!(imm & 0xffff0000)) {
+ *val = imm;
+ *shift = IMMED_SHIFT_0B;
+ } else if (!(imm & 0xff0000ff)) {
+ *val = imm >> 8;
+ *shift = IMMED_SHIFT_1B;
+ } else if (!(imm & 0x0000ffff)) {
+ *val = imm >> 16;
+ *shift = IMMED_SHIFT_2B;
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+{
+ enum immed_shift shift;
+ u16 val;
+
+ if (pack_immed(imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
+ } else if (pack_immed(~imm, &val, &shift)) {
+ emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
+ } else {
+ emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
+ false, IMMED_SHIFT_0B);
+ emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
+ false, IMMED_SHIFT_2B);
+ }
+}
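
Worked examples may help here: pack_immed() looks for a 16-bit window at a byte shift of 0, 1 or 2 that covers every set bit, and wrp_immed() additionally tries the bitwise complement before falling back to a two-instruction load:

	imm = 0x00001234 -> val 0x1234, IMMED_SHIFT_0B
	imm = 0x00123400 -> val 0x1234, IMMED_SHIFT_1B
	imm = 0x12340000 -> val 0x1234, IMMED_SHIFT_2B
	imm = 0xffff1234 -> ~imm = 0x0000edcb packs, emitted with invert set
	imm = 0x12345678 -> no single window; two IMMED instructions are emitted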
+
+/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
+ * If @imm is small enough, encode it directly in the operand and return it;
+ * otherwise load @imm into a spare register and return its encoding.
+ */
+static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(UR_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+/* re_load_imm_any() - encode immediate or use tmp register (restricted)
+ * If @imm is small enough, encode it directly in the operand and return it;
+ * otherwise load @imm into a spare register and return its encoding.
+ */
+static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+{
+ if (FIELD_FIT(RE_REG_IMM_MAX, imm))
+ return reg_imm(imm);
+
+ wrp_immed(nfp_prog, tmp_reg, imm);
+ return tmp_reg;
+}
+
+static void
+wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask,
+ enum br_special special)
+{
+ emit_br(nfp_prog, mask, 0, 0);
+
+ nfp_prog->prog[nfp_prog->prog_len - 1] |=
+ FIELD_PREP(OP_BR_SPECIAL, special);
+}
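
wrp_br_special() parks the enum br_special tag in the otherwise unused top byte of the emitted branch word (OP_BR_SPECIAL); per the warning in nfp_bpf.h, a later fixup pass is expected to read the tag and clear the byte before the program reaches hardware. That pass is not part of this hunk, so the helpers below are only an illustration of the read-and-clear step:

	#include <stdint.h>

	#define OP_BR_SPECIAL 0xff00000000000000ULL

	/* read the parked tag, as FIELD_GET(OP_BR_SPECIAL, insn) would */
	static unsigned int br_get_special(uint64_t insn)
	{
		return insn >> 56;
	}

	/* clear the scratch byte -- required before sending instructions to HW */
	static uint64_t br_clear_special(uint64_t insn)
	{
		return insn & ~OP_BR_SPECIAL;
	}

	int main(void)
	{
		uint64_t insn = 0x0d800000020ULL | (2ULL << 56);	/* tagged branch */

		insn = br_clear_special(insn);
		return br_get_special(insn);	/* 0 again */
	}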
+
+static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
+{
+ emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src));
+}
+
+static int
+construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
+ u16 src, bool src_valid, u8 size)
+{
+ unsigned int i;
+ u16 shift, sz;
+ u32 tmp_reg;
+
+ /* We load the value from the address indicated in @offset and then
+ * shift out the data we don't need. Note: this is big endian!
+ */
+ sz = size < 4 ? 4 : size;
+ shift = size < 4 ? 4 - size : 0;
+
+ if (src_valid) {
+ /* Calculate the true offset (src_reg + imm) */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_both(nfp_prog),
+ reg_a(src), ALU_OP_ADD, tmp_reg);
+ /* Check packet length (size guaranteed to fit b/c it's u8) */
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
+ } else {
+ /* Check packet length */
+ tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
+ imm_a(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
+ wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
+ /* Load data */
+ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
+ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
+ pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
+ }
+
+ i = 0;
+ if (shift)
+ emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE,
+ reg_xfer(0), SHF_SC_R_SHF, shift * 8);
+ else
+ for (; i * 4 < size; i++)
+ emit_alu(nfp_prog, reg_both(i),
+ reg_none(), ALU_OP_NONE, reg_xfer(i));
+
+ if (i < 2)
+ wrp_immed(nfp_prog, reg_both(1), 0);
+
+ return 0;
+}
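
construct_data_ind_ld() guards every packet access by computing pkt_len - (offset + size) and branching to the abort target on borrow (BR_BLO), i.e. whenever the load would run past the packet. The same check in plain C (the no-wrap remark relies on offset being a u16 and size a u8, as in the function above):

	#include <stdbool.h>
	#include <stdint.h>

	static bool access_ok(uint32_t pkt_len, uint16_t offset, uint8_t size)
	{
		/* offset + size cannot wrap: 16-bit + 8-bit operands */
		return (uint32_t)offset + size <= pkt_len;
	}

	int main(void)
	{
		return !access_ok(64, 60, 4);	/* 60 + 4 <= 64: in bounds */
	}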
+
+static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
+{
+ return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
+}
+
+static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
+{
+ emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
+ reg_none(), ALU_OP_NONE, reg_b(src));
+ emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
+ NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
+
+ return 0;
+}
+
+static void
+wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
+{
+ u32 tmp_reg;
+
+ if (alu_op == ALU_OP_AND) {
+ if (!imm)
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_OR) {
+ if (!~imm)
+ wrp_immed(nfp_prog, reg_both(dst), ~0U);
+ if (!imm || !~imm)
+ return;
+ }
+ if (alu_op == ALU_OP_XOR) {
+ if (!~imm)
+ emit_alu(nfp_prog, reg_both(dst), reg_none(),
+ ALU_OP_NEG, reg_b(dst));
+ if (!imm || !~imm)
+ return;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
+}
+
+static int
+wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
+
+ return 0;
+}
+
+static int
+wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ emit_alu(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), alu_op, reg_b(src + 1));
+
+ return 0;
+}
+
+static int
+wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, bool skip)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (skip) {
+ meta->skip = true;
+ return 0;
+ }
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int
+wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op)
+{
+ u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
+
+ emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static void
+wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
+ enum br_mask br_mask, u16 off)
+{
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
+ emit_br(nfp_prog, br_mask, off, 0);
+}
+
+static int
+wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum alu_op alu_op, enum br_mask br_mask)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
+ insn->src_reg * 2, br_mask, insn->off);
+ wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
+ insn->src_reg * 2 + 1, br_mask, insn->off);
+
+ return 0;
+}
+
+static int
+wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u8 reg = insn->dst_reg * 2;
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ if (!swap)
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+ else
+ emit_alu(nfp_prog, reg_none(),
+ tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
+
+static int
+wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ enum br_mask br_mask, bool swap)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (swap) {
+ areg ^= breg;
+ breg ^= areg;
+ areg ^= breg;
+ }
+
+ emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
+ emit_br(nfp_prog, br_mask, insn->off, 0);
+
+ return 0;
+}
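
When wrp_cmp_reg() is asked for the swapped comparison (e.g. expressing "a < b" as "b > a"), it exchanges the two operand register numbers with the classic triple-XOR, avoiding a temporary; with two distinct variables this behaves as a plain swap:

	#include <assert.h>

	int main(void)
	{
		unsigned char a = 3, b = 9;

		a ^= b;		/* a = 3 ^ 9 */
		b ^= a;		/* b = 9 ^ (3 ^ 9) = 3 */
		a ^= b;		/* a = (3 ^ 9) ^ 3 = 9 */
		assert(a == 9 && b == 3);
		return 0;
	}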
+
+/* --- Callbacks --- */
+static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1);
+
+ return 0;
+}
+
+static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ u64 imm = meta->insn.imm; /* sign extend */
+
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
+ wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
+
+ return 0;
+}
+
+static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
+}
+
+static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_ADD,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
+
+ return 0;
+}
+
+static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_a(insn->dst_reg * 2), ALU_OP_SUB,
+ reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
+ reg_b(insn->src_reg * 2 + 1));
+
+ return 0;
+}
+
+static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
+ wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
+
+ return 0;
+}
+
+static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+
+ return 0;
+}
+
+static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->imm != 32)
+ return 1; /* TODO */
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
+static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
+}
+
+static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
+}
+
+static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
+}
+
+static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
+}
+
+static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
+}
+
+static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
+}
+
+static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
+}
+
+static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
+}
+
+static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
+}
+
+static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
+}
+
+static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (!insn->imm)
+ return 1; /* TODO: zero shift means indirect */
+
+ emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
+ reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
+ SHF_SC_L_SHF, insn->imm);
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+
+ return 0;
+}
+
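+/* BPF_LD | BPF_IMM | BPF_DW occupies two instruction slots: the first
+ * carries the low 32 bits of the immediate, the second the high 32 bits.
+ * imm_ld8() writes the low word and registers imm_ld8_part2() as the
+ * callback (double_cb) for the second slot.
+ */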
+static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1),
+ meta->insn.imm);
+
+ return 0;
+}
+
+static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ meta->double_cb = imm_ld8_part2;
+ wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
+
+ return 0;
+}
+
+static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 1);
+}
+
+static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 2);
+}
+
+static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ld(nfp_prog, meta->insn.imm, 4);
+}
+
+static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 1);
+}
+
+static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 2);
+}
+
+static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return construct_data_ind_ld(nfp_prog, meta->insn.imm,
+ meta->insn.src_reg * 2, true, 4);
+}
+
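+/* Loads and stores on the skb context: only skb->len reads (served from
+ * the NFP_BPF_ABI_LEN register) and skb->mark writes are supported.
+ */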
+static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, len))
+ emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
+ else
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off == offsetof(struct sk_buff, mark))
+ return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
+
+ return -ENOTSUPP;
+}
+
+static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ if (meta->insn.off < 0) /* TODO */
+ return -ENOTSUPP;
+ emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
+
+ return 0;
+}
+
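+/* Equality test against a 64-bit immediate: XOR each 32-bit half of the
+ * register pair with the corresponding immediate word (skipped when that
+ * word is zero), OR the two results and branch if the outcome is zero.
+ */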
+static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_a(nfp_prog),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ or1 = imm_a(nfp_prog);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ or2 = imm_b(nfp_prog);
+ }
+
+ emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ meta->skip = true;
+ return 0;
+ }
+
+ if (imm & ~0U) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ if (imm >> 32) {
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ }
+
+ return 0;
+}
+
+static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 imm = insn->imm; /* sign extend */
+ u32 tmp_reg;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ if (!imm) {
+ emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
+ ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+ return 0;
+ }
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ emit_alu(nfp_prog, reg_none(),
+ reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
+
+ return 0;
+}
+
+static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+
+ if (insn->off < 0) /* TODO */
+ return -ENOTSUPP;
+
+ emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2));
+ emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
+ ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
+ emit_alu(nfp_prog, reg_none(),
+ imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
+ emit_br(nfp_prog, BR_BEQ, insn->off, 0);
+
+ return 0;
+}
+
+static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false);
+}
+
+static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true);
+}
+
+static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
+}
+
+static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
+}
+
+static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ wrp_br_special(nfp_prog, BR_UNC, OP_BR_GO_OUT);
+
+ return 0;
+}
+
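+/* Translator callbacks, indexed directly by the BPF instruction opcode */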
+static const instr_cb_t instr_cb[256] = {
+ [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
+ [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
+ [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
+ [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
+ [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
+ [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
+ [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
+ [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
+ [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
+ [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
+ [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
+ [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
+ [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
+ [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
+ [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
+ [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
+ [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
+ [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
+ [BPF_ALU | BPF_AND | BPF_X] = and_reg,
+ [BPF_ALU | BPF_AND | BPF_K] = and_imm,
+ [BPF_ALU | BPF_OR | BPF_X] = or_reg,
+ [BPF_ALU | BPF_OR | BPF_K] = or_imm,
+ [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
+ [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
+ [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
+ [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
+ [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
+ [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
+ [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
+ [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
+ [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
+ [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
+ [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
+ [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
+ [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
+ [BPF_JMP | BPF_JA | BPF_K] = jump,
+ [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
+ [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm,
+ [BPF_JMP | BPF_JGE | BPF_K] = jge_imm,
+ [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
+ [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
+ [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
+ [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg,
+ [BPF_JMP | BPF_JGE | BPF_X] = jge_reg,
+ [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
+ [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
+ [BPF_JMP | BPF_EXIT] = goto_out,
+};
+
+/* --- Misc code --- */
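+/* Patch the branch target of an already emitted instruction.  The target
+ * is split into the LO address field plus a HI bit which is set whenever
+ * the offset does not fit into the LO field alone.
+ */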
+static void br_set_offset(u64 *instr, u16 offset)
+{
+ u16 addr_lo, addr_hi;
+
+ addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
+ addr_hi = offset != addr_lo;
+ *instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
+ *instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
+ *instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
+}
+
+/* --- Assembler logic --- */
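+/* Resolve branch targets: for each eBPF jump find the machine code offset
+ * of the target instruction and patch it into every branch of the
+ * translated block.  Special branches (go out/abort) are scattered around
+ * and fixed up separately at the end using tgt_out/tgt_abort.
+ */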
+static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta, *next;
+ u32 off, br_idx;
+ u32 idx;
+
+ nfp_for_each_insn_walk2(nfp_prog, meta, next) {
+ if (meta->skip)
+ continue;
+ if (BPF_CLASS(meta->insn.code) != BPF_JMP)
+ continue;
+
+ br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
+ if (!nfp_is_br(nfp_prog->prog[br_idx])) {
+ pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
+ br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
+ return -ELOOP;
+ }
+ /* Leave special branches for later */
+ if (FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]))
+ continue;
+
+ /* Find the target offset in assembler realm */
+ off = meta->insn.off;
+ if (!off) {
+ pr_err("Fixup found zero offset!!\n");
+ return -ELOOP;
+ }
+
+ while (off && nfp_meta_has_next(nfp_prog, next)) {
+ next = nfp_meta_next(next);
+ off--;
+ }
+ if (off) {
+ pr_err("Fixup found too large jump!! %d\n", off);
+ return -ELOOP;
+ }
+
+ if (next->skip) {
+ pr_err("Branch landing on removed instruction!!\n");
+ return -ELOOP;
+ }
+
+ for (idx = nfp_prog_offset_to_index(nfp_prog, meta->off);
+ idx <= br_idx; idx++) {
+ if (!nfp_is_br(nfp_prog->prog[idx]))
+ continue;
+ br_set_offset(&nfp_prog->prog[idx], next->off);
+ }
+ }
+
+ /* Fixup 'goto out's separately, they can be scattered around */
+ for (br_idx = 0; br_idx < nfp_prog->prog_len; br_idx++) {
+ enum br_special special;
+
+ if ((nfp_prog->prog[br_idx] & OP_BR_BASE_MASK) != OP_BR_BASE)
+ continue;
+
+ special = FIELD_GET(OP_BR_SPECIAL, nfp_prog->prog[br_idx]);
+ switch (special) {
+ case OP_BR_NORMAL:
+ break;
+ case OP_BR_GO_OUT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_out);
+ break;
+ case OP_BR_GO_ABORT:
+ br_set_offset(&nfp_prog->prog[br_idx],
+ nfp_prog->tgt_abort);
+ break;
+ }
+
+ nfp_prog->prog[br_idx] &= ~OP_BR_SPECIAL;
+ }
+
+ return 0;
+}
+
+static void nfp_intro(struct nfp_prog *nfp_prog)
+{
+ emit_alu(nfp_prog, pkt_reg(nfp_prog),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+}
+
+static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
+{
+ const u8 act2code[] = {
+ [NN_ACT_TC_DROP] = 0x22,
+ [NN_ACT_TC_REDIR] = 0x24
+ };
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+ wrp_immed(nfp_prog, reg_both(0), 0);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+ /* Legacy TC mode:
+ * 0 0x11 -> pass, count as stat0
+ * -1 drop 0x22 -> drop, count as stat1
+ * redir 0x24 -> redir, count as stat1
+ * ife mark 0x21 -> pass, count as stat1
+ * ife + tx 0x24 -> redir, count as stat1
+ */
+ emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]),
+ SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
+{
+ /* TC direct-action mode:
+ * 0,1 ok NOT SUPPORTED[1]
+ * 2 drop 0x22 -> drop, count as stat1
+ * 4,5 nuke 0x02 -> drop
+ * 7 redir 0x44 -> redir, count as stat2
+ * * unspec 0x11 -> pass, count as stat0
+ *
+ * [1] We can't support OK and RECLASSIFY because we can't tell TC
+ * the exact decision made. We are forced to support UNSPEC
+ * to handle aborts so that's the only one we handle for passing
+ * packets up the stack.
+ */
+ /* Target for aborts */
+ nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
+
+ /* Target for normal exits */
+ nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
+
+ /* if R0 > 7 jump to abort */
+ emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
+ emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
+ emit_alu(nfp_prog, reg_a(0),
+ reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
+
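+ /* Nibble-indexed lookup: for return value R0 in 0-7, nibble R0 of
+ * 0x41221211 and of 0x41001211 are combined into the result code
+ * byte, e.g. 0 -> 0x11 (pass), 2 -> 0x22 (drop), 7 -> 0x44 (redir).
+ */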
+ wrp_immed(nfp_prog, reg_b(2), 0x41221211);
+ wrp_immed(nfp_prog, reg_b(3), 0x41001211);
+
+ emit_shf(nfp_prog, reg_a(1),
+ reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_a(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
+
+ emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_b(2),
+ reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
+
+ emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
+
+ emit_shf(nfp_prog, reg_b(2),
+ reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
+ emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
+}
+
+static void nfp_outro(struct nfp_prog *nfp_prog)
+{
+ switch (nfp_prog->act) {
+ case NN_ACT_DIRECT:
+ nfp_outro_tc_da(nfp_prog);
+ break;
+ case NN_ACT_TC_DROP:
+ case NN_ACT_TC_REDIR:
+ nfp_outro_tc_legacy(nfp_prog);
+ break;
+ }
+}
+
+static int nfp_translate(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+ int err;
+
+ nfp_intro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ instr_cb_t cb = instr_cb[meta->insn.code];
+
+ meta->off = nfp_prog_current_offset(nfp_prog);
+
+ if (meta->skip) {
+ nfp_prog->n_translated++;
+ continue;
+ }
+
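+ /* A previous instruction may have claimed this slot as the second
+ * half of a two-slot instruction (see imm_ld8()).
+ */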
+ if (nfp_meta_has_prev(nfp_prog, meta) &&
+ nfp_meta_prev(meta)->double_cb)
+ cb = nfp_meta_prev(meta)->double_cb;
+ if (!cb)
+ return -ENOENT;
+ err = cb(nfp_prog, meta);
+ if (err)
+ return err;
+
+ nfp_prog->n_translated++;
+ }
+
+ nfp_outro(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
+ return nfp_fixup_branches(nfp_prog);
+}
+
+static int
+nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
+ unsigned int cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct nfp_insn_meta *meta;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ meta->insn = prog[i];
+ meta->n = i;
+
+ list_add_tail(&meta->l, &nfp_prog->insns);
+ }
+
+ return 0;
+}
+
+/* --- Optimizations --- */
+static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ struct bpf_insn insn = meta->insn;
+
+ /* Programs converted from cBPF start with register xoring */
+ if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
+ insn.src_reg == insn.dst_reg)
+ continue;
+
+ /* Programs start with R6 = R1 but we ignore the skb pointer */
+ if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
+ insn.src_reg == 1 && insn.dst_reg == 6)
+ meta->skip = true;
+
+ /* Return as soon as something doesn't match */
+ if (!meta->skip)
+ return;
+ }
+}
+
+/* Try to rename registers so that program uses only low ones */
+static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
+{
+ bool reg_used[MAX_BPF_REG] = {};
+ u8 tgt_reg[MAX_BPF_REG] = {};
+ struct nfp_insn_meta *meta;
+ unsigned int i, j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ if (meta->skip)
+ continue;
+
+ reg_used[meta->insn.src_reg] = true;
+ reg_used[meta->insn.dst_reg] = true;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
+ if (!reg_used[i])
+ continue;
+
+ tgt_reg[i] = j++;
+ }
+ nfp_prog->num_regs = j;
+
+ list_for_each_entry(meta, &nfp_prog->insns, l) {
+ meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
+ meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
+ }
+
+ return 0;
+}
+
+/* Remove masking after load since our load guarantees this is not needed */
+static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2;
+ const s32 exp_mask[] = {
+ [BPF_B] = 0x000000ffU,
+ [BPF_H] = 0x0000ffffU,
+ [BPF_W] = 0xffffffffU,
+ };
+
+ nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
+ struct bpf_insn insn, next;
+
+ insn = meta1->insn;
+ next = meta2->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+
+ if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
+ continue;
+
+ if (!exp_mask[BPF_SIZE(insn.code)])
+ continue;
+ if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
+ continue;
+
+ if (next.src_reg || next.dst_reg)
+ continue;
+
+ meta2->skip = true;
+ }
+}
+
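+/* Remove shift-by-32 pairs after a 32-bit load - like the masking above
+ * they only sanitize the upper word, which our load already guarantees.
+ */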
+static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
+{
+ struct nfp_insn_meta *meta1, *meta2, *meta3;
+
+ nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
+ struct bpf_insn insn, next1, next2;
+
+ insn = meta1->insn;
+ next1 = meta2->insn;
+ next2 = meta3->insn;
+
+ if (BPF_CLASS(insn.code) != BPF_LD)
+ continue;
+ if (BPF_MODE(insn.code) != BPF_ABS &&
+ BPF_MODE(insn.code) != BPF_IND)
+ continue;
+ if (BPF_SIZE(insn.code) != BPF_W)
+ continue;
+
+ if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
+ !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
+ next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
+ continue;
+
+ if (next1.src_reg || next1.dst_reg ||
+ next2.src_reg || next2.dst_reg)
+ continue;
+
+ if (next1.imm != 0x20 || next2.imm != 0x20)
+ continue;
+
+ meta2->skip = true;
+ meta3->skip = true;
+ }
+}
+
+static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
+{
+ int ret;
+
+ nfp_bpf_opt_reg_init(nfp_prog);
+
+ ret = nfp_bpf_opt_reg_rename(nfp_prog);
+ if (ret)
+ return ret;
+
+ nfp_bpf_opt_ld_mask(nfp_prog);
+ nfp_bpf_opt_ld_shift(nfp_prog);
+
+ return 0;
+}
+
+/**
+ * nfp_bpf_jit() - translate BPF code into NFP assembly
+ * @filter: kernel BPF filter struct
+ * @prog_mem: memory to store assembler instructions
+ * @act: action attached to this eBPF program
+ * @prog_start: offset of the first instruction when loaded
+ * @prog_done: where to jump on exit
+ * @prog_sz: size of @prog_mem in instructions
+ * @res: achieved parameters of translation results
+ */
+int
+nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
+ enum nfp_bpf_action_type act,
+ unsigned int prog_start, unsigned int prog_done,
+ unsigned int prog_sz, struct nfp_bpf_result *res)
+{
+ struct nfp_prog *nfp_prog;
+ int ret;
+
+ nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
+ if (!nfp_prog)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&nfp_prog->insns);
+ nfp_prog->act = act;
+ nfp_prog->start_off = prog_start;
+ nfp_prog->tgt_done = prog_done;
+
+ ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
+ if (ret)
+ goto out;
+
+ ret = nfp_prog_verify(nfp_prog, filter);
+ if (ret)
+ goto out;
+
+ ret = nfp_bpf_optimize(nfp_prog);
+ if (ret)
+ goto out;
+
+ if (nfp_prog->num_regs <= 7)
+ nfp_prog->regs_per_thread = 16;
+ else
+ nfp_prog->regs_per_thread = 32;
+
+ nfp_prog->prog = prog_mem;
+ nfp_prog->__prog_alloc_len = prog_sz;
+
+ ret = nfp_translate(nfp_prog);
+ if (ret) {
+ pr_err("Translation failed with error %d (translated: %u)\n",
+ ret, nfp_prog->n_translated);
+ ret = -EINVAL;
+ }
+
+ res->n_instr = nfp_prog->prog_len;
+ res->dense_mode = nfp_prog->num_regs <= 7;
+out:
+ nfp_prog_free(nfp_prog);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
new file mode 100644
index 000000000000..144cae87f63a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_bpf_verifier.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "NFP net bpf: " fmt
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/kernel.h>
+#include <linux/pkt_cls.h>
+
+#include "nfp_bpf.h"
+
+/* Analyzer/verifier definitions */
+struct nfp_bpf_analyzer_priv {
+ struct nfp_prog *prog;
+ struct nfp_insn_meta *meta;
+};
+
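+/* Find the meta for instruction insn_idx by walking the doubly-linked
+ * list from the nearest known position: the previously visited node, the
+ * head or the tail of the program.
+ */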
+static struct nfp_insn_meta *
+nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ unsigned int insn_idx, unsigned int n_insns)
+{
+ unsigned int forward, backward, i;
+
+ backward = meta->n - insn_idx;
+ forward = insn_idx - meta->n;
+
+ if (min(forward, backward) > n_insns - insn_idx - 1) {
+ backward = n_insns - insn_idx - 1;
+ meta = nfp_prog_last_meta(nfp_prog);
+ }
+ if (min(forward, backward) > insn_idx && backward > insn_idx) {
+ forward = insn_idx;
+ meta = nfp_prog_first_meta(nfp_prog);
+ }
+
+ if (forward < backward)
+ for (i = 0; i < forward; i++)
+ meta = nfp_meta_next(meta);
+ else
+ for (i = 0; i < backward; i++)
+ meta = nfp_meta_prev(meta);
+
+ return meta;
+}
+
+static int
+nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env)
+{
+ const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
+
+ if (reg0->type != CONST_IMM) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act != NN_ACT_DIRECT &&
+ reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm <= TC_ACT_REDIRECT &&
+ reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN &&
+ reg0->imm != TC_ACT_QUEUED && reg0->imm != TC_ACT_UNSPEC) {
+ pr_info("unsupported exit state: %d, imm: %llx\n",
+ reg0->type, reg0->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog,
+ const struct bpf_verifier_env *env, u8 reg)
+{
+ if (env->cur_state.regs[reg].type != PTR_TO_CTX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+{
+ struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
+ struct nfp_insn_meta *meta = priv->meta;
+
+ meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
+ priv->meta = meta;
+
+ if (meta->insn.src_reg == BPF_REG_10 ||
+ meta->insn.dst_reg == BPF_REG_10) {
+ pr_err("stack not yet supported\n");
+ return -EINVAL;
+ }
+ if (meta->insn.src_reg >= MAX_BPF_REG ||
+ meta->insn.dst_reg >= MAX_BPF_REG) {
+ pr_err("program uses extended registers - jit hardening?\n");
+ return -EINVAL;
+ }
+
+ if (meta->insn.code == (BPF_JMP | BPF_EXIT))
+ return nfp_bpf_check_exit(priv->prog, env);
+
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.src_reg);
+ if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
+ return nfp_bpf_check_ctx_ptr(priv->prog, env,
+ meta->insn.dst_reg);
+
+ return 0;
+}
+
+static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+ .insn_hook = nfp_verify_insn,
+};
+
+int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
+{
+ struct nfp_bpf_analyzer_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->prog = nfp_prog;
+ priv->meta = nfp_prog_first_meta(nfp_prog);
+
+ ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
+
+ kfree(priv);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 690635660195..ed824e11a1e3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -62,6 +62,9 @@
/* Max time to wait for NFP to respond on updates (in seconds) */
#define NFP_NET_POLL_TIMEOUT 5
+/* Interval for reading offloaded filter stats */
+#define NFP_NET_STAT_POLL_IVL msecs_to_jiffies(100)
+
/* Bar allocation */
#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
@@ -220,7 +223,7 @@ struct nfp_net_tx_ring {
#define PCIE_DESC_RX_I_TCP_CSUM_OK cpu_to_le16(BIT(11))
#define PCIE_DESC_RX_I_UDP_CSUM cpu_to_le16(BIT(10))
#define PCIE_DESC_RX_I_UDP_CSUM_OK cpu_to_le16(BIT(9))
-#define PCIE_DESC_RX_SPARE cpu_to_le16(BIT(8))
+#define PCIE_DESC_RX_BPF cpu_to_le16(BIT(8))
#define PCIE_DESC_RX_EOP cpu_to_le16(BIT(7))
#define PCIE_DESC_RX_IP4_CSUM cpu_to_le16(BIT(6))
#define PCIE_DESC_RX_IP4_CSUM_OK cpu_to_le16(BIT(5))
@@ -266,6 +269,8 @@ struct nfp_net_rx_desc {
};
};
+#define NFP_NET_META_FIELD_MASK GENMASK(NFP_NET_META_FIELD_SIZE - 1, 0)
+
struct nfp_net_rx_hash {
__be32 hash_type;
__be32 hash;
@@ -405,6 +410,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
fw_ver->minor == minor;
}
+struct nfp_stat_pair {
+ u64 pkts;
+ u64 bytes;
+};
+
/**
* struct nfp_net - NFP network device structure
* @pdev: Backpointer to PCI device
@@ -413,6 +423,7 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @is_vf: Is the driver attached to a VF?
* @is_nfp3200: Is the driver for a NFP-3200 card?
* @fw_loaded: Is the firmware loaded?
+ * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
@@ -427,6 +438,11 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @rss_cfg: RSS configuration
* @rss_key: RSS secret key
* @rss_itbl: RSS indirection table
+ * @rx_filter: Filter offload statistics - dropped packets/bytes
+ * @rx_filter_prev: Filter offload statistics - values from previous update
+ * @rx_filter_change: Jiffies when statistics last changed
+ * @rx_filter_stats_timer: Timer for polling filter offload statistics
+ * @rx_filter_lock: Lock protecting timer state changes (teardown)
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings
@@ -473,6 +489,7 @@ struct nfp_net {
unsigned is_vf:1;
unsigned is_nfp3200:1;
unsigned fw_loaded:1;
+ unsigned bpf_offload_skip_sw:1;
u32 ctrl;
u32 fl_bufsz;
@@ -502,6 +519,11 @@ struct nfp_net {
u8 rss_key[NFP_NET_CFG_RSS_KEY_SZ];
u8 rss_itbl[NFP_NET_CFG_RSS_ITBL_SZ];
+ struct nfp_stat_pair rx_filter, rx_filter_prev;
+ unsigned long rx_filter_change;
+ struct timer_list rx_filter_stats_timer;
+ spinlock_t rx_filter_lock;
+
int max_tx_rings;
int max_rx_rings;
@@ -561,12 +583,28 @@ struct nfp_net {
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
+static inline u8 nn_readb(struct nfp_net *nn, int off)
+{
+ return readb(nn->ctrl_bar + off);
+}
+
static inline void nn_writeb(struct nfp_net *nn, int off, u8 val)
{
writeb(val, nn->ctrl_bar + off);
}
-/* NFP-3200 can't handle 16-bit accesses too well - hence no readw/writew */
+/* NFP-3200 can't handle 16-bit accesses too well */
+static inline u16 nn_readw(struct nfp_net *nn, int off)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ return readw(nn->ctrl_bar + off);
+}
+
+static inline void nn_writew(struct nfp_net *nn, int off, u16 val)
+{
+ WARN_ON_ONCE(nn->is_nfp3200);
+ writew(val, nn->ctrl_bar + off);
+}
static inline u32 nn_readl(struct nfp_net *nn, int off)
{
@@ -757,4 +795,9 @@ static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
}
#endif /* CONFIG_NFP_NET_DEBUG */
+void nfp_net_filter_stats_timer(unsigned long data);
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf);
+
#endif /* _NFP_NET_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 88678c172b19..aee3fd2b6538 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,7 +41,6 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -61,6 +60,7 @@
#include <linux/ktime.h>
+#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include "nfp_net_ctrl.h"
@@ -1293,38 +1293,72 @@ static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
}
}
-/**
- * nfp_net_set_hash() - Set SKB hash data
- * @netdev: adapter's net_device structure
- * @skb: SKB to set the hash data on
- * @rxd: RX descriptor
- *
- * The RSS hash and hash-type are pre-pended to the packet data.
- * Extract and decode it and set the skb fields.
- */
static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
- struct nfp_net_rx_desc *rxd)
+ unsigned int type, __be32 *hash)
{
- struct nfp_net_rx_hash *rx_hash;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS) ||
- !(netdev->features & NETIF_F_RXHASH))
+ if (!(netdev->features & NETIF_F_RXHASH))
return;
- rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
-
- switch (be32_to_cpu(rx_hash->hash_type)) {
+ switch (type) {
case NFP_NET_RSS_IPV4:
case NFP_NET_RSS_IPV6:
case NFP_NET_RSS_IPV6_EX:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L3);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
break;
default:
- skb_set_hash(skb, be32_to_cpu(rx_hash->hash), PKT_HASH_TYPE_L4);
+ skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
break;
}
}
+static void
+nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
+ struct nfp_net_rx_desc *rxd)
+{
+ struct nfp_net_rx_hash *rx_hash;
+
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
+
+ nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
+ &rx_hash->hash);
+}
+
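+/* Chained metadata prepend: the first 32-bit word packs a sequence of
+ * 4-bit field type tags, consumed starting from the least significant
+ * bits, and each field's 32-bit value follows in the buffer.  Returns a
+ * pointer just past the metadata, or NULL on an unknown field type.
+ */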
+static void *
+nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
+ int meta_len)
+{
+ u8 *data = skb->data - meta_len;
+ u32 meta_info;
+
+ meta_info = get_unaligned_be32(data);
+ data += 4;
+
+ while (meta_info) {
+ switch (meta_info & NFP_NET_META_FIELD_MASK) {
+ case NFP_NET_META_HASH:
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ nfp_net_set_hash(netdev, skb,
+ meta_info & NFP_NET_META_FIELD_MASK,
+ (__be32 *)data);
+ data += 4;
+ break;
+ case NFP_NET_META_MARK:
+ skb->mark = get_unaligned_be32(data);
+ data += 4;
+ break;
+ default:
+ return NULL;
+ }
+
+ meta_info >>= NFP_NET_META_FIELD_SIZE;
+ }
+
+ return data;
+}
+
/**
* nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from
@@ -1439,18 +1473,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_reserve(skb, nn->rx_offset);
skb_put(skb, data_len - meta_len);
- nfp_net_set_hash(nn->netdev, skb, rxd);
-
- /* Pad small frames to minimum */
- if (skb_put_padto(skb, 60))
- break;
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
r_vec->rx_bytes += skb->len;
u64_stats_update_end(&r_vec->rx_sync);
+ if (nn->fw_ver.major <= 3) {
+ nfp_net_set_hash_desc(nn->netdev, skb, rxd);
+ } else if (meta_len) {
+ void *end;
+
+ end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
+ if (unlikely(end != skb->data)) {
+ u64_stats_update_begin(&r_vec->rx_sync);
+ r_vec->rx_drops++;
+ u64_stats_update_end(&r_vec->rx_sync);
+
+ dev_kfree_skb_any(skb);
+ nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
+ continue;
+ }
+ }
+
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, nn->netdev);
@@ -2049,12 +2094,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
- if (!nn->rx_rings)
+ if (!nn->rx_rings) {
+ err = -ENOMEM;
goto err_free_lsc;
+ }
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
- if (!nn->tx_rings)
+ if (!nn->tx_rings) {
+ err = -ENOMEM;
goto err_free_rx_rings;
+ }
for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
@@ -2387,6 +2436,31 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
return stats;
}
+static bool nfp_net_ebpf_capable(struct nfp_net *nn)
+{
+ if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
+ nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
+ return true;
+ return false;
+}
+
+static int
+nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ struct tc_to_netdev *tc)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -ENOTSUPP;
+ if (proto != htons(ETH_P_ALL))
+ return -ENOTSUPP;
+
+ if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
+ return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
+
+ return -EINVAL;
+}
+
static int nfp_net_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2441,6 +2515,11 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
+ if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
+ nn_err(nn, "Cannot disable HW TC offload while in use\n");
+ return -EBUSY;
+ }
+
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
@@ -2590,6 +2669,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_stop = nfp_net_netdev_close,
.ndo_start_xmit = nfp_net_tx,
.ndo_get_stats64 = nfp_net_stat64,
+ .ndo_setup_tc = nfp_net_setup_tc,
.ndo_tx_timeout = nfp_net_tx_timeout,
.ndo_set_rx_mode = nfp_net_set_rx_mode,
.ndo_change_mtu = nfp_net_change_mtu,
@@ -2615,7 +2695,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -2632,7 +2712,8 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
- nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "");
+ nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
+ nfp_net_ebpf_capable(nn) ? "BPF " : "");
}
/**
@@ -2675,10 +2756,13 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
spin_lock_init(&nn->reconfig_lock);
+ spin_lock_init(&nn->rx_filter_lock);
spin_lock_init(&nn->link_status_lock);
setup_timer(&nn->reconfig_timer,
nfp_net_reconfig_timer, (unsigned long)nn);
+ setup_timer(&nn->rx_filter_stats_timer,
+ nfp_net_filter_stats_timer, (unsigned long)nn);
return nn;
}
@@ -2800,6 +2884,9 @@ int nfp_net_netdev_init(struct net_device *netdev)
netdev->features = netdev->hw_features;
+ if (nfp_net_ebpf_capable(nn))
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* Advertise but disable TSO by default. */
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index ad6c4e31cedd..93b10b441acb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -66,6 +66,13 @@
#define NFP_NET_LSO_MAX_HDR_SZ 255
/**
+ * Prepend field types
+ */
+#define NFP_NET_META_FIELD_SIZE 4
+#define NFP_NET_META_HASH 1 /* next field carries hash type */
+#define NFP_NET_META_MARK 2
+
+/**
* Hash type pre-pended when a RSS hash was computed
*/
#define NFP_NET_RSS_NONE 0
@@ -123,6 +130,7 @@
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
+#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -134,6 +142,7 @@
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
@@ -196,10 +205,37 @@
#define NFP_NET_CFG_VXLAN_SZ 0x0008
/**
- * 64B reserved for future use (0x0080 - 0x00c0)
+ * NFP6000 - BPF section
+ * @NFP_NET_CFG_BPF_ABI: BPF ABI version
+ * @NFP_NET_CFG_BPF_CAP: BPF capabilities
+ * @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
+ * @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
+ * @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
+ * @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
+ * @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
+ * @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
+ * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
-#define NFP_NET_CFG_RESERVED 0x0080
-#define NFP_NET_CFG_RESERVED_SZ 0x0040
+#define NFP_NET_CFG_BPF_ABI 0x0080
+#define NFP_NET_BPF_ABI 1
+#define NFP_NET_CFG_BPF_CAP 0x0081
+#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
+#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
+#define NFP_NET_CFG_BPF_START 0x0084
+#define NFP_NET_CFG_BPF_DONE 0x0086
+#define NFP_NET_CFG_BPF_STACK_SZ 0x0088
+#define NFP_NET_CFG_BPF_INL_MTU 0x0089
+#define NFP_NET_CFG_BPF_SIZE 0x008e
+#define NFP_NET_CFG_BPF_ADDR 0x0090
+#define NFP_NET_CFG_BPF_CFG_8CTX (1 << 0) /* 8ctx mode */
+#define NFP_NET_CFG_BPF_CFG_MASK 7ULL
+#define NFP_NET_CFG_BPF_ADDR_MASK (~NFP_NET_CFG_BPF_CFG_MASK)
+
+/**
+ * 40B reserved for future use (0x0098 - 0x00c0)
+ */
+#define NFP_NET_CFG_RESERVED 0x0098
+#define NFP_NET_CFG_RESERVED_SZ 0x0028
/**
* RSS configuration (0x0100 - 0x01ac):
@@ -303,6 +339,15 @@
#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
+#define NFP_NET_CFG_STATS_APP0_FRAMES (NFP_NET_CFG_STATS_BASE + 0x90)
+#define NFP_NET_CFG_STATS_APP0_BYTES (NFP_NET_CFG_STATS_BASE + 0x98)
+#define NFP_NET_CFG_STATS_APP1_FRAMES (NFP_NET_CFG_STATS_BASE + 0xa0)
+#define NFP_NET_CFG_STATS_APP1_BYTES (NFP_NET_CFG_STATS_BASE + 0xa8)
+#define NFP_NET_CFG_STATS_APP2_FRAMES (NFP_NET_CFG_STATS_BASE + 0xb0)
+#define NFP_NET_CFG_STATS_APP2_BYTES (NFP_NET_CFG_STATS_BASE + 0xb8)
+#define NFP_NET_CFG_STATS_APP3_FRAMES (NFP_NET_CFG_STATS_BASE + 0xc0)
+#define NFP_NET_CFG_STATS_APP3_BYTES (NFP_NET_CFG_STATS_BASE + 0xc8)
+
/**
* Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 7d7933d00b8f..3418f2277e9d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,7 +40,6 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -107,6 +106,18 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
{"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
{"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
{"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},
+
+ {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)},
+ {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)},
+ /* see comments in outro functions in nfp_bpf_jit.c to find out
+ * how different BPF modes use app-specific counters
+ */
+ {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)},
+ {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)},
+ {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)},
+ {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)},
+ {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)},
+ {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)},
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
new file mode 100644
index 000000000000..43f42f842eda
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_offload.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2016 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_net_offload.c
+ * Netronome network device driver: TC offload functions for PF and VF
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "nfp_bpf.h"
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+
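+/* Periodically read the filter hit counters from the device.  The timer
+ * only re-arms itself while BPF offload is enabled; rx_filter_lock keeps
+ * the re-arming from racing with teardown in nfp_net_bpf_stop().
+ */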
+void nfp_net_filter_stats_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+ struct nfp_stat_pair latest;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ mod_timer(&nn->rx_filter_stats_timer,
+ jiffies + NFP_NET_STAT_POLL_IVL);
+
+ spin_unlock_bh(&nn->rx_filter_lock);
+
+ latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+
+ if (latest.pkts != nn->rx_filter.pkts)
+ nn->rx_filter_change = jiffies;
+
+ nn->rx_filter = latest;
+}
+
+static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
+{
+ nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
+ nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
+ nn->rx_filter_prev = nn->rx_filter;
+ nn->rx_filter_change = jiffies;
+}
+
+static int
+nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct tc_action *a;
+ LIST_HEAD(actions);
+ u64 bytes, pkts;
+
+ pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
+ bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
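+ /* The device counts bytes on the wire including the Ethernet
+ * header, while TC software stats are taken after it has been
+ * pulled - compensate to keep both in sync.
+ */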
+ bytes -= pkts * ETH_HLEN;
+
+ nn->rx_filter_prev = nn->rx_filter;
+
+ preempt_disable();
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);
+
+ preempt_enable();
+
+ return 0;
+}
+
+static int
+nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ /* TC direct action */
+ if (cls_bpf->exts_integrated) {
+ if (tc_no_actions(cls_bpf->exts))
+ return NN_ACT_DIRECT;
+
+ return -ENOTSUPP;
+ }
+
+ /* TC legacy mode */
+ if (!tc_single_action(cls_bpf->exts))
+ return -ENOTSUPP;
+
+ tcf_exts_to_list(cls_bpf->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (is_tcf_gact_shot(a))
+ return NN_ACT_TC_DROP;
+
+ if (is_tcf_mirred_redirect(a) &&
+ tcf_mirred_ifindex(a) == nn->netdev->ifindex)
+ return NN_ACT_TC_REDIR;
+ }
+
+ return -ENOTSUPP;
+}
+
+static int
+nfp_net_bpf_offload_prepare(struct nfp_net *nn,
+ struct tc_cls_bpf_offload *cls_bpf,
+ struct nfp_bpf_result *res,
+ void **code, dma_addr_t *dma_addr, u16 max_instr)
+{
+ unsigned int code_sz = max_instr * sizeof(u64);
+ enum nfp_bpf_action_type act;
+ u16 start_off, done_off;
+ unsigned int max_mtu;
+ int ret;
+
+ ret = nfp_net_bpf_get_act(nn, cls_bpf);
+ if (ret < 0)
+ return ret;
+ act = ret;
+
+ max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ if (max_mtu < nn->netdev->mtu) {
+ nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
+ return -ENOTSUPP;
+ }
+
+ start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
+ done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
+
+ *code = dma_zalloc_coherent(&nn->pdev->dev, code_sz, dma_addr,
+ GFP_KERNEL);
+ if (!*code)
+ return -ENOMEM;
+
+ ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
+ max_instr, res);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ dma_free_coherent(&nn->pdev->dev, code_sz, *code, *dma_addr);
+ return ret;
+}
+
+static void
+nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
+ void *code, dma_addr_t dma_addr,
+ unsigned int code_sz, unsigned int n_instr,
+ bool dense_mode)
+{
+ u64 bpf_addr = dma_addr;
+ int err;
+
+ nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+
+ if (dense_mode)
+ bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
+
+ nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
+ nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);
+
+ /* Load up the JITed code */
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
+ if (err)
+ nn_err(nn, "FW command error while loading BPF: %d\n", err);
+
+ /* Enable passing packets through BPF function */
+ nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+ if (err)
+ nn_err(nn, "FW command error while enabling BPF: %d\n", err);
+
+ dma_free_coherent(&nn->pdev->dev, code_sz, code, dma_addr);
+
+ nfp_net_bpf_stats_reset(nn);
+ mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
+}
+
+static int nfp_net_bpf_stop(struct nfp_net *nn)
+{
+ if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
+ return 0;
+
+ spin_lock_bh(&nn->rx_filter_lock);
+ nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
+ spin_unlock_bh(&nn->rx_filter_lock);
+ nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
+
+ del_timer_sync(&nn->rx_filter_stats_timer);
+ nn->bpf_offload_skip_sw = 0;
+
+ return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
+}
+
+int
+nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
+ struct tc_cls_bpf_offload *cls_bpf)
+{
+ struct nfp_bpf_result res;
+ dma_addr_t dma_addr;
+ u16 max_instr;
+ void *code;
+ int err;
+
+ max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+
+ switch (cls_bpf->command) {
+ case TC_CLSBPF_REPLACE:
+ /* There is nothing stopping us from implementing seamless
+ * replace but the simple method of loading I adopted in
+ * the firmware does not handle atomic replace (i.e. we have to
+ * stop the BPF offload and re-enable it). Leaking-in a few
+ * frames which didn't have BPF applied in the hardware should
+ * be fine if software fallback is available, though.
+ */
+ if (nn->bpf_offload_skip_sw)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_stop(nn);
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_ADD:
+ if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
+ return -EBUSY;
+
+ err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+ &dma_addr, max_instr);
+ if (err)
+ return err;
+
+ nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+ dma_addr, max_instr * sizeof(u64),
+ res.n_instr, res.dense_mode);
+ return 0;
+
+ case TC_CLSBPF_DESTROY:
+ return nfp_net_bpf_stop(nn);
+
+ case TC_CLSBPF_STATS:
+ return nfp_net_bpf_stats_update(nn, cls_bpf);
+
+ default:
+ return -ENOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 37abef016a0a..2800bbf65a89 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -38,7 +38,6 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
@@ -142,16 +141,14 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
/* Determine stride */
- if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+ if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = NFP_NET_Q1_BAR;
dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
} else {
switch (fw_ver.major) {
- case 1 ... 3:
+ case 1 ... 4:
if (is_nfp3200) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index d1f157e439cf..86a5b4f5f870 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -2,5 +2,5 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
- qed_selftest.o qed_dcbx.o
+ qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 2d67469eb8f6..0929582fc82b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,6 +23,7 @@
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
+#include "qed_debug.h"
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
@@ -395,6 +396,8 @@ struct qed_hwfn {
/* Buffer for unzipping firmware data */
void *unzip_buf;
+ struct dbg_tools_data dbg_info;
+
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
@@ -430,6 +433,19 @@ struct qed_int_params {
u8 fp_msix_cnt;
};
+struct qed_dbg_feature {
+ struct dentry *dentry;
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+struct qed_dbg_params {
+ struct qed_dbg_feature features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+ bool print_data;
+};
+
struct qed_dev {
u32 dp_module;
u8 dp_level;
@@ -444,6 +460,8 @@ struct qed_dev {
CHIP_REV_IS_A0(dev))
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \
CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev) QED_IS_AH(dev)
#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
@@ -544,6 +562,8 @@ struct qed_dev {
} protocol_ops;
void *ops_cookie;
+ struct qed_dbg_params dbg_params;
+
const struct firmware *firmware;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index be7b3dc7c9a7..130da1c0490b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -19,6 +19,7 @@
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
#ifdef CONFIG_DCB
#include <linux/qed/qed_eth_if.h>
#endif
@@ -942,6 +943,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt;
int rc;
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
@@ -981,6 +985,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
if (p_params->pfc.prio[i])
pfc_map |= BIT(i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1055,24 +1060,33 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
if (ieee) {
- *entry &= ~DCBX_APP_SF_IEEE_MASK;
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
switch (p_params->app_entry[i].sf_ieee) {
case QED_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
break;
case QED_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case QED_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
}
} else {
@@ -1172,7 +1186,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
return 0;
}
- dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
if (!dcbx_info)
return -ENOMEM;
@@ -1207,7 +1221,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
- dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
if (!dcbx_info)
return NULL;
@@ -2130,17 +2144,19 @@ static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
return rc;
}
-int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+static int
+qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
{
return qed_dcbnl_get_ieee_ets(cdev, ets, true);
}
-int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+static int
+qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
{
return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
}
-int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
@@ -2184,7 +2200,7 @@ int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
return 0;
}
-int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_dcbx_get *dcbx_info;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
new file mode 100644
index 000000000000..88e7d5bef909
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -0,0 +1,6898 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+/* Chip IDs enum */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_K2,
+ MAX_CHIP_IDS
+};
+
+/* Memory groups enum */
+enum mem_groups {
+ MEM_GROUP_PXP_MEM,
+ MEM_GROUP_DMAE_MEM,
+ MEM_GROUP_CM_MEM,
+ MEM_GROUP_QM_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_BRB_RAM,
+ MEM_GROUP_BRB_MEM,
+ MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
+ MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
+ MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CONN_CFC_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
+ MEM_GROUP_CAU_PI,
+ MEM_GROUP_CAU_MEM,
+ MEM_GROUP_PXP_ILT,
+ MEM_GROUP_MULD_MEM,
+ MEM_GROUP_BTB_MEM,
+ MEM_GROUP_IGU_MEM,
+ MEM_GROUP_IGU_MSIX,
+ MEM_GROUP_CAU_SB,
+ MEM_GROUP_BMB_RAM,
+ MEM_GROUP_BMB_MEM,
+ MEM_GROUPS_NUM
+};
+
+/* Memory groups names */
+static const char * const s_mem_group_names[] = {
+ "PXP_MEM",
+ "DMAE_MEM",
+ "CM_MEM",
+ "QM_MEM",
+ "TM_MEM",
+ "BRB_RAM",
+ "BRB_MEM",
+ "PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
+ "IOR",
+ "RAM",
+ "BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CONN_CFC_MEM",
+ "TASK_CFC_MEM",
+ "CAU_PI",
+ "CAU_MEM",
+ "PXP_ILT",
+ "MULD_MEM",
+ "BTB_MEM",
+ "IGU_MEM",
+ "IGU_MSIX",
+ "CAU_SB",
+ "BMB_RAM",
+ "BMB_MEM",
+};
+
+/* Idle check conditions */
+static u32 cond4(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
+{
+ return ((r[0] >> imm[0]) & imm[1]) != imm[2];
+}
+
+static u32 cond5(const u32 *r, const u32 *imm)
+{
+ return (r[0] & imm[0]) != imm[1];
+}
+
+static u32 cond8(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) !=
+ (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
+}
+
+static u32 cond9(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
+ return (r[0] & ~imm[0]) != imm[1];
+}
+
+static u32 cond0(const u32 *r, const u32 *imm)
+{
+ return r[0] != imm[0];
+}
+
+static u32 cond10(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] == imm[0];
+}
+
+static u32 cond11(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] > imm[0];
+}
+
+static u32 cond3(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1];
+}
+
+static u32 cond12(const u32 *r, const u32 *imm)
+{
+ return r[0] & imm[0];
+}
+
+static u32 cond7(const u32 *r, const u32 *imm)
+{
+ return r[0] < (r[1] - imm[0]);
+}
+
+static u32 cond2(const u32 *r, const u32 *imm)
+{
+ return r[0] > imm[0];
+}
+
+/* Array of Idle Check conditions */
+static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
+ cond0,
+ cond1,
+ cond2,
+ cond3,
+ cond4,
+ cond5,
+ cond6,
+ cond7,
+ cond8,
+ cond9,
+ cond10,
+ cond11,
+ cond12,
+};
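+
+/* Illustrative sketch (not part of the original sources): each idle check
+ * rule carries a condition index into cond_arr[], plus the register values
+ * and immediates to evaluate it against; a nonzero return flags the rule.
+ * E.g. a rule using condition 5 with imm = {0xff, 0} fires whenever
+ * (r[0] & 0xff) != 0:
+ *
+ *	u32 regs[] = { reg_val };
+ *	u32 imms[] = { 0xff, 0 };
+ *	bool rule_fired = cond_arr[5](regs, imms);
+ */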
+
+/******************************* Data Types **********************************/
+
+enum platform_ids {
+ PLATFORM_ASIC,
+ PLATFORM_RESERVED,
+ PLATFORM_RESERVED2,
+ PLATFORM_RESERVED3,
+ MAX_PLATFORM_IDS
+};
+
+struct dbg_array {
+ const u32 *ptr;
+ u32 size_in_dwords;
+};
+
+/* Chip constant definitions */
+struct chip_defs {
+ const char *name;
+ struct {
+ u8 num_ports;
+ u8 num_pfs;
+ } per_platform[MAX_PLATFORM_IDS];
+};
+
+/* Platform constant definitions */
+struct platform_defs {
+ const char *name;
+ u32 delay_factor;
+};
+
+/* Storm constant definitions */
+struct storm_defs {
+ char letter;
+ enum block_id block_id;
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ bool has_vfc;
+ u32 sem_fast_mem_addr;
+ u32 sem_frame_mode_addr;
+ u32 sem_slow_enable_addr;
+ u32 sem_slow_mode_addr;
+ u32 sem_slow_mode1_conf_addr;
+ u32 sem_sync_dbg_empty_addr;
+ u32 sem_slow_dbg_empty_addr;
+ u32 cm_ctx_wr_addr;
+ u32 cm_conn_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_ag_ctx_rd_addr;
+ u32 cm_conn_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_conn_st_ctx_rd_addr;
+ u32 cm_task_ag_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_ag_ctx_rd_addr;
+ u32 cm_task_st_ctx_lid_size; /* In quad-regs */
+ u32 cm_task_st_ctx_rd_addr;
+};
+
+/* Block constant definitions */
+struct block_defs {
+ const char *name;
+ bool has_dbg_bus[MAX_CHIP_IDS];
+ bool associated_to_storm;
+ u32 storm_id; /* Valid only if associated_to_storm is true */
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ u32 dbg_select_addr;
+ u32 dbg_cycle_enable_addr;
+ u32 dbg_shift_addr;
+ u32 dbg_force_valid_addr;
+ u32 dbg_force_frame_addr;
+ bool has_reset_bit;
+ bool unreset; /* If true, the block is taken out of reset before dump */
+ enum dbg_reset_regs reset_reg;
+ u8 reset_bit_offset; /* Bit offset in reset register */
+};
+
+/* Reset register definitions */
+struct reset_reg_defs {
+ u32 addr;
+ u32 unreset_val;
+ bool exists[MAX_CHIP_IDS];
+};
+
+struct grc_param_defs {
+ u32 default_val[MAX_CHIP_IDS];
+ u32 min;
+ u32 max;
+ bool is_preset;
+ u32 exclude_all_preset_val;
+ u32 crash_preset_val;
+};
+
+struct rss_mem_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 addr; /* In 128b units */
+ u32 num_entries[MAX_CHIP_IDS];
+ u32 entry_width[MAX_CHIP_IDS]; /* In bits */
+};
+
+struct vfc_ram_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 base_row;
+ u32 num_rows;
+};
+
+struct big_ram_defs {
+ const char *instance_name;
+ enum mem_groups mem_group_id;
+ enum mem_groups ram_mem_group_id;
+ enum dbg_grc_params grc_param;
+ u32 addr_reg_addr;
+ u32 data_reg_addr;
+ u32 num_of_blocks[MAX_CHIP_IDS];
+};
+
+struct phy_defs {
+ const char *phy_name;
+ u32 base_addr;
+ u32 tbus_addr_lo_addr;
+ u32 tbus_addr_hi_addr;
+ u32 tbus_data_lo_addr;
+ u32 tbus_data_hi_addr;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_LCIDS 320
+#define MAX_LTIDS 320
+#define NUM_IOR_SETS 2
+#define IORS_PER_SET 176
+#define IOR_SET_OFFSET(set_id) ((set_id) * 256)
+#define BYTES_IN_DWORD sizeof(u32)
+
+/* In the macros below, size and offset are specified in bits */
+#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
+#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
+#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
+#define FIELD_DWORD_OFFSET(type, field) \
+ (int)(FIELD_BIT_OFFSET(type, field) / 32)
+#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
+#define FIELD_BIT_MASK(type, field) \
+ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
+ FIELD_DWORD_SHIFT(type, field))
+#define SET_VAR_FIELD(var, type, field, val) \
+ do { \
+ var[FIELD_DWORD_OFFSET(type, field)] &= \
+ (~FIELD_BIT_MASK(type, field)); \
+ var[FIELD_DWORD_OFFSET(type, field)] |= \
+ (val) << FIELD_DWORD_SHIFT(type, field); \
+ } while (0)
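+
+/* Worked example (illustrative; BAR/FOO are hypothetical): for a field with
+ * BAR_FOO_OFFSET = 36 and BAR_FOO_SIZE = 4, the field lives in dword 1
+ * (36 / 32) at bit 4 within it (36 % 32), under mask 0xf0. So
+ * SET_VAR_FIELD(var, BAR, FOO, 5) clears bits 4-7 of var[1] and ORs in
+ * (5 << 4).
+ */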
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, addr, (arr)[i]); \
+ } while (0)
+#define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ (arr)[i] = qed_rd(dev, ptt, addr); \
+ } while (0)
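+
+/* Note that ARR_REG_WR / ARR_REG_RD rely on a loop counter 'i' that must
+ * already be declared in the calling scope.
+ */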
+
+#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
+#define RAM_LINES_TO_BYTES(lines) \
+ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+#define REG_DUMP_LEN_SHIFT 24
+#define MEM_DUMP_ENTRY_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+#define IDLE_CHK_RULE_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+#define IDLE_CHK_RESULT_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+#define IDLE_CHK_MAX_ENTRIES_SIZE 32
+
+/* The sizes and offsets below are specified in bits */
+#define VFC_CAM_CMD_STRUCT_SIZE 64
+#define VFC_CAM_CMD_ROW_OFFSET 48
+#define VFC_CAM_CMD_ROW_SIZE 9
+#define VFC_CAM_ADDR_STRUCT_SIZE 16
+#define VFC_CAM_ADDR_OP_OFFSET 0
+#define VFC_CAM_ADDR_OP_SIZE 4
+#define VFC_CAM_RESP_STRUCT_SIZE 256
+#define VFC_RAM_ADDR_STRUCT_SIZE 16
+#define VFC_RAM_ADDR_OP_OFFSET 0
+#define VFC_RAM_ADDR_OP_SIZE 2
+#define VFC_RAM_ADDR_ROW_OFFSET 2
+#define VFC_RAM_ADDR_ROW_SIZE 10
+#define VFC_RAM_RESP_STRUCT_SIZE 256
+#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
+#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
+#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
+#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
+#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
+#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+#define NUM_VFC_RAM_TYPES 4
+#define VFC_CAM_NUM_ROWS 512
+#define VFC_OPCODE_CAM_RD 14
+#define VFC_OPCODE_RAM_RD 0
+#define NUM_RSS_MEM_TYPES 5
+#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_BLOCK_SIZE_BYTES 128
+#define BIG_RAM_BLOCK_SIZE_DWORDS \
+ BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
+#define NUM_PHY_TBUS_ADDRESSES 2048
+#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+#define RESET_REG_UNRESET_OFFSET 4
+#define STALL_DELAY_MS 500
+#define STATIC_DEBUG_LINE_DWORDS 9
+#define NUM_DBG_BUS_LINES 256
+#define NUM_COMMON_GLOBAL_PARAMS 8
+#define FW_IMG_MAIN 1
+#define REG_FIFO_DEPTH_ELEMENTS 32
+#define REG_FIFO_ELEMENT_DWORDS 2
+#define REG_FIFO_DEPTH_DWORDS \
+ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
+#define IGU_FIFO_DEPTH_ELEMENTS 64
+#define IGU_FIFO_ELEMENT_DWORDS 4
+#define IGU_FIFO_DEPTH_DWORDS \
+ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
+ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
+ (MCP_REG_SCRATCH + \
+ offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+#define EMPTY_FW_VERSION_STR "???_???_???_???"
+#define EMPTY_FW_IMAGE_STR "???????????????"
+
+/***************************** Constant Arrays *******************************/
+
+/* Debug arrays */
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+
+/* Chip constant definitions array */
+static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+ { "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
+ { "bb_b0",
+ { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
+ { "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+};
+
+/* Storm constant definitions array */
+static struct storm_defs s_storm_defs[] = {
+ /* Tstorm */
+ {'T', BLOCK_TSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCT}, true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ TCM_REG_CTX_RBC_ACCS,
+ 4, TCM_REG_AGG_CON_CTX,
+ 16, TCM_REG_SM_CON_CTX,
+ 2, TCM_REG_AGG_TASK_CTX,
+ 4, TCM_REG_SM_TASK_CTX},
+ /* Mstorm */
+ {'M', BLOCK_MSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCM}, false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
+ MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY,
+ MCM_REG_CTX_RBC_ACCS,
+ 1, MCM_REG_AGG_CON_CTX,
+ 10, MCM_REG_SM_CON_CTX,
+ 2, MCM_REG_AGG_TASK_CTX,
+ 7, MCM_REG_SM_TASK_CTX},
+ /* Ustorm */
+ {'U', BLOCK_USEM,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCU}, false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
+ USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY,
+ UCM_REG_CTX_RBC_ACCS,
+ 2, UCM_REG_AGG_CON_CTX,
+ 13, UCM_REG_SM_CON_CTX,
+ 3, UCM_REG_AGG_TASK_CTX,
+ 3, UCM_REG_SM_TASK_CTX},
+ /* Xstorm */
+ {'X', BLOCK_XSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCX}, false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
+ XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY,
+ XCM_REG_CTX_RBC_ACCS,
+ 9, XCM_REG_AGG_CON_CTX,
+ 15, XCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0},
+ /* Ystorm */
+ {'Y', BLOCK_YSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCY}, false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
+ YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY,
+ YCM_REG_CTX_RBC_ACCS,
+ 2, YCM_REG_AGG_CON_CTX,
+ 3, YCM_REG_SM_CON_CTX,
+ 2, YCM_REG_AGG_TASK_CTX,
+ 12, YCM_REG_SM_TASK_CTX},
+ /* Pstorm */
+ {'P', BLOCK_PSEM,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCS}, true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
+ PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY,
+ PCM_REG_CTX_RBC_ACCS,
+ 0, 0,
+ 10, PCM_REG_SM_CON_CTX,
+ 0, 0,
+ 0, 0}
+};
+
+/* Block definitions array */
+static struct block_defs block_grc_defs = {
+ "grc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
+ GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
+ GRC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_UA, 1
+};
+
+static struct block_defs block_miscs_defs = {
+ "miscs", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_misc_defs = {
+ "misc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbu_defs = {
+ "dbu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pglue_b_defs = {
+ "pglue_b", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+ PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
+ PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
+ PGLUE_B_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 1
+};
+
+static struct block_defs block_cnig_defs = {
+ "cnig", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
+ CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
+ CNIG_REG_DBG_FORCE_FRAME_K2,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 0
+};
+
+static struct block_defs block_cpmu_defs = {
+ "cpmu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 8
+};
+
+static struct block_defs block_ncsi_defs = {
+ "ncsi", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
+ NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
+ NCSI_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 5
+};
+
+static struct block_defs block_opte_defs = {
+ "opte", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 4
+};
+
+static struct block_defs block_bmb_defs = {
+ "bmb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+ BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
+ BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
+ BMB_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 7
+};
+
+static struct block_defs block_pcie_defs = {
+ "pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp_defs = {
+ "mcp", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_mcp2_defs = {
+ "mcp2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+ MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
+ MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
+ MCP2_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_pswhst_defs = {
+ "pswhst", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
+ PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
+ PSWHST_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswhst2_defs = {
+ "pswhst2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
+ PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
+ PSWHST2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 0
+};
+
+static struct block_defs block_pswrd_defs = {
+ "pswrd", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
+ PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
+ PSWRD_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswrd2_defs = {
+ "pswrd2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
+ PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
+ PSWRD2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 2
+};
+
+static struct block_defs block_pswwr_defs = {
+ "pswwr", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
+ PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
+ PSWWR_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswwr2_defs = {
+ "pswwr2", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 3
+};
+
+static struct block_defs block_pswrq_defs = {
+ "pswrq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
+ PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
+ PSWRQ_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pswrq2_defs = {
+ "pswrq2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
+ PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
+ PSWRQ2_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISC_PL_HV, 1
+};
+
+static struct block_defs block_pglcs_defs = {
+ "pglcs", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
+ PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
+ PGLCS_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 2
+};
+
+static struct block_defs block_ptu_defs = {
+ "ptu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
+ PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
+ PTU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
+};
+
+static struct block_defs block_dmae_defs = {
+ "dmae", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
+ DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
+ DMAE_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
+};
+
+static struct block_defs block_tcm_defs = {
+ "tcm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
+ TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
+ TCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
+};
+
+static struct block_defs block_mcm_defs = {
+ "mcm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
+ MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
+ MCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
+};
+
+static struct block_defs block_ucm_defs = {
+ "ucm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
+ UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
+ UCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
+};
+
+static struct block_defs block_xcm_defs = {
+ "xcm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
+ XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
+ XCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
+};
+
+static struct block_defs block_ycm_defs = {
+ "ycm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
+ YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
+ YCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
+};
+
+static struct block_defs block_pcm_defs = {
+ "pcm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
+ PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
+ PCM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
+};
+
+static struct block_defs block_qm_defs = {
+ "qm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+ QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
+ QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
+ QM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
+};
+
+static struct block_defs block_tm_defs = {
+ "tm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
+ TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
+ TM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
+};
+
+static struct block_defs block_dorq_defs = {
+ "dorq", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
+ DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
+ DORQ_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
+};
+
+static struct block_defs block_brb_defs = {
+ "brb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
+ BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
+ BRB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
+};
+
+static struct block_defs block_src_defs = {
+ "src", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
+ SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
+ SRC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
+};
+
+static struct block_defs block_prs_defs = {
+ "prs", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+ PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
+ PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
+ PRS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
+};
+
+static struct block_defs block_tsdm_defs = {
+ "tsdm", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
+ TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
+ TSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
+};
+
+static struct block_defs block_msdm_defs = {
+ "msdm", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
+ MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
+ MSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
+};
+
+static struct block_defs block_usdm_defs = {
+ "usdm", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
+ USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
+ USDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
+};
+
+static struct block_defs block_xsdm_defs = {
+ "xsdm", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
+ XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
+ XSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
+};
+
+static struct block_defs block_ysdm_defs = {
+ "ysdm", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
+ YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
+ YSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
+};
+
+static struct block_defs block_psdm_defs = {
+ "psdm", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
+ PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
+ PSDM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
+};
+
+static struct block_defs block_tsem_defs = {
+ "tsem", {true, true, true}, true, DBG_TSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
+ TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
+ TSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
+};
+
+static struct block_defs block_msem_defs = {
+ "msem", {true, true, true}, true, DBG_MSTORM_ID,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
+ MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
+ MSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
+};
+
+static struct block_defs block_usem_defs = {
+ "usem", {true, true, true}, true, DBG_USTORM_ID,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
+ USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
+ USEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
+};
+
+static struct block_defs block_xsem_defs = {
+ "xsem", {true, true, true}, true, DBG_XSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
+ XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
+ XSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
+};
+
+static struct block_defs block_ysem_defs = {
+ "ysem", {true, true, true}, true, DBG_YSTORM_ID,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
+ YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
+ YSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
+};
+
+static struct block_defs block_psem_defs = {
+ "psem", {true, true, true}, true, DBG_PSTORM_ID,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
+ PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
+ PSEM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
+};
+
+static struct block_defs block_rss_defs = {
+ "rss", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
+ RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
+ RSS_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
+};
+
+static struct block_defs block_tmld_defs = {
+ "tmld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
+ TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
+ TMLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
+};
+
+static struct block_defs block_muld_defs = {
+ "muld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
+ MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
+ MULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
+};
+
+static struct block_defs block_yuld_defs = {
+ "yuld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
+ YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
+ YULD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 15
+};
+
+static struct block_defs block_xyld_defs = {
+ "xyld", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
+ XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
+ XYLD_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
+};
+
+static struct block_defs block_prm_defs = {
+ "prm", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
+ PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
+ PRM_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
+};
+
+static struct block_defs block_pbf_pb1_defs = {
+ "pbf_pb1", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
+ PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
+ PBF_PB1_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 11
+};
+
+static struct block_defs block_pbf_pb2_defs = {
+ "pbf_pb2", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
+ PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
+ PBF_PB2_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 12
+};
+
+static struct block_defs block_rpb_defs = {
+ "rpb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
+ RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
+ RPB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
+};
+
+static struct block_defs block_btb_defs = {
+ "btb", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+ BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
+ BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
+ BTB_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
+};
+
+static struct block_defs block_pbf_defs = {
+ "pbf", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+ PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
+ PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
+ PBF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
+};
+
+static struct block_defs block_rdif_defs = {
+ "rdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
+ RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
+ RDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
+};
+
+static struct block_defs block_tdif_defs = {
+ "tdif", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
+ TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
+ TDIF_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
+};
+
+static struct block_defs block_cdu_defs = {
+ "cdu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
+ CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
+ CDU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
+};
+
+static struct block_defs block_ccfc_defs = {
+ "ccfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
+ CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
+ CCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
+};
+
+static struct block_defs block_tcfc_defs = {
+ "tcfc", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+ TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
+ TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
+ TCFC_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
+};
+
+static struct block_defs block_igu_defs = {
+ "igu", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
+ IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
+ IGU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
+};
+
+static struct block_defs block_cau_defs = {
+ "cau", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+ CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
+ CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
+ CAU_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
+};
+
+static struct block_defs block_umac_defs = {
+ "umac", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
+ UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
+ UMAC_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 6
+};
+
+static struct block_defs block_xmac_defs = {
+ "xmac", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_dbg_defs = {
+ "dbg", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
+};
+
+static struct block_defs block_nig_defs = {
+ "nig", {true, true, true}, false, 0,
+ {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+ NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
+ NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
+ NIG_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
+};
+
+static struct block_defs block_wol_defs = {
+ "wol", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+ WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
+ WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
+ WOL_REG_DBG_FORCE_FRAME,
+ true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
+};
+
+static struct block_defs block_bmbn_defs = {
+ "bmbn", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+ BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
+ BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
+ BMBN_REG_DBG_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ipc_defs = {
+ "ipc", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_UA, 8
+};
+
+static struct block_defs block_nwm_defs = {
+ "nwm", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+ NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
+ NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
+ NWM_REG_DBG_FORCE_FRAME,
+ true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
+};
+
+static struct block_defs block_nws_defs = {
+ "nws", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 12
+};
+
+static struct block_defs block_ms_defs = {
+ "ms", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, false, DBG_RESET_REG_MISCS_PL_HV, 13
+};
+
+static struct block_defs block_phy_pcie_defs = {
+ "phy_pcie", {false, false, true}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+ PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
+ PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
+ PCIE_REG_DBG_COMMON_FORCE_FRAME,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_led_defs = {
+ "led", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_misc_aeu_defs = {
+ "misc_aeu", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_bar0_map_defs = {
+ "bar0_map", {false, false, false}, false, 0,
+ {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+ 0, 0, 0, 0, 0,
+ false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
+ &block_grc_defs,
+ &block_miscs_defs,
+ &block_misc_defs,
+ &block_dbu_defs,
+ &block_pglue_b_defs,
+ &block_cnig_defs,
+ &block_cpmu_defs,
+ &block_ncsi_defs,
+ &block_opte_defs,
+ &block_bmb_defs,
+ &block_pcie_defs,
+ &block_mcp_defs,
+ &block_mcp2_defs,
+ &block_pswhst_defs,
+ &block_pswhst2_defs,
+ &block_pswrd_defs,
+ &block_pswrd2_defs,
+ &block_pswwr_defs,
+ &block_pswwr2_defs,
+ &block_pswrq_defs,
+ &block_pswrq2_defs,
+ &block_pglcs_defs,
+ &block_dmae_defs,
+ &block_ptu_defs,
+ &block_tcm_defs,
+ &block_mcm_defs,
+ &block_ucm_defs,
+ &block_xcm_defs,
+ &block_ycm_defs,
+ &block_pcm_defs,
+ &block_qm_defs,
+ &block_tm_defs,
+ &block_dorq_defs,
+ &block_brb_defs,
+ &block_src_defs,
+ &block_prs_defs,
+ &block_tsdm_defs,
+ &block_msdm_defs,
+ &block_usdm_defs,
+ &block_xsdm_defs,
+ &block_ysdm_defs,
+ &block_psdm_defs,
+ &block_tsem_defs,
+ &block_msem_defs,
+ &block_usem_defs,
+ &block_xsem_defs,
+ &block_ysem_defs,
+ &block_psem_defs,
+ &block_rss_defs,
+ &block_tmld_defs,
+ &block_muld_defs,
+ &block_yuld_defs,
+ &block_xyld_defs,
+ &block_prm_defs,
+ &block_pbf_pb1_defs,
+ &block_pbf_pb2_defs,
+ &block_rpb_defs,
+ &block_btb_defs,
+ &block_pbf_defs,
+ &block_rdif_defs,
+ &block_tdif_defs,
+ &block_cdu_defs,
+ &block_ccfc_defs,
+ &block_tcfc_defs,
+ &block_igu_defs,
+ &block_cau_defs,
+ &block_umac_defs,
+ &block_xmac_defs,
+ &block_dbg_defs,
+ &block_nig_defs,
+ &block_wol_defs,
+ &block_bmbn_defs,
+ &block_ipc_defs,
+ &block_nwm_defs,
+ &block_nws_defs,
+ &block_ms_defs,
+ &block_phy_pcie_defs,
+ &block_led_defs,
+ &block_misc_aeu_defs,
+ &block_bar0_map_defs,
+};
+
+static struct platform_defs s_platform_defs[] = {
+ {"asic", 1},
+ {"reserved", 0},
+ {"reserved2", 0},
+ {"reserved3", 0}
+};
+
+static struct grc_param_defs s_grc_param_defs[] = {
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_UNSTALL */
+ {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+ MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LCIDS */
+ {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+ MAX_LTIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */
+ {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1, 1}, 0, 1, false, 0, 1} /* DBG_GRC_PARAM_DUMP_PHY */
+};
+
+static struct rss_mem_defs s_rss_mem_defs[] = {
+ { "rss_mem_cid", "rss_cid", 0,
+ {256, 256, 320},
+ {32, 32, 32} },
+ { "rss_mem_key_msb", "rss_key", 1024,
+ {128, 128, 208},
+ {256, 256, 256} },
+ { "rss_mem_key_lsb", "rss_key", 2048,
+ {128, 128, 208},
+ {64, 64, 64} },
+ { "rss_mem_info", "rss_info", 3072,
+ {128, 128, 208},
+ {16, 16, 16} },
+ { "rss_mem_ind", "rss_ind", 4096,
+ {(128 * 128), (128 * 128), (128 * 208)},
+ {16, 16, 16} }
+};
+
+static struct vfc_ram_defs s_vfc_ram_defs[] = {
+ {"vfc_ram_tt1", "vfc_ram", 0, 512},
+ {"vfc_ram_mtt2", "vfc_ram", 512, 128},
+ {"vfc_ram_stt2", "vfc_ram", 640, 32},
+ {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
+};
+
+static struct big_ram_defs s_big_ram_defs[] = {
+ { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ {4800, 4800, 5632} },
+ { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ {2880, 2880, 3680} },
+ { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ {1152, 1152, 1152} }
+};
+
+static struct reset_reg_defs s_reset_regs_defs[] = {
+ { MISCS_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_UA */
+ { MISCS_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISCS_PL_HV */
+ { MISCS_REG_RESET_PL_HV_2, 0x0,
+ {false, false, true} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */
+ { MISC_REG_RESET_PL_UA, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_UA */
+ { MISC_REG_RESET_PL_HV, 0x0,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_HV */
+ { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+ { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+ { MISC_REG_RESET_PL_PDA_VAUX, 0x2,
+ {true, true, true} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+};
+
+static struct phy_defs s_phy_defs[] = {
+ {"nw_phy", NWS_REG_NWS_CMU, PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8},
+ {"sgmii_phy", MS_REG_MS_CMU, PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1, PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131},
+};
+
+/**************************** Private Functions ******************************/
+
+/* Reads and returns a single dword from the specified unaligned buffer */
+static u32 qed_read_unaligned_dword(u8 *buf)
+{
+ u32 dword;
+
+ memcpy((u8 *)&dword, buf, sizeof(dword));
+ return dword;
+}
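+
+/* The memcpy() above is what makes the unaligned access safe: dereferencing
+ * buf as a u32 directly could fault on strict-alignment architectures.
+ */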
+
+/* Initializes debug data for the specified device */
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (dev_data->initialized)
+ return DBG_STATUS_OK;
+
+ if (QED_IS_K2(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_K2;
+ dev_data->mode_enable[MODE_K2] = 1;
+ } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_BB_B0;
+ dev_data->mode_enable[MODE_BB_B0] = 1;
+ } else {
+ return DBG_STATUS_UNKNOWN_CHIP;
+ }
+
+ dev_data->platform_id = PLATFORM_ASIC;
+ dev_data->mode_enable[MODE_ASIC] = 1;
+ dev_data->initialized = true;
+ return DBG_STATUS_OK;
+}
+
+/* Reads the FW info structure for the specified Storm from the chip,
+ * and writes it to the specified fw_info pointer.
+ */
+static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 storm_id, struct fw_info *fw_info)
+{
+ /* First, read the address that points to the fw_info location. It is
+ * stored in the last line of the Storm RAM.
+ */
+ u32 addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(struct fw_info_location);
+ struct fw_info_location fw_info_location;
+ u32 *dest = (u32 *)&fw_info_location;
+ u32 i;
+
+ memset(&fw_info_location, 0, sizeof(fw_info_location));
+ memset(fw_info, 0, sizeof(*fw_info));
+ for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ if (fw_info_location.size > 0 && fw_info_location.size <=
+ sizeof(*fw_info)) {
+ /* Read FW version info from Storm RAM */
+ addr = fw_info_location.grc_addr;
+ dest = (u32 *)fw_info;
+ for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
+ i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+ }
+}
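+
+/* Layout sketch (informal): the last line of the Storm RAM holds a
+ * struct fw_info_location { grc_addr, size } that points back into the RAM
+ * at the actual fw_info:
+ *
+ *	Storm RAM:  [ ... | fw_info | ... | fw_info_location ]
+ *	                    ^-- fw_info_location.grc_addr
+ */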
+
+/* Dumps the specified string to the specified buffer. Returns the dumped size
+ * in bytes (string length + 1 for the terminating NUL character).
+ */
+static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
+{
+ if (dump)
+ strcpy(dump_buf, str);
+ return (u32)strlen(str) + 1;
+}
+
+/* Dumps zeros to align the specified buffer to dwords. Returns the dumped size
+ * in bytes.
+ */
+static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
+{
+ u8 offset_in_dword = (u8)(byte_offset & 0x3), align_size;
+
+ align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
+
+ if (dump && align_size)
+ memset(dump_buf, 0, align_size);
+ return align_size;
+}
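+
+/* E.g. a byte_offset of 6 has offset_in_dword = 2, so two zero bytes are
+ * dumped and the caller's offset lands on the next dword boundary (8).
+ */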
+
+/* Writes the specified string param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_str_param(u32 *dump_buf,
+ bool dump,
+ const char *param_name, const char *param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a string param value */
+ if (dump)
+ *(char_buf + offset) = 1;
+ offset++;
+
+ /* Dump param value */
+ offset += qed_dump_str(char_buf + offset, dump, param_val);
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+ return BYTES_TO_DWORDS(offset);
+}
+
+/* Writes the specified numeric param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_num_param(u32 *dump_buf,
+ bool dump, const char *param_name, u32 param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a numeric param value */
+ if (dump)
+ *(char_buf + offset) = 0;
+ offset++;
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+
+ /* Dump param value (and change offset from bytes to dwords) */
+ offset = BYTES_TO_DWORDS(offset);
+ if (dump)
+ *(dump_buf + offset) = param_val;
+ offset++;
+ return offset;
+}
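+
+/* Resulting buffer encoding (informal sketch):
+ *
+ *	string param:  "name\0" 0x01 "value\0" <zero pad to dword>
+ *	numeric param: "name\0" 0x00 <zero pad to dword> <u32 value>
+ *
+ * E.g. qed_dump_num_param(buf, true, "pci-func", 2) emits the 9 name bytes
+ * (including the NUL), a 0x00 type byte, 2 pad bytes, then the dword 2.
+ */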
+
+/* Reads the FW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+ char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+ struct fw_info fw_info = { {0}, {0} };
+ int printed_chars;
+ u32 offset = 0;
+
+ if (dump) {
+ /* Read FW image/version from PRAM in a non-reset SEMI */
+ bool found = false;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
+ storm_id++) {
+ /* Read FW version/image */
+ if (!dev_data->block_in_reset
+ [s_storm_defs[storm_id].block_id]) {
+ /* read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn,
+ p_ptt, storm_id, &fw_info);
+
+ /* Create FW version/image strings */
+ printed_chars =
+ snprintf(fw_ver_str,
+ sizeof(fw_ver_str),
+ "%d_%d_%d_%d",
+ fw_info.ver.num.major,
+ fw_info.ver.num.minor,
+ fw_info.ver.num.rev,
+ fw_info.ver.num.eng);
+ if (printed_chars < 0 || printed_chars >=
+ sizeof(fw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
+ }
+
+ found = true;
+ }
+ }
+ }
+
+ /* Dump FW version, image and timestamp */
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-version", fw_ver_str);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-image", fw_img_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fw-timestamp", fw_info.ver.timestamp);
+ return offset;
+}
+
+/* Reads the MFW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
+
+ if (dump) {
+ u32 global_section_offsize, global_section_addr, mfw_ver;
+ u32 public_data_addr, global_section_offsize_addr;
+ int printed_chars;
+
+ /* Find MCP public data GRC address.
+ * Needs to be ORed with MCP_REG_SCRATCH due to a HW bug.
+ */
+ public_data_addr = qed_rd(p_hwfn, p_ptt,
+ MISC_REG_SHARED_MEM_ADDR) |
+ MCP_REG_SCRATCH;
+
+ /* Find MCP public global section offset */
+ global_section_offsize_addr = public_data_addr +
+ offsetof(struct mcp_public_data,
+ sections) +
+ sizeof(offsize_t) * PUBLIC_GLOBAL;
+ global_section_offsize = qed_rd(p_hwfn, p_ptt,
+ global_section_offsize_addr);
+ global_section_addr = MCP_REG_SCRATCH +
+ (global_section_offsize &
+ OFFSIZE_OFFSET_MASK) * 4;
+
+ /* Read MFW version from MCP public global section */
+ mfw_ver = qed_rd(p_hwfn, p_ptt,
+ global_section_addr +
+ offsetof(struct public_global, mfw_ver));
+
+ /* Dump MFW version param */
+ printed_chars = snprintf(mfw_ver_str, sizeof(mfw_ver_str),
+ "%d_%d_%d_%d",
+ (u8)(mfw_ver >> 24),
+ (u8)(mfw_ver >> 16),
+ (u8)(mfw_ver >> 8),
+ (u8)mfw_ver);
+ if (printed_chars < 0 || printed_chars >= sizeof(mfw_ver_str))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid MFW version string\n");
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
+}
+
+/* Writes a section header to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_section_hdr(u32 *dump_buf,
+ bool dump, const char *name, u32 num_params)
+{
+ return qed_dump_num_param(dump_buf, dump, name, num_params);
+}
+
+/* Writes the common global params to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 num_specific_global_params)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+
+ /* Find platform string and dump global params section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump,
+ "global_params",
+ NUM_COMMON_GLOBAL_PARAMS +
+ num_specific_global_params);
+
+ /* Store params */
+ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_mfw_ver_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "tools-version", TOOLS_VERSION);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "chip",
+ s_chip_defs[dev_data->chip_id].name);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "platform",
+ s_platform_defs[dev_data->platform_id].name);
+ offset +=
+ qed_dump_num_param(dump_buf + offset, dump, "pci-func",
+ p_hwfn->abs_pf_id);
+ return offset;
+}
+
+/* Writes the last section to the specified buffer at the given offset.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+{
+ u32 start_offset = offset, crc = ~0;
+
+ /* Dump CRC section header */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+
+ /* Calculate CRC32 and add it to the dword following the "last" section.
+ */
+ if (dump)
+ *(dump_buf + offset) = ~crc32(crc, (u8 *)dump_buf,
+ DWORDS_TO_BYTES(offset));
+ offset++;
+ return offset - start_offset;
+}
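+/* Note: the CRC is a standard CRC32 (initial value ~0, final inversion),
+ * computed over everything dumped so far including the "last" section header,
+ * and stored in the dword that follows it.
+ */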
+
+/* Update blocks reset state */
+static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Read reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++)
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id])
+ reg_val[i] = qed_rd(p_hwfn,
+ p_ptt, s_reset_regs_defs[i].addr);
+
+ /* Check if blocks are in reset */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ dev_data->block_in_reset[i] =
+ s_block_defs[i]->has_reset_bit &&
+ !(reg_val[s_block_defs[i]->reset_reg] &
+ BIT(s_block_defs[i]->reset_bit_offset));
+}
+
+/* Enable / disable the Debug block */
+static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
+}
+
+/* Resets the Debug block */
+static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+
+ dbg_reset_reg_addr =
+ s_reset_regs_defs[s_block_defs[BLOCK_DBG]->reset_reg].addr;
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
+ new_reset_reg_val = old_reset_reg_val &
+ ~BIT(s_block_defs[BLOCK_DBG]->reset_bit_offset);
+
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
+}
+
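+/* Sets the Debug Bus framing mode. */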
+static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_bus_frame_modes mode)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
+}
+
+/* Enable / disable Debug Bus clients according to the specified mask.
+ * (1 = enable, 0 = disable)
+ */
+static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 client_mask)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
+}
+
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ const u32 *ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
+ bool arg1, arg2;
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match(p_hwfn, modes_buf_offset);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ }
+}
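+/* The mode tree is evaluated recursively in prefix notation: NOT takes one
+ * operand, OR/AND take two, and any other byte is a mode index (offset by
+ * MAX_INIT_MODE_OPS). E.g. a (hypothetical) buffer
+ * { INIT_MODE_OP_AND, <mode A>, INIT_MODE_OP_NOT, <mode B> }
+ * evaluates to "mode A enabled && mode B disabled".
+ */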
+
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Clear all GRC params */
+static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_set_by_user[i] = 0;
+}
+
+/* Assign default GRC param values */
+static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ if (!dev_data->grc.param_set_by_user[i])
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
+/* Returns true if the specified entity (indicated by GRC param) should be
+ * included in the dump, false otherwise.
+ */
+static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ return qed_grc_get_param(p_hwfn, grc_param) > 0;
+}
+
+/* Returns true if the specified Storm should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
+ enum dbg_storms storm)
+{
+ return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
+}
+
+/* Returns true if the specified memory should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, u8 mem_group_id)
+{
+ u8 i;
+
+ /* Check Storm match */
+ if (s_block_defs[block_id]->associated_to_storm &&
+ !qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)s_block_defs[block_id]->storm_id))
+ return false;
+
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (mem_group_id == s_big_ram_defs[i].mem_group_id ||
+ mem_group_id == s_big_ram_defs[i].ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn,
+ s_big_ram_defs[i].grc_param);
+ if (mem_group_id == MEM_GROUP_PXP_ILT || mem_group_id ==
+ MEM_GROUP_PXP_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+ if (mem_group_id == MEM_GROUP_RAM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+ if (mem_group_id == MEM_GROUP_PBUF)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+ if (mem_group_id == MEM_GROUP_CAU_MEM ||
+ mem_group_id == MEM_GROUP_CAU_SB ||
+ mem_group_id == MEM_GROUP_CAU_PI)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ if (mem_group_id == MEM_GROUP_QM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM ||
+ mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
+ if (mem_group_id == MEM_GROUP_IGU_MEM || mem_group_id ==
+ MEM_GROUP_IGU_MSIX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+ if (mem_group_id == MEM_GROUP_MULD_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+ if (mem_group_id == MEM_GROUP_PRS_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+ if (mem_group_id == MEM_GROUP_DMAE_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+ if (mem_group_id == MEM_GROUP_TM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+ if (mem_group_id == MEM_GROUP_SDM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+ if (mem_group_id == MEM_GROUP_TDIF_CTX || mem_group_id ==
+ MEM_GROUP_RDIF_CTX)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+ if (mem_group_id == MEM_GROUP_CM_MEM)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+ if (mem_group_id == MEM_GROUP_IOR)
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+
+ return true;
+}
+
+/* Stalls or unstalls all Storms */
+static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool stall)
+{
+ u8 reg_val = stall ? 1 : 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ u32 reg_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0;
+
+ qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
+ }
+ }
+
+ msleep(STALL_DELAY_MS);
+}
+
+/* Takes all blocks out of reset */
+static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
+ u32 i;
+
+ /* Fill reset regs values */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ if (s_block_defs[i]->has_reset_bit && s_block_defs[i]->unreset)
+ reg_val[s_block_defs[i]->reset_reg] |=
+ BIT(s_block_defs[i]->reset_bit_offset);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ reg_val[i] |= s_reset_regs_defs[i].unreset_val;
+ if (reg_val[i])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_reset_regs_defs[i].addr +
+ RESET_REG_UNRESET_OFFSET, reg_val[i]);
+ }
+ }
+}
+
+/* Returns the attention name offsets of the specified block */
+static const struct dbg_attn_block_type_data *
+qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
+{
+ const struct dbg_attn_block *base_attn_block_arr =
+ (const struct dbg_attn_block *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+
+ return &base_attn_block_arr[block_id].per_type_data[attn_type];
+}
+
+/* Returns the attention registers of the specified block */
+static const struct dbg_attn_reg *
+qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
+ u8 *num_attn_regs)
+{
+ const struct dbg_attn_block_type_data *block_type_data =
+ qed_get_block_attn_data(block_id, attn_type);
+
+ *num_attn_regs = block_type_data->num_regs;
+ return &((const struct dbg_attn_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
+ regs_offset];
+}
+
+/* For each block, clear the status of all parities */
+static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 reg_idx, num_attn_regs;
+ u32 block_id;
+
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id])
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+
+ /* Check mode */
+ bool eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ u16 modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ /* Mode match - read parity status read-clear
+ * register.
+ */
+ qed_rd(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(reg_data->
+ sts_clr_address));
+ }
+ }
+}
+
+/* Dumps GRC registers section header. Returns the dumped size in dwords.
+ * The following parameters are dumped:
+ * - 'count' = num_dumped_entries
+ * - 'split' = split_type
+ * - 'id' = split_id (dumped only if split_id >= 0)
+ * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
+ * param_val != NULL)
+ */
+static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+ bool dump,
+ u32 num_reg_entries,
+ const char *split_type,
+ int split_id,
+ const char *param_name, const char *param_val)
+{
+ u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
+ u32 offset = 0;
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_regs", num_params);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "count", num_reg_entries);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "split", split_type);
+ if (split_id >= 0)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "id", split_id);
+ if (param_name && param_val)
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, param_name, param_val);
+ return offset;
+}
+
+/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ u32 offset = 0, i;
+
+ if (dump) {
+ *(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
+ for (i = 0; i < len; i++, addr++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr));
+ } else {
+ offset += len + 1;
+ }
+
+ return offset;
+}
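+/* The first dumped dword packs the dword-aligned GRC address together with
+ * the entry length: addr | (len << REG_DUMP_LEN_SHIFT). E.g. assuming a
+ * shift of 24, addr 0x1000 with len 2 would be stored as 0x02001000,
+ * followed by the two register values.
+ */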
+
+/* Dumps GRC registers entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ u32 *num_dumped_reg_entries)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ *num_dumped_reg_entries = 0;
+ while (input_offset < input_regs_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr =
+ (const struct dbg_dump_cond_hdr *)
+ &input_regs_arr.ptr[input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check mode/block */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match && block_enable[cond_hdr->block_id]) {
+ for (i = 0; i < cond_hdr->data_size;
+ i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ &input_regs_arr.ptr[input_offset];
+
+ offset +=
+ qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+ dump_buf + offset, dump,
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_ADDRESS),
+ GET_FIELD(reg->data,
+ DBG_DUMP_REG_LENGTH));
+ (*num_dumped_reg_entries)++;
+ }
+ } else {
+ input_offset += cond_hdr->data_size;
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps one split of GRC registers (header + register entries).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *split_type_name,
+ u32 split_id,
+ const char *param_name,
+ const char *param_val)
+{
+ u32 num_dumped_reg_entries, offset;
+
+ /* Calculate register dump header size (and skip it for now) */
+ offset = qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ /* Dump registers */
+ offset += qed_grc_dump_regs_entries(p_hwfn,
+ p_ptt,
+ input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ &num_dumped_reg_entries);
+
+ /* Write register dump header */
+ if (dump && num_dumped_reg_entries > 0)
+ qed_grc_dump_regs_hdr(dump_buf,
+ dump,
+ num_dumped_reg_entries,
+ split_type_name,
+ split_id, param_name, param_val);
+
+ return num_dumped_reg_entries > 0 ? offset : 0;
+}
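+/* Note the two-pass pattern used above: the header is first written with
+ * dump=false just to advance the offset, the registers are dumped, and the
+ * header is then rewritten in place once num_dumped_reg_entries is known.
+ */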
+
+/* Dumps registers according to the input registers array.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *param_name, const char *param_val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, input_offset = 0;
+ u8 port_id, pf_id;
+
+ if (dump)
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_regs_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ case SPLIT_TYPE_VF:
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump,
+ block_enable,
+ "eng",
+ (u32)(-1),
+ param_name,
+ param_val);
+ break;
+ case SPLIT_TYPE_PORT:
+ for (port_id = 0;
+ port_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_ports;
+ port_id++) {
+ if (dump)
+ qed_port_pretend(p_hwfn, p_ptt,
+ port_id);
+ offset +=
+ qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "port", port_id,
+ param_name,
+ param_val);
+ }
+ break;
+ case SPLIT_TYPE_PF:
+ case SPLIT_TYPE_PORT_PF:
+ for (pf_id = 0;
+ pf_id <
+ s_chip_defs[dev_data->chip_id].
+ per_platform[dev_data->platform_id].num_pfs;
+ pf_id++) {
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ offset += qed_grc_dump_split_data(p_hwfn,
+ p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ "pf", pf_id, param_name,
+ param_val);
+ }
+ break;
+ default:
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ /* Pretend to original PF */
+ if (dump)
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ return offset;
+}
+
+/* Dump reset registers. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i, offset = 0, num_regs = 0;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write reset registers */
+ for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
+ if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+							 BYTES_TO_DWORDS(s_reset_regs_defs[i].addr),
+							 1);
+ num_regs++;
+ }
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true, num_regs, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dump registers that are modified during GRC Dump and therefore must be dumped
+ * first. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, num_reg_entries = 0, block_id;
+ u8 storm_id, reg_idx, num_attn_regs;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, "eng", -1, NULL, NULL);
+
+ /* Write parity registers */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (dev_data->block_in_reset[block_id] && dump)
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
+ /* Mode match - read and dump registers */
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ reg_data->mask_address,
+ 1);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS),
+ 1);
+ num_reg_entries += 2;
+ }
+ }
+ }
+
+ /* Write storm stall status registers */
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
+ dump)
+ continue;
+
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(s_storm_defs[storm_id].
+ sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED),
+ 1);
+ num_reg_entries++;
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries, "eng", -1, NULL, NULL);
+ return offset;
+}
+
+/* Dumps a GRC memory header (section and params).
+ * The following parameters are dumped:
+ * name - name is dumped only if it's not NULL.
+ * addr - byte_addr is dumped only if name is NULL.
+ * len - dword_len is always dumped.
+ * width - bit_width is dumped if it's not zero.
+ * packed - packed=1 is dumped if it's not false.
+ * mem_group - mem_group is always dumped.
+ * is_storm - true only if the memory is related to a Storm.
+ * storm_letter - storm letter (valid only if is_storm is true).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u8 num_params = 3;
+ u32 offset = 0;
+ char buf[64];
+
+ if (!dword_len)
+ DP_NOTICE(p_hwfn,
+ "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+ if (bit_width)
+ num_params++;
+ if (packed)
+ num_params++;
+
+ /* Dump section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_mem", num_params);
+ if (name) {
+ /* Dump name */
+ if (is_storm) {
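+			/* Overwrite the '?' placeholder with the Storm
+			 * letter, e.g. 'T' yields "TSTORM_<name>".
+			 */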
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), name);
+ } else {
+ strcpy(buf, name);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "name", buf);
+ if (dump)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from %s...\n",
+ dword_len, buf);
+ } else {
+ /* Dump address */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "addr", byte_addr);
+ if (dump && dword_len > 64)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumping %d registers from address 0x%x...\n",
+ dword_len, byte_addr);
+ }
+
+ /* Dump len */
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+
+ /* Dump bit width */
+ if (bit_width)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "width", bit_width);
+
+ /* Dump packed */
+ if (packed)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "packed", 1);
+
+ /* Dump reg type */
+ if (is_storm) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), mem_group);
+ } else {
+ strcpy(buf, mem_group);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+ return offset;
+}
+
+/* Dumps a single GRC memory. If name is NULL, the memory is dumped by address.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 byte_addr,
+ u32 dword_len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group,
+ bool is_storm, char storm_letter)
+{
+ u32 offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ byte_addr,
+ dword_len,
+ bit_width,
+ packed,
+ mem_group, is_storm, storm_letter);
+ if (dump) {
+ u32 i;
+
+ for (i = 0; i < dword_len;
+ i++, byte_addr += BYTES_IN_DWORD, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+ } else {
+ offset += dword_len;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct dbg_array input_mems_arr,
+ u32 *dump_buf, bool dump)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ while (input_offset < input_mems_arr.size_in_dwords) {
+ const struct dbg_dump_cond_hdr *cond_hdr;
+ u32 num_entries;
+ bool eval_mode;
+
+ cond_hdr = (const struct dbg_dump_cond_hdr *)
+ &input_mems_arr.ptr[input_offset++];
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ /* Check required mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match) {
+ input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
+ for (i = 0; i < num_entries;
+ i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
+ const struct dbg_dump_mem *mem =
+ (const struct dbg_dump_mem *)
+ &input_mems_arr.ptr[input_offset];
+ u8 mem_group_id;
+
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ if (mem_group_id >= MEM_GROUPS_NUM) {
+ DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
+ return 0;
+ }
+
+ if (qed_grc_is_mem_included(p_hwfn,
+ (enum block_id)cond_hdr->block_id,
+ mem_group_id)) {
+ u32 mem_byte_addr =
+ DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_ADDRESS));
+ u32 mem_len = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_LENGTH);
+ char storm_letter = 'a';
+ bool is_storm = false;
+
+ /* Update memory length for CCFC/TCFC memories
+ * according to number of LCIDs/LTIDs.
+ */
+ if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS)
+ * (mem_len / MAX_LCIDS);
+ else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+ mem_len = qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS)
+ * (mem_len / MAX_LTIDS);
+
+ /* If memory is associated with Storm, update
+ * Storm details.
+ */
+ if (s_block_defs[cond_hdr->block_id]->
+ associated_to_storm) {
+ is_storm = true;
+ storm_letter =
+ s_storm_defs[s_block_defs[
+ cond_hdr->block_id]->
+ storm_id].letter;
+ }
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn, p_ptt,
+ dump_buf + offset, dump, NULL,
+ mem_byte_addr, mem_len, 0,
+ false,
+ s_mem_group_names[mem_group_id],
+ is_storm, storm_letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories according to the input array dump_mem.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0;
+
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
+ const struct dbg_dump_split_hdr *split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
+ u8 split_type_id = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ u32 split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ struct dbg_array curr_input_mems_arr = {
+ &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset],
+ split_data_size};
+
+ switch (split_type_id) {
+ case SPLIT_TYPE_NONE:
+ offset += qed_grc_dump_mem_entries(p_hwfn,
+ p_ptt,
+ curr_input_mems_arr,
+ dump_buf + offset,
+ dump);
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Dumping split memories is currently not supported\n");
+ break;
+ }
+
+ input_offset += split_data_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC context data for the specified Storm.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 num_lids,
+ u32 lid_size,
+ u32 rd_reg_addr,
+ u8 storm_id)
+{
+ u32 i, lid, total_size;
+ u32 offset = 0;
+
+ if (!lid_size)
+ return 0;
+ lid_size *= BYTES_IN_DWORD;
+ total_size = num_lids * lid_size;
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ 0,
+ total_size,
+ lid_size * 32,
+ false,
+ name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Dump context data */
+ if (dump) {
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++, offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].cm_ctx_wr_addr,
+ BIT(9) | lid);
+ *(dump_buf + offset) = qed_rd(p_hwfn,
+ p_ptt,
+ rd_reg_addr);
+ }
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC contexts. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ /* Dump Conn AG context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_ag_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Conn ST context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS),
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_conn_st_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Task AG context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_ag_ctx_rd_addr,
+ storm_id);
+
+ /* Dump Task ST context size */
+ offset +=
+ qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS),
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_lid_size,
+ s_storm_defs[storm_id].
+ cm_task_st_ctx_rd_addr,
+ storm_id);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC IORs data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ char buf[10] = "IOR_SET_?";
+ u8 storm_id, set_id;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id)) {
+ for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+ u32 addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STORM_REG_FILE +
+ DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+
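+				/* Replace the '?' placeholder in "IOR_SET_?"
+				 * with the set index, e.g. "IOR_SET_0".
+				 */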
+ buf[strlen(buf) - 1] = '0' + set_id;
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ buf,
+ addr,
+ IORS_PER_SET,
+ 32,
+ false,
+ "ior",
+ true,
+						   s_storm_defs[storm_id].letter);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dump VFC CAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 storm_id)
+{
+ u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
+ u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+	u32 row;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ "vfc_cam",
+ 0,
+ total_size,
+ 256,
+ false,
+ "vfc_cam",
+ true, s_storm_defs[storm_id].letter);
+ if (dump) {
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+ for (row = 0; row < VFC_CAM_NUM_ROWS;
+ row++, offset += VFC_CAM_RESP_DWORDS) {
+ /* Write VFC CAM command */
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ cam_cmd, VFC_CAM_CMD_DWORDS);
+
+ /* Write VFC CAM address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ cam_addr, VFC_CAM_ADDR_DWORDS);
+
+ /* Read VFC CAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_CAM_RESP_DWORDS);
+ }
+ } else {
+ offset += total_size;
+ }
+
+ return offset;
+}
+
+/* Dump VFC RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 storm_id, struct vfc_ram_defs *ram_defs)
+{
+ u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
+ u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
+ u32 offset = 0;
+	u32 row;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ ram_defs->mem_name,
+ 0,
+ total_size,
+ 256,
+ false,
+ ram_defs->type_name,
+ true, s_storm_defs[storm_id].letter);
+
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ if (!dump)
+ return offset + total_size;
+
+ for (row = ram_defs->base_row;
+ row < ram_defs->base_row + ram_defs->num_rows;
+ row++, offset += VFC_RAM_RESP_DWORDS) {
+ /* Write VFC RAM command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_WR,
+ ram_cmd, VFC_RAM_CMD_DWORDS);
+
+ /* Write VFC RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_ADDR,
+ ram_addr, VFC_RAM_ADDR_DWORDS);
+
+ /* Read VFC RAM read response */
+ ARR_REG_RD(p_hwfn,
+ p_ptt,
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_VFC_DATA_RD,
+ dump_buf + offset, VFC_RAM_RESP_DWORDS);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC VFC data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 storm_id, i;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) &&
+ s_storm_defs[storm_id].has_vfc &&
+ (storm_id != DBG_PSTORM_ID ||
+ dev_data->platform_id == PLATFORM_ASIC)) {
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ storm_id,
+						       &s_vfc_ram_defs[i]);
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC RSS data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+ u8 rss_mem_id;
+
+ for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
+ struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
+ u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
+ u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
+ u32 total_size = (num_entries * entry_width) / 32;
+ bool packed = (entry_width == 16);
+ u32 addr = rss_defs->addr;
+ u32 i, j;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ rss_defs->mem_name,
+ addr,
+ total_size,
+ entry_width,
+ packed,
+ rss_defs->type_name, false, 0);
+
+ if (!dump) {
+ offset += total_size;
+ continue;
+ }
+
+ /* Dump RSS data */
+ for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
+ for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
+ *(dump_buf + offset) =
+ qed_rd(p_hwfn, p_ptt,
+ RSS_REG_RSS_RAM_DATA +
+ DWORDS_TO_BYTES(j));
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 big_ram_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char mem_name[12] = "???_BIG_RAM";
+ char type_name[8] = "???_RAM";
+ u32 ram_size, total_blocks;
+ u32 offset = 0, i, j;
+
+ total_blocks =
+ s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+ ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
+
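+	/* Overwrite the "???" placeholders in the preinitialized names with
+	 * the Big RAM instance name; the rest of the strings are kept.
+	 */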
+ strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
+ strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
+ strlen(s_big_ram_defs[big_ram_id].instance_name));
+
+ /* Dump memory header */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ ram_size,
+ BIG_RAM_BLOCK_SIZE_BYTES * 8,
+ false, type_name, false, 0);
+
+ if (!dump)
+ return offset + ram_size;
+
+ /* Read and dump Big RAM data */
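+	/* Each address write selects a pair of blocks, so total_blocks / 2
+	 * writes, each followed by 2 * BIG_RAM_BLOCK_SIZE_DWORDS data reads,
+	 * cover the entire RAM.
+	 */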
+ for (i = 0; i < total_blocks / 2; i++) {
+ qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
+ i);
+ for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
+ *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
+ s_big_ram_defs[big_ram_id].
+ data_reg_addr +
+ DWORDS_TO_BYTES(j));
+ }
+
+ return offset;
+}
+
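+/* Dumps the MCP scratchpad, cpu_reg_file and MCP registers, halting the MCP
+ * while reading. Returns the dumped size in dwords.
+ */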
+static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ bool block_enable[MAX_BLOCK_ID] = { 0 };
+ bool halted = false;
+ u32 offset = 0;
+
+ /* Halt MCP */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Dump MCP scratchpad */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_SCRATCH,
+ MCP_REG_SCRATCH_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP cpu_reg_file */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ MCP_REG_CPU_REG_FILE,
+ MCP_REG_CPU_REG_FILE_SIZE,
+ 0, false, "MCP", false, 0);
+
+ /* Dump MCP registers */
+ block_enable[BLOCK_MCP] = true;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, block_enable, "block", "MCP");
+
+ /* Dump required non-MCP registers */
+ offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+ dump, 1, "eng", -1, "block", "MCP");
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+					 BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR), 1);
+
+ /* Release MCP */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+ return offset;
+}
+
+/* Dumps the tbus indirect memory for all PHYs. */
+static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
+ char mem_name[32];
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
+ struct phy_defs *phy_defs = &s_phy_defs[phy_id];
+ int printed_chars;
+
+ printed_chars = snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name);
+ if (printed_chars < 0 || printed_chars >= sizeof(mem_name))
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid PHY memory name\n");
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ PHY_DUMP_SIZE_DWORDS,
+ 16, true, mem_name, false, 0);
+ if (dump) {
+ u32 addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ u32 addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ u32 data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ u32 data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+ u8 *bytes_buf = (u8 *)(dump_buf + offset);
+
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt,
+ addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) =
+ (u8)qed_rd(p_hwfn, p_ptt,
+ data_hi_addr);
+ }
+ }
+ }
+
+ offset += PHY_DUMP_SIZE_DWORDS;
+ }
+
+ return offset;
+}
+
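+/* Configures a single Debug Bus line for the specified block. */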
+static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 cycle_en,
+ u8 right_shift, u8 force_valid, u8 force_frame)
+{
+ struct block_defs *p_block_defs = s_block_defs[block_id];
+
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_select_addr, line_id);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_cycle_enable_addr, cycle_en);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_shift_addr, right_shift);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_valid_addr, force_valid);
+ qed_wr(p_hwfn, p_ptt, p_block_defs->dbg_force_frame_addr, force_frame);
+}
+
+/* Dumps Static Debug data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, block_id, line_id, addr, i;
+ struct block_defs *p_block_defs;
+
+ if (dump) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG, "Dumping static debug data...\n");
+
+ /* Disable all blocks debug output */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (p_block_defs->has_dbg_bus[dev_data->chip_id])
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ }
+
+ qed_bus_reset_dbg_block(p_hwfn, p_ptt);
+ qed_bus_set_framing_mode(p_hwfn,
+ p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
+ }
+
+ /* Dump all static debug lines for each relevant block */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ p_block_defs = s_block_defs[block_id];
+
+ if (!p_block_defs->has_dbg_bus[dev_data->chip_id])
+ continue;
+
+ /* Dump static section params */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ p_block_defs->name, 0,
+ block_dwords, 32, false,
+ "STATIC", false, 0);
+
+ if (dump && !dev_data->block_in_reset[block_id]) {
+ u8 dbg_client_id =
+ p_block_defs->dbg_client_id[dev_data->chip_id];
+
+ /* Enable block's client */
+ qed_bus_enable_clients(p_hwfn, p_ptt,
+ BIT(dbg_client_id));
+
+ for (line_id = 0; line_id < NUM_DBG_BUS_LINES;
+ line_id++) {
+ /* Configure debug line ID */
+ qed_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id,
+ 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
+ i < STATIC_DEBUG_LINE_DWORDS;
+ i++, offset++, addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
+ addr);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_wr(p_hwfn, p_ptt,
+ p_block_defs->dbg_cycle_enable_addr, 0);
+ } else {
+ /* All lines are invalid - dump zeros */
+ if (dump)
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
+ offset += block_dwords;
+ }
+ }
+
+ if (dump) {
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ }
+
+ return offset;
+}
+
+/* Performs GRC Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ bool parities_masked = false;
+ u8 i, port_mode = 0;
+ u32 offset = 0;
+
+	/* Reset the number of dumped dwords */
+	*num_dumped_dwords = 0;
+
+ /* Fill GRC parameters that were not set by the user with their default
+ * value.
+ */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ /* Find port mode */
+ if (dump) {
+ switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
+ case 0:
+ port_mode = 1;
+ break;
+ case 1:
+ port_mode = 2;
+ break;
+ case 2:
+ port_mode = 4;
+ break;
+ }
+ }
+
+ /* Update reset state */
+ if (dump)
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 4);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "grc-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-lcids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LCIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-ltids",
+ qed_grc_get_param(p_hwfn,
+ DBG_GRC_PARAM_NUM_LTIDS));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "num-ports", port_mode);
+
+	/* Dump reset registers (dumped before taking blocks out of reset) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_reset_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Take all blocks out of reset (using reset registers) */
+ if (dump) {
+ qed_grc_unreset_blocks(p_hwfn, p_ptt);
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
+
+ /* Disable all parities using MFW command */
+ if (dump) {
+ parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
+ if (!parities_masked) {
+			if (qed_grc_get_param(p_hwfn,
+					      DBG_GRC_PARAM_PARITY_SAFE))
+ return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
+ }
+ }
+
+ /* Dump modified registers (dumped before modifying them) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_modified_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Stall storms */
+ if (dump &&
+ (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_IOR) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
+ qed_grc_stall_storms(p_hwfn, p_ptt, true);
+
+ /* Dump all regs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
+ /* Dump all blocks except MCP */
+ bool block_enable[MAX_BLOCK_ID];
+
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ block_enable[i] = true;
+ block_enable[BLOCK_MCP] = false;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ block_enable, NULL, NULL);
+ }
+
+ /* Dump memories */
+ offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
+
+ /* Dump MCP */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
+ offset += qed_grc_dump_mcp(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump context */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
+ offset += qed_grc_dump_ctx(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump RSS memories */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
+ offset += qed_grc_dump_rss(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump Big RAM */
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
+ offset += qed_grc_dump_big_ram(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, i);
+
+ /* Dump IORs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
+ offset += qed_grc_dump_iors(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump VFC */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
+ offset += qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump PHY tbus */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
+ CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
+ offset += qed_grc_dump_phy(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump static debug data */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_STATIC) &&
+ dev_data->bus.state == DBG_BUS_STATE_IDLE)
+ offset += qed_grc_dump_static_debug(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+ if (dump) {
+ /* Unstall storms */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
+ qed_grc_stall_storms(p_hwfn, p_ptt, false);
+
+ /* Clear parity status */
+ qed_grc_clear_all_prty(p_hwfn, p_ptt);
+
+ /* Enable all parities using MFW command */
+ if (parities_masked)
+ qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
+ }
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Writes the specified failing Idle Check rule to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+				     u32 *dump_buf,
+ bool dump,
+ u16 rule_id,
+ const struct dbg_idle_chk_rule *rule,
+ u16 fail_entry_id, u32 *cond_reg_values)
+{
+	const union dbg_idle_chk_reg *regs =
+		&((const union dbg_idle_chk_reg *)
+		  s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
+ const struct dbg_idle_chk_cond_reg *cond_regs = &regs[0].cond_reg;
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct dbg_idle_chk_result_hdr *hdr =
+ (struct dbg_idle_chk_result_hdr *)dump_buf;
+ const struct dbg_idle_chk_info_reg *info_regs =
+ &regs[rule->num_cond_regs].info_reg;
+ u32 next_reg_offset = 0, i, offset = 0;
+ u8 reg_id;
+
+ /* Dump rule data */
+ if (dump) {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->rule_id = rule_id;
+ hdr->mem_entry_id = fail_entry_id;
+ hdr->severity = rule->severity;
+ hdr->num_dumped_cond_regs = rule->num_cond_regs;
+ }
+
+ offset += IDLE_CHK_RESULT_HDR_DWORDS;
+
+ /* Dump condition register values */
+ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+
+ /* Write register header */
+ if (dump) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf
+ + offset);
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0,
+ sizeof(struct dbg_idle_chk_result_reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0
+ ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size;
+ i++, next_reg_offset++, offset++)
+ dump_buf[offset] =
+ cond_reg_values[next_reg_offset];
+ } else {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
+ reg->entry_size;
+ }
+ }
+
+ /* Dump info register values */
+ for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
+ const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
+ u32 block_id;
+
+ if (!dump) {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
+ continue;
+ }
+
+ /* Check if register's block is in reset */
+ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ if (!dev_data->block_in_reset[block_id]) {
+ bool eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match =
+ qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 grc_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS));
+
+ /* Write register header */
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg->size;
+ i++, offset++, grc_addr += 4)
+ dump_buf[offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps idle check rule entries. Returns the dumped size in dwords. */
+static u32
+qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump,
+ const struct dbg_idle_chk_rule *input_rules,
+ u32 num_input_rules, u32 *num_failing_rules)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
+ u32 i, j, offset = 0;
+ u16 entry_id;
+ u8 reg_id;
+
+ *num_failing_rules = 0;
+ for (i = 0; i < num_input_rules; i++) {
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_rule *rule;
+ const union dbg_idle_chk_reg *regs;
+ u16 num_reg_entries = 1;
+ bool check_rule = true;
+ const u32 *imm_values;
+
+ rule = &input_rules[i];
+ regs = &((const union dbg_idle_chk_reg *)
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
+ [rule->reg_offset];
+ cond_regs = &regs[0].cond_reg;
+ imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
+ [rule->imm_offset];
+
+ /* Check if all condition register blocks are out of reset, and
+ * find maximal number of entries (all condition registers that
+ * are memories must have the same size, which is > 1).
+ */
+ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
+ reg_id++) {
+ u32 block_id = GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ check_rule = !dev_data->block_in_reset[block_id];
+ if (cond_regs[reg_id].num_entries > num_reg_entries)
+ num_reg_entries = cond_regs[reg_id].num_entries;
+ }
+
+ if (!check_rule && dump)
+ continue;
+
+ /* Go over all register entries (number of entries is the same
+ * for all condition registers).
+ */
+ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
+ /* Read current entry of all condition registers */
+ if (dump) {
+ u32 next_reg_offset = 0;
+
+ for (reg_id = 0;
+ reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg
+ *reg = &cond_regs[reg_id];
+
+ /* Find GRC address (if it's a memory,
+ * the address of the specific entry is
+ * calculated).
+ */
+ u32 grc_addr =
+ DWORDS_TO_BYTES(
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS));
+
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ u32 padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two
+ (reg->entry_size) : 1;
+
+ grc_addr +=
+ DWORDS_TO_BYTES(
+ (reg->start_entry +
+ entry_id)
+ * padded_entry_size);
+ }
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ for (j = 0; j < reg->entry_size;
+ j++, next_reg_offset++,
+ grc_addr += 4)
+ cond_reg_values[next_reg_offset] =
+ qed_rd(p_hwfn, p_ptt, grc_addr);
+ }
+ }
+
+ /* Call rule's condition function - a return value of
+ * true indicates failure.
+ */
+ if ((*cond_arr[rule->cond_id])(cond_reg_values,
+ imm_values) || !dump) {
+ offset +=
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
+ (*num_failing_rules)++;
+ break;
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Performs Idle Check Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, input_offset = 0, num_failing_rules = 0;
+ u32 num_failing_rules_offset;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "idle-chk");
+
+ /* Dump idle check section header with a single parameter */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
+ num_failing_rules_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+ while (input_offset <
+ s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
+ const struct dbg_idle_chk_cond_hdr *cond_hdr =
+ (const struct dbg_idle_chk_cond_hdr *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
+ [input_offset++];
+ bool eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ bool mode_match = true;
+
+ /* Check mode */
+ if (eval_mode) {
+ u16 modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ u32 curr_failing_rules;
+
+ offset +=
+ qed_idle_chk_dump_rule_entries(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ (const struct dbg_idle_chk_rule *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
+ ptr[input_offset],
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
+ &curr_failing_rules);
+ num_failing_rules += curr_failing_rules;
+ }
+
+ input_offset += cond_hdr->data_size;
+ }
+
+ /* Overwrite num_rules parameter */
+ if (dump)
+ qed_dump_num_param(dump_buf + num_failing_rules_offset,
+ dump, "num_rules", num_failing_rules);
+
+ return offset;
+}
+
+/* Finds the meta data image in NVRAM. */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+
+ /* Call NVRAM get file command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type, &ret_mcp_resp, &ret_mcp_param,
+ &ret_txn_size, (u32 *)&file_att) != 0)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size;
+ u32 bytes_to_copy, read_offset = 0;
+ s32 bytes_left = nvram_size_bytes;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ (nvram_offset_bytes +
+ read_offset) |
+ (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_SHIFT),
+ &ret_mcp_resp, &ret_mcp_param,
+ &ret_read_size,
+ (u32 *)((u8 *)ret_buf +
+ read_offset)) != 0)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
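+
+/* Note that qed_nvram_read() packs both the byte offset and the chunk length
+ * into a single mailbox parameter: the offset occupies the low bits and the
+ * length is shifted by DRV_MB_PARAM_NVM_LEN_SHIFT, i.e. for each chunk:
+ *
+ *	param = (nvram_offset_bytes + read_offset) |
+ *		(bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
+ */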
+
+/* Get info on the MCP Trace data in the scratchpad:
+ * - trace_data_grc_addr - the GRC address of the trace data
+ * - trace_data_size_bytes - the size in bytes of the MCP Trace data (without
+ * the header)
+ */
+static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *trace_data_grc_addr,
+ u32 *trace_data_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+ u32 signature;
+
+ /* Extract MCP trace section GRC address from offsize structure (within
+ * scratchpad).
+ */
+ *trace_data_grc_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
+
+ /* Read signature from MCP trace section */
+ signature = qed_rd(p_hwfn, p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, signature));
+ if (signature != MFW_TRACE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read trace size from MCP trace section */
+ *trace_data_size_bytes = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+ return DBG_STATUS_OK;
+}
+
+/* Reads MCP trace meta data image from NVRAM.
+ * - running_bundle_id (OUT) - the running bundle ID (invalid when loaded from
+ * file)
+ * - trace_meta_offset_bytes (OUT) - the NVRAM offset in bytes in which the MCP
+ * Trace meta data starts (invalid when loaded from file)
+ * - trace_meta_size_bytes (OUT) - the size in bytes of the MCP Trace meta data
+ */
+static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 trace_data_size_bytes,
+ u32 *running_bundle_id,
+ u32 *trace_meta_offset_bytes,
+ u32 *trace_meta_size_bytes)
+{
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ u32 spad_trace_offsize = qed_rd(p_hwfn,
+ p_ptt,
+ MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Find running bundle ID */
+ u32 running_mfw_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
+ QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
+ enum dbg_status status;
+ u32 nvram_image_type;
+
+ *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
+ if (*running_bundle_id > 1)
+ return DBG_STATUS_INVALID_NVRAM_BUNDLE;
+
+ /* Find image in NVRAM */
+ nvram_image_type =
+ (*running_bundle_id ==
+ DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes);
+
+ return status;
+}
+
+/* Reads the MCP Trace data from the specified GRC address into the specified
+ * buffer.
+ */
+static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr, u32 size_in_dwords, u32 *buf)
+{
+ u32 i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
+ size_in_dwords, grc_addr);
+ for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
+ buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+}
+
+/* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
+ * buffer.
+ */
+static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_in_bytes,
+ u32 size_in_bytes, u32 *buf)
+{
+ u8 *byte_buf = (u8 *)buf;
+ u8 modules_num, i;
+ u32 signature;
+
+ /* Read meta data from NVRAM */
+ enum dbg_status status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes,
+ size_in_bytes,
+ buf);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Extract and check first signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Extract number of modules */
+ modules_num = *(byte_buf++);
+
+ /* Skip all modules */
+ for (i = 0; i < modules_num; i++) {
+ u8 module_len = *(byte_buf++);
+
+ byte_buf += module_len;
+ }
+
+ /* Extract and check second signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(u32);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+ return DBG_STATUS_OK;
+}
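+
+/* For reference, the meta data image that qed_mcp_trace_read_meta() walks is
+ * laid out (as reconstructed from the parsing above) as:
+ *
+ *	u32 signature;                 MCP_TRACE_META_IMAGE_SIGNATURE
+ *	u8  modules_num;
+ *	{ u8 len; char name[len]; }    repeated modules_num times
+ *	u32 signature;                 MCP_TRACE_META_IMAGE_SIGNATURE again
+ *	...                            format entries follow
+ */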
+
+/* Dump MCP Trace */
+enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
+ u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes, trace_meta_size_bytes;
+ enum dbg_status status;
+ int halted = 0;
+
+ *num_dumped_dwords = 0;
+
+ /* Get trace data info */
+ status = qed_mcp_trace_get_data_info(p_hwfn,
+ p_ptt,
+ &trace_data_grc_addr,
+ &trace_data_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "mcp-trace");
+
+ /* Halt MCP while reading from scratchpad so the read data will be
+ * consistent. If the halt fails, the MCP trace is taken anyway, with a
+ * small risk that it may be corrupt.
+ */
+ if (dump) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Find trace data size */
+ trace_data_size_dwords =
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
+
+ /* Dump trace data section header and param */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_data", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_data_size_dwords);
+
+ /* Read trace data from scratchpad into dump buffer */
+ if (dump)
+ qed_mcp_trace_read_data(p_hwfn,
+ p_ptt,
+ trace_data_grc_addr,
+ trace_data_size_dwords,
+ dump_buf + offset);
+ offset += trace_data_size_dwords;
+
+ /* Resume MCP (only if halt succeeded) */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+ /* Dump trace meta section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_meta", 1);
+
+ /* Read trace meta info */
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump trace meta size param (trace_meta_size_bytes is always
+ * dword-aligned).
+ */
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ trace_meta_size_dwords);
+
+ /* Read trace meta image into dump buffer */
+ if (dump) {
+ status = qed_mcp_trace_read_meta(p_hwfn,
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status != DBG_STATUS_OK)
+ return status;
+ }
+
+ offset += trace_meta_size_dwords;
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Dump GRC FIFO */
+enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "reg-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "reg_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += REG_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+
+ /* Pull available data from fifo. Use DMAE since this is widebus memory
+ * and must be accessed atomically. Bound dwords_read by the FIFO depth,
+ * since more entries could be added to the FIFO while we are emptying
+ * it, which would otherwise overrun the dump buffer.
+ */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
+ REG_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ REG_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Dump IGU FIFO */
+enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, dwords_read, size_param_offset;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "igu-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "igu_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += IGU_FIFO_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+
+ /* Pull available data from fifo. Use DMAE since this is widebus memory
+ * and must be accessed atomically. Bound dwords_read by the FIFO depth,
+ * since more entries could be added to the FIFO while we are emptying
+ * it, which would otherwise overrun the dump buffer.
+ */
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
+ IGU_FIFO_ELEMENT_DWORDS) {
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_MEMORY,
+ (u64)(uintptr_t)(&dump_buf[offset]),
+ IGU_FIFO_ELEMENT_DWORDS, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Protection Override dump */
+enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 offset = 0, size_param_offset, override_window_dwords;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "protection-override");
+
+ /* Dump data section header and param. The size param is 0 for now, and
+ * is overwritten after reading the data.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "protection_override_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+ }
+
+ /* Add override window info to buffer */
+ override_window_dwords =
+ qed_rd(p_hwfn, p_ptt,
+ GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ if (qed_dmae_grc2host(p_hwfn, p_ptt,
+ GRC_REG_PROTECTION_OVERRIDE_WINDOW,
+ (u64)(uintptr_t)(dump_buf + offset),
+ override_window_dwords, 0))
+ return DBG_STATUS_DMAE_FAILED;
+ offset += override_window_dwords;
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+
+ *num_dumped_dwords = offset;
+ return DBG_STATUS_OK;
+}
+
+/* Performs FW Asserts Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char storm_letter_str[2] = "?";
+ struct fw_info fw_info;
+ u32 offset = 0, i;
+ u8 storm_id;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "fw-asserts");
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
+ last_list_idx, element_addr;
+
+ if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ /* Dump FW Asserts section header and params */
+ storm_letter_str[0] = s_storm_defs[storm_id].letter;
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
+ storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size",
+ fw_info.fw_asserts_section.
+ list_element_dword_size);
+
+ if (!dump) {
+ offset += fw_info.fw_asserts_section.
+ list_element_dword_size;
+ continue;
+ }
+
+ /* Read and dump FW Asserts data */
+ fw_asserts_section_addr =
+ s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
+ section_ram_line_offset);
+ next_list_idx_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_next_index_dword_offset);
+ next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
+ last_list_idx = (next_list_idx > 0
+ ? next_list_idx
+ : fw_info.fw_asserts_section.list_num_elements)
+ - 1;
+ element_addr =
+ fw_asserts_section_addr +
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_dword_offset) +
+ last_list_idx *
+ DWORDS_TO_BYTES(fw_info.fw_asserts_section.
+ list_element_dword_size);
+ for (i = 0;
+ i < fw_info.fw_asserts_section.list_element_dword_size;
+ i++, offset++, element_addr += BYTES_IN_DWORD)
+ dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+ }
+
+ /* Dump last section */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+ return offset;
+}
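+
+/* Note on the FW asserts list walked above: it is cyclic, so the most recent
+ * element sits just before next_list_idx, wrapping around to
+ * list_num_elements - 1 when next_list_idx is 0.
+ */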
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
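+
+/* The binary image passed to qed_dbg_set_bin_ptr() is thus expected to start
+ * with a dword holding the number of buffers, followed by an array of struct
+ * bin_buffer_hdr entries whose offset/length fields locate each debug array
+ * within the image.
+ */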
+
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* GRC Dump */
+ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Clear all GRC params */
+ qed_dbg_grc_clear_params(p_hwfn);
+ return status;
+}
+
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+ if (!dev_data->idle_chk.buf_size_set) {
+ dev_data->idle_chk.buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt,
+ NULL, false);
+ dev_data->idle_chk.buf_size_set = true;
+ }
+
+ *buf_size = dev_data->idle_chk.buf_size;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Idle Check Dump */
+ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Perform dump */
+ return qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_protection_override_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf, true, num_dumped_dwords);
+}
+
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
+
+ *buf_size = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status = qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, p_ptt,
+ &needed_buf_size_in_dwords);
+
+ *num_dumped_dwords = 0;
+ if (status != DBG_STATUS_OK)
+ return status;
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+ return DBG_STATUS_OK;
+}
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_SHIFT 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_SHIFT 24
+ char *format_str;
+};
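+
+/* Fields of "data" are extracted with the usual mask/shift pair, e.g. the
+ * format string length:
+ *
+ *	len = (data & MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ */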
+
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+};
+
+/* Reg fifo element */
+struct reg_fifo_element {
+ u64 data;
+#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
+#define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
+#define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
+#define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
+#define REG_FIFO_ELEMENT_PF_SHIFT 24
+#define REG_FIFO_ELEMENT_PF_MASK 0xf
+#define REG_FIFO_ELEMENT_VF_SHIFT 28
+#define REG_FIFO_ELEMENT_VF_MASK 0xff
+#define REG_FIFO_ELEMENT_PORT_SHIFT 36
+#define REG_FIFO_ELEMENT_PORT_MASK 0x3
+#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
+#define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
+#define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
+#define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
+#define REG_FIFO_ELEMENT_MASTER_SHIFT 43
+#define REG_FIFO_ELEMENT_MASTER_MASK 0xf
+#define REG_FIFO_ELEMENT_ERROR_SHIFT 47
+#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
+};
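+
+/* Fields are read with GET_FIELD() on the 64-bit "data" word. Addresses are
+ * stored in dwords, so they are scaled when printed as byte addresses, e.g.:
+ *
+ *	addr = GET_FIELD(elem.data, REG_FIFO_ELEMENT_ADDRESS) *
+ *	       REG_FIFO_ELEMENT_ADDR_FACTOR;
+ */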
+
+/* IGU fifo element */
+struct igu_fifo_element {
+ u32 dword0;
+#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
+ u32 dword1;
+ u32 dword2;
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
+ u32 reserved;
+};
+
+struct igu_fifo_wr_data {
+ u32 data;
+#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
+#define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
+#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
+#define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
+#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
+#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
+#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+struct igu_fifo_cleanup_wr_data {
+ u32 data;
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+/* Protection override element */
+struct protection_override_element {
+ u64 data;
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
+#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
+#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
+};
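+
+/* Like the reg FIFO element, addresses here are presumably stored in dwords;
+ * PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR (defined below) suggests a scaling
+ * by 4 when they are printed as byte addresses.
+ */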
+
+enum igu_fifo_sources {
+ IGU_SRC_PXP0,
+ IGU_SRC_PXP1,
+ IGU_SRC_PXP2,
+ IGU_SRC_PXP3,
+ IGU_SRC_PXP4,
+ IGU_SRC_PXP5,
+ IGU_SRC_PXP6,
+ IGU_SRC_PXP7,
+ IGU_SRC_CAU,
+ IGU_SRC_ATTN,
+ IGU_SRC_GRC
+};
+
+enum igu_fifo_addr_types {
+ IGU_ADDR_TYPE_MSIX_MEM,
+ IGU_ADDR_TYPE_WRITE_PBA,
+ IGU_ADDR_TYPE_WRITE_INT_ACK,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS,
+ IGU_ADDR_TYPE_READ_INT,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
+ IGU_ADDR_TYPE_RESERVED
+};
+
+struct igu_fifo_addr_data {
+ u16 start_addr;
+ u16 end_addr;
+ char *desc;
+ char *vf_desc;
+ enum igu_fifo_addr_types type;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_MSG_LEN 1024
+#define MCP_TRACE_MAX_MODULE_LEN 8
+#define MCP_TRACE_FORMAT_MAX_PARAMS 3
+#define MCP_TRACE_FORMAT_PARAM_WIDTH \
+ (MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
+#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
+#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
+
+/********************************* Macros ************************************/
+
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+
+/***************************** Constant Arrays *******************************/
+
+/* Status string array */
+static const char * const s_status_str[] = {
+ "Operation completed successfully",
+ "Debug application version wasn't set",
+ "Unsupported debug application version",
+ "The debug block wasn't reset since the last recording",
+ "Invalid arguments",
+ "The debug output was already set",
+ "Invalid PCI buffer size",
+ "PCI buffer allocation failed",
+ "A PCI buffer wasn't allocated",
+ "Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
+ "GRC/Timestamp input overlap in cycle dword 0",
+ "Cannot record Storm data since the entire recording cycle is used by HW",
+ "The Storm was already enabled",
+ "The specified Storm wasn't enabled",
+ "The block was already enabled",
+ "The specified block wasn't enabled",
+ "No input was enabled for recording",
+ "Filters and triggers are not allowed when recording in 64b units",
+ "The filter was already enabled",
+ "The trigger was already enabled",
+ "The trigger wasn't enabled",
+ "A constraint can be added only after a filter was enabled or a trigger state was added",
+ "Cannot add more than 3 trigger states",
+ "Cannot add more than 4 constraints per filter or trigger state",
+ "The recording wasn't started",
+ "A trigger was configured, but it didn't trigger",
+ "No data was recorded",
+ "Dump buffer is too small",
+ "Dumped data is not aligned to chunks",
+ "Unknown chip",
+ "Failed allocating virtual memory",
+ "The input block is in reset",
+ "Invalid MCP trace signature found in NVRAM",
+ "Invalid bundle ID found in NVRAM",
+ "Failed getting NVRAM image",
+ "NVRAM image is not dword-aligned",
+ "Failed reading from NVRAM",
+ "Idle check parsing failed",
+ "MCP Trace data is corrupt",
+ "Dump doesn't contain meta data - it must be provided in an image file",
+ "Failed to halt MCP",
+ "Failed to resume MCP after halt",
+ "DMAE transaction failed",
+ "Failed to empty SEMI sync FIFO",
+ "IGU FIFO data is corrupt",
+ "MCP failed to mask parities",
+ "FW Asserts parsing failed",
+ "GRC FIFO data is corrupt",
+ "Protection Override data is corrupt",
+ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
+ "When a block is filtered, no other blocks can be recorded unless inputs are unified (due to a HW bug)"
+};
+
+/* Idle check severity names array */
+static const char * const s_idle_chk_severity_str[] = {
+ "Error",
+ "Error if no traffic",
+ "Warning"
+};
+
+/* MCP Trace level names array */
+static const char * const s_mcp_trace_level_str[] = {
+ "ERROR",
+ "TRACE",
+ "DEBUG"
+};
+
+/* Parsing strings */
+static const char * const s_access_strs[] = {
+ "read",
+ "write"
+};
+
+static const char * const s_privilege_strs[] = {
+ "VF",
+ "PDA",
+ "HV",
+ "UA"
+};
+
+static const char * const s_protection_strs[] = {
+ "(default)",
+ "(default)",
+ "(default)",
+ "(default)",
+ "override VF",
+ "override PDA",
+ "override HV",
+ "override UA"
+};
+
+static const char * const s_master_strs[] = {
+ "???",
+ "pxp",
+ "mcp",
+ "msdm",
+ "psdm",
+ "ysdm",
+ "usdm",
+ "tsdm",
+ "xsdm",
+ "dbu",
+ "dmae",
+ "???",
+ "???",
+ "???",
+ "???",
+ "???"
+};
+
+static const char * const s_reg_fifo_error_strs[] = {
+ "grc timeout",
+ "address doesn't belong to any block",
+ "reserved address in block or write to read-only address",
+ "privilege/protection mismatch",
+ "path isolation error"
+};
+
+static const char * const s_igu_fifo_source_strs[] = {
+ "TSTORM",
+ "MSTORM",
+ "USTORM",
+ "XSTORM",
+ "YSTORM",
+ "PSTORM",
+ "PCIE",
+ "NIG_QM_PBF",
+ "CAU",
+ "ATTN",
+ "GRC",
+};
+
+static const char * const s_igu_fifo_error_strs[] = {
+ "no error",
+ "length error",
+ "function disabled",
+ "VF sent command to attnetion address",
+ "host sent prod update command",
+ "read of during interrupt register while in MIMD mode",
+ "access to PXP BAR reserved address",
+ "producer update command to attention index",
+ "unknown error",
+ "SB index not valid",
+ "SB relative index and FID not found",
+ "FID not match",
+ "command with error flag asserted (PCI error or CAU discard)",
+ "VF sent cleanup and RF cleanup is disabled",
+ "cleanup command on type bigger than 4"
+};
+
+/* IGU FIFO address data */
+static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
+ {0x0, 0x101, "MSI-X Memory", NULL, IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL, IGU_ADDR_TYPE_WRITE_PBA},
+ {0x201, 0x201, "Write PBA[64:127]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x202, 0x202, "Write PBA[128]", "reserved", IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
+ IGU_ADDR_TYPE_WRITE_INT_ACK},
+ {0x5f0, 0x5f0, "Attention bits update", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f1, 0x5f1, "Attention bits set", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f2, 0x5f2, "Attention bits clear", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f7, 0x5ff, "reserved", NULL, IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL, IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+};
+
+/******************************** Variables **********************************/
+
+/* MCP Trace meta data - used in case the dump doesn't contain the meta data
+ * (e.g. due to no NVRAM access).
+ */
+static struct dbg_array s_mcp_trace_meta = { NULL, 0 };
+
+/* Temporary buffer, used for print size calculations */
+static char s_temp_buf[MAX_MSG_LEN];
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
+{
+ /* Convert binary data to debug arrays */
+ u32 num_of_buffers = *(u32 *)bin_ptr;
+ struct bin_buffer_hdr *buf_array;
+ u8 buf_id;
+
+ buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
+
+ for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+ s_dbg_arrays[buf_id].ptr =
+ (u32 *)(bin_ptr + buf_array[buf_id].offset);
+ s_dbg_arrays[buf_id].size_in_dwords =
+ BYTES_TO_DWORDS(buf_array[buf_id].length);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
+{
+ return (a + b) % size;
+}
+
+static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
+{
+ return (size + a - b) % size;
+}
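+
+/* For example, with a cyclic buffer of size 8: qed_cyclic_add(6, 3, 8) == 1
+ * and qed_cyclic_sub(1, 3, 8) == 6.
+ */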
+
+/* Reads the specified number of bytes from the specified cyclic buffer (up to 4
+ * bytes) and returns them as a dword value. The specified buffer offset is
+ * updated.
+ */
+static u32 qed_read_from_cyclic_buf(void *buf,
+ u32 *offset,
+ u32 buf_size, u8 num_bytes_to_read)
+{
+ u8 *bytes_buf = (u8 *)buf;
+ u8 *val_ptr;
+ u32 val = 0;
+ u8 i;
+
+ val_ptr = (u8 *)&val;
+
+ for (i = 0; i < num_bytes_to_read; i++) {
+ val_ptr[i] = bytes_buf[*offset];
+ *offset = qed_cyclic_add(*offset, 1, buf_size);
+ }
+
+ return val;
+}
+
+/* Reads and returns the next byte from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
+{
+ return ((u8 *)buf)[(*offset)++];
+}
+
+/* Reads and returns the next dword from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
+{
+ u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
+
+ *offset += 4;
+ return dword_val;
+}
+
+/* Reads the next string from the specified buffer, and copies it to the
+ * specified pointer. The specified buffer offset is updated.
+ */
+static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
+{
+ const char *source_str = &((const char *)buf)[*offset];
+
+ strncpy(dest, source_str, size);
+ dest[size - 1] = '\0';
+ *offset += size;
+}
+
+/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
+ * If the specified buffer is NULL, a temporary buffer pointer is returned.
+ */
+static char *qed_get_buf_ptr(void *buf, u32 offset)
+{
+ return buf ? (char *)buf + offset : s_temp_buf;
+}
+
+/* Reads a param from the specified buffer. Returns the number of dwords read.
+ * If the returned param_str_val is NULL, the param is numeric and its value
+ * is returned in param_num_val.
+ * Otherwise, the param is a string and its pointer is returned in
+ * param_str_val.
+ */
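+/* The serialized param layout, as reconstructed from the parsing below, is:
+ *
+ *	"name\0" <type byte == 0> <pad to dword> <u32 value>    numeric param
+ *	"name\0" <type byte != 0> "value\0" <pad to dword>      string param
+ */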
+static u32 qed_read_param(u32 *dump_buf,
+ const char **param_name,
+ const char **param_str_val, u32 *param_num_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0; /* In bytes */
+
+ /* Extract param name */
+ *param_name = char_buf;
+ offset += strlen(*param_name) + 1;
+
+ /* Check param type */
+ if (*(char_buf + offset++)) {
+ /* String param */
+ *param_str_val = char_buf + offset;
+ offset += strlen(*param_str_val) + 1;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ } else {
+ /* Numeric param */
+ *param_str_val = NULL;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ *param_num_val = *(u32 *)(char_buf + offset);
+ offset += 4;
+ }
+
+ return offset / 4;
+}
+
+/* Reads a section header from the specified buffer.
+ * Returns the number of dwords read.
+ */
+static u32 qed_read_section_hdr(u32 *dump_buf,
+ const char **section_name,
+ u32 *num_section_params)
+{
+ const char *param_str_val;
+
+ return qed_read_param(dump_buf,
+ section_name, &param_str_val, num_section_params);
+}
+
+/* Reads section params from the specified buffer and prints them to the results
+ * buffer. Returns the number of dwords read.
+ */
+static u32 qed_print_section_params(u32 *dump_buf,
+ u32 num_section_params,
+ char *results_buf, u32 *num_chars_printed)
+{
+ u32 i, dump_offset = 0, results_offset = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ const char *param_name;
+ const char *param_str_val;
+ u32 param_num_val = 0;
+
+ dump_offset += qed_read_param(dump_buf + dump_offset,
+ &param_name,
+ &param_str_val, &param_num_val);
+ if (param_str_val)
+ /* String param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %s\n", param_name, param_str_val);
+ else if (strcmp(param_name, "fw-timestamp"))
+ /* Numeric param */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %d\n", param_name, param_num_val);
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ *num_chars_printed = results_offset;
+ return dump_offset;
+}
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
+
+/* Parses the idle check rules and returns the number of characters printed.
+ * In case of parsing error, returns 0.
+ */
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 *dump_buf_end,
+ u32 num_rules,
+ bool print_fw_idle_chk,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 rule_idx, results_offset = 0; /* Offset in results_buf in bytes */
+ u16 i, j;
+
+ *num_errors = 0;
+ *num_warnings = 0;
+
+ /* Go over dumped results */
+ for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
+ rule_idx++) {
+ const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const char *parsing_str;
+ u32 parsing_str_offset;
+ const char *lsi_msg;
+ u8 curr_reg_id = 0;
+ bool has_fw_msg;
+
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ rule_parsing_data =
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
+ ptr[hdr->rule_id];
+ parsing_str_offset =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ has_fw_msg =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = &((const char *)
+ s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ [parsing_str_offset];
+ lsi_msg = parsing_str;
+
+ if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
+ return 0;
+
+ /* Skip rule header */
+ dump_buf += (sizeof(struct dbg_idle_chk_result_hdr) / 4);
+
+ /* Update errors/warnings count */
+ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
+ hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
+ (*num_errors)++;
+ else
+ (*num_warnings)++;
+
+ /* Print rule severity */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s: ",
+ s_idle_chk_severity_str[hdr->severity]);
+
+ /* Print rule message */
+ if (has_fw_msg)
+ parsing_str += strlen(parsing_str) + 1;
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s.",
+ has_fw_msg &&
+ print_fw_idle_chk ? parsing_str : lsi_msg);
+ parsing_str += strlen(parsing_str) + 1;
+
+ /* Print register values */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " Registers:");
+ for (i = 0;
+ i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
+ i++) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr
+ = (struct dbg_idle_chk_result_reg_hdr *)
+ dump_buf;
+ bool is_mem =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ u8 reg_id =
+ GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+
+ /* Skip reg header */
+ dump_buf +=
+ (sizeof(struct dbg_idle_chk_result_reg_hdr) / 4);
+
+ /* Skip register names until the required reg_id is
+ * reached.
+ */
+ for (; reg_id > curr_reg_id;
+ curr_reg_id++,
+ parsing_str += strlen(parsing_str) + 1);
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " %s",
+ parsing_str);
+ if (i < hdr->num_dumped_cond_regs && is_mem)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "[%d]", hdr->mem_entry_id +
+ reg_hdr->start_entry);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "=");
+ for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "0x%x", *dump_buf);
+ if (j < reg_hdr->size - 1)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ",");
+ }
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ /* Check if end of dump buffer was exceeded */
+ if (dump_buf > dump_buf_end)
+ return 0;
+ return results_offset;
+}
+
+/* Parses an idle check dump buffer.
+ * If results_buf is not NULL, the idle check results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ u32 *num_errors,
+ u32 *num_warnings)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 *dump_buf_end = dump_buf + num_dumped_dwords;
+ u32 num_section_params = 0, num_rules;
+ u32 results_offset = 0; /* Offset in results_buf in bytes */
+
+ *parsed_results_bytes = 0;
+ *num_errors = 0;
+ *num_warnings = 0;
+ if (!s_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read idle_chk section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &num_rules);
+ if (strcmp(param_name, "num_rules") != 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ if (num_rules) {
+ u32 rules_print_size;
+
+ /* Print FW output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "FW_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ true,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print LSI output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nLSI_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
+ dump_buf_end, num_rules,
+ false,
+ results_buf ?
+ results_buf +
+ results_offset : NULL,
+ num_errors, num_warnings);
+ results_offset += rules_print_size;
+ if (rules_print_size == 0)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ }
+
+ /* Print errors/warnings count */
+ if (*num_errors) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
+ *num_errors, *num_warnings);
+ } else if (*num_warnings) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check completed successfuly (with %d warnings)\n",
+ *num_warnings);
+ } else {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check completed successfuly\n");
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+/* Frees the specified MCP Trace meta data */
+static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
+ struct mcp_trace_meta *meta)
+{
+ u32 i;
+
+ /* Release modules */
+ if (meta->modules) {
+ for (i = 0; i < meta->modules_num; i++)
+ kfree(meta->modules[i]);
+ kfree(meta->modules);
+ }
+
+ /* Release formats */
+ if (meta->formats) {
+ for (i = 0; i < meta->formats_num; i++)
+ kfree(meta->formats[i].format_str);
+ kfree(meta->formats);
+ }
+}
+
+/* Allocates and fills MCP Trace meta data based on the specified meta data
+ * dump buffer.
+ * Returns debug status code.
+ */
+static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf,
+ struct mcp_trace_meta *meta)
+{
+ u8 *meta_buf_bytes = (u8 *)meta_buf;
+ u32 offset = 0, signature, i;
+
+ memset(meta, 0, sizeof(*meta));
+
+ /* Read first signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of modules and allocate memory for all the modules
+ * pointers.
+ */
+ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+ meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
+ if (!meta->modules)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all module strings */
+ for (i = 0; i < meta->modules_num; i++) {
+ u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+
+ *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
+ if (!(*(meta->modules + i))) {
+ /* Update number of modules to be released (modules
+ * 0..i-1 were allocated successfully)
+ */
+ meta->modules_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
+ *(meta->modules + i));
+ if (module_len > MCP_TRACE_MAX_MODULE_LEN)
+ (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
+ }
+
+ /* Read second signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != MCP_TRACE_META_IMAGE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of formats and allocate memory for all formats */
+ meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ meta->formats = kzalloc(meta->formats_num *
+ sizeof(struct mcp_trace_format),
+ GFP_KERNEL);
+ if (!meta->formats)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all strings */
+ for (i = 0; i < meta->formats_num; i++) {
+ struct mcp_trace_format *format_ptr = &meta->formats[i];
+ u8 format_len;
+
+ format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
+ &offset);
+ format_len =
+ (format_ptr->data &
+ MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
+ format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
+ if (!format_ptr->format_str) {
+ /* Update number of formats to be released (formats
+ * 0..i-1 had their format_str allocated successfully)
+ */
+ meta->formats_num = i;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes,
+ &offset,
+ format_len, format_ptr->format_str);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+/* Parses an MCP Trace dump buffer.
+ * If results_buf is not NULL, the MCP Trace results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_mask, param_shift, param_num_val;
+ u32 num_section_params, offset, end_offset, bytes_left;
+ const char *section_name, *param_name, *param_str_val;
+ u32 trace_data_dwords, trace_meta_dwords;
+ struct mcp_trace_meta meta;
+ struct mcp_trace *trace;
+ enum dbg_status status;
+ const u32 *meta_buf;
+ u8 *trace_buf;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read trace_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_data_dwords = param_num_val;
+
+ /* Prepare trace info */
+ trace = (struct mcp_trace *)dump_buf;
+ trace_buf = (u8 *)dump_buf + sizeof(struct mcp_trace);
+ offset = trace->trace_oldest;
+ end_offset = trace->trace_prod;
+ bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
+ dump_buf += trace_data_dwords;
+
+ /* Read meta_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_meta"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size") != 0)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_meta_dwords = param_num_val;
+
+ /* Choose meta data buffer */
+ if (!trace_meta_dwords) {
+ /* Dump doesn't include meta data */
+ if (!s_mcp_trace_meta.ptr)
+ return DBG_STATUS_MCP_TRACE_NO_META;
+ meta_buf = s_mcp_trace_meta.ptr;
+ } else {
+ /* Dump includes meta data */
+ meta_buf = dump_buf;
+ }
+
+ /* Allocate meta data memory */
+ status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
+ if (status != DBG_STATUS_OK)
+ goto free_mem;
+
+ /* Ignore the level and modules masks - just print everything that is
+ * already in the buffer.
+ */
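+ /* Each entry in the cyclic buffer starts with an
+ * MFW_TRACE_ENTRY_SIZE-byte header whose low bits
+ * (MFW_TRACE_EVENTID_MASK) select the format string and whose
+ * MFW_TRACE_PRM_SIZE field gives the total size of the parameters
+ * that follow it.
+ */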
+ while (bytes_left) {
+ struct mcp_trace_format *format_ptr;
+ u8 format_level, format_module;
+ u32 params[3] = { 0, 0, 0 };
+ u32 header, format_idx, i;
+
+ if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ header = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ MFW_TRACE_ENTRY_SIZE);
+ bytes_left -= MFW_TRACE_ENTRY_SIZE;
+ format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+ /* Skip message if its index doesn't exist in the meta data */
+ if (format_idx >= meta.formats_num) {
+ u8 format_size =
+ (u8)((header &
+ MFW_TRACE_PRM_SIZE_MASK) >>
+ MFW_TRACE_PRM_SIZE_SHIFT);
+
+ if (bytes_left < format_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ offset = qed_cyclic_add(offset,
+ format_size, trace->size);
+ bytes_left -= format_size;
+ continue;
+ }
+
+ format_ptr = &meta.formats[format_idx];
+ for (i = 0,
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
+ i < MCP_TRACE_FORMAT_MAX_PARAMS;
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+ /* Extract param size (0..3) */
+ u8 param_size =
+ (u8)((format_ptr->data &
+ param_mask) >> param_shift);
+
+ /* If the param size is zero, there are no other
+ * parameters.
+ */
+ if (!param_size)
+ break;
+
+ /* Size is encoded using 2 bits, where 3 is used to
+ * encode 4.
+ */
+ if (param_size == 3)
+ param_size = 4;
+ if (bytes_left < param_size) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ params[i] = qed_read_from_cyclic_buf(trace_buf,
+ &offset,
+ trace->size,
+ param_size);
+ bytes_left -= param_size;
+ }
+
+ format_level =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ MCP_TRACE_FORMAT_LEVEL_SHIFT);
+ format_module =
+ (u8)((format_ptr->data &
+ MCP_TRACE_FORMAT_MODULE_MASK) >>
+ MCP_TRACE_FORMAT_MODULE_SHIFT);
+ if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
+ status = DBG_STATUS_MCP_TRACE_BAD_DATA;
+ goto free_mem;
+ }
+
+ /* Print current message to results buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s %-8s: ",
+ s_mcp_trace_level_str[format_level],
+ meta.modules[format_module]);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ format_ptr->format_str, params[0], params[1],
+ params[2]);
+ }
+
+free_mem:
+ *parsed_results_bytes = results_offset + 1;
+ qed_mcp_trace_free_meta(p_hwfn, &meta);
+ return status;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses a Reg FIFO dump buffer.
+ * If results_buf is not NULL, the Reg FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct reg_fifo_element *elements;
+ u8 i, j, err_val, vf_val;
+ char vf_str[4];
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read reg_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "reg_fifo_data"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
+ elements = (struct reg_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ bool err_printed = false;
+
+ /* Discover if element belongs to a VF or a PF */
+ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
+ if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
+ sprintf(vf_str, "%s", "N/A");
+ else
+ sprintf(vf_str, "%d", vf_val);
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+ elements[i].data,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ACCESS)],
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF), vf_str,
+ GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].
+ data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PROTECTION)],
+ s_master_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_MASTER)]);
+
+ /* Print errors */
+ for (j = 0,
+ err_val = GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ERROR);
+ j < ARRAY_SIZE(s_reg_fifo_error_strs);
+ j++, err_val >>= 1) {
+ if (!(err_val & 0x1))
+ continue;
+ if (err_printed)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ ", ");
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s",
+ s_reg_fifo_error_strs[j]);
+ err_printed = true;
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
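+
+/* For reference, a sketch (inferred from the parsing above) of the dump
+ * layout that qed_parse_reg_fifo_dump() consumes:
+ *
+ *	section "global_params"  - name/value params, printed as-is
+ *	section "reg_fifo_data"  - param "size" = N dwords, followed by
+ *	                           N / REG_FIFO_ELEMENT_DWORDS raw elements
+ */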
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Parses an IGU FIFO dump buffer.
+ * If results_buf is not NULL, the IGU FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct igu_fifo_element *elements;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u8 i, j;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read igu_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "igu_fifo_data"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
+ elements = (struct igu_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ /* dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ u64 dword12 =
+ ((u64)elements[i].dword2 << 32) | elements[i].dword1;
+ bool is_wr_cmd = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ bool is_pf = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ u16 cmd_addr = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ u8 source = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ u8 err_type = GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+ const struct igu_fifo_addr_data *addr_data = NULL;
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Find address data */
+ for (j = 0; j < ARRAY_SIZE(s_igu_fifo_addr_data) && !addr_data;
+ j++)
+ if (cmd_addr >= s_igu_fifo_addr_data[j].start_addr &&
+ cmd_addr <= s_igu_fifo_addr_data[j].end_addr)
+ addr_data = &s_igu_fifo_addr_data[j];
+ if (!addr_data)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (addr_data->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data,
+ " vector_num=0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB=0x%x", cmd_addr - addr_data->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ /* Prepare parsed write data */
+ if (is_wr_cmd) {
+ u32 wr_data = GET_FIELD(dword12,
+ IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ u32 prod_cons = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_PROD_CONS);
+ u8 is_cleanup = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data,
+ "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ u8 cleanup_type = GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type: %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ u8 en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ u8 segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ u8 timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb: %s, segment: %s, timer_mask=%d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb
+ ? (en_dis_int_for_sb ==
+ 1 ? "disable" : "nop") :
+ "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+ } else {
+ parsed_wr_data[0] = '\0';
+ }
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source: %s, type: %s, cmd_addr: 0x%x (%s%s), %serror: %s\n",
+ elements[i].dword2, elements[i].dword1,
+ elements[i].dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(elements[i].dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd", cmd_addr,
+ (!is_pf && addr_data->vf_desc)
+ ? addr_data->vf_desc : addr_data->desc,
+ parsed_addr_data, parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+static enum dbg_status
+qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, param_num_val, num_section_params, num_elements;
+ const char *section_name, *param_name, *param_str_val;
+ struct protection_override_element *elements;
+ u8 i;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read protection_override_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "protection_override_data"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS != 0)
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ elements = (struct protection_override_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ u32 address = GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+ i, address,
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ),
+ GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE),
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "protection override contained %d elements",
+ num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size);
+}
+
+/* Parses a FW Asserts dump buffer.
+ * If results_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 results_offset = 0, num_section_params, param_num_val, i;
+ const char *param_name, *param_str_val, *section_name;
+ bool last_section_found = false;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+ while (!last_section_found) {
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name,
+ &num_section_params);
+ if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ continue;
+ } else if (strcmp(section_name, "fw_asserts")) {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ /* Extract params */
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ if (!storm_letter || !storm_dump_size)
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print data */
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_fw_asserts_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf, &parsed_buf_size);
+}
+
+/* Wrapper for unifying the idle_chk and mcp_trace APIs */
+enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ results_buf, &num_errors,
+ &num_warnings);
+}
+
+/* Feature metadata lookup table */
+static struct {
+ char *name;
+ enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *size);
+ enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ u32 buf_size, u32 *dumped_dwords);
+ enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 num_dumped_dwords,
+ char *results_buf);
+ enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+} qed_features_lookup[] = {
+ {
+ "grc", qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL}, {
+ "idle_chk",
+ qed_dbg_idle_chk_get_dump_buf_size,
+ qed_dbg_idle_chk_dump,
+ qed_print_idle_chk_results_wrapper,
+ qed_get_idle_chk_results_buf_size}, {
+ "mcp_trace",
+ qed_dbg_mcp_trace_get_dump_buf_size,
+ qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
+ qed_get_mcp_trace_results_buf_size}, {
+ "reg_fifo",
+ qed_dbg_reg_fifo_get_dump_buf_size,
+ qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
+ qed_get_reg_fifo_results_buf_size}, {
+ "igu_fifo",
+ qed_dbg_igu_fifo_get_dump_buf_size,
+ qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
+ qed_get_igu_fifo_results_buf_size}, {
+ "protection_override",
+ qed_dbg_protection_override_get_dump_buf_size,
+ qed_dbg_protection_override_dump,
+ qed_print_protection_override_results,
+ qed_get_protection_override_results_buf_size}, {
+ "fw_asserts",
+ qed_dbg_fw_asserts_get_dump_buf_size,
+ qed_dbg_fw_asserts_dump,
+ qed_print_fw_asserts_results,
+ qed_get_fw_asserts_results_buf_size},};
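+
+/* Note: the rows above are indexed by enum qed_dbg_features, so their order
+ * must match that enum (GRC, IDLE_CHK, MCP_TRACE, ...). An illustrative
+ * lookup, as performed by qed_dbg_dump() and format_feature() below:
+ *
+ *	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+ *						       &buf_size_dwords);
+ */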
+
+static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
+{
+ u32 i, precision = 80;
+
+ if (!p_text_buf)
+ return;
+
+ pr_notice("\n%.*s", precision, p_text_buf);
+ for (i = precision; i < text_size; i += precision)
+ pr_cont("%.*s", precision, p_text_buf + i);
+ pr_cont("\n");
+}
+
+#define QED_RESULTS_BUF_MIN_SIZE 16
+/* Generic function for decoding debug feature info */
+enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 text_size_bytes, null_char_pos, i;
+ enum dbg_status rc;
+ char *text_buf;
+
+ /* Check if feature supports formatting capability */
+ if (!qed_features_lookup[feature_idx].results_buf_size)
+ return DBG_STATUS_OK;
+
+ /* Obtain size of formatted output */
+ rc = qed_features_lookup[feature_idx].
+ results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, &text_size_bytes);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Make sure that the allocated size is a multiple of dword (4 bytes) */
+ null_char_pos = text_size_bytes - 1;
+ text_size_bytes = (text_size_bytes + 3) & ~0x3;
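+ /* e.g. text_size_bytes = 18 -> null_char_pos = 17 and the size
+ * rounds up to 20; bytes 17..19 are later overwritten with '\n'
+ * characters (see below).
+ */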
+
+ if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "formatted size of feature was too small %d. Aborting\n",
+ text_size_bytes);
+ return DBG_STATUS_INVALID_ARGS;
+ }
+
+ /* Allocate temp text buf */
+ text_buf = vzalloc(text_size_bytes);
+ if (!text_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Decode feature opcodes to string on temp buf */
+ rc = qed_features_lookup[feature_idx].
+ print_results(p_hwfn, (u32 *)feature->dump_buf,
+ feature->dumped_dwords, text_buf);
+ if (rc != DBG_STATUS_OK) {
+ vfree(text_buf);
+ return rc;
+ }
+
+ /* Replace the original null character with a '\n' character.
+ * The bytes that were added as a result of the dword alignment are also
+ * padded with '\n' characters.
+ */
+ for (i = null_char_pos; i < text_size_bytes; i++)
+ text_buf[i] = '\n';
+
+ /* Dump printable feature to log */
+ if (p_hwfn->cdev->dbg_params.print_data)
+ qed_dbg_print_feature(text_buf, text_size_bytes);
+
+ /* Free the old dump_buf and point dump_buf to the newly allocated
+ * and formatted text buffer.
+ */
+ vfree(feature->dump_buf);
+ feature->dump_buf = text_buf;
+ feature->buf_size = text_size_bytes;
+ feature->dumped_dwords = text_size_bytes / 4;
+ return rc;
+}
+
+/* Generic function for performing the dump of a debug feature. */
+enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_params.features[feature_idx];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
+ qed_features_lookup[feature_idx].name);
+
+ /* If dump_buf was already allocated, free it (this can happen if a
+ * dump was called but the file was never read).
+ * We can't reuse the buffer as is, since its size may have changed.
+ */
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+
+ /* Get buffer size from hsi, allocate accordingly, and perform the
+ * dump.
+ */
+ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+ feature->buf_size = buf_size_dwords * sizeof(u32);
+ feature->dump_buf = vmalloc(feature->buf_size);
+ if (!feature->dump_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ rc = qed_features_lookup[feature_idx].
+ perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
+ feature->buf_size / sizeof(u32),
+ &feature->dumped_dwords);
+
+ /* If the MCP is stuck we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
+ * error. In this case the buffer holds valid binary data, but we
+ * won't be able to parse it (since parsing relies on data in NVRAM
+ * which is only accessible when the MFW is responsive). Skip the
+ * formatting but return success so that the binary data is provided.
+ */
+ if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return DBG_STATUS_OK;
+
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Format output */
+ rc = format_feature(p_hwfn, feature_idx);
+ return rc;
+}
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
+}
+
+int qed_dbg_grc_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+}
+
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
+ num_dumped_bytes);
+}
+
+int qed_dbg_idle_chk_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
+}
+
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
+}
+
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+}
+
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_protection_override_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
+}
+
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
+ num_dumped_bytes);
+}
+
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
+}
+
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
+}
+
+/* Defines the number of bytes allocated for recording the length of a
+ * debugfs feature buffer.
+ */
+#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_FEATURE_SHIFT 24
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+enum debug_print_features {
+ OLD_MODE = 0,
+ IDLE_CHK = 1,
+ GRC_DUMP = 2,
+ MCP_TRACE = 3,
+ REG_FIFO = 4,
+ PROTECTION_OVERRIDE = 5,
+ IGU_FIFO = 6,
+ PHY = 7,
+ FW_ASSERTS = 8,
+};
+
+static u32 qed_calc_regdump_header(enum debug_print_features feature,
+ int engine, u32 feature_size, u8 omit_engine)
+{
+ /* Insert the engine, feature and mode inside the header and combine it
+ * with feature size.
+ */
+ return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
+ (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
+ (engine << REGDUMP_HEADER_ENGINE_SHIFT);
+}
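+
+/* Worked example (values assumed for illustration): a 0x100-byte idle_chk
+ * dump on engine 1 with omit_engine = 0 encodes as:
+ *
+ *	0x100 | (IDLE_CHK << 24) | (0 << 30) | (1 << 31) = 0x81000100
+ *
+ * The low 24 bits always carry the feature size.
+ */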
+
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+{
+ u8 cur_engine, omit_engine = 0, org_engine;
+ u32 offset = 0, feature_size;
+ int rc;
+
+ if (cdev->num_hwfns == 1)
+ omit_engine = 1;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Collect idle_chk and grc dump for each HW function */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "obtaining idle_chk and grcdump for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+
+ /* First idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* Second idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IDLE_CHK, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* reg_fifo dump */
+ rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(REG_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
+ }
+
+ /* igu_fifo dump */
+ rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(IGU_FIFO, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
+ }
+
+ /* protection_override dump */
+ rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE,
+ &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(PROTECTION_OVERRIDE,
+ cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev,
+ "qed_dbg_protection_override failed. rc = %d\n",
+ rc);
+ }
+
+ /* fw_asserts dump */
+ rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(FW_ASSERTS, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
+ rc);
+ }
+
+ /* GRC dump - must be last because when the MCP is stuck it will
+ * clutter idle_chk, reg_fifo, ...
+ */
+ rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(GRC_DUMP, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
+ }
+ }
+
+ /* mcp_trace */
+ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(MCP_TRACE, cur_engine,
+ feature_size, omit_engine);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+ }
+
+ qed_set_debug_engine(cdev, org_engine);
+
+ return 0;
+}
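+
+/* Illustrative consumer-side sketch (assumed, not driver code): the buffer
+ * filled above can be walked by peeling one header per feature. The size is
+ * the low 24 bits and the payload follows the header; the 6-bit feature
+ * width is inferred from the shift layout above:
+ *
+ *	u32 hdr = *(u32 *)((u8 *)buffer + offset);
+ *	u32 size = hdr & 0xffffff;
+ *	u8 feature = (hdr >> REGDUMP_HEADER_FEATURE_SHIFT) & 0x3f;
+ *
+ *	offset += REGDUMP_HEADER_SIZE + size;
+ */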
+
+int qed_dbg_all_data_size(struct qed_dev *cdev)
+{
+ u8 cur_engine, org_engine;
+ u32 regs_len = 0;
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Engine specific */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "calculating idle_chk and grcdump register length for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+ }
+
+ /* Engine common */
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ qed_set_debug_engine(cdev, org_engine);
+
+ return regs_len;
+}
+
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ enum dbg_status dbg_rc;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ /* Acquire ptt */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EINVAL;
+
+ /* Get dump */
+ dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
+ if (dbg_rc != DBG_STATUS_OK) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
+ qed_dbg_get_status_str(dbg_rc));
+ *num_dumped_bytes = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "copying debugfs feature to external buffer\n");
+ memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
+ *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
+ 4;
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_dbg_feature *qed_feature =
+ &cdev->dbg_params.features[feature];
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ if (!p_ptt)
+ return -EINVAL;
+
+ rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ buf_size_dwords = 0;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ qed_feature->buf_size = buf_size_dwords * sizeof(u32);
+ return qed_feature->buf_size;
+}
+
+u8 qed_get_debug_engine(struct qed_dev *cdev)
+{
+ return cdev->dbg_params.engine_for_debug;
+}
+
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
+{
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
+ engine_number);
+ cdev->dbg_params.engine_for_debug = engine_number;
+}
+
+void qed_dbg_pf_init(struct qed_dev *cdev)
+{
+ const u8 *dbg_values;
+
+ /* Debug values are after init values.
+ * The offset is the first dword of the file.
+ */
+ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
+ qed_dbg_set_bin_ptr((u8 *)dbg_values);
+ qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
+}
+
+void qed_dbg_pf_exit(struct qed_dev *cdev)
+{
+ struct qed_dbg_feature *feature = NULL;
+ enum qed_dbg_features feature_idx;
+
+ /* Debug features' buffers may still be allocated if a debug feature
+ * was used but its dump was never read (and thus never freed).
+ */
+ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
+ feature = &cdev->dbg_params.features[feature_idx];
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
new file mode 100644
index 000000000000..f872d7324814
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -0,0 +1,54 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DEBUGFS_H
+#define _QED_DEBUGFS_H
+
+enum qed_dbg_features {
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_IGU_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_NUM
+};
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_grc_size(struct qed_dev *cdev);
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_idle_chk_size(struct qed_dev *cdev);
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev);
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev);
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_protection_override_size(struct qed_dev *cdev);
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
+int qed_dbg_all_data_size(struct qed_dev *cdev);
+u8 qed_get_debug_engine(struct qed_dev *cdev);
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number);
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes);
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature);
+
+void qed_dbg_pf_init(struct qed_dev *cdev);
+void qed_dbg_pf_exit(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 855691f18412..2777d5bb4380 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1728,13 +1728,6 @@ enum bin_dbg_buffer_type {
MAX_BIN_DBG_BUFFER_TYPE
};
-/* Chip IDs */
-enum chip_ids {
- CHIP_RESERVED,
- CHIP_BB_B0,
- CHIP_RESERVED2,
- MAX_CHIP_IDS
-};
/* Attention bit mapping */
struct dbg_attn_bit_mapping {
@@ -1813,6 +1806,371 @@ enum dbg_attn_type {
MAX_DBG_ATTN_TYPE
};
+/* condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* memory data for registers dump */
+struct dbg_dump_mem {
+ __le32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ __le32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
+#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+};
+
+/* register data for registers dump */
+struct dbg_dump_reg {
+ __le32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
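+
+/* Example (illustrative only): decoding a dbg_dump_reg entry with the
+ * GET_FIELD() helper used throughout this driver:
+ *
+ *	u32 data = le32_to_cpu(reg->data);
+ *	u32 addr = GET_FIELD(data, DBG_DUMP_REG_ADDRESS);
+ *	u32 len = GET_FIELD(data, DBG_DUMP_REG_LENGTH);
+ */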
+
+/* split header for registers dump */
+struct dbg_dump_split_hdr {
+ __le32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ __le16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ __le16 num_entries; /* number of registers entries to check */
+ u8 entry_size; /* size of registers entry (in dwords) */
+ u8 start_entry; /* index of the first entry to check */
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ __le32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ __le16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ __le16 rule_id; /* Failing rule index */
+ __le16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ __le16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ __le16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ __le16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ __le16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ __le32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enabled; /* Indicates if the block is enabled for recording (0/1) */
+ u8 hw_id; /* HW ID associated with the block */
+ u8 line_num; /* Debug line number to select */
+ u8 right_shift; /* Number of units to right-shift the debug data (0-3) */
+ u8 cycle_en; /* 4-bit value: bit i set -> unit i is enabled. */
+ u8 force_valid; /* 4-bit value: bit i set -> unit i is forced valid. */
+ u8 force_frame; /* 4-bit value: bit i set -> unit i frame bit is forced.
+ */
+ u8 reserved;
+};
+
+/* Debug Bus Clients */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ __le32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 fast_enabled;
+ u8 fast_mode;
+ u8 slow_enabled;
+ u8 slow_mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ __le16 reserved;
+ __le32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ __le32 app_version; /* The tools version number of the application */
+ u8 state; /* The current debug bus state */
+ u8 hw_dwords; /* HW dwords per cycle */
+ u8 next_hw_id; /* Next HW ID to be associated with an input */
+ u8 num_enabled_blocks; /* Number of blocks enabled for recording */
+ u8 num_enabled_storms; /* Number of Storms enabled for recording */
+ u8 target; /* Output target */
+ u8 next_trigger_state; /* ID of next trigger state to be added */
+ u8 next_constraint_id; /* ID of next filter/trigger constraint to be
+ * added.
+ */
+ u8 one_shot_en; /* Indicates if one-shot mode is enabled (0/1) */
+ u8 grc_input_en; /* Indicates if GRC recording is enabled (0/1) */
+ u8 timestamp_input_en; /* Indicates if timestamp recording is enabled
+ * (0/1).
+ */
+ u8 filter_en; /* Indicates if the recording filter is enabled (0/1) */
+ u8 trigger_en; /* Indicates if the recording trigger is enabled (0/1) */
+ u8 adding_filter; /* If true, the next added constraint belongs to the
+ * filter. Otherwise, it belongs to the last added
+ * trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 filter_pre_trigger; /* Indicates if the recording filter should be
+ * applied before the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 filter_post_trigger; /* Indicates if the recording filter should be
+ * applied after the trigger. Valid only if both
+ * filter and trigger are enabled (0/1).
+ */
+ u8 unify_inputs; /* If true, all inputs are associated with HW ID 0.
+ * Otherwise, each input is assigned a different HW ID
+ * (0/1).
+ */
+ u8 rcv_from_other_engine; /* Indicates if the other engine sends its NW
+ * recording to this engine (0/1).
+ */
+ struct dbg_bus_pci_buf_data pci_buf; /* Debug Bus PCI buffer data. Valid
+ * only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ __le16 reserved;
+ struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+ struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each Storm */
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3, /* 4 HW dwords, 0 Storm dwords */
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4, /* 8 HW dwords, 0 Storm dwords */
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
+ DBG_BUS_STATE_READY, /* debug bus is ready for configuration and
+ * recording.
+ */
+ DBG_BUS_STATE_RECORDING, /* debug bus is currently recording */
+ DBG_BUS_STATE_STOPPED, /* debug bus recording has stopped */
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ /* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ /* records debug bus to the NW */
+ DBG_BUS_TARGET_ID_NIG,
+ /* records debug bus to a PCI buffer */
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ __le32 param_val[40]; /* Value of each GRC parameter. Array size must
+ * match the enum dbg_grc_params.
+ */
+ u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
+ * set by the user (0/1). Array size must
+ * match the enum dbg_grc_params.
+ */
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM, /* dump Tstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MSTORM, /* dump Mstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_USTORM, /* dump Ustorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_XSTORM, /* dump Xstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_YSTORM, /* dump Ystorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PSTORM, /* dump Pstorm memories (0/1) */
+ DBG_GRC_PARAM_DUMP_REGS, /* dump non-memory registers (0/1) */
+ DBG_GRC_PARAM_DUMP_RAM, /* dump Storm internal RAMs (0/1) */
+ DBG_GRC_PARAM_DUMP_PBUF, /* dump Storm passive buffer (0/1) */
+ DBG_GRC_PARAM_DUMP_IOR, /* dump Storm IORs (0/1) */
+ DBG_GRC_PARAM_DUMP_VFC, /* dump VFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CM_CTX, /* dump CM contexts (0/1) */
+ DBG_GRC_PARAM_DUMP_PXP, /* dump PXP memories (0/1) */
+ DBG_GRC_PARAM_DUMP_RSS, /* dump RSS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_CAU, /* dump CAU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_QM, /* dump QM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MCP, /* dump MCP memories (0/1) */
+ DBG_GRC_PARAM_RESERVED, /* reserved */
+ DBG_GRC_PARAM_DUMP_CFC, /* dump CFC memories (0/1) */
+ DBG_GRC_PARAM_DUMP_IGU, /* dump IGU memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BRB, /* dump BRB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BTB, /* dump BTB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_BMB, /* dump BMB memories (0/1) */
+ DBG_GRC_PARAM_DUMP_NIG, /* dump NIG memories (0/1) */
+ DBG_GRC_PARAM_DUMP_MULD, /* dump MULD memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PRS, /* dump PRS memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DMAE, /* dump DMAE memories (0/1) */
+ DBG_GRC_PARAM_DUMP_TM, /* dump TM (timers) memories (0/1) */
+ DBG_GRC_PARAM_DUMP_SDM, /* dump SDM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_DIF, /* dump DIF memories (0/1) */
+ DBG_GRC_PARAM_DUMP_STATIC, /* dump static debug data (0/1) */
+ DBG_GRC_PARAM_UNSTALL, /* un-stall Storms after dump (0/1) */
+ DBG_GRC_PARAM_NUM_LCIDS, /* number of LCIDs (0..320) */
+ DBG_GRC_PARAM_NUM_LTIDS, /* number of LTIDs (0..320) */
+ /* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ /* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+ /* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
+ DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug reset registers */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
/* Debug status codes */
enum dbg_status {
DBG_STATUS_OK,
@@ -1869,6 +2227,41 @@ enum dbg_status {
MAX_DBG_STATUS
};
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ __le32 buf_size; /* Idle check buffer size in dwords */
+ u8 buf_size_set; /* Indicates if the idle check buffer size was set
+ * (0/1).
+ */
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/* Debug Tools data (per HW function) */
+struct dbg_tools_data {
+ struct dbg_grc_data grc; /* GRC Dump data */
+ struct dbg_bus_data bus; /* Debug Bus data */
+ struct idle_chk_data idle_chk; /* Idle Check data */
+ u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
+ u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+ */
+ u8 chip_id; /* Chip ID (from enum chip_ids) */
+ u8 platform_id; /* Platform ID (from enum platform_ids) */
+ u8 initialized; /* Indicates if the data was initialized */
+ u8 reserved;
+};
+
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -1948,15 +2341,50 @@ struct init_qm_vport_params {
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+struct fw_asserts_ram_section {
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
+ u8 list_dword_offset;
+ u8 list_element_dword_size;
+ u8 list_num_elements;
+ u8 list_next_index_dword_offset;
+};
+
+struct fw_ver_num {
+ u8 major; /* Firmware major version number */
+ u8 minor; /* Firmware minor version number */
+ u8 rev; /* Firmware revision version number */
+ u8 eng; /* Firmware engineering version number (for bootleg versions) */
+};
+
+struct fw_ver_info {
+ __le16 tools_ver; /* Tools version number */
+ u8 image_id; /* FW image ID (e.g. main) */
+ u8 reserved1;
+ struct fw_ver_num num; /* FW version number */
+ __le32 timestamp; /* FW Timestamp in unix time (sec. since 1970) */
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver;
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+struct fw_info_location {
+ __le32 grc_addr;
+ __le32 size;
+};
+
enum init_modes {
MODE_RESERVED,
MODE_BB_B0,
- MODE_RESERVED2,
+ MODE_K2,
MODE_ASIC,
+ MODE_RESERVED2,
MODE_RESERVED3,
MODE_RESERVED4,
MODE_RESERVED5,
- MODE_RESERVED6,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -1965,7 +2393,7 @@ enum init_modes {
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_40G,
- MODE_RESERVED7,
+ MODE_RESERVED6,
MAX_INIT_MODES
};
@@ -2223,8 +2651,276 @@ struct iro {
__le16 size;
};
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the collected GRC data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified dump buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
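+
+/* A minimal usage sketch (assumed, mirroring qed_dbg_dump() in qed_debug.c):
+ *
+ *	u32 size_dwords, dumped;
+ *
+ *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords) ==
+ *	    DBG_STATUS_OK) {
+ *		u32 *buf = vmalloc(size_dwords * sizeof(u32));
+ *
+ *		if (buf)
+ *			qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
+ *					 &dumped);
+ *	}
+ */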
+/**
+ * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
+ * for idle check results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the idle check data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the mcp trace data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - the trace data in MCP scratchpad contains an invalid signature
+ * - the bundle ID in NVRAM is invalid
+ * - the trace meta data cannot be found (in NVRAM or image file)
+ * - the trace meta data cannot be read (from NVRAM or image file)
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
+ * for reg fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the reg fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the IGU fifo data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
+ * buffer size for protection override window results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for protection
+ * override data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_protection_override_dump - Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the protection override data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - Ptt window used for writing the registers.
+ * @param dump_buf - Pointer to write the FW Asserts data into.
+ * @param buf_size_in_dwords - Size of the specified buffer in dwords.
+ * @param num_dumped_dwords - OUT: number of dumped dwords.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - the specified buffer is too small
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
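
All of the dump entry points above share the same two-call contract: query the required size in dwords, allocate, then dump into the caller's buffer. Below is a minimal sketch of that flow for the reg fifo, assuming kernel context (vzalloc/vfree from <linux/vmalloc.h>) and the DBG_STATUS_OK / DBG_STATUS_VIRT_MEM_ALLOC_FAILED values of enum dbg_status declared elsewhere in this header; error handling is trimmed.

static enum dbg_status qed_reg_fifo_dump_example(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt)
{
	u32 size_dwords, dumped_dwords;
	enum dbg_status rc;
	u32 *buf;

	/* step 1: ask how big the dump buffer must be (in dwords) */
	rc = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	buf = vzalloc(size_dwords * sizeof(u32));
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* assumed status */

	/* step 2: dump into the caller-supplied buffer */
	rc = qed_dbg_reg_fifo_dump(p_hwfn, p_ptt, buf, size_dwords,
				   &dumped_dwords);

	/* a real caller would hand buf/dumped_dwords to the parsing API
	 * before freeing
	 */
	vfree(buf);
	return rc;
}
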
/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the specified results struct.
+ * @brief qed_dbg_print_attn - Prints attention register values in the
+ * specified results struct.
*
* @param p_hwfn
* @param results - Pointer to the attention read results
@@ -2236,8 +2932,212 @@ struct iro {
enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
+/******************************** Constants **********************************/
+
#define MAX_NAME_LEN 16
+/***************************** Public Functions *******************************/
+/**
+ * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @param bin_ptr - a pointer to the binary data with debug arrays.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
+/**
+ * @brief qed_dbg_get_status_str - Returns a string for the specified status.
+ *
+ * @param status - a debug status code.
+ *
+ * @return a string for the specified status
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+/**
+ * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_idle_chk_results - Prints idle check results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - idle check dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the idle check results.
+ * @param num_errors - OUT: number of errors found in idle check.
+ * @param num_warnings - OUT: number of warnings found in idle check.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
+/**
+ * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - MCP Trace dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_mcp_trace_results - Prints MCP Trace results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - mcp trace dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the mcp trace results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_reg_fifo_results - Prints reg fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - reg fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the reg fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_igu_fifo_results - Prints IGU fifo results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - IGU fifo dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the IGU fifo results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_protection_override_results_buf_size - Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_protection_override_results - Prints protection override
+ * results.
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - protection override dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the protection override results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+/**
+ * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * @brief qed_print_fw_asserts_results - Prints FW Asserts results
+ *
+ * @param p_hwfn - HW device data
+ * @param dump_buf - FW Asserts dump buffer, starting from the header.
+ * @param num_dumped_dwords - number of dwords that were dumped.
+ * @param results_buf - buffer for printing the FW Asserts results.
+ *
+ * @return error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
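
The user-side parsing half mirrors the dump half: size the text buffer first, then print into it. A sketch for the reg fifo, under the same kernel-context and status-code assumptions as the dump sketch above:

static enum dbg_status qed_reg_fifo_parse_example(struct qed_hwfn *p_hwfn,
						  u32 *dump_buf,
						  u32 num_dumped_dwords)
{
	enum dbg_status rc;
	u32 text_size;
	char *text;

	/* step 1: size the parsed-text buffer (in bytes) */
	rc = qed_get_reg_fifo_results_buf_size(p_hwfn, dump_buf,
					       num_dumped_dwords, &text_size);
	if (rc != DBG_STATUS_OK)
		return rc;

	text = vzalloc(text_size);
	if (!text)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; /* assumed status */

	/* step 2: render the raw dump as human-readable text */
	rc = qed_print_reg_fifo_results(p_hwfn, dump_buf, num_dumped_dwords,
					text);
	if (rc == DBG_STATUS_OK)
		pr_debug("%s", text);

	vfree(text);
	return rc;
}
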
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -7039,6 +7939,35 @@ struct ystorm_iscsi_conn_ag_ctx {
__le32 reg2;
__le32 reg3;
};
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_SHIFT 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Helps to identify that the trace is valid */
+ u32 size; /* the size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32 bits wide as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually just after the current producer).
+ */
+};
+
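Each entry in trace_buffer begins with a 3-byte header (MFW_TRACE_ENTRY_SIZE) whose fields are extracted with the masks above: the low 16 bits select the format string in the NVRAM meta data, and bits 16-19 give the parameter count. A hedged decoding sketch follows; the little-endian cyclic read helper is an illustration, not the driver's actual parser.

/* Illustrative cyclic-buffer reader: accumulates 'n' bytes little-endian
 * starting at '*offset', wrapping at 'size' (byte order is an assumption).
 */
static u32 trace_read(const u8 *buf, u32 *offset, u32 size, u32 n)
{
	u32 i, val = 0;

	for (i = 0; i < n; i++) {
		val |= (u32)buf[*offset] << (8 * i);
		*offset = (*offset + 1) % size;
	}

	return val;
}

static void decode_trace_entry(const u8 *trace_buf, u32 *offset, u32 size)
{
	u32 header = trace_read(trace_buf, offset, size, MFW_TRACE_ENTRY_SIZE);
	u32 event_id = header & MFW_TRACE_EVENTID_MASK;
	u32 num_params = (header & MFW_TRACE_PRM_SIZE_MASK) >>
			 MFW_TRACE_PRM_SIZE_SHIFT;

	/* each parameter is one further dword from the cyclic buffer;
	 * event_id indexes the format strings in the trace meta data
	 */
	while (num_params--)
		(void)trace_read(trace_buf, offset, size, sizeof(u32));

	(void)event_id;
}
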
#define VF_MAX_STATIC 192
#define MCP_GLOB_PATH_MAX 2
@@ -7046,6 +7975,7 @@ struct ystorm_iscsi_conn_ag_ctx {
#define MCP_GLOB_PORT_MAX 4
#define MCP_GLOB_FUNC_MAX 16
+typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
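
An offsize_t packs a scratchpad section's location into a single dword, with both fields expressed in dwords rather than bytes. A small sketch of turning the offset field into a byte offset, using only the two macros shown here (the size field, presumably in the upper bits, is outside this hunk):

static u32 qed_offsize_to_byte_offset(offsize_t offsize)
{
	/* offset field per the masks above, counted in dwords */
	u32 offset_dwords = (offsize & OFFSIZE_OFFSET_MASK) >>
			    OFFSIZE_OFFSET_SHIFT;

	return offset_dwords * sizeof(u32);
}
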
@@ -7636,6 +8566,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
#define DRV_MSG_CODE_MCP_RESET 0x00090000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
#define DRV_MSG_CODE_MCP_HALT 0x00100000
@@ -7657,6 +8589,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+
+#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
@@ -7694,6 +8629,8 @@ struct public_drv_mb {
#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+
+#define FW_MSG_CODE_NVM_OK 0x00010000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -7930,4 +8867,101 @@ struct nvm_cfg1 {
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define MCP_TRACE_SIZE 2048 /* 2KB */
+
+/* This section resides at a fixed location at the beginning of the
+ * scratchpad, to ensure that the MCP trace is not overrun during an MFW
+ * upgrade. All other data has a floating location which differs from version
+ * to version, and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware, and is
+ * loaded with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ struct mcp_trace trace;
+#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
+ u8 trace_buffer[MCP_TRACE_SIZE];
+#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
+ /* running_mfw has the same definition as in nvm_map.h.
+ * This bit indicates both the running dir and the running bundle.
+ * It is set once when the LIM is loaded.
+ */
+ u32 running_mfw;
+#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
+ u32 build_time;
+#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
+ u32 reset_type;
+#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
+ u32 mfw_secure_mode;
+#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
+ u16 pme_status_pf_bitmap;
+#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
+ u16 pme_enable_pf_bitmap;
+#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
+ u32 mim_nvm_addr;
+ u32 mim_start_addr;
+ u32 ah_pcie_link_params;
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
+#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
+#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
+#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
+#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
+#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
+
+ u32 rsrv_persist[5]; /* Reserved; persists across MFW upgrades */
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_MAX,
+};
+
+#define DIR_ID_1 (0)
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 4409ea3f7d40..ddd410a91e13 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -111,8 +111,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index d22e3f8816ae..b730a632c383 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -588,6 +588,8 @@ static int qed_nic_stop(struct qed_dev *cdev)
}
}
+ qed_dbg_pf_exit(cdev);
+
return rc;
}
@@ -846,6 +848,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
/* First Dword used to differentiate between various sources */
data = cdev->firmware->data + sizeof(u32);
+
+ qed_dbg_pf_init(cdev);
}
memset(&tunn_info, 0, sizeof(tunn_info));
@@ -1394,6 +1398,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .dbg_all_data = &qed_dbg_all_data,
+ .dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
.get_coalesce = &qed_get_coalesce,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 7d39cb9b775b..bdc9ba92f6d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1181,8 +1181,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
- for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
- val = cpu_to_be32(p_ver->name[i]);
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+ val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
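
The rewritten loop fixes the copy of the driver version string to the MFW: the old code advanced i in steps of four yet used it both as a byte index into the source (so cpu_to_be32 received a single promoted char) and, multiplied by four again, as the destination offset. The new code reads a full dword per iteration, byte-swaps it to big-endian so the MFW sees a consistent layout regardless of host endianness, and stops one dword short of the buffer so the string stays NUL-terminated. A standalone sketch of the same idea, with a hypothetical NAME_SZ standing in for MCP_DRV_VER_STR_SIZE:

#define NAME_SZ 16 /* hypothetical; the driver uses MCP_DRV_VER_STR_SIZE */

static void copy_name_to_mfw(__be32 *dst, const char *name)
{
	u32 i;

	/* one dword is left for NUL termination, matching the new loop */
	for (i = 0; i < (NAME_SZ - 4) / sizeof(u32); i++)
		dst[i] = cpu_to_be32(*(const u32 *)&name[i * sizeof(u32)]);
}
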
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index b44f09b18eb1..759cb04e02b0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -528,9 +528,903 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define PGLCS_REG_DBG_SELECT \
+ 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE \
+ 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT \
+ 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID \
+ 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME \
+ 0x001d24UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
+ 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 \
+ 0x008080UL
+#define MISC_REG_RESET_PL_PDA_VAUX \
+ 0x008090UL
+#define MISCS_REG_RESET_PL_UA \
+ 0x009050UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_RESET_PL_HV_2 \
+ 0x009150UL
+#define DMAE_REG_DBG_SELECT \
+ 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE \
+ 0x00c514UL
+#define DMAE_REG_DBG_SHIFT \
+ 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID \
+ 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME \
+ 0x00c520UL
+#define NCSI_REG_DBG_SELECT \
+ 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE \
+ 0x040478UL
+#define NCSI_REG_DBG_SHIFT \
+ 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID \
+ 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME \
+ 0x040484UL
+#define GRC_REG_DBG_SELECT \
+ 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE \
+ 0x0500a8UL
+#define GRC_REG_DBG_SHIFT \
+ 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID \
+ 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME \
+ 0x0500b4UL
+#define UMAC_REG_DBG_SELECT \
+ 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE \
+ 0x051098UL
+#define UMAC_REG_DBG_SHIFT \
+ 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID \
+ 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME \
+ 0x0510a4UL
+#define MCP2_REG_DBG_SELECT \
+ 0x052400UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MCP2_REG_DBG_SHIFT \
+ 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID \
+ 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME \
+ 0x052444UL
+#define PCIE_REG_DBG_SELECT \
+ 0x0547e8UL
+#define PCIE_REG_DBG_DWORD_ENABLE \
+ 0x0547ecUL
+#define PCIE_REG_DBG_SHIFT \
+ 0x0547f0UL
+#define PCIE_REG_DBG_FORCE_VALID \
+ 0x0547f4UL
+#define PCIE_REG_DBG_FORCE_FRAME \
+ 0x0547f8UL
+#define DORQ_REG_DBG_SELECT \
+ 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE \
+ 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT \
+ 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID \
+ 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME \
+ 0x100ae0UL
+#define IGU_REG_DBG_SELECT \
+ 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE \
+ 0x18157cUL
+#define IGU_REG_DBG_SHIFT \
+ 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID \
+ 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME \
+ 0x181588UL
+#define CAU_REG_DBG_SELECT \
+ 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE \
+ 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT \
+ 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID \
+ 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME \
+ 0x1c0eb8UL
+#define PRS_REG_DBG_SELECT \
+ 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE \
+ 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT \
+ 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID \
+ 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME \
+ 0x1f0ba4UL
+#define CNIG_REG_DBG_SELECT_K2 \
+ 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 \
+ 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 \
+ 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 \
+ 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 \
+ 0x218264UL
+#define PRM_REG_DBG_SELECT \
+ 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE \
+ 0x2306acUL
+#define PRM_REG_DBG_SHIFT \
+ 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID \
+ 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME \
+ 0x2306b8UL
+#define SRC_REG_DBG_SELECT \
+ 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE \
+ 0x238704UL
+#define SRC_REG_DBG_SHIFT \
+ 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID \
+ 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME \
+ 0x238710UL
+#define RSS_REG_DBG_SELECT \
+ 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE \
+ 0x238c50UL
+#define RSS_REG_DBG_SHIFT \
+ 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID \
+ 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME \
+ 0x238c5cUL
+#define RPB_REG_DBG_SELECT \
+ 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE \
+ 0x23c72cUL
+#define RPB_REG_DBG_SHIFT \
+ 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID \
+ 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME \
+ 0x23c738UL
+#define PSWRQ2_REG_DBG_SELECT \
+ 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE \
+ 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT \
+ 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID \
+ 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME \
+ 0x240110UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE \
+ 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT \
+ 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID \
+ 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME \
+ 0x280030UL
+#define PSWWR_REG_DBG_SELECT \
+ 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE \
+ 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT \
+ 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID \
+ 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME \
+ 0x29a094UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD_REG_DBG_DWORD_ENABLE \
+ 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT \
+ 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID \
+ 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME \
+ 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT \
+ 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE \
+ 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT \
+ 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID \
+ 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME \
+ 0x29d410UL
+#define PSWHST2_REG_DBG_SELECT \
+ 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE \
+ 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT \
+ 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID \
+ 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME \
+ 0x29e068UL
+#define PSWHST_REG_DBG_SELECT \
+ 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE \
+ 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT \
+ 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID \
+ 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME \
+ 0x2a0110UL
+#define PGLUE_B_REG_DBG_SELECT \
+ 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE \
+ 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT \
+ 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID \
+ 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME \
+ 0x2a8410UL
+#define TM_REG_DBG_SELECT \
+ 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE \
+ 0x2c07acUL
+#define TM_REG_DBG_SHIFT \
+ 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID \
+ 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME \
+ 0x2c07b8UL
+#define TCFC_REG_DBG_SELECT \
+ 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE \
+ 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT \
+ 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID \
+ 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME \
+ 0x2d0510UL
+#define CCFC_REG_DBG_SELECT \
+ 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE \
+ 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT \
+ 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID \
+ 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME \
+ 0x2e0510UL
+#define QM_REG_DBG_SELECT \
+ 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE \
+ 0x2f2e78UL
+#define QM_REG_DBG_SHIFT \
+ 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID \
+ 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME \
+ 0x2f2e84UL
+#define RDIF_REG_DBG_SELECT \
+ 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE \
+ 0x300504UL
+#define RDIF_REG_DBG_SHIFT \
+ 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID \
+ 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME \
+ 0x300510UL
+#define TDIF_REG_DBG_SELECT \
+ 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE \
+ 0x310504UL
+#define TDIF_REG_DBG_SHIFT \
+ 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID \
+ 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME \
+ 0x310510UL
+#define BRB_REG_DBG_SELECT \
+ 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE \
+ 0x340ed4UL
+#define BRB_REG_DBG_SHIFT \
+ 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID \
+ 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME \
+ 0x340ee0UL
+#define XYLD_REG_DBG_SELECT \
+ 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE \
+ 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT \
+ 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID \
+ 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME \
+ 0x4c1610UL
+#define YULD_REG_DBG_SELECT \
+ 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE \
+ 0x4c9604UL
+#define YULD_REG_DBG_SHIFT \
+ 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID \
+ 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME \
+ 0x4c9610UL
+#define TMLD_REG_DBG_SELECT \
+ 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE \
+ 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT \
+ 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID \
+ 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME \
+ 0x4d1610UL
+#define MULD_REG_DBG_SELECT \
+ 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE \
+ 0x4e1604UL
+#define MULD_REG_DBG_SHIFT \
+ 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID \
+ 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME \
+ 0x4e1610UL
+#define NIG_REG_DBG_SELECT \
+ 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE \
+ 0x502144UL
+#define NIG_REG_DBG_SHIFT \
+ 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID \
+ 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME \
+ 0x502150UL
+#define BMB_REG_DBG_SELECT \
+ 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE \
+ 0x540a80UL
+#define BMB_REG_DBG_SHIFT \
+ 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID \
+ 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME \
+ 0x540a8cUL
+#define PTU_REG_DBG_SELECT \
+ 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE \
+ 0x560104UL
+#define PTU_REG_DBG_SHIFT \
+ 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID \
+ 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME \
+ 0x560110UL
+#define CDU_REG_DBG_SELECT \
+ 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE \
+ 0x580708UL
+#define CDU_REG_DBG_SHIFT \
+ 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID \
+ 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME \
+ 0x580714UL
+#define WOL_REG_DBG_SELECT \
+ 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE \
+ 0x600144UL
+#define WOL_REG_DBG_SHIFT \
+ 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID \
+ 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME \
+ 0x600150UL
+#define BMBN_REG_DBG_SELECT \
+ 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE \
+ 0x610144UL
+#define BMBN_REG_DBG_SHIFT \
+ 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID \
+ 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME \
+ 0x610150UL
+#define NWM_REG_DBG_SELECT \
+ 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE \
+ 0x8000f0UL
+#define NWM_REG_DBG_SHIFT \
+ 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID \
+ 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME \
+ 0x8000fcUL
+#define PBF_REG_DBG_SELECT \
+ 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE \
+ 0xd80064UL
+#define PBF_REG_DBG_SHIFT \
+ 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID \
+ 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME \
+ 0xd80070UL
+#define PBF_PB1_REG_DBG_SELECT \
+ 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE \
+ 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT \
+ 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID \
+ 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME \
+ 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT \
+ 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE \
+ 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT \
+ 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID \
+ 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME \
+ 0xda4738UL
+#define BTB_REG_DBG_SELECT \
+ 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE \
+ 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT \
+ 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID \
+ 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME \
+ 0xdb08d8UL
+#define XSDM_REG_DBG_SELECT \
+ 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE \
+ 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT \
+ 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID \
+ 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME \
+ 0xf80e38UL
+#define YSDM_REG_DBG_SELECT \
+ 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE \
+ 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT \
+ 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID \
+ 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME \
+ 0xf90e38UL
+#define PSDM_REG_DBG_SELECT \
+ 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE \
+ 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT \
+ 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID \
+ 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME \
+ 0xfa0e38UL
+#define TSDM_REG_DBG_SELECT \
+ 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE \
+ 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT \
+ 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID \
+ 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME \
+ 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT \
+ 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE \
+ 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT \
+ 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID \
+ 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME \
+ 0xfc0e38UL
+#define USDM_REG_DBG_SELECT \
+ 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE \
+ 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT \
+ 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID \
+ 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME \
+ 0xfd0e38UL
+#define XCM_REG_DBG_SELECT \
+ 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE \
+ 0x1000044UL
+#define XCM_REG_DBG_SHIFT \
+ 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID \
+ 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME \
+ 0x1000050UL
+#define YCM_REG_DBG_SELECT \
+ 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE \
+ 0x1080044UL
+#define YCM_REG_DBG_SHIFT \
+ 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID \
+ 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME \
+ 0x1080050UL
+#define PCM_REG_DBG_SELECT \
+ 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE \
+ 0x1100044UL
+#define PCM_REG_DBG_SHIFT \
+ 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID \
+ 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME \
+ 0x1100050UL
+#define TCM_REG_DBG_SELECT \
+ 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE \
+ 0x1180044UL
+#define TCM_REG_DBG_SHIFT \
+ 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID \
+ 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME \
+ 0x1180050UL
+#define MCM_REG_DBG_SELECT \
+ 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE \
+ 0x1200044UL
+#define MCM_REG_DBG_SHIFT \
+ 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID \
+ 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME \
+ 0x1200050UL
+#define UCM_REG_DBG_SELECT \
+ 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE \
+ 0x1280054UL
+#define UCM_REG_DBG_SHIFT \
+ 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID \
+ 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME \
+ 0x1280060UL
+#define XSEM_REG_DBG_SELECT \
+ 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE \
+ 0x140152cUL
+#define XSEM_REG_DBG_SHIFT \
+ 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID \
+ 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME \
+ 0x1401538UL
+#define YSEM_REG_DBG_SELECT \
+ 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE \
+ 0x150152cUL
+#define YSEM_REG_DBG_SHIFT \
+ 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID \
+ 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME \
+ 0x1501538UL
+#define PSEM_REG_DBG_SELECT \
+ 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE \
+ 0x160152cUL
+#define PSEM_REG_DBG_SHIFT \
+ 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID \
+ 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME \
+ 0x1601538UL
+#define TSEM_REG_DBG_SELECT \
+ 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE \
+ 0x170152cUL
+#define TSEM_REG_DBG_SHIFT \
+ 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID \
+ 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME \
+ 0x1701538UL
+#define MSEM_REG_DBG_SELECT \
+ 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE \
+ 0x180152cUL
+#define MSEM_REG_DBG_SHIFT \
+ 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID \
+ 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME \
+ 0x1801538UL
+#define USEM_REG_DBG_SELECT \
+ 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE \
+ 0x190152cUL
+#define USEM_REG_DBG_SHIFT \
+ 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID \
+ 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME \
+ 0x1901538UL
+#define PCIE_REG_DBG_COMMON_SELECT \
+ 0x054398UL
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
+ 0x05439cUL
+#define PCIE_REG_DBG_COMMON_SHIFT \
+ 0x0543a0UL
+#define PCIE_REG_DBG_COMMON_FORCE_VALID \
+ 0x0543a4UL
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME \
+ 0x0543a8UL
+#define MISC_REG_RESET_PL_UA \
+ 0x008050UL
+#define MISC_REG_RESET_PL_HV \
+ 0x008060UL
+#define XCM_REG_CTX_RBC_ACCS \
+ 0x1001800UL
+#define XCM_REG_AGG_CON_CTX \
+ 0x1001804UL
+#define XCM_REG_SM_CON_CTX \
+ 0x1001808UL
+#define YCM_REG_CTX_RBC_ACCS \
+ 0x1081800UL
+#define YCM_REG_AGG_CON_CTX \
+ 0x1081804UL
+#define YCM_REG_AGG_TASK_CTX \
+ 0x1081808UL
+#define YCM_REG_SM_CON_CTX \
+ 0x108180cUL
+#define YCM_REG_SM_TASK_CTX \
+ 0x1081810UL
+#define PCM_REG_CTX_RBC_ACCS \
+ 0x1101440UL
+#define PCM_REG_SM_CON_CTX \
+ 0x1101444UL
+#define TCM_REG_CTX_RBC_ACCS \
+ 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX \
+ 0x11814c4UL
+#define TCM_REG_AGG_TASK_CTX \
+ 0x11814c8UL
+#define TCM_REG_SM_CON_CTX \
+ 0x11814ccUL
+#define TCM_REG_SM_TASK_CTX \
+ 0x11814d0UL
+#define MCM_REG_CTX_RBC_ACCS \
+ 0x1201800UL
+#define MCM_REG_AGG_CON_CTX \
+ 0x1201804UL
+#define MCM_REG_AGG_TASK_CTX \
+ 0x1201808UL
+#define MCM_REG_SM_CON_CTX \
+ 0x120180cUL
+#define MCM_REG_SM_TASK_CTX \
+ 0x1201810UL
+#define UCM_REG_CTX_RBC_ACCS \
+ 0x1281700UL
+#define UCM_REG_AGG_CON_CTX \
+ 0x1281704UL
+#define UCM_REG_AGG_TASK_CTX \
+ 0x1281708UL
+#define UCM_REG_SM_CON_CTX \
+ 0x128170cUL
+#define UCM_REG_SM_TASK_CTX \
+ 0x1281710UL
+#define XSEM_REG_SLOW_DBG_EMPTY \
+ 0x1401140UL
+#define XSEM_REG_SYNC_DBG_EMPTY \
+ 0x1401160UL
+#define XSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE \
+ 0x1401404UL
+#define XSEM_REG_DBG_FRAME_MODE \
+ 0x1401408UL
+#define XSEM_REG_DBG_MODE1_CFG \
+ 0x1401420UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x1440000UL
+#define YSEM_REG_SYNC_DBG_EMPTY \
+ 0x1501160UL
+#define YSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE \
+ 0x1501404UL
+#define YSEM_REG_DBG_FRAME_MODE \
+ 0x1501408UL
+#define YSEM_REG_DBG_MODE1_CFG \
+ 0x1501420UL
+#define YSEM_REG_FAST_MEMORY \
+ 0x1540000UL
+#define PSEM_REG_SLOW_DBG_EMPTY \
+ 0x1601140UL
+#define PSEM_REG_SYNC_DBG_EMPTY \
+ 0x1601160UL
+#define PSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE \
+ 0x1601404UL
+#define PSEM_REG_DBG_FRAME_MODE \
+ 0x1601408UL
+#define PSEM_REG_DBG_MODE1_CFG \
+ 0x1601420UL
+#define PSEM_REG_FAST_MEMORY \
+ 0x1640000UL
+#define TSEM_REG_SLOW_DBG_EMPTY \
+ 0x1701140UL
+#define TSEM_REG_SYNC_DBG_EMPTY \
+ 0x1701160UL
+#define TSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE \
+ 0x1701404UL
+#define TSEM_REG_DBG_FRAME_MODE \
+ 0x1701408UL
+#define TSEM_REG_DBG_MODE1_CFG \
+ 0x1701420UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1740000UL
+#define MSEM_REG_SLOW_DBG_EMPTY \
+ 0x1801140UL
+#define MSEM_REG_SYNC_DBG_EMPTY \
+ 0x1801160UL
+#define MSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE \
+ 0x1801404UL
+#define MSEM_REG_DBG_FRAME_MODE \
+ 0x1801408UL
+#define MSEM_REG_DBG_MODE1_CFG \
+ 0x1801420UL
+#define MSEM_REG_FAST_MEMORY \
+ 0x1840000UL
+#define USEM_REG_SLOW_DBG_EMPTY \
+ 0x1901140UL
+#define USEM_REG_SYNC_DBG_EMPTY \
+ 0x1901160UL
+#define USEM_REG_SLOW_DBG_ACTIVE \
+ 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE \
+ 0x1901404UL
+#define USEM_REG_DBG_FRAME_MODE \
+ 0x1901408UL
+#define USEM_REG_DBG_MODE1_CFG \
+ 0x1901420UL
+#define USEM_REG_FAST_MEMORY \
+ 0x1940000UL
+#define SEM_FAST_REG_INT_RAM \
+ 0x020000UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
+#define GRC_REG_TRACE_FIFO_VALID_DATA \
+ 0x050064UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
+ 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \
+ 0x050500UL
+#define IGU_REG_ERROR_HANDLING_MEMORY \
+ 0x181520UL
#define MCP_REG_CPU_MODE \
0xe05000UL
#define MCP_REG_CPU_MODE_SOFT_HALT \
(0x1 << 10)
+#define BRB_REG_BIG_RAM_ADDRESS \
+ 0x340800UL
+#define BRB_REG_BIG_RAM_DATA \
+ 0x341500UL
+#define SEM_FAST_REG_STALL_0 \
+ 0x000488UL
+#define SEM_FAST_REG_STALLED \
+ 0x000494UL
+#define BTB_REG_BIG_RAM_ADDRESS \
+ 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA \
+ 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS \
+ 0x540800UL
+#define BMB_REG_BIG_RAM_DATA \
+ 0x540f00UL
+#define SEM_FAST_REG_STORM_REG_FILE \
+ 0x008000UL
+#define RSS_REG_RSS_RAM_ADDR \
+ 0x238c30UL
+#define MISCS_REG_BLOCK_256B_EN \
+ 0x009074UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
+#define MCP_REG_CPU_REG_FILE \
+ 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE \
+ 32
+#define DBG_REG_DEBUG_TARGET \
+ 0x01005cUL
+#define DBG_REG_FULL_MODE \
+ 0x010060UL
+#define DBG_REG_CALENDAR_OUT_DATA \
+ 0x010480UL
+#define GRC_REG_TRACE_FIFO \
+ 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID \
+ 0x181530UL
+#define DBG_REG_DBG_BLOCK_ON \
+ 0x010454UL
+#define DBG_REG_FRAMING_MODE \
+ 0x010058UL
+#define SEM_FAST_REG_VFC_DATA_WR \
+ 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR \
+ 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD \
+ 0x000b48UL
+#define RSS_REG_RSS_RAM_DATA \
+ 0x238c20UL
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define NWS_REG_NWS_CMU \
+ 0x720000UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0 \
+ 0x000680UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8 \
+ 0x000684UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0 \
+ 0x0006c0UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8 \
+ 0x0006c4UL
+#define MS_REG_MS_CMU \
+ 0x6a4000UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130 \
+ 0x000208UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131 \
+ 0x00020cUL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132 \
+ 0x000210UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133 \
+ 0x000214UL
+#define PHY_PCIE_REG_PHY0 \
+ 0x620000UL
+#define PHY_PCIE_REG_PHY1 \
+ 0x624000UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
index a342bfe4280d..9b7678f26909 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -2,6 +2,7 @@
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_sp.h"
+#include "qed_selftest.h"
int qed_selftest_memory(struct qed_dev *cdev)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index d7c21cdc608c..a4a3cead15bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -108,8 +108,8 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only)
+static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
@@ -186,8 +186,8 @@ static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
return false;
}
-int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
- int vfid, struct qed_ptt *p_ptt)
+static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
{
struct qed_bulletin_content *p_bulletin;
int crc_size = sizeof(p_bulletin->crc);
@@ -573,7 +573,7 @@ static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
}
}
-void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
u16 i;
@@ -1130,9 +1130,10 @@ static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}
-struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
- u16 relative_vf_id,
- bool b_enabled_only)
+static struct
+qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
{
struct qed_vf_info *vf = NULL;
@@ -1143,7 +1144,7 @@ struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
return &vf->p_vf_info;
}
-void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
struct qed_public_vf_info *vf_info;
@@ -2510,8 +2511,8 @@ qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
- int vfid, struct qed_filter_ucast *params)
+static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
{
struct qed_public_vf_info *vf;
@@ -2828,7 +2829,8 @@ cleanup:
return rc;
}
-int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+static int
+qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 ack_vfs[VF_MAX_STATIC / 32];
int rc = 0;
@@ -3015,7 +3017,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
}
}
-void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+static void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
{
u64 add_bit = 1ULL << (vfid % 64);
@@ -3138,8 +3140,8 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
-void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
- u16 pvid, int vfid)
+static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
{
struct qed_vf_info *vf_info;
u64 feature;
@@ -3172,7 +3174,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
return !!p_vf_info->vport_instance;
}
-bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
@@ -3194,7 +3196,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
return vf_info->spoof_chk;
}
-int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
struct qed_vf_info *vf;
int rc = -EINVAL;
@@ -3237,7 +3239,8 @@ static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
return p_vf->bulletin.p_virt->mac;
}
-u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+static u16
+qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
struct qed_vf_info *p_vf;
@@ -3269,7 +3272,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+static int
+qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
struct qed_vf_info *vf;
u8 vport_id;
@@ -3828,7 +3832,8 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
qed_ptt_release(hwfn, ptt);
}
-void qed_iov_pf_task(struct work_struct *work)
+static void qed_iov_pf_task(struct work_struct *work)
{
struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
iov_task.work);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index de0acbcaf85c..85334ceaf69c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1186,8 +1186,8 @@ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
return false;
}
-bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
- u8 *dst_mac, u8 *p_is_forced)
+static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
{
struct qed_bulletin_content *bulletin;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 14d5328e6ac9..25a9b293ee8f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -695,6 +695,28 @@ static int qede_set_pauseparam(struct net_device *dev,
return 0;
}
+static void qede_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *buffer)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ regs->version = 0;
+ memset(buffer, 0, regs->len);
+
+ if (edev->ops && edev->ops->common)
+ edev->ops->common->dbg_all_data(edev->cdev, buffer);
+}
+
+static int qede_get_regs_len(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->ops && edev->ops->common)
+ return edev->ops->common->dbg_all_data_size(edev->cdev);
+ else
+ return -EINVAL;
+}
+
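These two callbacks expose the full qed debug dump through the standard ethtool register-dump path, so userspace can fetch it with ethtool -d <iface>. A sketch of the calling sequence, roughly as the ethtool core performs it, assuming a netdev whose ops are wired up as in the hunk below:

static int qede_regs_example(struct net_device *ndev)
{
	struct ethtool_regs regs = { .cmd = ETHTOOL_GREGS };
	void *buf;
	int len;

	/* size the dump first, exactly as the ethtool core does */
	len = ndev->ethtool_ops->get_regs_len(ndev);
	if (len <= 0)
		return len ? len : -EOPNOTSUPP;

	buf = vzalloc(len);
	if (!buf)
		return -ENOMEM;

	regs.len = len;
	ndev->ethtool_ops->get_regs(ndev, &regs, buf);

	/* a real caller copies buf back to userspace here */
	vfree(buf);
	return 0;
}
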
static void qede_update_mtu(struct qede_dev *edev, union qede_reload_args *args)
{
edev->ndev->mtu = args->mtu;
@@ -1395,6 +1417,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
+ .get_regs_len = qede_get_regs_len,
+ .get_regs = qede_get_regs,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index b4a56e61631a..0e198fe89d1a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -100,7 +100,8 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
-static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
{
struct qede_dev *edev = netdev_priv(ndev);
@@ -109,6 +110,9 @@ static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
return -EINVAL;
}
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
vlan, vf);
@@ -2116,7 +2120,7 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
-int qede_set_features(struct net_device *dev, netdev_features_t features)
+static int qede_set_features(struct net_device *dev, netdev_features_t features)
{
struct qede_dev *edev = netdev_priv(dev);
netdev_features_t changes = features ^ dev->features;
@@ -2552,7 +2556,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
#ifdef CONFIG_DCB
- qede_set_dcbnl_ops(edev->ndev);
+ if (!IS_VF(edev))
+ qede_set_dcbnl_ops(edev->ndev);
#endif
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 24061b9b92e8..5f327659efa7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -238,7 +238,7 @@ int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
-int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
#else
static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index afd687e5e779..50eaafa3eaba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1915,7 +1915,7 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
}
int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
- u16 vlan, u8 qos)
+ u16 vlan, u8 qos, __be16 vlan_proto)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1928,6 +1928,9 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
if (vf >= sriov->num_vfs || qos > 7)
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
if (vlan > MAX_VLAN_ID) {
netdev_err(netdev,
"Invalid VLAN ID, allowed range is [0 - %d]\n",
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 56e0a9f1c5ec..e47d38701d6c 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -722,7 +722,6 @@ static int emac_remove(struct platform_device *pdev)
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
- dev_set_drvdata(&pdev->dev, NULL);
return 0;
}
@@ -731,7 +730,6 @@ static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
.remove = emac_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "qcom-emac",
.of_match_table = emac_dt_match,
},
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index cb29ee24cf1b..5ef5d728c250 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1062,14 +1062,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* this should always be supported */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "32-bit PCI DMA addresses"
- "not supported by the card\n");
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "32-bit PCI DMA addresses"
- "not supported by the card\n");
+ dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
goto err_out_disable_dev;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1f8240aec086..440ae272161c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
[ARSTR] = 0x0000,
[TSU_CTRST] = 0x0004,
+ [TSU_FWSLC] = 0x0038,
[TSU_VTAG0] = 0x0058,
[TSU_ADSBSY] = 0x0060,
[TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
[TSU_ADRH0] = 0x0100,
[TXNLCR0] = 0x0080,
@@ -2781,6 +2786,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
if (sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+ TSU_FWSLC); /* Enable POST registers */
return;
}
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index 1736f4b806af..f6883b2b5da3 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -64,7 +64,7 @@
#define LM87_ALARM_TEMP_INT 0x10
#define LM87_ALARM_TEMP_EXT1 0x20
-#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM87)
static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
{
@@ -455,7 +455,7 @@ static int sfe4001_init(struct efx_nic *efx)
struct falcon_board *board = falcon_board(efx);
int rc;
-#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
+#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 9fbc12a8f80c..241520943ada 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1156,7 +1156,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
* acquired locks in the wrong order.
*/
list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
- async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ if (async->complete)
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
list_del(&async->list);
kfree(async);
}
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index dd204d9704c6..77a5364f7a10 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1269,13 +1269,13 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
if (IS_ERR(ptp->phc_clock)) {
rc = PTR_ERR(ptp->phc_clock);
goto fail3;
- }
-
- INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
- ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
- if (!ptp->pps_workwq) {
- rc = -ENOMEM;
- goto fail4;
+ } else if (ptp->phc_clock) {
+ INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+ ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+ if (!ptp->pps_workwq) {
+ rc = -ENOMEM;
+ goto fail4;
+ }
}
}
ptp->nic_ts_enabled = false;
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 816c44689e67..9abcf4aded30 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -22,7 +22,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
}
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos)
+ u8 qos, __be16 vlan_proto)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -31,6 +31,9 @@ int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
return -EINVAL;
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
} else {
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
index 400df526586d..ba1762e7f216 100644
--- a/drivers/net/ethernet/sfc/sriov.h
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -16,7 +16,7 @@
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
- u8 qos);
+ u8 qos, __be16 vlan_proto);
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
bool spoofchk);
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 95001ee408ab..6f85276376e8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1426,7 +1426,7 @@ static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
rx_flags |= RxATX;
}
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Can accept Jumbo packet */
rx_flags |= RxAJAB;
#endif
@@ -1750,7 +1750,7 @@ static int sis900_rx(struct net_device *net_dev)
data_size = rx_status & DSIZE;
rx_size = data_size - CRC_SIZE;
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* ``TOOLONG'' flag means jumbo packet received. */
if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
rx_status &= (~ ((unsigned int)TOOLONG));
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 7d430d322931..f0da3dc52c01 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -310,7 +310,7 @@ enum sis630_revision_id {
#define CRC_SIZE 4
#define MAC_HEADER_SIZE 14
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define MAX_FRAME_SIZE (1518 + 4)
#else
#define MAX_FRAME_SIZE 1518
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index e17671c9d1b0..ea8465467469 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#endif
#if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
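
The #undef lines above matter because SMC_inb()/SMC_outb() may already have been given width-specific bodies earlier in this header, and redefining a macro with a different body draws a redefinition warning. Sketch of the idiom (the earlier definition shown is illustrative, not the header's actual one):

/* earlier, while 8-bit access is believed possible: */
#define SMC_inb(ioaddr, reg)	readb((ioaddr) + (reg))

/* later, once SMC_CAN_USE_8BIT turns out to be 0: retract, then stub */
#undef SMC_inb
#define SMC_inb(ioaddr, reg)	({ BUG(); 0; })
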
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index ca3134540d2d..e9b8579e6241 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/gpio/consumer.h>
#include "smsc911x.h"
@@ -147,6 +148,9 @@ struct smsc911x_data {
/* regulators */
struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
+ /* Reset GPIO */
+ struct gpio_desc *reset_gpiod;
+
/* clock */
struct clk *clk;
};
@@ -438,6 +442,11 @@ static int smsc911x_request_resources(struct platform_device *pdev)
netdev_err(ndev, "couldn't get regulators %d\n",
ret);
+ /* Request optional RESET GPIO */
+ pdata->reset_gpiod = devm_gpiod_get_optional(&pdev->dev,
+ "reset",
+ GPIOD_OUT_LOW);
+
/* Request clock */
pdata->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk))
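
The devm_gpiod_get_optional() call above is the standard optional-GPIO idiom: it returns NULL, not an error, when no "reset" GPIO is described, so boards without one need no special casing, and the devm_ lifetime ties the descriptor to the device. A hedged sketch including the IS_ERR() check that careful callers add (the hunk itself does not check):

#include <linux/gpio/consumer.h>

	struct gpio_desc *reset_gpiod;

	reset_gpiod = devm_gpiod_get_optional(&pdev->dev, "reset",
					      GPIOD_OUT_LOW);
	if (IS_ERR(reset_gpiod))
		return PTR_ERR(reset_gpiod);	/* e.g. -EPROBE_DEFER */
	if (reset_gpiod)
		gpiod_set_value_cansleep(reset_gpiod, 1); /* assert reset */
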
@@ -1099,15 +1108,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
- if (smsc911x_mii_probe(dev) < 0) {
- SMSC_WARN(pdata, probe, "Error registering mii bus");
- goto err_out_unregister_bus_3;
- }
-
return 0;
-err_out_unregister_bus_3:
- mdiobus_unregister(pdata->mii_bus);
err_out_free_bus_2:
mdiobus_free(pdata->mii_bus);
err_out_1:
@@ -1514,23 +1516,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
}
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+ u32 inten = smsc911x_reg_read(pdata, INT_EN);
+ int serviced = IRQ_NONE;
+ u32 temp;
+
+ if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_SW_INT_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+ pdata->software_irq_signal = 1;
+ smp_wmb();
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+ /* Called when there is a multicast update scheduled and
+ * it is now safe to complete the update */
+ SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+ if (pdata->multicast_update_pending)
+ smsc911x_rx_multicast_update_workaround(pdata);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (intsts & inten & INT_STS_TDFA_) {
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+ netif_wake_queue(dev);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXE_)) {
+ SMSC_TRACE(pdata, intr, "RX Error interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (likely(intsts & inten & INT_STS_RSFL_)) {
+ if (likely(napi_schedule_prep(&pdata->napi))) {
+ /* Disable Rx interrupts */
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RSFL_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ /* Schedule a NAPI poll */
+ __napi_schedule(&pdata->napi);
+ } else {
+ SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+ }
+ serviced = IRQ_HANDLED;
+ }
+
+ return serviced;
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int timeout;
unsigned int temp;
unsigned int intcfg;
+ int retval;
+ int irq_flags;
- /* if the phy is not yet registered, retry later*/
+ /* find and start the given phy */
if (!dev->phydev) {
- SMSC_WARN(pdata, hw, "phy_dev is NULL");
- return -EAGAIN;
+ retval = smsc911x_mii_probe(dev);
+ if (retval < 0) {
+ SMSC_WARN(pdata, probe, "Error starting phy");
+ goto out;
+ }
}
/* Reset the LAN911x */
- if (smsc911x_soft_reset(pdata)) {
+ retval = smsc911x_soft_reset(pdata);
+ if (retval) {
SMSC_WARN(pdata, hw, "soft reset failed");
- return -EIO;
+ goto mii_free_out;
}
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1586,6 +1655,15 @@ static int smsc911x_open(struct net_device *dev)
pdata->software_irq_signal = 0;
smp_wmb();
+ irq_flags = irq_get_trigger_type(dev->irq);
+ retval = request_irq(dev->irq, smsc911x_irqhandler,
+ irq_flags | IRQF_SHARED, dev->name, dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe,
+ "Unable to claim requested irq: %d", dev->irq);
+ goto mii_free_out;
+ }
+
temp = smsc911x_reg_read(pdata, INT_EN);
temp |= INT_EN_SW_INT_EN_;
smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1600,7 +1678,8 @@ static int smsc911x_open(struct net_device *dev)
if (!pdata->software_irq_signal) {
netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
dev->irq);
- return -ENODEV;
+ retval = -ENODEV;
+ goto irq_stop_out;
}
SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
dev->irq);
@@ -1646,6 +1725,14 @@ static int smsc911x_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
+
+irq_stop_out:
+ free_irq(dev->irq, dev);
+mii_free_out:
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+out:
+ return retval;
}
/* Entry point for stopping the interface */
@@ -1667,9 +1754,15 @@ static int smsc911x_stop(struct net_device *dev)
dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
smsc911x_tx_update_txcounters(dev);
+ free_irq(dev->irq, dev);
+
/* Bring the PHY down */
- if (dev->phydev)
+ if (dev->phydev) {
phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+ }
+ netif_carrier_off(dev);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -1811,67 +1904,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&pdata->mac_lock, flags);
}
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct smsc911x_data *pdata = netdev_priv(dev);
- u32 intsts = smsc911x_reg_read(pdata, INT_STS);
- u32 inten = smsc911x_reg_read(pdata, INT_EN);
- int serviced = IRQ_NONE;
- u32 temp;
-
- if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_SW_INT_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
- pdata->software_irq_signal = 1;
- smp_wmb();
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
- /* Called when there is a multicast update scheduled and
- * it is now safe to complete the update */
- SMSC_TRACE(pdata, intr, "RX Stop interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
- if (pdata->multicast_update_pending)
- smsc911x_rx_multicast_update_workaround(pdata);
- serviced = IRQ_HANDLED;
- }
-
- if (intsts & inten & INT_STS_TDFA_) {
- temp = smsc911x_reg_read(pdata, FIFO_INT);
- temp |= FIFO_INT_TX_AVAIL_LEVEL_;
- smsc911x_reg_write(pdata, FIFO_INT, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
- netif_wake_queue(dev);
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXE_)) {
- SMSC_TRACE(pdata, intr, "RX Error interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
- serviced = IRQ_HANDLED;
- }
-
- if (likely(intsts & inten & INT_STS_RSFL_)) {
- if (likely(napi_schedule_prep(&pdata->napi))) {
- /* Disable Rx interrupts */
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_RSFL_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- /* Schedule a NAPI poll */
- __napi_schedule(&pdata->napi);
- } else {
- SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
- }
- serviced = IRQ_HANDLED;
- }
-
- return serviced;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void smsc911x_poll_controller(struct net_device *dev)
{
@@ -2291,16 +2323,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
- BUG_ON(!dev->phydev);
+ WARN_ON(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
- phy_disconnect(dev->phydev);
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
@@ -2385,8 +2415,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res;
- unsigned int intcfg = 0;
- int res_size, irq, irq_flags;
+ int res_size, irq;
int retval;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2425,7 +2454,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- irq_flags = irq_get_trigger_type(irq);
pdata->ioaddr = ioremap_nocache(res->start, res_size);
pdata->dev = dev;
@@ -2472,43 +2500,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (retval < 0)
goto out_disable_resources;
- /* configure irq polarity and type before connecting isr */
- if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
- intcfg |= INT_CFG_IRQ_POL_;
-
- if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
- intcfg |= INT_CFG_IRQ_TYPE_;
-
- smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
- /* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_disable_irq_chip(dev);
+ netif_carrier_off(dev);
- retval = request_irq(dev->irq, smsc911x_irqhandler,
- irq_flags | IRQF_SHARED, dev->name, dev);
+ retval = smsc911x_mii_init(pdev, dev);
if (retval) {
- SMSC_WARN(pdata, probe,
- "Unable to claim requested irq: %d", dev->irq);
+ SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
goto out_disable_resources;
}
- netif_carrier_off(dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_free_irq;
+ goto out_disable_resources;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
}
- retval = smsc911x_mii_init(pdev, dev);
- if (retval) {
- SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_unregister_netdev_5;
- }
-
spin_lock_irq(&pdata->mac_lock);
/* Check if mac address has been specified when bringing interface up */
@@ -2544,10 +2552,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_unregister_netdev_5:
- unregister_netdev(dev);
-out_free_irq:
- free_irq(dev->irq, dev);
out_disable_resources:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2533b91f1421..d3292c4a6eda 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -30,7 +30,7 @@
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/module.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 79d8b926f17c..e5a926b8bee7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -169,7 +169,8 @@ static int stm32_dwmac_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume);
+static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
+ stm32_dwmac_suspend, stm32_dwmac_resume);
static const struct of_device_id stm32_dwmac_match[] = {
{ .compatible = "st,stm32-dwmac"},
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 6f6bbc54e9fe..7df4ff158f3d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index df5580dcdfed..51019b794be5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 170a18b61281..6e3b82972ce8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -187,7 +187,7 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
if (IS_ERR(priv->ptp_clock)) {
priv->ptp_clock = NULL;
pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
- } else
+ } else if (priv->ptp_clock)
pr_debug("Added PTP HW clock successfully on %s\n",
priv->dev->name);
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index c25d9711a221..0d0053128542 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
lp->mii_bus->read = &dwceqos_mdio_read;
lp->mii_bus->write = &dwceqos_mdio_write;
lp->mii_bus->priv = lp;
- lp->mii_bus->parent = &lp->ndev->dev;
+ lp->mii_bus->parent = &lp->pdev->dev;
of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -2761,7 +2761,7 @@ static const struct ethtool_ops dwceqos_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static struct net_device_ops netdev_ops = {
+static const struct net_device_ops netdev_ops = {
.ndo_open = dwceqos_open,
.ndo_stop = dwceqos_stop,
.ndo_start_xmit = dwceqos_start_xmit,
@@ -2853,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_clk_dis_aper;
- }
-
lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
if (IS_ERR(lp->phy_ref_clk)) {
dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
ret = PTR_ERR(lp->phy_ref_clk);
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
ret = clk_prepare_enable(lp->phy_ref_clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2880,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "invalid fixed-link");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2889,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_get_phy_mode(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
lp->phy_interface = ret;
@@ -2897,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = dwceqos_mii_init(lp);
if (ret) {
dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
ret = dwceqos_mii_probe(ndev);
if (ret != 0) {
netdev_err(ndev, "mii_probe fail.\n");
ret = -ENXIO;
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2922,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
pdev->id, ndev->base_addr, ndev->irq);
@@ -2932,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
ndev->irq, ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
if (netif_msg_probe(lp))
netdev_dbg(ndev, "net_local@%p\n", lp);
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_phy;
+ }
+
return 0;
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
- unregister_netdev(ndev);
err_out_clk_dis_aper:
clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
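
The relocation of register_netdev() above follows the usual probe-ordering rule: registration publishes the interface, so ndo_open can run before probe finishes; every resource open needs (clocks, MDIO, IRQ) must exist first, and the error labels must unwind in exact reverse order of acquisition. A schematic sketch with hypothetical helper names:

static int example_probe(struct platform_device *pdev,
			 struct net_device *ndev)
{
	int ret;

	ret = example_enable_clocks(pdev);	/* hypothetical */
	if (ret)
		return ret;

	ret = example_init_mdio_and_irq(pdev);	/* hypothetical */
	if (ret)
		goto err_clocks;

	ret = register_netdev(ndev);		/* publish last */
	if (ret)
		goto err_mdio;

	return 0;

err_mdio:
	example_teardown_mdio_and_irq(pdev);
err_clocks:
	example_disable_clocks(pdev);
	return ret;
}
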
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 79f0ec4e51ac..bc258d7e41df 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1791,7 +1791,7 @@ fail_alloc_rx:
gelic_card_free_chain(card, card->tx_chain.head);
fail_alloc_tx:
free_irq(card->irq, card);
- netdev->irq = NO_IRQ;
+ netdev->irq = 0;
fail_request_irq:
ps3_sb_event_receive_port_destroy(dev, card->irq);
fail_alloc_irq:
@@ -1843,7 +1843,7 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
netdev0 = card->netdev[GELIC_PORT_ETHERNET_0];
/* disconnect event port */
free_irq(card->irq, card);
- netdev0->irq = NO_IRQ;
+ netdev0->irq = 0;
ps3_sb_event_receive_port_destroy(card->dev, card->irq);
wait_event(card->waitq,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index f38696ceee74..908e72e18ef7 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1724,24 +1724,21 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
struct velocity_td_info *tdinfo, struct tx_desc *td)
{
struct sk_buff *skb = tdinfo->skb;
+ int i;
/*
* Don't unmap the pre-allocated tx_bufs
*/
- if (tdinfo->skb_dma) {
- int i;
+ for (i = 0; i < tdinfo->nskb_dma; i++) {
+ size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
- for (i = 0; i < tdinfo->nskb_dma; i++) {
- size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
+ /* For scatter-gather */
+ if (skb_shinfo(skb)->nr_frags > 0)
+ pktlen = max_t(size_t, pktlen,
+ td->td_buf[i].size & ~TD_QUEUE);
- /* For scatter-gather */
- if (skb_shinfo(skb)->nr_frags > 0)
- pktlen = max_t(size_t, pktlen,
- td->td_buf[i].size & ~TD_QUEUE);
-
- dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
- le16_to_cpu(pktlen), DMA_TO_DEVICE);
- }
+ dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+ le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
dev_kfree_skb_irq(skb);
tdinfo->skb = NULL;
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 4f5c024c6192..6d68c8a8f4f2 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -18,7 +18,7 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on (PPC32 || MICROBLAZE || ARCH_ZYNQ)
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
select PHYLIB
---help---
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 5a1e98547031..470b3dcd54e5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -127,7 +127,7 @@ struct sixpack {
#define AX25_6PACK_HEADER_LEN 0
-static void sixpack_decode(struct sixpack *, unsigned char[], int);
+static void sixpack_decode(struct sixpack *, const unsigned char[], int);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
/*
@@ -428,7 +428,7 @@ out:
/*
* Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
+ * This function is called by the tty module in the kernel when
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
@@ -436,7 +436,6 @@ static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
- unsigned char buf[512];
int count1;
if (!count)
@@ -446,10 +445,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
if (!sp)
return;
- memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
-
/* Read the characters out of the buffer */
-
count1 = count;
while (count) {
count--;
@@ -459,7 +455,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
continue;
}
}
- sixpack_decode(sp, buf, count1);
+ sixpack_decode(sp, cp, count1);
sp_put(sp);
tty_unthrottle(tty);
@@ -992,7 +988,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
/* decode a 6pack packet */
static void
-sixpack_decode(struct sixpack *sp, unsigned char *pre_rbuff, int count)
+sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count)
{
unsigned char inbyte;
int count1;
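
Dropping the on-stack bounce buffer above also fixes a quiet truncation bug: when count exceeded 512, the old memcpy() copied only sizeof(buf) bytes while sixpack_decode() was still asked to consume all of count1, reading stale stack data. Const-qualifying the decode path lets the tty's own buffer be read in place. A schematic sketch (consume_byte() is a hypothetical stand-in for the per-byte decoder):

static void example_decode(const unsigned char *cp, int count)
{
	while (count--)
		consume_byte(*cp++);	/* hypothetical; reads, never writes */
}
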
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d95a50ae996d..622ab3ab9e93 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -484,7 +484,7 @@ static void bpq_setup(struct net_device *dev)
dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */
-#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+#if IS_ENABLED(CONFIG_AX25)
dev->header_ops = &ax25_header_ops;
#endif
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 284b97b6b258..f4fbcb5aa24a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -433,7 +433,7 @@ struct nvsp_1_message_revoke_send_buffer {
*/
struct nvsp_1_message_send_rndis_packet {
/*
- * This field is specified by RNIDS. They assume there's two different
+ * This field is specified by RNDIS. They assume there are two different
* channels of communication. However, the Network VSP only has one.
* Therefore, the channel travels with the RNDIS packet.
*/
@@ -578,7 +578,7 @@ struct nvsp_5_send_indirect_table {
/* The number of entries in the send indirection table */
u32 count;
- /* The offset of the send indireciton table from top of this struct.
+ /* The offset of the send indirection table from the top of this struct.
* The send indirection table tells which channel to put the send
* traffic on. Each entry is a channel number.
*/
@@ -649,6 +649,8 @@ struct multi_recv_comp {
struct netvsc_stats {
u64 packets;
u64 bytes;
+ u64 broadcast;
+ u64 multicast;
struct u64_stats_sync syncp;
};
@@ -695,9 +697,8 @@ struct net_device_context {
bool start_remove;
/* State to manage the associated VF interface. */
- struct net_device *vf_netdev;
- bool vf_inject;
- atomic_t vf_use_cnt;
+ struct net_device __rcu *vf_netdev;
+
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
@@ -733,7 +734,6 @@ struct netvsc_device {
struct nvsp_message channel_init_pkt;
struct nvsp_message revoke_packet;
- /* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
@@ -1238,7 +1238,7 @@ struct rndis_message {
u32 ndis_msg_type;
/* Total length of this message, from the beginning */
- /* of the sruct rndis_message, in bytes. */
+ /* of the struct rndis_message, in bytes. */
u32 msg_len;
/* Actual message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2a9ccc4d9e3c..720b5fa9e625 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -34,89 +34,6 @@
#include "hyperv_net.h"
/*
- * An API to support in-place processing of incoming VMBUS packets.
- */
-#define VMBUS_PKT_TRAILER 8
-
-static struct vmpacket_descriptor *
-get_next_pkt_raw(struct vmbus_channel *channel)
-{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 read_loc = ring_info->priv_read_index;
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- struct vmpacket_descriptor *cur_desc;
- u32 packetlen;
- u32 dsize = ring_info->ring_datasize;
- u32 delta = read_loc - ring_info->ring_buffer->read_index;
- u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
-
- if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
- return NULL;
-
- if ((read_loc + sizeof(*cur_desc)) > dsize)
- return NULL;
-
- cur_desc = ring_buffer + read_loc;
- packetlen = cur_desc->len8 << 3;
-
- /*
- * If the packet under consideration is wrapping around,
- * return failure.
- */
- if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
- return NULL;
-
- return cur_desc;
-}
-
-/*
- * A helper function to step through packets "in-place"
- * This API is to be called after each successful call
- * get_next_pkt_raw().
- */
-static void put_pkt_raw(struct vmbus_channel *channel,
- struct vmpacket_descriptor *desc)
-{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 read_loc = ring_info->priv_read_index;
- u32 packetlen = desc->len8 << 3;
- u32 dsize = ring_info->ring_datasize;
-
- BUG_ON((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize);
-
- /*
- * Include the packet trailer.
- */
- ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
-}
-
-/*
- * This call commits the read index and potentially signals the host.
- * Here is the pattern for using the "in-place" consumption APIs:
- *
- * while (get_next_pkt_raw() {
- * process the packet "in-place";
- * put_pkt_raw();
- * }
- * if (packets processed in place)
- * commit_rd_index();
- */
-static void commit_rd_index(struct vmbus_channel *channel)
-{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_rmb();
- ring_info->ring_buffer->read_index = ring_info->priv_read_index;
-
- if (hv_need_to_signal_on_read(ring_info))
- vmbus_set_event(channel);
-}
-
-/*
* Switch the data path from the synthetic interface to the VF
* interface.
*/
@@ -718,7 +635,7 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
q_idx = nvsc_packet->q_idx;
channel = incoming_channel;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
num_outstanding_sends =
@@ -840,7 +757,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
return msg_size;
}
-static int netvsc_send_pkt(
+static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
@@ -1027,7 +944,7 @@ int netvsc_send(struct hv_device *device,
}
if (msdp->skb)
- dev_kfree_skb_any(msdp->skb);
+ dev_consume_skb_any(msdp->skb);
if (xmit_more && !packet->cp_partial) {
msdp->skb = skb;
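
The dev_kfree_skb_any() to dev_consume_skb_any() swaps above are about drop accounting, not memory: both free the skb from any context, but only the kfree variant is reported as a packet drop to the kernel's drop monitor, so successful TX completions should use consume. Sketch:

#include <linux/netdevice.h>

static void example_tx_done(struct sk_buff *skb, bool success)
{
	if (success)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* a real drop; dropwatch sees it */
}
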
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2360e704e271..52eeb2f67276 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -667,51 +667,23 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = hv_get_drvdata(device_obj);
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct net_device *vf_netdev;
struct sk_buff *skb;
- struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
- u32 bytes_recvd = packet->total_data_buflen;
- int ret = 0;
- if (!net || net->reg_state != NETREG_REGISTERED)
+ if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
- if (READ_ONCE(net_device_ctx->vf_inject)) {
- atomic_inc(&net_device_ctx->vf_use_cnt);
- if (!READ_ONCE(net_device_ctx->vf_inject)) {
- /*
- * We raced; just move on.
- */
- atomic_dec(&net_device_ctx->vf_use_cnt);
- goto vf_injection_done;
- }
-
- /*
- * Inject this packet into the VF inerface.
- * On Hyper-V, multicast and brodcast packets
- * are only delivered on the synthetic interface
- * (after subjecting these to policy filters on
- * the host). Deliver these via the VF interface
- * in the guest.
- */
- vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
- packet, csum_info, *data,
- vlan_tci);
- if (vf_skb != NULL) {
- ++net_device_ctx->vf_netdev->stats.rx_packets;
- net_device_ctx->vf_netdev->stats.rx_bytes +=
- bytes_recvd;
- netif_receive_skb(vf_skb);
- } else {
- ++net->stats.rx_dropped;
- ret = NVSP_STAT_FAIL;
- }
- atomic_dec(&net_device_ctx->vf_use_cnt);
- return ret;
- }
-
-vf_injection_done:
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+ /*
+ * If necessary, inject this packet into the VF interface.
+ * On Hyper-V, multicast and broadcast packets are only delivered
+ * to the synthetic interface (after subjecting these to
+ * policy filters on the host). Deliver these via the VF
+ * interface in the guest.
+ */
+ vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
+ if (vf_netdev && (vf_netdev->flags & IFF_UP))
+ net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
@@ -719,12 +691,25 @@ vf_injection_done:
++net->stats.rx_dropped;
return NVSP_STAT_FAIL;
}
- skb_record_rx_queue(skb, channel->
- offermsg.offer.sub_channel_index);
+ if (net != vf_netdev)
+ skb_record_rx_queue(skb,
+ channel->offermsg.offer.sub_channel_index);
+
+ /*
+ * Even if injecting the packet, record the statistics
+ * on the synthetic device because modifying the VF device
+ * statistics will not work correctly.
+ */
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += packet->total_data_buflen;
+
+ if (skb->pkt_type == PACKET_BROADCAST)
+ ++rx_stats->broadcast;
+ else if (skb->pkt_type == PACKET_MULTICAST)
+ ++rx_stats->multicast;
u64_stats_update_end(&rx_stats->syncp);
/*
@@ -967,7 +952,7 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
cpu);
struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
cpu);
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
unsigned int start;
do {
@@ -980,12 +965,14 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
rx_packets = rx_stats->packets;
rx_bytes = rx_stats->bytes;
+ rx_multicast = rx_stats->multicast + rx_stats->broadcast;
} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
t->tx_bytes += tx_bytes;
t->tx_packets += tx_packets;
t->rx_bytes += rx_bytes;
t->rx_packets += rx_packets;
+ t->multicast += rx_multicast;
}
t->tx_dropped = net->stats.tx_dropped;
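
The multicast accumulation above rides the existing per-cpu u64_stats scheme: writers bump counters inside u64_stats_update_begin()/_end(), and readers loop on the sequence counter until they observe a consistent snapshot. A minimal sketch of the reader side:

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read(const struct example_stats *s,
			 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}
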
@@ -1215,22 +1202,44 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
-static struct net_device *get_netvsc_net_device(char *mac)
+static struct net_device *get_netvsc_bymac(const u8 *mac)
{
- struct net_device *dev, *found = NULL;
+ struct net_device *dev;
ASSERT_RTNL();
for_each_netdev(&init_net, dev) {
- if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
- if (dev->netdev_ops != &device_ops)
- continue;
- found = dev;
- break;
- }
+ if (dev->netdev_ops != &device_ops)
+ continue; /* not a netvsc device */
+
+ if (ether_addr_equal(mac, dev->perm_addr))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+
+ for_each_netdev(&init_net, dev) {
+ struct net_device_context *net_device_ctx;
+
+ if (dev->netdev_ops != &device_ops)
+ continue; /* not a netvsc device */
+
+ net_device_ctx = netdev_priv(dev);
+ if (net_device_ctx->nvdev == NULL)
+ continue; /* device is removed */
+
+ if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
+ return dev; /* a match */
}
- return found;
+ return NULL;
}
static int netvsc_register_vf(struct net_device *vf_netdev)
@@ -1238,9 +1247,8 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
struct net_device *ndev;
struct net_device_context *net_device_ctx;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
- if (eth_ops == NULL || eth_ops == &ethtool_ops)
+ if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
/*
@@ -1248,13 +1256,13 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* associate with the VF interface. If we don't find a matching
* synthetic interface, move on.
*/
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_bymac(vf_netdev->perm_addr);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || net_device_ctx->vf_netdev)
+ if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1262,46 +1270,26 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
- net_device_ctx->vf_netdev = vf_netdev;
- return NOTIFY_OK;
-}
-static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
-{
- net_device_ctx->vf_inject = true;
-}
-
-static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
-{
- net_device_ctx->vf_inject = false;
-
- /* Wait for currently active users to drain out. */
- while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
- udelay(50);
+ dev_hold(vf_netdev);
+ rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
+ return NOTIFY_OK;
}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
-
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
- netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
@@ -1327,23 +1315,15 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
-
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
-
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
- netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
@@ -1359,23 +1339,19 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
- const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
struct net_device_context *net_device_ctx;
- if (eth_ops == &ethtool_ops)
- return NOTIFY_DONE;
-
- ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ ndev = get_netvsc_byref(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
- if (!netvsc_dev || !net_device_ctx->vf_netdev)
- return NOTIFY_DONE;
+
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
- netvsc_inject_disable(net_device_ctx);
- net_device_ctx->vf_netdev = NULL;
+
+ RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
+ dev_put(vf_netdev);
module_put(THIS_MODULE);
return NOTIFY_OK;
}
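
The conversion above replaces the vf_inject flag and vf_use_cnt spin-wait with a plain RCU-protected pointer: the receive path dereferences it under RCU, while register/unregister publish and retire it under RTNL with a held device reference. A hedged sketch of the discipline; the synchronize_net() before dev_put() is an extra precaution in this sketch, not something the hunks themselves add:

static struct net_device __rcu *example_vf;

static void example_publish(struct net_device *vf)
{
	ASSERT_RTNL();
	dev_hold(vf);				/* pin while published */
	rcu_assign_pointer(example_vf, vf);
}

static void example_retire(struct net_device *vf)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(example_vf, NULL);
	synchronize_net();			/* wait out RCU readers */
	dev_put(vf);
}

static bool example_vf_usable(void)
{
	struct net_device *vf;
	bool up;

	rcu_read_lock();
	vf = rcu_dereference(example_vf);
	up = vf && (vf->flags & IFF_UP);
	rcu_read_unlock();
	return up;
}
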
@@ -1427,10 +1403,6 @@ static int netvsc_probe(struct hv_device *dev,
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
- atomic_set(&net_device_ctx->vf_use_cnt, 0);
- net_device_ctx->vf_netdev = NULL;
- net_device_ctx->vf_inject = false;
-
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
@@ -1539,13 +1511,21 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ /* Skip our own events */
+ if (event_dev->netdev_ops == &device_ops)
+ return NOTIFY_DONE;
+
+ /* Avoid non-Ethernet type devices */
+ if (event_dev->type != ARPHRD_ETHER)
+ return NOTIFY_DONE;
+
/* Avoid Vlan dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if (event_dev->priv_flags & IFF_BONDING &&
- event_dev->flags & IFF_MASTER)
+ if ((event_dev->priv_flags & IFF_BONDING) &&
+ (event_dev->flags & IFF_MASTER))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0becf0ac3926..ec387efb61d0 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -30,7 +30,7 @@
static int numlbs = 2;
static LIST_HEAD(fakelb_phys);
-static DEFINE_SPINLOCK(fakelb_phys_lock);
+static DEFINE_MUTEX(fakelb_phys_lock);
static LIST_HEAD(fakelb_ifup_phys);
static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
@@ -188,9 +188,9 @@ static int fakelb_add_one(struct device *dev)
if (err)
goto err_reg;
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_add_tail(&phy->list, &fakelb_phys);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return 0;
@@ -222,10 +222,10 @@ static int fakelb_probe(struct platform_device *pdev)
return 0;
err_slave:
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return err;
}
@@ -233,10 +233,10 @@ static int fakelb_remove(struct platform_device *pdev)
{
struct fakelb_phy *phy, *tmp;
- spin_lock(&fakelb_phys_lock);
+ mutex_lock(&fakelb_phys_lock);
list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
fakelb_del(phy);
- spin_unlock(&fakelb_phys_lock);
+ mutex_unlock(&fakelb_phys_lock);
return 0;
}
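
The spinlock-to-mutex switch above is a sleep-context fix: fakelb_del() ends up unregistering an ieee802154 hw, a path that may sleep, and sleeping while holding a spinlock is illegal; a mutex may be held across sleeping calls. Minimal sketch:

#include <linux/delay.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void example_teardown(void)
{
	mutex_lock(&example_lock);
	msleep(10);		/* stand-in for a sleeping unregister call;
				 * under a spinlock this would be a bug */
	mutex_unlock(&example_lock);
}
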
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 695a5dc9ace3..7e0732f5ea07 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -23,11 +23,13 @@
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
+#include <linux/netfilter.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/addrconf.h>
+#include <net/l3mdev.h>
#define IPVLAN_DRV "ipvlan"
#define IPV_DRV_VER "0.1"
@@ -124,4 +126,8 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6);
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr);
+struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
+ u16 proto);
+unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state);
#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index b5f9511d819e..b4e990743e1d 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -560,6 +560,7 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
case IPVLAN_MODE_L2:
return ipvlan_xmit_mode_l2(skb, dev);
case IPVLAN_MODE_L3:
+ case IPVLAN_MODE_L3S:
return ipvlan_xmit_mode_l3(skb, dev);
}
@@ -664,6 +665,8 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
return ipvlan_handle_mode_l2(pskb, port);
case IPVLAN_MODE_L3:
return ipvlan_handle_mode_l3(pskb, port);
+ case IPVLAN_MODE_L3S:
+ return RX_HANDLER_PASS;
}
/* Should not reach here */
@@ -672,3 +675,94 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
+
+static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ipvl_addr *addr = NULL;
+ struct ipvl_port *port;
+ void *lyr3h;
+ int addr_type;
+
+ if (!dev || !netif_is_ipvlan_port(dev))
+ goto out;
+
+ port = ipvlan_port_get_rcu(dev);
+ if (!port || port->mode != IPVLAN_MODE_L3S)
+ goto out;
+
+ lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ if (!lyr3h)
+ goto out;
+
+ addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
+out:
+ return addr;
+}
+
+struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
+ u16 proto)
+{
+ struct ipvl_addr *addr;
+ struct net_device *sdev;
+
+ addr = ipvlan_skb_to_addr(skb, dev);
+ if (!addr)
+ goto out;
+
+ sdev = addr->master->dev;
+ switch (proto) {
+ case AF_INET:
+ {
+ int err;
+ struct iphdr *ip4h = ip_hdr(skb);
+
+ err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr,
+ ip4h->tos, sdev);
+ if (unlikely(err))
+ goto out;
+ break;
+ }
+ case AF_INET6:
+ {
+ struct dst_entry *dst;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct flowi6 fl6 = {
+ .flowi6_iif = sdev->ifindex,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_mark = skb->mark,
+ .flowi6_proto = ip6h->nexthdr,
+ };
+
+ skb_dst_drop(skb);
+ dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
+ skb_dst_set(skb, dst);
+ break;
+ }
+ default:
+ break;
+ }
+
+out:
+ return skb;
+}
+
+unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct ipvl_addr *addr;
+ unsigned int len;
+
+ addr = ipvlan_skb_to_addr(skb, skb->dev);
+ if (!addr)
+ goto out;
+
+ skb->dev = addr->master->dev;
+ len = skb->len + ETH_HLEN;
+ ipvlan_count_rx(addr->master, len, true, false);
+out:
+ return NF_ACCEPT;
+}
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 18b4e8c7f68a..f442eb366863 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -9,24 +9,87 @@
#include "ipvlan.h"
+static u32 ipvl_nf_hook_refcnt = 0;
+
+static struct nf_hook_ops ipvl_nfops[] __read_mostly = {
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+ {
+ .hook = ipvlan_nf_input,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = INT_MAX,
+ },
+};
+
+static struct l3mdev_ops ipvl_l3mdev_ops __read_mostly = {
+ .l3mdev_l3_rcv = ipvlan_l3_rcv,
+};
+
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
}
-static void ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+static int ipvlan_register_nf_hook(void)
+{
+ int err = 0;
+
+ if (!ipvl_nf_hook_refcnt) {
+ err = _nf_register_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+ if (!err)
+ ipvl_nf_hook_refcnt = 1;
+ } else {
+ ipvl_nf_hook_refcnt++;
+ }
+
+ return err;
+}
+
+static void ipvlan_unregister_nf_hook(void)
+{
+ WARN_ON(!ipvl_nf_hook_refcnt);
+
+ ipvl_nf_hook_refcnt--;
+ if (!ipvl_nf_hook_refcnt)
+ _nf_unregister_hooks(ipvl_nfops, ARRAY_SIZE(ipvl_nfops));
+}
+
+static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
struct ipvl_dev *ipvlan;
+ struct net_device *mdev = port->dev;
+ int err = 0;
+ ASSERT_RTNL();
if (port->mode != nval) {
+ if (nval == IPVLAN_MODE_L3S) {
+ /* New mode is L3S */
+ err = ipvlan_register_nf_hook();
+ if (!err) {
+ mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+ mdev->priv_flags |= IFF_L3MDEV_MASTER;
+ } else
+ return err;
+ } else if (port->mode == IPVLAN_MODE_L3S) {
+ /* Old mode was L3S */
+ mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook();
+ mdev->l3mdev_ops = NULL;
+ }
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- if (nval == IPVLAN_MODE_L3)
+ if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
ipvlan->dev->flags |= IFF_NOARP;
else
ipvlan->dev->flags &= ~IFF_NOARP;
}
port->mode = nval;
}
+ return err;
}
static int ipvlan_port_create(struct net_device *dev)
@@ -74,6 +137,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
dev->priv_flags &= ~IFF_IPVLAN_MASTER;
+ if (port->mode == IPVLAN_MODE_L3S) {
+ dev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook();
+ dev->l3mdev_ops = NULL;
+ }
netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq);
__skb_queue_purge(&port->backlog);
@@ -132,7 +200,8 @@ static int ipvlan_open(struct net_device *dev)
struct net_device *phy_dev = ipvlan->phy_dev;
struct ipvl_addr *addr;
- if (ipvlan->port->mode == IPVLAN_MODE_L3)
+ if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
+ ipvlan->port->mode == IPVLAN_MODE_L3S)
dev->flags |= IFF_NOARP;
else
dev->flags &= ~IFF_NOARP;
@@ -372,13 +441,14 @@ static int ipvlan_nl_changelink(struct net_device *dev,
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
+ int err = 0;
if (data && data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- ipvlan_set_port_mode(port, nmode);
+ err = ipvlan_set_port_mode(port, nmode);
}
- return 0;
+ return err;
}
static size_t ipvlan_nl_getsize(const struct net_device *dev)
@@ -473,10 +543,13 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
unregister_netdevice(dev);
return err;
}
+ err = ipvlan_set_port_mode(port, mode);
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
- ipvlan_set_port_mode(port, mode);
-
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
}
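
The hook registration in ipvlan_main.c above is a refcounted singleton: one global pair of NF_INET_LOCAL_IN hooks serves every L3S port, installed on the first user and removed with the last, with RTNL serializing the counter. A sketch of the shape (the register/unregister helpers are hypothetical):

static unsigned int example_refcnt;	/* serialized by RTNL here */

static int example_hooks_get(void)
{
	int err = 0;

	if (!example_refcnt) {
		err = example_register_hooks();		/* hypothetical */
		if (err)
			return err;
	}
	example_refcnt++;
	return 0;
}

static void example_hooks_put(void)
{
	WARN_ON(!example_refcnt);
	if (!--example_refcnt)
		example_unregister_hooks();		/* hypothetical */
}
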
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 351e701eb043..3ea47f28e143 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2973,6 +2973,7 @@ static void macsec_setup(struct net_device *dev)
dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &macsec_netdev_ops;
dev->destructor = macsec_free_netdev;
+ SET_NETDEV_DEVTYPE(dev, &macsec_type);
eth_zero_addr(dev->broadcast);
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1c3e07c3d0b8..5078a0d0db64 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -274,9 +274,9 @@ config MICROCHIP_PHY
Supports the LAN88XX PHYs.
config MICROSEMI_PHY
- tristate "Microsemi PHYs"
- ---help---
- Currently supports the VSC8531 and VSC8541 PHYs
+ tristate "Microsemi PHYs"
+ ---help---
+ Currently supports the VSC8531 and VSC8541 PHYs
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
@@ -320,6 +320,13 @@ config XILINX_GMII2RGMII
the Reduced Gigabit Media Independent Interface(RGMII) between
Ethernet physical media devices and the Gigabit Ethernet controller.
+config MDIO_XGENE
+ tristate "APM X-Gene SoC MDIO bus controller"
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This module provides a driver for the MDIO buses found in the
+ APM X-Gene SoCs.
+
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 775674808249..92af182951be 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(mdio_bus);
mdiobus_free(mdio_bus);
- if (dev->of_node) {
- if (IS_ERR(pdata->clk))
- clk_disable_unprepare(pdata->clk);
- }
+ if (dev->of_node)
+ clk_disable_unprepare(pdata->clk);
return 0;
}
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 15f820648f82..7c00e508a101 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -55,7 +55,7 @@ static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
-int lan88xx_suspend(struct phy_device *phydev)
+static int lan88xx_suspend(struct phy_device *phydev)
{
struct lan88xx_priv *priv = phydev->priv;
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index ad33390b382a..d350debd174a 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -13,135 +13,186 @@
#include <linux/phy.h>
enum rgmii_rx_clock_delay {
- RGMII_RX_CLK_DELAY_0_2_NS = 0,
- RGMII_RX_CLK_DELAY_0_8_NS = 1,
- RGMII_RX_CLK_DELAY_1_1_NS = 2,
- RGMII_RX_CLK_DELAY_1_7_NS = 3,
- RGMII_RX_CLK_DELAY_2_0_NS = 4,
- RGMII_RX_CLK_DELAY_2_3_NS = 5,
- RGMII_RX_CLK_DELAY_2_6_NS = 6,
- RGMII_RX_CLK_DELAY_3_4_NS = 7
+ RGMII_RX_CLK_DELAY_0_2_NS = 0,
+ RGMII_RX_CLK_DELAY_0_8_NS = 1,
+ RGMII_RX_CLK_DELAY_1_1_NS = 2,
+ RGMII_RX_CLK_DELAY_1_7_NS = 3,
+ RGMII_RX_CLK_DELAY_2_0_NS = 4,
+ RGMII_RX_CLK_DELAY_2_3_NS = 5,
+ RGMII_RX_CLK_DELAY_2_6_NS = 6,
+ RGMII_RX_CLK_DELAY_3_4_NS = 7
};
-#define MII_VSC85XX_INT_MASK 25
-#define MII_VSC85XX_INT_MASK_MASK 0xa000
-#define MII_VSC85XX_INT_STATUS 26
+/* Microsemi VSC85xx PHY registers */
+/* IEEE 802. Std Registers */
+#define MSCC_PHY_EXT_PHY_CNTL_1 23
+#define MAC_IF_SELECTION_MASK 0x1800
+#define MAC_IF_SELECTION_GMII 0
+#define MAC_IF_SELECTION_RMII 1
+#define MAC_IF_SELECTION_RGMII 2
+#define MAC_IF_SELECTION_POS 11
+#define FAR_END_LOOPBACK_MODE_MASK 0x0008
-#define MSCC_EXT_PAGE_ACCESS 31
-#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
-#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
+#define MII_VSC85XX_INT_MASK 25
+#define MII_VSC85XX_INT_MASK_MASK 0xa000
+#define MII_VSC85XX_INT_STATUS 26
+
+#define MSCC_EXT_PAGE_ACCESS 31
+#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
+#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
/* Extended Page 2 Registers */
-#define MSCC_PHY_RGMII_CNTL 20
-#define RGMII_RX_CLK_DELAY_MASK 0x0070
-#define RGMII_RX_CLK_DELAY_POS 4
+#define MSCC_PHY_RGMII_CNTL 20
+#define RGMII_RX_CLK_DELAY_MASK 0x0070
+#define RGMII_RX_CLK_DELAY_POS 4
/* Microsemi PHY ID's */
-#define PHY_ID_VSC8531 0x00070570
-#define PHY_ID_VSC8541 0x00070770
+#define PHY_ID_VSC8531 0x00070570
+#define PHY_ID_VSC8541 0x00070770
static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
{
- int rc;
+ int rc;
- rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
- return rc;
+ rc = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, page);
+ return rc;
+}
+
+static int vsc85xx_mac_if_set(struct phy_device *phydev,
+ phy_interface_t interface)
+{
+ int rc;
+ u16 reg_val;
+
+ mutex_lock(&phydev->lock);
+ reg_val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+ reg_val &= ~(MAC_IF_SELECTION_MASK);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ reg_val |= (MAC_IF_SELECTION_RGMII << MAC_IF_SELECTION_POS);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg_val |= (MAC_IF_SELECTION_RMII << MAC_IF_SELECTION_POS);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ reg_val |= (MAC_IF_SELECTION_GMII << MAC_IF_SELECTION_POS);
+ break;
+ default:
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+ rc = phy_write(phydev, MSCC_PHY_EXT_PHY_CNTL_1, reg_val);
+ if (rc != 0)
+ goto out_unlock;
+
+ rc = genphy_soft_reset(phydev);
+
+out_unlock:
+ mutex_unlock(&phydev->lock);
+
+ return rc;
}
static int vsc85xx_default_config(struct phy_device *phydev)
{
- int rc;
- u16 reg_val;
+ int rc;
+ u16 reg_val;
- mutex_lock(&phydev->lock);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
- if (rc != 0)
- goto out_unlock;
+ mutex_lock(&phydev->lock);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
+ if (rc != 0)
+ goto out_unlock;
- reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
- reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
- reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
- phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
- rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
+ reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
+ reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
+ reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
+ phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
+ rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
out_unlock:
- mutex_unlock(&phydev->lock);
+ mutex_unlock(&phydev->lock);
- return rc;
+ return rc;
}
static int vsc85xx_config_init(struct phy_device *phydev)
{
- int rc;
+ int rc;
+
+ rc = vsc85xx_default_config(phydev);
+ if (rc)
+ return rc;
+
+ rc = vsc85xx_mac_if_set(phydev, phydev->interface);
+ if (rc)
+ return rc;
- rc = vsc85xx_default_config(phydev);
- if (rc)
- return rc;
- rc = genphy_config_init(phydev);
+ rc = genphy_config_init(phydev);
- return rc;
+ return rc;
}
static int vsc85xx_ack_interrupt(struct phy_device *phydev)
{
- int rc = 0;
+ int rc = 0;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
- return (rc < 0) ? rc : 0;
+ return (rc < 0) ? rc : 0;
}
static int vsc85xx_config_intr(struct phy_device *phydev)
{
- int rc;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
- MII_VSC85XX_INT_MASK_MASK);
- } else {
- rc = phy_write(phydev, MII_VSC85XX_INT_MASK, 0);
- if (rc < 0)
- return rc;
- rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
- }
-
- return rc;
+ int rc;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
+ MII_VSC85XX_INT_MASK_MASK);
+ } else {
+ rc = phy_write(phydev, MII_VSC85XX_INT_MASK, 0);
+ if (rc < 0)
+ return rc;
+ rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ }
+
+ return rc;
}
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
- .phy_id = PHY_ID_VSC8531,
- .name = "Microsemi VSC8531",
- .phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .soft_reset = &genphy_soft_reset,
- .config_init = &vsc85xx_config_init,
- .config_aneg = &genphy_config_aneg,
- .aneg_done = &genphy_aneg_done,
- .read_status = &genphy_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
- .config_intr = &vsc85xx_config_intr,
- .suspend = &genphy_suspend,
- .resume = &genphy_resume,
+ .phy_id = PHY_ID_VSC8531,
+ .name = "Microsemi VSC8531",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
},
{
- .phy_id = PHY_ID_VSC8541,
- .name = "Microsemi VSC8541 SyncE",
- .phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .soft_reset = &genphy_soft_reset,
- .config_init = &vsc85xx_config_init,
- .config_aneg = &genphy_config_aneg,
- .aneg_done = &genphy_aneg_done,
- .read_status = &genphy_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
- .config_intr = &vsc85xx_config_intr,
- .suspend = &genphy_suspend,
- .resume = &genphy_resume,
+ .phy_id = PHY_ID_VSC8541,
+ .name = "Microsemi VSC8541 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc85xx_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .aneg_done = &genphy_aneg_done,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
}
};
@@ -149,9 +200,9 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
- { PHY_ID_VSC8531, 0xfffffff0, },
- { PHY_ID_VSC8541, 0xfffffff0, },
- { }
+ { PHY_ID_VSC8531, 0xfffffff0, },
+ { PHY_ID_VSC8541, 0xfffffff0, },
+ { }
};
MODULE_DEVICE_TABLE(mdio, vsc85xx_tbl);
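
The RGMII delay change above is a textbook read-modify-write on a paged PHY register: select the page, clear the delay field with its mask, OR in the 1.1 ns value shifted to the field position, and restore the standard page. A minimal standalone sketch of that pattern, reusing the macros visible in the hunk; the helper name and the extended-page constant are assumptions of this sketch, not taken from the patch:

#include <linux/phy.h>

static int vsc85xx_rgmii_rx_delay_set(struct phy_device *phydev)
{
	int reg_val, rc;

	/* the RGMII control register sits on an extended page; the exact
	 * page constant here is an assumption of this sketch
	 */
	rc = vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_EXTENDED_2);
	if (rc)
		return rc;

	reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
	if (reg_val < 0)
		return reg_val;

	reg_val &= ~RGMII_RX_CLK_DELAY_MASK;	/* clear the old delay */
	reg_val |= RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
	phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);

	/* always land back on the standard page */
	return vsc85xx_phy_page_set(phydev, MSCC_PHY_PAGE_STANDARD);
}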
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 528b9c9c4e60..66b34ddbe216 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -265,8 +265,6 @@ static int kaweth_control(struct kaweth_device *kaweth,
struct usb_ctrlrequest *dr;
int retval;
- netdev_dbg(kaweth->net, "kaweth_control()\n");
-
if(in_interrupt()) {
netdev_dbg(kaweth->net, "in_interrupt()\n");
return -EBUSY;
@@ -300,8 +298,6 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
{
int retval;
- netdev_dbg(kaweth->net, "Reading kaweth configuration\n");
-
retval = kaweth_control(kaweth,
usb_rcvctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_GET_ETHERNET_DESC,
@@ -451,8 +447,6 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf[6] = 0x00;
kaweth->firmware_buf[7] = 0x00;
- netdev_dbg(kaweth->net, "Triggering firmware\n");
-
return kaweth_control(kaweth,
usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SCAN,
@@ -471,7 +465,6 @@ static int kaweth_reset(struct kaweth_device *kaweth)
{
int result;
- netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth);
result = usb_reset_configuration(kaweth->dev);
mdelay(10);
@@ -685,8 +678,6 @@ static int kaweth_open(struct net_device *net)
struct kaweth_device *kaweth = netdev_priv(net);
int res;
- netdev_dbg(kaweth->net, "Opening network device.\n");
-
res = usb_autopm_get_interface(kaweth->intf);
if (res) {
dev_err(&kaweth->intf->dev, "Interface cannot be resumed.\n");
@@ -951,7 +942,6 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
- dev_dbg(&intf->dev, "Suspending device\n");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status |= KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -968,7 +958,6 @@ static int kaweth_resume(struct usb_interface *intf)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
unsigned long flags;
- dev_dbg(&intf->dev, "Resuming device\n");
spin_lock_irqsave(&kaweth->device_lock, flags);
kaweth->status &= ~KAWETH_STATUS_SUSPENDING;
spin_unlock_irqrestore(&kaweth->device_lock, flags);
@@ -1190,8 +1179,6 @@ err_fw:
dev_info(dev, "kaweth interface created at %s\n",
kaweth->net->name);
- dev_dbg(dev, "Kaweth probe returning.\n");
-
return 0;
err_intfdata:
@@ -1219,8 +1206,6 @@ static void kaweth_disconnect(struct usb_interface *intf)
struct kaweth_device *kaweth = usb_get_intfdata(intf);
struct net_device *netdev;
- dev_info(&intf->dev, "Unregistering\n");
-
usb_set_intfdata(intf, NULL);
if (!kaweth) {
dev_warn(&intf->dev, "unregistering non-existent device\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 9338f5812b7e..44d439f50961 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "5"
+#define NET_VERSION "6"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2551,6 +2551,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
}
}
+static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
+{
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
+ ocp_reg_write(tp, OCP_EEE_DATA, reg);
+ ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
+}
+
+static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
+{
+ u16 data;
+
+ r8152_mmd_indirect(tp, dev, reg);
+ data = ocp_reg_read(tp, OCP_EEE_DATA);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+
+ return data;
+}
+
+static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
+{
+ r8152_mmd_indirect(tp, dev, reg);
+ ocp_reg_write(tp, OCP_EEE_DATA, data);
+ ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
+}
+
+static void r8152_eee_en(struct r8152 *tp, bool enable)
+{
+ u16 config1, config2, config3;
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
+ config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
+ config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
+ config1 |= sd_rise_time(1);
+ config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
+ config3 |= fast_snr(42);
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
+ RX_QUIET_EN);
+ config1 |= sd_rise_time(7);
+ config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
+ config3 |= fast_snr(511);
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
+ ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
+ ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
+}
+
+static void r8152b_enable_eee(struct r8152 *tp)
+{
+ r8152_eee_en(tp, true);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+}
+
+static void r8152b_enable_fc(struct r8152 *tp)
+{
+ u16 anar;
+
+ anar = r8152_mdio_read(tp, MII_ADVERTISE);
+ anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ r8152_mdio_write(tp, MII_ADVERTISE, anar);
+}
+
static void rtl8152_disable(struct r8152 *tp)
{
r8152_aldps_en(tp, false);
@@ -2560,13 +2631,9 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
- u16 data;
-
- data = r8152_mdio_read(tp, MII_BMCR);
- if (data & BMCR_PDOWN) {
- data &= ~BMCR_PDOWN;
- r8152_mdio_write(tp, MII_BMCR, data);
- }
+ r8152b_enable_eee(tp);
+ r8152_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
set_bit(PHY_RESET, &tp->flags);
}
@@ -2700,20 +2767,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
+static void r8153_aldps_en(struct r8152 *tp, bool enable)
+{
+ u16 data;
+
+ data = ocp_reg_read(tp, OCP_POWER_CFG);
+ if (enable) {
+ data |= EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ } else {
+ data &= ~EN_ALDPS;
+ ocp_reg_write(tp, OCP_POWER_CFG, data);
+ msleep(20);
+ }
+}
+
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+ u16 config;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config |= EEE10_EN;
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config &= ~EEE10_EN;
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
- if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
- tp->version == RTL_VER_05)
- ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+ /* disable ALDPS before updating the PHY parameters */
+ r8153_aldps_en(tp, false);
- data = r8152_mdio_read(tp, MII_BMCR);
- if (data & BMCR_PDOWN) {
- data &= ~BMCR_PDOWN;
- r8152_mdio_write(tp, MII_BMCR, data);
- }
+ /* disable EEE before updating the PHY parameters */
+ r8153_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -2744,6 +2843,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
sram_write(tp, SRAM_10M_AMP1, 0x00af);
sram_write(tp, SRAM_10M_AMP2, 0x0208);
+ r8153_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+
+ r8153_aldps_en(tp, true);
+ r8152b_enable_fc(tp);
+
set_bit(PHY_RESET, &tp->flags);
}
@@ -2865,21 +2970,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8153_aldps_en(struct r8152 *tp, bool enable)
-{
- u16 data;
-
- data = ocp_reg_read(tp, OCP_POWER_CFG);
- if (enable) {
- data |= EN_ALDPS;
- ocp_reg_write(tp, OCP_POWER_CFG, data);
- } else {
- data &= ~EN_ALDPS;
- ocp_reg_write(tp, OCP_POWER_CFG, data);
- msleep(20);
- }
-}
-
static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
@@ -3245,103 +3335,6 @@ static int rtl8152_close(struct net_device *netdev)
return res;
}
-static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
-{
- ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
- ocp_reg_write(tp, OCP_EEE_DATA, reg);
- ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
-}
-
-static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
-{
- u16 data;
-
- r8152_mmd_indirect(tp, dev, reg);
- data = ocp_reg_read(tp, OCP_EEE_DATA);
- ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-
- return data;
-}
-
-static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
-{
- r8152_mmd_indirect(tp, dev, reg);
- ocp_reg_write(tp, OCP_EEE_DATA, data);
- ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
-}
-
-static void r8152_eee_en(struct r8152 *tp, bool enable)
-{
- u16 config1, config2, config3;
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
- config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
- config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
- config1 |= sd_rise_time(1);
- config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
- config3 |= fast_snr(42);
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
- RX_QUIET_EN);
- config1 |= sd_rise_time(7);
- config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
- config3 |= fast_snr(511);
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
- ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
- ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
-}
-
-static void r8152b_enable_eee(struct r8152 *tp)
-{
- r8152_eee_en(tp, true);
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
-}
-
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
- u16 config;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config = ocp_reg_read(tp, OCP_EEE_CFG);
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config |= EEE10_EN;
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config &= ~EEE10_EN;
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153_enable_eee(struct r8152 *tp)
-{
- r8153_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
-}
-
-static void r8152b_enable_fc(struct r8152 *tp)
-{
- u16 anar;
-
- anar = r8152_mdio_read(tp, MII_ADVERTISE);
- anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- r8152_mdio_write(tp, MII_ADVERTISE, anar);
-}
-
static void rtl_tally_reset(struct r8152 *tp)
{
u32 ocp_data;
@@ -3354,10 +3347,17 @@ static void rtl_tally_reset(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
+ u16 data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
r8152_aldps_en(tp, false);
if (tp->version == RTL_VER_01) {
@@ -3379,9 +3379,6 @@ static void r8152b_init(struct r8152 *tp)
SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
- r8152b_enable_eee(tp);
- r8152_aldps_en(tp, true);
- r8152b_enable_fc(tp);
rtl_tally_reset(tp);
/* enable rx aggregation */
@@ -3393,12 +3390,12 @@ static void r8152b_init(struct r8152 *tp)
static void r8153_init(struct r8152 *tp)
{
u32 ocp_data;
+ u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
- r8153_aldps_en(tp, false);
r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
@@ -3415,6 +3412,23 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
+ if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+ tp->version == RTL_VER_05)
+ ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ for (i = 0; i < 500; i++) {
+ ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
+ if (ocp_data == PHY_STAT_LAN_ON)
+ break;
+ msleep(20);
+ }
+
usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
@@ -3482,9 +3496,6 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
- r8153_enable_eee(tp);
- r8153_aldps_en(tp, true);
- r8152b_enable_fc(tp);
rtl_tally_reset(tp);
r8153_u2p3en(tp, true);
}
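
The r8152_mmd_* helpers moved up in this patch implement the usual two-phase indirect MMD access: r8152_mmd_indirect() writes the target register number with the function field in address mode (FUN_ADDR), then flips the field to data mode (FUN_DATA) so that OCP_EEE_DATA accesses hit the selected register; writing 0 to OCP_EEE_AR closes the window. A hedged usage sketch built only on the helpers shown above (the wrapper name is hypothetical):

#include <linux/mdio.h>

/* sketch: advertise only 100BASE-TX EEE and verify it stuck; the wrapper
 * name is hypothetical, the r8152_mmd_* calls are the ones moved above
 */
static void r8152b_eee_adv_sketch(struct r8152 *tp)
{
	u16 adv;

	r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);

	adv = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
	if (!(adv & MDIO_EEE_100TX))
		netif_warn(tp, link, tp->netdev, "EEE advertisement lost\n");
}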
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 1ce7420322ee..85c271c70d42 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -37,9 +37,6 @@
#include <net/l3mdev.h>
#include <net/fib_rules.h>
-#define RT_FL_TOS(oldflp4) \
- ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
@@ -137,6 +134,20 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
+static int vrf_ip6_local_out(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
+ sk, skb, NULL, skb_dst(skb)->dev, dst_output);
+
+ if (likely(err == 1))
+ err = dst_output(net, sk, skb);
+
+ return err;
+}
+
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
@@ -151,7 +162,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
.flowlabel = ip6_flowinfo(iph),
.flowi6_mark = skb->mark,
.flowi6_proto = iph->nexthdr,
- .flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
};
int ret = NET_XMIT_DROP;
struct dst_entry *dst;
@@ -207,7 +218,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
/* strip the ethernet header added for pass through VRF device */
__skb_pull(skb, skb_network_offset(skb));
- ret = ip6_local_out(net, skb->sk, skb);
+ ret = vrf_ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
else
@@ -227,6 +238,20 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
}
#endif
+/* based on ip_local_out; can't use it because the dst is switched to point to us */
+static int vrf_ip_local_out(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int err;
+
+ err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
+ skb, NULL, skb_dst(skb)->dev, dst_output);
+ if (likely(err == 1))
+ err = dst_output(net, sk, skb);
+
+ return err;
+}
+
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
@@ -237,8 +262,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
.flowi4_oif = vrf_dev->ifindex,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
- FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
.daddr = ip4h->daddr,
};
struct net *net = dev_net(vrf_dev);
@@ -292,7 +316,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
RT_SCOPE_LINK);
}
- ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+ ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
vrf_dev->stats.tx_errors++;
else
@@ -377,6 +401,43 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
+/* set dst on skb to send the packet to us via the dev_xmit path. Allows
+ * the packet to go through device-based features such as qdisc, netfilter
+ * hooks and packet sockets with skb->dev set to the vrf device.
+ */
+static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+ struct dst_entry *dst = NULL;
+ struct rt6_info *rt6;
+
+ /* don't divert link scope packets */
+ if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
+ return skb;
+
+ rcu_read_lock();
+
+ rt6 = rcu_dereference(vrf->rt6);
+ if (likely(rt6)) {
+ dst = &rt6->dst;
+ dst_hold(dst);
+ }
+
+ rcu_read_unlock();
+
+ if (unlikely(!dst)) {
+ vrf_tx_error(vrf_dev, skb);
+ return NULL;
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ return skb;
+}
+
/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
@@ -463,6 +524,13 @@ out:
return rc;
}
#else
+static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ return skb;
+}
+
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}
@@ -531,6 +599,55 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+/* set dst on skb to send the packet to us via the dev_xmit path. Allows
+ * the packet to go through device-based features such as qdisc, netfilter
+ * hooks and packet sockets with skb->dev set to the vrf device.
+ */
+static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct net_vrf *vrf = netdev_priv(vrf_dev);
+ struct dst_entry *dst = NULL;
+ struct rtable *rth;
+
+ rcu_read_lock();
+
+ rth = rcu_dereference(vrf->rth);
+ if (likely(rth)) {
+ dst = &rth->dst;
+ dst_hold(dst);
+ }
+
+ rcu_read_unlock();
+
+ if (unlikely(!dst)) {
+ vrf_tx_error(vrf_dev, skb);
+ return NULL;
+ }
+
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst);
+
+ return skb;
+}
+
+/* called with rcu lock held */
+static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
+ struct sock *sk,
+ struct sk_buff *skb,
+ u16 proto)
+{
+ switch (proto) {
+ case AF_INET:
+ return vrf_ip_out(vrf_dev, sk, skb);
+ case AF_INET6:
+ return vrf_ip6_out(vrf_dev, sk, skb);
+ }
+
+ return skb;
+}
+
/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
@@ -722,63 +839,6 @@ static u32 vrf_fib_table(const struct net_device *dev)
return vrf->tb_id;
}
-static struct rtable *vrf_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- struct rtable *rth = NULL;
-
- if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
- struct net_vrf *vrf = netdev_priv(dev);
-
- rcu_read_lock();
-
- rth = rcu_dereference(vrf->rth);
- if (likely(rth))
- dst_hold(&rth->dst);
-
- rcu_read_unlock();
- }
-
- return rth;
-}
-
-/* called under rcu_read_lock */
-static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
-{
- struct fib_result res = { .tclassid = 0 };
- struct net *net = dev_net(dev);
- u32 orig_tos = fl4->flowi4_tos;
- u8 flags = fl4->flowi4_flags;
- u8 scope = fl4->flowi4_scope;
- u8 tos = RT_FL_TOS(fl4);
- int rc;
-
- if (unlikely(!fl4->daddr))
- return 0;
-
- fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
- fl4->flowi4_iif = LOOPBACK_IFINDEX;
- /* make sure oif is set to VRF device for lookup */
- fl4->flowi4_oif = dev->ifindex;
- fl4->flowi4_tos = tos & IPTOS_RT_MASK;
- fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
- RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
-
- rc = fib_lookup(net, fl4, &res, 0);
- if (!rc) {
- if (res.type == RTN_LOCAL)
- fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
- else
- fib_select_path(net, &res, fl4, -1);
- }
-
- fl4->flowi4_flags = flags;
- fl4->flowi4_tos = orig_tos;
- fl4->flowi4_scope = scope;
-
- return rc;
-}
-
static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return 0;
@@ -970,106 +1030,44 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
- struct flowi6 *fl6)
+/* send to a link-local or multicast address via an interface enslaved to
+ * the VRF device. Force the lookup into the VRF table without changing
+ * the flow struct.
+ */
+static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
+ struct flowi6 *fl6)
{
- bool need_strict = rt6_need_strict(&fl6->daddr);
- struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
+ int flags = RT6_LOOKUP_F_IFACE;
struct dst_entry *dst = NULL;
struct rt6_info *rt;
- /* send to link-local or multicast address */
- if (need_strict) {
- int flags = RT6_LOOKUP_F_IFACE;
-
- /* VRF device does not have a link-local address and
- * sending packets to link-local or mcast addresses over
- * a VRF device does not make sense
- */
- if (fl6->flowi6_oif == dev->ifindex) {
- struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst;
-
- dst_hold(dst);
- return dst;
- }
-
- if (!ipv6_addr_any(&fl6->saddr))
- flags |= RT6_LOOKUP_F_HAS_SADDR;
-
- rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
- if (rt)
- dst = &rt->dst;
-
- } else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
-
- rcu_read_lock();
-
- rt = rcu_dereference(vrf->rt6);
- if (likely(rt)) {
- dst = &rt->dst;
- dst_hold(dst);
- }
-
- rcu_read_unlock();
+ /* VRF device does not have a link-local address and
+ * sending packets to link-local or mcast addresses over
+ * a VRF device does not make sense
+ */
+ if (fl6->flowi6_oif == dev->ifindex) {
+ dst = &net->ipv6.ip6_null_entry->dst;
+ dst_hold(dst);
+ return dst;
}
- /* make sure oif is set to VRF device for lookup */
- if (!need_strict)
- fl6->flowi6_oif = dev->ifindex;
-
- return dst;
-}
-
-/* called under rcu_read_lock */
-static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk,
- struct flowi6 *fl6)
-{
- struct net *net = dev_net(dev);
- struct dst_entry *dst;
- struct rt6_info *rt;
- int err;
-
- if (rt6_need_strict(&fl6->daddr)) {
- rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif,
- RT6_LOOKUP_F_IFACE);
- if (unlikely(!rt))
- return 0;
+ if (!ipv6_addr_any(&fl6->saddr))
+ flags |= RT6_LOOKUP_F_HAS_SADDR;
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
+ if (rt)
dst = &rt->dst;
- } else {
- __u8 flags = fl6->flowi6_flags;
- fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
- fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF;
-
- dst = ip6_route_output(net, sk, fl6);
- rt = (struct rt6_info *)dst;
-
- fl6->flowi6_flags = flags;
- }
-
- err = dst->error;
- if (!err) {
- err = ip6_route_get_saddr(net, rt, &fl6->daddr,
- sk ? inet6_sk(sk)->srcprefs : 0,
- &fl6->saddr);
- }
-
- dst_release(dst);
-
- return err;
+ return dst;
}
#endif
static const struct l3mdev_ops vrf_l3mdev_ops = {
.l3mdev_fib_table = vrf_fib_table,
- .l3mdev_get_rtable = vrf_get_rtable,
- .l3mdev_get_saddr = vrf_get_saddr,
.l3mdev_l3_rcv = vrf_l3_rcv,
+ .l3mdev_l3_out = vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
- .l3mdev_get_rt6_dst = vrf_get_rt6_dst,
- .l3mdev_get_saddr6 = vrf_get_saddr6,
+ .l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};
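
vrf_ip_out() and vrf_ip6_out() above share one RCU idiom: dereference the VRF's cached route under rcu_read_lock(), take a dst_hold() reference before unlocking, and hand ownership of that reference to the skb via skb_dst_set(). The same idiom factored out as a sketch (the helper name is hypothetical):

#include <net/route.h>

/* sketch: grab a referenced dst from an RCU-protected cache; the caller
 * attaches it with skb_dst_set(), which takes ownership of the reference
 */
static struct dst_entry *vrf_cached_dst_get(struct rtable __rcu **cache)
{
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();
	rth = rcu_dereference(*cache);
	if (rth) {
		dst = &rth->dst;
		dst_hold(dst);	/* reference must outlive the RCU section */
	}
	rcu_read_unlock();

	return dst;
}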
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 199dec033cf8..e7d16687538b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1291,7 +1291,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
struct metadata_dst *tun_dst;
tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
- vxlan_vni_to_tun_id(vni), sizeof(*md));
+ key32_to_tunnel_id(vni), sizeof(*md));
if (!tun_dst)
goto drop;
@@ -1945,7 +1945,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
- vni = vxlan_tun_id_to_vni(info->key.tun_id);
+ vni = tunnel_id_to_key32(info->key.tun_id);
remote_ip.sa.sa_family = ip_tunnel_info_af(info);
if (remote_ip.sa.sa_family == AF_INET) {
remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
@@ -2780,14 +2780,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
struct net_device *lowerdev = NULL;
if (conf->flags & VXLAN_F_GPE) {
- if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
- return -EINVAL;
/* For now, allow GPE only together with COLLECT_METADATA.
* This can be relaxed later; in such case, the other side
* of the PtP link will have to be provided.
*/
- if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) ||
+ !(conf->flags & VXLAN_F_COLLECT_METADATA)) {
+ pr_info("unsupported combination of extensions\n");
return -EINVAL;
+ }
vxlan_raw_setup(dev);
} else {
@@ -2840,6 +2841,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
needed_headroom = lowerdev->hard_header_len;
+ } else if (vxlan_addr_multicast(&dst->remote_ip)) {
+ pr_info("multicast destination requires interface to be specified\n");
+ return -EINVAL;
}
if (conf->mtu) {
@@ -2872,8 +2876,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
tmp->cfg.dst_port == vxlan->cfg.dst_port &&
(tmp->flags & VXLAN_F_RCV_FLAGS) ==
- (vxlan->flags & VXLAN_F_RCV_FLAGS))
- return -EEXIST;
+ (vxlan->flags & VXLAN_F_RCV_FLAGS)) {
+ pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni));
+ return -EEXIST;
+ }
}
dev->ethtool_ops = &vxlan_ethtool_ops;
@@ -2907,7 +2913,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vxlan_config conf;
- int err;
memset(&conf, 0, sizeof(conf));
@@ -3016,26 +3021,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_MTU])
conf.mtu = nla_get_u32(tb[IFLA_MTU]);
- err = vxlan_dev_configure(src_net, dev, &conf);
- switch (err) {
- case -ENODEV:
- pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
- break;
-
- case -EPERM:
- pr_info("IPv6 is disabled via sysctl\n");
- break;
-
- case -EEXIST:
- pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
- break;
-
- case -EINVAL:
- pr_info("unsupported combination of extensions\n");
- break;
- }
-
- return err;
+ return vxlan_dev_configure(src_net, dev, &conf);
}
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
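
The rename from the vxlan-private VNI helpers to key32_to_tunnel_id()/tunnel_id_to_key32() reflects that the conversion is generic: a 32-bit big-endian tunnel key is placed in the 64-bit tun_id so that its bytes keep their on-wire position on either endianness. A sketch of how those generic helpers are typically implemented (recalled shape of include/net/ip_tunnels.h, not a verbatim quote):

#include <linux/types.h>

/* sketch: __be32 key <-> __be64 tun_id, preserving byte positions */
static inline __be64 key32_to_tunnel_id(__be32 key)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)key;
#else
	return (__force __be64)((__force u64)key << 32);
#endif
}

static inline __be32 tunnel_id_to_key32(__be64 tun_id)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)tun_id;
#else
	return (__force __be32)((__force u64)tun_id >> 32);
#endif
}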
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index acec16b9cf49..b99ad5df383d 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -462,13 +462,13 @@ static void ath10k_ahb_halt_chip(struct ath10k *ar)
static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
{
struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
ath10k_pci_disable_and_clear_legacy_irq(ar);
- tasklet_schedule(&ar_pci->intr_tq);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
@@ -577,7 +577,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
@@ -717,6 +717,9 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar)
synchronize_irq(ar_ahb->irq);
ath10k_pci_flush(ar);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
}
static int ath10k_ahb_hif_power_up(struct ath10k *ar)
@@ -748,6 +751,7 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce_deinit;
}
+ napi_enable(&ar->napi);
return 0;
@@ -831,7 +835,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
goto err_resource_deinit;
}
- ath10k_pci_init_irq_tasklets(ar);
+ ath10k_pci_init_napi(ar);
ret = ath10k_ahb_request_irq_legacy(ar);
if (ret)
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 3d29b0875b3e..2872d347ea78 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
- "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ "bmi fast download address 0x%x buffer 0x%pK length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 9fb8d7472d18..65d8d714e917 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -840,7 +840,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot init ce src ring id %d entries %d base_addr %p\n",
+ "boot init ce src ring id %d entries %d base_addr %pK\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@@ -874,7 +874,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
+ "boot ce dest ring id %d entries %d base_addr %pK\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index e88982921aa3..3a8984ba9f74 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -60,7 +60,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
@@ -68,6 +67,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA9887_HW_1_0_VERSION,
@@ -79,7 +79,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 2116,
.fw = {
.dir = QCA9887_HW_1_0_FW_DIR,
@@ -87,6 +86,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -104,6 +104,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -114,7 +115,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
@@ -122,6 +122,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -132,7 +133,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
@@ -140,6 +140,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -150,7 +151,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
.cal_data_len = 8124,
.fw = {
/* uses same binaries as hw3.0 */
@@ -159,6 +159,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -171,7 +172,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
@@ -182,6 +182,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@@ -194,7 +196,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
@@ -205,6 +206,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@@ -216,7 +219,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.continuous_frag_desc = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 3,
.rx_chain_mask = 3,
.max_spatial_stream = 2,
@@ -227,6 +229,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -244,6 +248,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -261,6 +266,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -274,7 +280,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
- .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
.tx_chain_mask = 0x3,
.rx_chain_mask = 0x3,
.max_spatial_stream = 2,
@@ -285,6 +290,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
},
};
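
The repeated .hw_ops additions replace the removed hw_4addr_pad enum with a per-chip ops table: QCA99X0-family chips report their L3 padding through a callback, while QCA988x-family chips use a default of zero. A sketch of the shape such an ops hook takes (the real definitions live in hw.h/hw.c, which this excerpt does not include):

/* sketch of the per-chip ops indirection; the hw.h/hw.c details are
 * assumed, not shown in this patch excerpt
 */
struct ath10k_hw_ops {
	int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
};

static inline int
ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
				struct htt_rx_desc *rxd)
{
	if (hw->hw_ops && hw->hw_ops->rx_desc_get_l3_pad_bytes)
		return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);

	return 0;	/* QCA988x-family: no L3 padding */
}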
@@ -304,6 +311,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
[ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
[ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+ [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -699,7 +707,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
if (!ar->running_fw->fw_file.otp_data ||
!ar->running_fw->fw_file.otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
ar->running_fw->fw_file.otp_data,
ar->running_fw->fw_file.otp_len);
return 0;
@@ -745,7 +753,7 @@ static int ath10k_download_fw(struct ath10k *ar)
data = ar->running_fw->fw_file.firmware_data;
data_len = ar->running_fw->fw_file.firmware_len;
- ret = ath10k_swap_code_seg_configure(ar);
+ ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
if (ret) {
ath10k_err(ar, "failed to configure fw code swap: %d\n",
ret);
@@ -753,7 +761,7 @@ static int ath10k_download_fw(struct ath10k *ar)
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %p len %d\n",
+ "boot uploading firmware image %pK len %d\n",
data, data_len);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
@@ -787,7 +795,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (!IS_ERR(ar->pre_cal_file))
release_firmware(ar->pre_cal_file);
- ath10k_swap_code_seg_release(ar);
+ ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
ar->normal_mode_fw.fw_file.otp_data = NULL;
ar->normal_mode_fw.fw_file.otp_len = 0;
@@ -1497,14 +1505,14 @@ static void ath10k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath10k_drain_tx(ar);
- complete_all(&ar->scan.started);
- complete_all(&ar->scan.completed);
- complete_all(&ar->scan.on_channel);
- complete_all(&ar->offchan_tx_completed);
- complete_all(&ar->install_key_done);
- complete_all(&ar->vdev_setup_done);
- complete_all(&ar->thermal.wmi_sync);
- complete_all(&ar->bss_survey_done);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->offchan_tx_completed);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->thermal.wmi_sync);
+ complete(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@@ -1705,6 +1713,55 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return 0;
}
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+ int ret;
+ int vdev_id;
+ int vdev_type;
+ int vdev_subtype;
+ const u8 *vdev_addr;
+
+ vdev_id = 0;
+ vdev_type = WMI_VDEV_TYPE_STA;
+ vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+ vdev_addr = ar->mac_addr;
+
+ ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+ vdev_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ /* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+ * serialized properly implicitly.
+ *
+	 * Moreover (most) WMI commands have no explicit acknowledgements. It is
+	 * possible to infer one implicitly by poking the firmware with an echo
+	 * command - getting a reply means all preceding commands have been
+	 * (mostly) processed.
+ *
+ * In case of vdev create/delete this is sufficient.
+ *
+	 * Without this it's possible to end up with a race when the HTT Rx ring
+	 * is started before the vdev create/delete hack is complete, allowing a
+	 * short window of opportunity to receive (and Tx ACK) a bunch of frames.
+ */
+ ret = ath10k_wmi_barrier(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw)
{
@@ -1872,6 +1929,25 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ /* Some firmware revisions do not properly set up hardware rx filter
+ * registers.
+ *
+ * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
+	 * is filled with 0s instead of 1s, allowing the HW to respond with ACKs
+	 * to any frame that matches MAC_PCU_RX_FILTER, which is also
+	 * misconfigured to accept anything.
+ *
+	 * The ADDR1 is programmed using an internal firmware structure field and
+	 * can't be (easily/sanely) reached from the driver explicitly. It is
+ * possible to implicitly make it correct by creating a dummy vdev and
+ * then deleting it.
+ */
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar, "failed to reset rx filter: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* If firmware indicates Full Rx Reorder support it must be used in a
* slightly different manner. Let HTT code know.
*/
@@ -2031,7 +2107,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
- ret = ath10k_swap_code_seg_init(ar);
+ ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
@@ -2072,6 +2148,9 @@ static void ath10k_core_register_work(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
+ /* peer stats are enabled by default */
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err(ar, "could not probe fw (%d)\n", status);
@@ -2249,6 +2328,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ init_dummy_netdev(&ar->napi_dev);
+
ret = ath10k_debug_create(ar);
if (ret)
goto err_free_aux_wq;
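
ath10k_wmi_barrier(), implemented in wmi.c outside this excerpt, pairs the new ar->wmi.barrier completion from core.h with a WMI echo command: the echo reply completes the barrier, proving the firmware has consumed everything queued before it. A hedged sketch of that shape; the echo helper name, the echo cookie, and the timeout are assumptions:

#include <linux/completion.h>

/* sketch of an echo-based WMI barrier; ar->wmi.barrier is the completion
 * added to struct ath10k_wmi, initialized at attach time elsewhere
 */
static int wmi_barrier_sketch(struct ath10k *ar)
{
	int ret;

	reinit_completion(&ar->wmi.barrier);

	ret = ath10k_wmi_echo(ar, 0x2d700000);	/* assumed echo helper */
	if (ret)
		return ret;

	/* the echo event handler calls complete(&ar->wmi.barrier) */
	if (!wait_for_completion_timeout(&ar->wmi.barrier, 3 * HZ))
		return -ETIMEDOUT;

	return 0;
}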
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 30ae5bf81611..6ec9495bcc04 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -65,6 +65,10 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+/* NAPI poll budget */
+#define ATH10K_NAPI_BUDGET 64
+#define ATH10K_NAPI_QUOTA_LIMIT 60
+
struct ath10k;
enum ath10k_bus {
@@ -142,6 +146,7 @@ struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
+ struct completion barrier;
wait_queue_head_t tx_credits_wq;
DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
struct wmi_cmd_map *cmd;
@@ -440,7 +445,7 @@ struct ath10k_debug {
struct completion tpc_complete;
/* protected by conf_mutex */
- u32 fw_dbglog_mask;
+ u64 fw_dbglog_mask;
u32 fw_dbglog_level;
u32 pktlog_filter;
u32 reg_addr;
@@ -551,6 +556,13 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+ /* Older firmware with HTT delivers incorrect tx status for null func
+	 * frames to the driver, but this is fixed in the 10.2 and 10.4 firmware
+	 * versions. Also, this workaround results in reporting of incorrect null
+	 * func status for 10.4. This flag is used to skip the workaround.
+ */
+ ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -663,6 +675,15 @@ struct ath10k_fw_file {
const void *codeswap_data;
size_t codeswap_len;
+
+ /* The original idea of struct ath10k_fw_file was that it only
+ * contains struct firmware and pointers to various parts (actual
+ * firmware binary, otp, metadata etc) of the file. This seg_info
+ * is actually created separate but as this is used similarly as
+ * the other firmware components it's more convenient to have it
+ * here.
+ */
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
};
struct ath10k_fw_components {
@@ -715,53 +736,7 @@ struct ath10k {
struct ath10k_htc htc;
struct ath10k_htt htt;
- struct ath10k_hw_params {
- u32 id;
- u16 dev_id;
- const char *name;
- u32 patch_load_addr;
- int uart_pin;
- u32 otp_exe_param;
-
- /* Type of hw cycle counter wraparound logic, for more info
- * refer enum ath10k_hw_cc_wraparound_type.
- */
- enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
-
- /* Some of chip expects fragment descriptor to be continuous
- * memory for any TX operation. Set continuous_frag_desc flag
- * for the hardware which have such requirement.
- */
- bool continuous_frag_desc;
-
- /* CCK hardware rate table mapping for the newer chipsets
- * like QCA99X0, QCA4019 got revised. The CCK h/w rate values
- * are in a proper order with respect to the rate/preamble
- */
- bool cck_rate_map_rev2;
-
- u32 channel_counters_freq_hz;
-
- /* Mgmt tx descriptors threshold for limiting probe response
- * frames.
- */
- u32 max_probe_resp_desc_thres;
-
- /* The padding bytes's location is different on various chips */
- enum ath10k_hw_4addr_pad hw_4addr_pad;
-
- u32 tx_chain_mask;
- u32 rx_chain_mask;
- u32 max_spatial_stream;
- u32 cal_data_len;
-
- struct ath10k_hw_params_fw {
- const char *dir;
- const char *board;
- size_t board_size;
- size_t board_ext_size;
- } fw;
- } hw_params;
+ struct ath10k_hw_params hw_params;
/* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
struct ath10k_fw_components normal_mode_fw;
@@ -775,10 +750,6 @@ struct ath10k {
const struct firmware *cal_file;
struct {
- struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
- } swap;
-
- struct {
u32 vendor;
u32 device;
u32 subsystem_vendor;
@@ -936,6 +907,10 @@ struct ath10k {
struct ath10k_thermal thermal;
struct ath10k_wow wow;
+ /* NAPI */
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
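
The napi_dev/napi members added here, together with the ATH10K_NAPI_BUDGET/ATH10K_NAPI_QUOTA_LIMIT constants above, follow the standard NAPI contract: the bus code registers a poll callback that may process at most budget units of work and returns the amount actually done; returning less than the budget signals completion, after which the device interrupt can be re-enabled. A sketch of what that poll handler plausibly looks like (the real one lives in the bus code, not in this excerpt):

#include <linux/netdevice.h>

/* sketch: a NAPI poll handler built on the members added above; the IRQ
 * re-enable step is bus-specific and only indicated by a comment
 */
static int ath10k_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct ath10k *ar = container_of(napi, struct ath10k, napi);
	int done;

	done = ath10k_htt_txrx_compl_task(ar, budget);
	if (done < budget) {
		napi_complete(napi);	/* all pending work handled */
		/* unmask/re-enable the device interrupt here */
	}

	return done;
}

/* registered from probe, e.g.:
 * netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_napi_poll_sketch,
 *		  ATH10K_NAPI_BUDGET);
 */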
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8f0fd41dfd4b..832da6ed9f13 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1228,9 +1228,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
unsigned int len;
- char buf[64];
+ char buf[96];
- len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+ len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -1242,15 +1242,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
int ret;
- char buf[64];
- unsigned int log_level, mask;
+ char buf[96];
+ unsigned int log_level;
+ u64 mask;
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
- ret = sscanf(buf, "%x %u", &mask, &log_level);
+ ret = sscanf(buf, "%llx %u", &mask, &log_level);
if (!ret)
return -EINVAL;
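
Widening fw_dbglog_mask to u64 forces the read and write paths to change together: the print format goes from 0x%08x to 0x%16llx, sscanf() switches to %llx, and the stack buffer grows since a 64-bit mask needs up to 16 hex digits plus the prefix, level, and terminator. A small standalone check of the format pair (userspace-style, just to illustrate the parsing):

#include <stdio.h>

int main(void)
{
	unsigned long long mask;	/* wide enough for the u64 mask */
	unsigned int level;

	/* same conversions the driver now uses; %llx accepts an optional
	 * 0x prefix, so a write of "0xdeadbeefcafef00d 3" parses cleanly
	 */
	if (sscanf("0xdeadbeefcafef00d 3", "%llx %u", &mask, &level) == 2)
		printf("0x%16llx %u\n", mask, level);

	return 0;
}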
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 5b3c6bcf9598..175aae38c375 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
@@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
{
struct ath10k *ar = ep->htc->ar;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
@@ -404,7 +404,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
goto out;
}
- ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 430a83e142aa..98c14247021b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1665,7 +1665,6 @@ struct ath10k_htt {
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
- struct tasklet_struct txrx_compl_task;
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
struct sk_buff_head tx_fetch_ind_q;
@@ -1798,5 +1797,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt,
struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 78db5d679f19..a3785a9aa843 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -34,7 +34,6 @@
#define HTT_RX_RING_REFILL_RESCHED_MS 5
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
@@ -226,7 +225,6 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- tasklet_kill(&htt->txrx_compl_task);
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
@@ -520,9 +518,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
- tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
- (unsigned long)htt);
-
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
@@ -931,7 +926,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -958,7 +953,7 @@ static void ath10k_process_rx(struct ath10k *ar,
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
- ieee80211_rx(ar->hw, skb);
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
@@ -1056,9 +1051,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1072,19 +1069,12 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
- if ((ar->hw_params.hw_4addr_pad == ATH10K_HW_4ADDR_PAD_BEFORE) &&
- ieee80211_has_a4(((struct ieee80211_hdr *)first_hdr)->frame_control)) {
- /* The QCA99X0 4 address mode pad 2 bytes at the
- * beginning of MSDU
- */
- hdr = (struct ieee80211_hdr *)(msdu->data + 2);
- /* The skb length need be extended 2 as the 2 bytes at the tail
- * be excluded due to the padding
- */
- skb_put(msdu, 2);
- } else {
- hdr = (struct ieee80211_hdr *)(msdu->data);
- }
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
@@ -1151,6 +1141,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
void *rfc1042;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1161,6 +1153,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, l3_pad_bytes);
+
/* pull decapped header and copy SA & DA */
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
@@ -1191,6 +1188,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@@ -1198,7 +1197,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
- skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -1525,9 +1528,9 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- static struct ieee80211_rx_status rx_status;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
- int ret;
+ int ret, num_msdus;
__skb_queue_head_init(&amsdu);
@@ -1549,13 +1552,14 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
return ret;
}
- ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+ num_msdus = skb_queue_len(&amsdu);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
- ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
- return 0;
+ return num_msdus;
}
static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1579,15 +1583,6 @@ static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
mpdu_count += mpdu_ranges[i].mpdu_count;
atomic_add(mpdu_count, &htt->num_mpdus_ready);
-
- tasklet_schedule(&htt->txrx_compl_task);
-}
-
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
-{
- atomic_inc(&htt->num_mpdus_ready);
-
- tasklet_schedule(&htt->txrx_compl_task);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
@@ -1772,14 +1767,15 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
RX_FLAG_MMIC_STRIPPED;
}
-static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
- struct sk_buff_head *list)
+static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
+ int num_msdu = 0;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1819,10 +1815,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
+ num_msdu++;
}
+ return num_msdu;
}
-static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
@@ -1835,12 +1833,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
bool offload;
bool frag;
- int ret;
+ int ret, num_msdus = 0;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused)
- return;
+ return -EIO;
skb_pull(skb, sizeof(resp->hdr));
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
@@ -1859,7 +1857,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
- return;
+ return -EINVAL;
}
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
@@ -1870,14 +1868,14 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
- return;
+ return -EIO;
}
/* Offloaded frames are very different and need to be handled
* separately.
*/
if (offload)
- ath10k_htt_rx_h_rx_offload(ar, &list);
+ num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
@@ -1890,6 +1888,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
+ num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
@@ -1902,9 +1901,10 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
htt->rx_confused = true;
__skb_queue_purge(&list);
- return;
+ return -EIO;
}
}
+ return num_msdus;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2267,7 +2267,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
- tasklet_schedule(&htt->txrx_compl_task);
break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
@@ -2284,7 +2283,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
- ath10k_htt_rx_frag_handler(htt);
+ atomic_inc(&htt->num_mpdus_ready);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@@ -2320,8 +2319,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
- tasklet_schedule(&htt->txrx_compl_task);
+ __skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
@@ -2347,7 +2345,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
- tasklet_schedule(&htt->txrx_compl_task);
break;
}
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
@@ -2376,27 +2373,77 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
- struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
- struct ath10k *ar = htt->ar;
+ struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {};
- struct sk_buff_head rx_ind_q;
struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
- int num_mpdus;
+ int quota = 0, done, num_rx_msdus;
+ bool resched_napi = false;
- __skb_queue_head_init(&rx_ind_q);
__skb_queue_head_init(&tx_ind_q);
- spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
- skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
- spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
+ /* Since in-ord-ind can deliver more than one A-MSDU in a single event,
+ * process it first to utilize the full available quota.
+ */
+ while (quota < budget) {
+ if (skb_queue_empty(&htt->rx_in_ord_compl_q))
+ break;
- spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
- skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
- spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+ skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
+ if (!skb) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ if (num_rx_msdus < 0) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ dev_kfree_skb_any(skb);
+ if (num_rx_msdus > 0)
+ quota += num_rx_msdus;
+
+ if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+ !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+ resched_napi = true;
+ goto exit;
+ }
+ }
+
+ while (quota < budget) {
+ /* no more data to receive */
+ if (!atomic_read(&htt->num_mpdus_ready))
+ break;
+
+ num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
+ if (num_rx_msdus < 0) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ quota += num_rx_msdus;
+ atomic_dec(&htt->num_mpdus_ready);
+ if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+ atomic_read(&htt->num_mpdus_ready)) {
+ resched_napi = true;
+ goto exit;
+ }
+ }
+
+ /* From NAPI documentation:
+ * The napi poll() function may also process TX completions, in which
+ * case if it processes the entire TX ring then it should count that
+ * work as the rest of the budget.
+ */
+ if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+ quota = budget;
/* kfifo_get: called only within this NAPI poll context, so it's neatly serialized.
* From kfifo_get() documentation:
@@ -2406,27 +2453,24 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
while (kfifo_get(&htt->txdone_fifo, &tx_done))
ath10k_txrx_tx_unref(htt, &tx_done);
- while ((skb = __skb_dequeue(&tx_ind_q))) {
- ath10k_htt_rx_tx_fetch_ind(ar, skb);
- dev_kfree_skb_any(skb);
- }
-
- num_mpdus = atomic_read(&htt->num_mpdus_ready);
-
- while (num_mpdus) {
- if (ath10k_htt_rx_handle_amsdu(htt))
- break;
+ ath10k_mac_tx_push_pending(ar);
- num_mpdus--;
- atomic_dec(&htt->num_mpdus_ready);
- }
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
- while ((skb = __skb_dequeue(&rx_ind_q))) {
- spin_lock_bh(&htt->rx_ring.lock);
- ath10k_htt_rx_in_ord_ind(ar, skb);
- spin_unlock_bh(&htt->rx_ring.lock);
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
+exit:
ath10k_htt_rx_msdu_buff_replenish(htt);
+ /* In case of an rx failure or more data left to read, report the
+ * full budget so that NAPI reschedules the poll.
+ */
+ done = resched_napi ? budget : quota;
+
+ return done;
}
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
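The converted completion path above is essentially a quota loop: each handler returns how many MSDUs it consumed, the poll accumulates that into quota, and any error or leftover work is reported as the full budget so NAPI polls again. A minimal sketch of that accounting, with process_one_event() as a hypothetical stand-in for the in-order and A-MSDU handlers (not an ath10k function):

/* Hypothetical event source: returns the number of MSDUs handled,
 * 0 when idle, or a negative value on error.
 */
extern int process_one_event(void);

static int example_poll(int budget)
{
	int quota = 0;

	while (quota < budget) {
		int n = process_one_event();

		if (n < 0)
			return budget;	/* error: report full budget so NAPI repolls */
		if (n == 0)
			break;		/* no more work queued */
		quota += n;
	}

	return quota < budget ? quota : budget;
}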
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 7c072b605bc7..ae5b33fe5ba8 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -390,8 +390,6 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
int size;
- tasklet_kill(&htt->txrx_compl_task);
-
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
idr_destroy(&htt->pending_tx);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index f903d468dbe6..c2ecb9bd824a 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -219,3 +219,16 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
survey->time = CCNT_TO_MSEC(ar, cc);
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
+
+const struct ath10k_hw_ops qca988x_ops = {
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+const struct ath10k_hw_ops qca99x0_ops = {
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index e014cd732a0d..308e423d4b99 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -338,11 +338,6 @@ enum ath10k_hw_rate_rev2_cck {
ATH10K_HW_RATE_REV2_CCK_SP_11M,
};
-enum ath10k_hw_4addr_pad {
- ATH10K_HW_4ADDR_PAD_AFTER,
- ATH10K_HW_4ADDR_PAD_BEFORE,
-};
-
enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_DISABLED = 0,
@@ -363,6 +358,77 @@ enum ath10k_hw_cc_wraparound_type {
ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
};
+struct ath10k_hw_params {
+ u32 id;
+ u16 dev_id;
+ const char *name;
+ u32 patch_load_addr;
+ int uart_pin;
+ u32 otp_exe_param;
+
+ /* Type of hw cycle counter wraparound logic; for more info
+ * refer to enum ath10k_hw_cc_wraparound_type.
+ */
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+ /* Some chips expect the fragment descriptor to be contiguous
+ * memory for any TX operation. Set the continuous_frag_desc flag
+ * for hardware that has this requirement.
+ */
+ bool continuous_frag_desc;
+
+ /* The CCK hardware rate table mapping was revised for newer
+ * chipsets like QCA99X0 and QCA4019. The CCK h/w rate values are
+ * now in proper order with respect to rate/preamble.
+ */
+ bool cck_rate_map_rev2;
+
+ u32 channel_counters_freq_hz;
+
+ /* Mgmt tx descriptors threshold for limiting probe response
+ * frames.
+ */
+ u32 max_probe_resp_desc_thres;
+
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 max_spatial_stream;
+ u32 cal_data_len;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ const char *board;
+ size_t board_size;
+ size_t board_ext_size;
+ } fw;
+
+ /* qca99x0 family chips deliver broadcast/multicast management
+ * frames encrypted and expect software to do the decryption.
+ */
+ bool sw_decrypt_mcast_mgmt;
+
+ const struct ath10k_hw_ops *hw_ops;
+};
+
+struct htt_rx_desc;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+
+static inline int
+ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
+ struct htt_rx_desc *rxd)
+{
+ if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
+ return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
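The new ath10k_hw_ops indirection keeps per-chip conditionals out of the rx hot path: chips that need no L3 padding simply leave the hook NULL and the inline wrapper falls back to 0. A hedged sketch of the same pattern with illustrative names (the example_* symbols are not ath10k code):

struct example_rx_desc;	/* stand-in for struct htt_rx_desc */

struct example_hw_ops {
	int (*rx_desc_get_l3_pad_bytes)(struct example_rx_desc *rxd);
};

/* qca988x-style table: no hook, the wrapper returns the default */
static const struct example_hw_ops example_plain_ops = { 0 };

static int example_fixed_pad(struct example_rx_desc *rxd)
{
	(void)rxd;
	return 2;	/* illustrative only; real code decodes the descriptor */
}

/* qca99x0-style table: chip-specific decode plugged in */
static const struct example_hw_ops example_padded_ops = {
	.rx_desc_get_l3_pad_bytes = example_fixed_pad,
};

static inline int example_l3_pad(const struct example_hw_ops *ops,
				 struct example_rx_desc *rxd)
{
	if (ops->rx_desc_get_l3_pad_bytes)
		return ops->rx_desc_get_l3_pad_bytes(rxd);
	return 0;
}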
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0bbd0a00edcc..0a44dab5a287 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -824,7 +824,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
*/
for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
if (ar->peer_map[i] == peer) {
- ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
peer->addr, peer, i);
ar->peer_map[i] = NULL;
}
@@ -3255,6 +3255,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
@@ -3524,7 +3526,7 @@ static int ath10k_mac_tx(struct ath10k *ar,
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
skb);
skb_queue_tail(&ar->offchan_tx_queue, skb);
@@ -3586,7 +3588,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
@@ -3643,7 +3645,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
skb);
if (!peer && tmp_peer_created) {
@@ -3777,7 +3779,9 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
enum ath10k_hw_txrx_mode txmode;
enum ath10k_mac_tx_path txpath;
struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
size_t skb_len;
+ bool is_mgmt, is_presp;
int ret;
spin_lock_bh(&ar->htt.tx_lock);
@@ -3801,6 +3805,22 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
skb_len = skb->len;
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_mgmt) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+ if (ret) {
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return ret;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
if (unlikely(ret)) {
@@ -3808,6 +3828,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
spin_lock_bh(&ar->htt.tx_lock);
ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&ar->htt.tx_lock);
return ret;
@@ -3894,7 +3916,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
ar->scan.roc_freq = 0;
ath10k_offchan_tx_purge(ar);
cancel_delayed_work(&ar->scan.timeout);
- complete_all(&ar->scan.completed);
+ complete(&ar->scan.completed);
break;
}
}
@@ -4100,13 +4122,29 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_txq *f_txq;
+ struct ath10k_txq *f_artxq;
+ int ret = 0;
+ int max = 16;
spin_lock_bh(&ar->txqs_lock);
if (list_empty(&artxq->list))
list_add_tail(&artxq->list, &ar->txqs);
+
+ f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
+ list_del_init(&f_artxq->list);
+
+ while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, f_txq);
+ if (ret)
+ break;
+ }
+ if (ret != -ENOENT)
+ list_add_tail(&f_artxq->list, &ar->txqs);
spin_unlock_bh(&ar->txqs_lock);
- ath10k_mac_tx_push_pending(ar);
+ ath10k_htt_tx_txq_update(hw, f_txq);
ath10k_htt_tx_txq_update(hw, txq);
}
@@ -5186,7 +5224,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
ret = ath10k_monitor_recalc(ar);
if (ret)
- ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
}
@@ -5984,8 +6022,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d peer delete %pM (sta gone)\n",
- arvif->vdev_id, sta->addr);
+ "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ arvif->vdev_id, sta->addr, sta);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
@@ -6001,7 +6039,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
continue;
if (peer->sta == sta) {
- ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
+ ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
sta->addr, peer, i, arvif->vdev_id);
peer->sta = NULL;
@@ -6538,7 +6576,7 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
goto exit;
}
- ath10k_mac_update_bss_chan_survey(ar, survey->channel);
+ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
@@ -7134,7 +7172,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %hu width %d ptr %p\n",
+ "mac chanctx add freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -7158,7 +7196,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %hu width %d ptr %p\n",
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -7223,7 +7261,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -7281,7 +7319,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx assign ptr %p vdev_id %i\n",
+ "mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@@ -7342,7 +7380,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx unassign ptr %p vdev_id %i\n",
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
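The reworked wake_tx_queue above services the head of the round-robin list directly, bounded to 16 frames per wake, and puts the queue back on the rotation unless the push path reports it drained (-ENOENT). A simplified sketch of that loop under hypothetical helpers:

#include <errno.h>

#define EXAMPLE_MAX_BURST 16	/* mirrors the "max = 16" bound above */

struct example_txq;

/* Hypothetical helpers: example_push_one() returns 0 on success and
 * -ENOENT once the queue is empty.
 */
extern int example_can_push(struct example_txq *q);
extern int example_push_one(struct example_txq *q);
extern void example_requeue_tail(struct example_txq *q);

static void example_service_txq(struct example_txq *q)
{
	int max = EXAMPLE_MAX_BURST;
	int ret = 0;

	while (example_can_push(q) && max--) {
		ret = example_push_one(q);
		if (ret)
			break;
	}

	/* keep the queue in rotation unless it is known to be empty */
	if (ret != -ENOENT)
		example_requeue_tail(q);
}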
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9a22c478dd1b..0457e315d336 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -1506,12 +1506,10 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
-void ath10k_pci_kill_tasklet(struct ath10k *ar)
+static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- tasklet_kill(&ar_pci->intr_tq);
-
del_timer_sync(&ar_pci->rx_post_retry);
}
@@ -1570,7 +1568,7 @@ void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
ul_pipe, dl_pipe);
}
-static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
@@ -1693,14 +1691,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
- struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct sk_buff *skb;
int i;
ar = pci_pipe->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
@@ -1753,7 +1749,7 @@ void ath10k_pci_ce_deinit(struct ath10k *ar)
void ath10k_pci_flush(struct ath10k *ar)
{
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
ath10k_pci_buffer_cleanup(ar);
}
@@ -1780,6 +1776,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
@@ -2533,6 +2531,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
+ napi_enable(&ar->napi);
return 0;
@@ -2725,7 +2724,7 @@ static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
return 0;
err_free:
- kfree(data);
+ kfree(caldata);
return -EINVAL;
}
@@ -2772,35 +2771,53 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
- if (!ath10k_pci_irq_pending(ar))
- return IRQ_NONE;
-
- ath10k_pci_disable_and_clear_legacy_irq(ar);
- }
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+ !ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
- tasklet_schedule(&ar_pci->intr_tq);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
-static void ath10k_pci_tasklet(unsigned long data)
+static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
- struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
if (ath10k_pci_has_fw_crashed(ar)) {
- ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
- return;
+ napi_complete(ctx);
+ return done;
}
ath10k_ce_per_engine_service_any(ar);
- /* Re-enable legacy irq that was disabled in the irq handler */
- if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete(ctx);
+ /* In case of MSI, it is possible that interrupts are received
+ * while the NAPI poll is in progress, so pending interrupts that
+ * arrive after all copy engine pipes have been processed by the
+ * poll would never be handled again. This caused the boot
+ * sequence to fail to complete on x86 platforms, so before
+ * re-enabling interrupts it is safer to check for pending
+ * interrupts and service them immediately.
+ */
+ if (CE_INTERRUPT_SUMMARY(ar)) {
+ napi_reschedule(ctx);
+ goto out;
+ }
ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+ }
+
+out:
+ return done;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
@@ -2858,11 +2875,10 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
free_irq(ar_pci->pdev->irq, ar);
}
-void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+void ath10k_pci_init_napi(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
+ ATH10K_NAPI_BUDGET);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2870,7 +2886,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ath10k_pci_init_irq_tasklets(ar);
+ ath10k_pci_init_napi(ar);
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
ath10k_info(ar, "limiting irq mode to: %d\n",
@@ -3062,7 +3078,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_master;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
return 0;
err_master:
@@ -3131,7 +3147,8 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
void ath10k_pci_release_resource(struct ath10k *ar)
{
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
+ netif_napi_del(&ar->napi);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
}
@@ -3162,7 +3179,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
pci_hard_reset = ath10k_pci_qca988x_chip_reset;
break;
case QCA9887_1_0_DEVICE_ID:
- dev_warn(&pdev->dev, "QCA9887 support is still experimental, there are likely bugs. You have been warned.\n");
hw_rev = ATH10K_HW_QCA9887;
pci_ps = false;
pci_soft_reset = ath10k_pci_warm_reset;
@@ -3298,7 +3314,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
err_free_irq:
ath10k_pci_free_irq(ar);
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
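Taken together, the pci.c changes follow the usual tasklet-to-NAPI conversion recipe: register the context at init time, enable it at power-up, schedule it from the (masked) interrupt handler, and synchronize/disable it on the stop path. A compressed sketch of that ordering, with error handling and device specifics omitted (the my_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_ctx {
	struct net_device napi_dev;	/* dummy netdev hosting the context */
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;	/* ... service copy engines, count rx work ... */

	if (done < budget)
		napi_complete(napi);	/* then re-enable device interrupts */
	return done;
}

static irqreturn_t my_irq(int irq, void *arg)
{
	struct my_ctx *c = arg;

	/* mask device interrupts, then defer the work to softirq */
	napi_schedule(&c->napi);
	return IRQ_HANDLED;
}

static void my_init(struct my_ctx *c)
{
	init_dummy_netdev(&c->napi_dev);
	netif_napi_add(&c->napi_dev, &c->napi, my_poll, 64);
	napi_enable(&c->napi);	/* the driver does this at power-up */
}

static void my_stop(struct my_ctx *c)
{
	napi_synchronize(&c->napi);	/* wait out any in-flight poll */
	napi_disable(&c->napi);
}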
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 6eca1df2ce60..9854ad56b2de 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -177,8 +177,6 @@ struct ath10k_pci {
/* Operating interrupt mode */
enum ath10k_pci_irq_mode oper_irq_mode;
- struct tasklet_struct intr_tq;
-
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
/* Copy Engine used for Diagnostic Accesses */
@@ -294,8 +292,7 @@ void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_rx_replenish_retry(unsigned long ptr);
void ath10k_pci_ce_deinit(struct ath10k *ar);
-void ath10k_pci_init_irq_tasklets(struct ath10k *ar);
-void ath10k_pci_kill_tasklet(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
int ath10k_pci_init_pipes(struct ath10k *ar);
int ath10k_pci_init_config(struct ath10k *ar);
void ath10k_pci_rx_post(struct ath10k *ar);
@@ -303,6 +300,7 @@ void ath10k_pci_flush(struct ath10k *ar);
void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
bool ath10k_pci_irq_pending(struct ath10k *ar);
void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
int ath10k_pci_wait_for_target_init(struct ath10k *ar);
int ath10k_pci_setup_resource(struct ath10k *ar);
void ath10k_pci_release_resource(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 0c5f5863dac8..adf4592374b4 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -134,17 +134,18 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
return seg_info;
}
-int ath10k_swap_code_seg_configure(struct ath10k *ar)
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
- if (!ar->swap.firmware_swap_code_seg_info)
+ if (!fw_file->firmware_swap_code_seg_info)
return 0;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
- seg_info = ar->swap.firmware_swap_code_seg_info;
+ seg_info = fw_file->firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@@ -158,28 +159,29 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar)
return 0;
}
-void ath10k_swap_code_seg_release(struct ath10k *ar)
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
{
- ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+ ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
/* FIXME: these two assignments look to be in the wrong place! Shouldn't
* they be in ath10k_core_free_firmware_files() like the rest?
*/
- ar->normal_mode_fw.fw_file.codeswap_data = NULL;
- ar->normal_mode_fw.fw_file.codeswap_len = 0;
+ fw_file->codeswap_data = NULL;
+ fw_file->codeswap_len = 0;
- ar->swap.firmware_swap_code_seg_info = NULL;
+ fw_file->firmware_swap_code_seg_info = NULL;
}
-int ath10k_swap_code_seg_init(struct ath10k *ar)
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
const void *codeswap_data;
size_t codeswap_len;
- codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
- codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
+ codeswap_data = fw_file->codeswap_data;
+ codeswap_len = fw_file->codeswap_len;
if (!codeswap_len || !codeswap_data)
return 0;
@@ -200,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
return ret;
}
- ar->swap.firmware_swap_code_seg_info = seg_info;
+ fw_file->firmware_swap_code_seg_info = seg_info;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 36991c7b07a0..f5dc0476493e 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -23,6 +23,8 @@
/* Currently only one swap segment is supported */
#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+struct ath10k_fw_file;
+
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
@@ -58,8 +60,11 @@ struct ath10k_swap_code_seg_info {
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};
-int ath10k_swap_code_seg_configure(struct ath10k *ar);
-void ath10k_swap_code_seg_release(struct ath10k *ar);
-int ath10k_swap_code_seg_init(struct ath10k *ar);
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 120f4234d3b0..ed85f938e3c0 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -23,6 +23,7 @@
#include "wmi.h"
#include "hif.h"
#include "hw.h"
+#include "core.h"
#include "testmode_i.h"
@@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -240,6 +241,18 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to init utf code swap segment: %d\n",
+ ret);
+ goto err_release_utf_mode_fw;
+ }
+ }
+
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
@@ -279,6 +292,11 @@ err_power_down:
ath10k_hif_power_down(ar);
err_release_utf_mode_fw:
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
@@ -301,6 +319,11 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
@@ -360,7 +383,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 444b52c7e4f3..0a47269be289 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -192,7 +192,7 @@ int ath10k_thermal_register(struct ath10k *ar)
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
- if (!config_enabled(CONFIG_HWMON))
+ if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
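config_enabled() only tested whether an option was built in; IS_REACHABLE() is the right check here because it also covers the modular case, evaluating true only when the current code can actually link against the hwmon symbols (hwmon built in, or both sides built as modules). The shape of the guard stays the same; a hedged sketch:

#include <linux/kconfig.h>

extern int example_do_register(void);	/* hypothetical helper */

static int example_register_hwmon(void)
{
	/* bail out quietly when hwmon symbols cannot be linked against */
	if (!IS_REACHABLE(CONFIG_HWMON))
		return 0;

	return example_do_register();
}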
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index b29a86a26c13..9852c5d51139 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -44,7 +44,7 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
@@ -119,8 +119,6 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
- ath10k_mac_tx_push_pending(ar);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 64ebd304f907..c9a8bb1186f2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -51,6 +51,8 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
+ int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -123,7 +125,7 @@ struct wmi_ops {
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
- struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@@ -194,6 +196,7 @@ struct wmi_ops {
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
+ struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -349,6 +352,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_echo_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
@@ -932,7 +945,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
}
static inline int
-ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
struct sk_buff *skb;
@@ -1382,4 +1395,20 @@ ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
wmi->cmd->pdev_bss_chan_info_request_cmdid);
}
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_echo)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_echo(ar, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index e09337ee7c96..e64f59300a7c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1223,6 +1223,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_echo_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->value = ev->value;
+
+ kfree(tb);
+ return 0;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@@ -2441,7 +2468,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level) {
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
@@ -3081,6 +3108,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->value = cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
@@ -3429,6 +3484,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@@ -3485,6 +3541,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_tlv_op_gen_echo,
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index d2462886b75c..38993d72f5e6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -29,6 +29,9 @@
#include "p2p.h"
#include "hw.h"
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
.init_cmdid = WMI_INIT_CMDID,
@@ -1874,7 +1877,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2240,6 +2243,29 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
return 0;
}
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+ * encrypted payload. However in case of PMF it delivers decrypted
+ * frames with Protected Bit set.
+ */
+ if (ieee80211_is_auth(hdr->frame_control))
+ return false;
+
+ /* qca99x0 based FW delivers broadcast or multicast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+ ar->hw_params.sw_decrypt_mcast_mgmt)
+ return false;
+
+ return true;
+}
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2326,11 +2352,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_handle_wep_reauth(ar, skb, status);
- /* FW delivers WEP Shared Auth frame with Protected Bit set and
- * encrypted payload. However in case of PMF it delivers decrypted
- * frames with Protected Bit set. */
- if (ieee80211_has_protected(hdr->frame_control) &&
- !ieee80211_is_auth(hdr->frame_control)) {
+ if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
status->flag |= RX_FLAG_DECRYPTED;
if (!ieee80211_is_action(hdr->frame_control) &&
@@ -2347,7 +2369,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -2495,7 +2517,21 @@ exit:
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+ struct wmi_echo_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event echo value 0x%08x\n",
+ le32_to_cpu(arg.value));
+
+ if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+ complete(&ar->wmi.barrier);
}
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
@@ -3527,7 +3563,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
- ret = -EIO;
goto skip;
}
@@ -4792,6 +4827,17 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ struct wmi_echo_event *ev = (void *)skb->data;
+
+ arg->value = ev->value;
+
+ return 0;
+}
+
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@@ -5124,6 +5170,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_2_event_id id;
+ bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5133,6 +5180,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply bypassed to testmode.
+ */
+ if (consumed && id != WMI_10_2_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
switch (id) {
case WMI_10_2_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5248,6 +5307,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_4_event_id id;
+ bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5257,6 +5317,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply bypassed to testmode.
+ */
+ if (consumed && id != WMI_10_4_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
switch (id) {
case WMI_10_4_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5306,6 +5378,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_4_WDS_PEER_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@@ -6863,7 +6936,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_dbglog_cfg_cmd *cmd;
@@ -6901,6 +6974,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
}
static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_10_4_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back defaults, all modules with WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le64(module_enable);
+ cmd->module_valid = __cpu_to_le64(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+ __le64_to_cpu(cmd->module_enable),
+ __le64_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
struct wmi_pdev_pktlog_enable_cmd *cmd;
@@ -7649,6 +7760,48 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
return skb;
}
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_echo_cmd *)skb->data;
+ cmd->value = cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi echo value 0x%08x\n", value);
+ return skb;
+}
+
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+ int ret;
+ int time_left;
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->wmi.barrier);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+ ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
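ath10k_wmi_barrier() turns the echo command into a synchronization point: it sends a magic cookie and blocks until the echo event handler above completes ar->wmi.barrier, or fails with -ETIMEDOUT after three seconds. A caller wanting to flush previously queued WMI commands might use it like this (illustrative call site, not part of the patch):

static int example_flush_wmi(struct ath10k *ar)
{
	int ret;

	ret = ath10k_wmi_barrier(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize wmi: %d\n", ret);

	return ret;
}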
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -7665,6 +7818,7 @@ static const struct wmi_ops wmi_ops = {
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7709,6 +7863,7 @@ static const struct wmi_ops wmi_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7738,6 +7893,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7777,6 +7933,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7796,6 +7953,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7807,6 +7965,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7862,6 +8021,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7873,6 +8033,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7968,7 +8129,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
- .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@@ -7980,10 +8141,12 @@ static const struct wmi_ops wmi_10_4_ops = {
.ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+ .gen_echo = ath10k_wmi_op_gen_echo,
};
int ath10k_wmi_attach(struct ath10k *ar)
@@ -8036,6 +8199,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
+ init_completion(&ar->wmi.barrier);
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 3ef468893b3f..48e04b92e231 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -180,6 +180,7 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
@@ -305,6 +306,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
@@ -402,6 +404,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
@@ -652,6 +655,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
@@ -6169,6 +6174,20 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
+struct wmi_10_4_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config */
+ __le64 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le64 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -6296,6 +6315,10 @@ struct wmi_roam_ev_arg {
__le32 rssi;
};
+struct wmi_echo_ev_arg {
+ __le32 value;
+};
+
struct wmi_pdev_temperature_event {
/* temperature value in degrees Celsius */
__le32 temperature;
@@ -6624,5 +6647,6 @@ void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
char *buf);
int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 929d7ccc031c..4f8d9ed04f5e 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -909,7 +909,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
struct ath5k_hw *ah = inode->i_private;
bool res;
int i, ret;
- u32 eesize;
+ u32 eesize; /* NB: in 16-bit words */
u16 val, *buf;
/* Get eeprom size */
@@ -932,7 +932,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
/* Create buffer and read in eeprom */
- buf = vmalloc(eesize);
+ buf = vmalloc(eesize * 2);
if (!buf) {
ret = -ENOMEM;
goto err;
@@ -952,7 +952,7 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}
ep->buf = buf;
- ep->len = i;
+ ep->len = eesize * 2;
file->private_data = (void *)ep;
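The sizing bug fixed above comes down to units: the EEPROM size is reported in 16-bit words, while vmalloc() and ep->len deal in bytes, so the old code allocated half the required buffer and later under-reported its length. In miniature (hedged sketch):

#include <linux/types.h>
#include <linux/vmalloc.h>

/* eesize counts 16-bit words; allocations and lengths need bytes */
static u16 *example_alloc_eeprom_buf(u32 eesize_words, size_t *out_bytes)
{
	size_t bytes = (size_t)eesize_words * sizeof(u16);

	*out_bytes = bytes;	/* what ep->len should report */
	return vmalloc(bytes);
}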
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 72e2ec67768d..b7fe0af4cb24 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1449,14 +1449,14 @@ static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
return -EIO;
if (test_bit(CONNECTED, &vif->flags)) {
- ar->tx_pwr = 0;
+ ar->tx_pwr = 255;
if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}
- wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 0,
+ wait_event_interruptible_timeout(ar->event_wq, ar->tx_pwr != 255,
5 * HZ);
if (signal_pending(current)) {
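The sentinel change above exists because 0 dBm is a valid tx power report: waiting on tx_pwr != 0 made a legitimate zero reading look like "no event yet" and stalled the call for the full 5-second timeout. 255 is outside the plausible range, so it can safely mean "not reported". The handshake in miniature (names hypothetical):

#define EXAMPLE_TXPWR_INVALID 255	/* impossible tx power value */

static unsigned char example_tx_pwr = EXAMPLE_TXPWR_INVALID;

/* event handler stores the firmware-reported value; 0 is valid */
static void example_on_txpwr_event(unsigned char reported)
{
	example_tx_pwr = reported;
}

/* wait condition: any value other than the sentinel means done */
static int example_txpwr_ready(void)
{
	return example_tx_pwr != EXAMPLE_TXPWR_INVALID;
}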
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 18c070850a09..d1942537ea10 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -64,7 +64,7 @@ int ath6kl_hif_rw_comp_handler(void *context, int status)
}
EXPORT_SYMBOL(ath6kl_hif_rw_comp_handler);
-#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_COUNT 60
#define REGISTER_DUMP_LEN_MAX 60
static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
@@ -73,9 +73,6 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
u32 i, address, regdump_addr = 0;
int ret;
- if (ar->target_type != TARGET_TYPE_AR6003)
- return;
-
/* the reg dump pointer is copied to the host interest area */
address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
address = TARG_VTOP(ar->target_type, address);
@@ -95,7 +92,7 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
/* fetch register dump data */
ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
- REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+ REGISTER_DUMP_COUNT * (sizeof(u32)));
if (ret) {
ath6kl_warn("failed to get register dump: %d\n", ret);
return;
@@ -105,9 +102,9 @@ static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
ar->wiphy->fw_version);
- BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
+ BUILD_BUG_ON(REGISTER_DUMP_COUNT % 4);
- for (i = 0; i < REG_DUMP_COUNT_AR6003; i += 4) {
+ for (i = 0; i < REGISTER_DUMP_COUNT; i += 4) {
ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
i,
le32_to_cpu(regdump_val[i]),
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 1b271b99c49e..8eea8d22e72e 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -260,8 +260,8 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
int cur_bin;
int upper, lower, cur_vit_mask;
int i;
- int8_t mask_m[123];
- int8_t mask_p[123];
+ int8_t mask_m[123] = {0};
+ int8_t mask_p[123] = {0};
int8_t mask_amt;
int tmp_mask;
static const int pilot_mask_reg[4] = {
@@ -274,9 +274,6 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
};
static const int inc[4] = { 0, 100, 0, 0 };
- memset(&mask_m, 0, sizeof(int8_t) * 123);
- memset(&mask_p, 0, sizeof(int8_t) * 123);
-
cur_bin = -6000;
upper = bin + 100;
lower = bin - 100;
@@ -302,7 +299,7 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
upper = bin + 120;
lower = bin - 120;
- for (i = 0; i < 123; i++) {
+ for (i = 0; i < ARRAY_SIZE(mask_m); i++) {
if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
/* workaround for gcc bug #37014 */
volatile int tmp_v = abs(cur_vit_mask - bin);
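Two small C idioms carry this cleanup: a partial initializer such as = {0} zero-fills the whole array, making the explicit memset() calls redundant, and ARRAY_SIZE() ties the loop bound to the declaration so the 123 cannot silently drift. A standalone illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	signed char mask_m[123] = {0};	/* all 123 entries start at zero */
	size_t i, nonzero = 0;

	for (i = 0; i < ARRAY_SIZE(mask_m); i++)
		nonzero += (mask_m[i] != 0);

	printf("%zu nonzero entries\n", nonzero);	/* prints: 0 nonzero entries */
	return 0;
}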
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 5bd2cbaf582d..08607d7fdb56 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3252,7 +3252,8 @@ static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
int i;
for (i = 0; i < mdata_size / 2; i++, data++)
- ath9k_hw_nvram_read(ah, i, data);
+ if (!ath9k_hw_nvram_read(ah, i, data))
+ return -EIO;
return 0;
}
@@ -3282,7 +3283,8 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
if (ath9k_hw_use_flash(ah)) {
u8 txrx;
- ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+ if (ar9300_eeprom_restore_flash(ah, mptr, mdata_size))
+ return -EIO;
/* check if eeprom contains valid data */
eep = (struct ar9300_eeprom *) mptr;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 490f74d9ddf0..ddb28861e7fe 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -22,7 +22,7 @@
#ifdef CONFIG_MAC80211_LEDS
-void ath_fill_led_pin(struct ath_softc *sc)
+static void ath_fill_led_pin(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index d1d0c06d627c..14b13f07cd1f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2482,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
return -EINVAL;
}
+ ath9k_gpio_cap_init(ah);
+
if (AR_SREV_9485(ah) ||
AR_SREV_9285(ah) ||
AR_SREV_9330(ah) ||
@@ -2531,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
- ath9k_gpio_cap_init(ah);
-
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
else
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a394622c9022..e9f32b52fc8c 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,9 +718,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (!ath_complete_reset(sc, false))
ah->reset_power_on = false;
- if (ah->led_pin >= 0)
+ if (ah->led_pin >= 0) {
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 1 : 0);
+ ath9k_hw_gpio_request_out(ah, ah->led_pin, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ }
/*
* Reset key cache to sane defaults (all entries cleared) instead of
@@ -864,9 +867,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
- if (ah->led_pin >= 0)
+ if (ah->led_pin >= 0) {
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 0 : 1);
+ ath9k_hw_gpio_request_in(ah, ah->led_pin, NULL);
+ }
ath_prepare_reset(sc);
@@ -919,7 +924,7 @@ static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
} else {
if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP &&
vif->type == NL80211_IFTYPE_AP)
- iter_data->primary_beacon_vif = vif;
+ iter_data->primary_beacon_vif = vif;
}
iter_data->beacons = true;
@@ -1154,6 +1159,7 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
bool changed = (iter_data.primary_sta != ctx->primary_sta);
if (iter_data.primary_sta) {
+ iter_data.primary_beacon_vif = iter_data.primary_sta;
iter_data.beacons = true;
ath9k_set_assoc_state(sc, iter_data.primary_sta,
changed);
@@ -1563,13 +1569,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
- if (old_state == IEEE80211_STA_AUTH &&
- new_state == IEEE80211_STA_ASSOC) {
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
ret = ath9k_sta_add(hw, vif, sta);
ath_dbg(common, CONFIG,
"Add station: %pM\n", sta->addr);
- } else if (old_state == IEEE80211_STA_ASSOC &&
- new_state == IEEE80211_STA_AUTH) {
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
ret = ath9k_sta_remove(hw, vif, sta);
ath_dbg(common, CONFIG,
"Remove station: %pM\n", sta->addr);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 8ddd604bd00c..52bfbb988611 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -50,9 +50,11 @@ static u16 bits_per_symbol[][2] = {
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb);
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- int tx_flags, struct ath_txq *txq);
+ int tx_flags, struct ath_txq *txq,
+ struct ieee80211_sta *sta);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
+ struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
@@ -77,6 +79,22 @@ enum {
/* Aggregation logic */
/*********************/
+static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->status.status_driver_data[0];
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ ieee80211_tx_status(hw, skb);
+ return;
+ }
+
+ if (sta)
+ ieee80211_tx_status_noskb(hw, sta, info);
+
+ dev_kfree_skb(skb);
+}
+
void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
__acquires(&txq->axq_lock)
{
@@ -92,6 +110,7 @@ void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
+ struct ieee80211_hw *hw = sc->hw;
struct sk_buff_head q;
struct sk_buff *skb;
@@ -100,7 +119,7 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&q)))
- ieee80211_tx_status(sc->hw, skb);
+ ath_tx_status(hw, skb);
}
static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
@@ -253,7 +272,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
}
list_add_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
if (sendbar) {
@@ -318,12 +337,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
- ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+ ath_tx_complete(sc, skb, ATH_TX_ERROR, txq, NULL);
continue;
}
list_add_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
}
}
@@ -426,15 +445,14 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct list_head *bf_q,
+ struct ieee80211_sta *sta,
+ struct ath_atx_tid *tid,
struct ath_tx_status *ts, int txok)
{
struct ath_node *an = NULL;
struct sk_buff *skb;
- struct ieee80211_sta *sta;
- struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
- struct ath_atx_tid *tid = NULL;
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
@@ -460,12 +478,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
for (i = 0; i < ts->ts_rateindex; i++)
retries += rates[i].count;
- rcu_read_lock();
-
- sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
if (!sta) {
- rcu_read_unlock();
-
INIT_LIST_HEAD(&bf_head);
while (bf) {
bf_next = bf->bf_next;
@@ -473,7 +486,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, ts, 0);
bf = bf_next;
}
@@ -481,7 +494,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
an = (struct ath_node *)sta->drv_priv;
- tid = ath_get_skb_tid(sc, an, skb);
seq_first = tid->seq_start;
isba = ts->ts_flags & ATH9K_TX_BA;
@@ -583,7 +595,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ts);
}
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
!txfail);
} else {
if (tx_info->flags & IEEE80211_TX_STATUS_EOSP) {
@@ -604,7 +616,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq,
- &bf_head, ts, 0);
+ &bf_head, NULL, ts,
+ 0);
bar_index = max_t(int, bar_index,
ATH_BA_INDEX(seq_first, seqno));
break;
@@ -648,8 +661,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_txq_lock(sc, txq);
}
- rcu_read_unlock();
-
if (needreset)
ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
@@ -664,7 +675,11 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
{
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *info;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct ath_atx_tid *tid = NULL;
bool txok, flush;
txok = !(ts->ts_status & ATH9K_TXERR_MASK);
@@ -677,6 +692,16 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ts->duration = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc,
ts->ts_rateindex);
+
+ hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
+ sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
+ if (sta) {
+ struct ath_node *an = (struct ath_node *)sta->drv_priv;
+ tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+ if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
+ tid->clear_ps_filter = true;
+ }
+
if (!bf_isampdu(bf)) {
if (!flush) {
info = IEEE80211_SKB_CB(bf->bf_mpdu);
@@ -685,9 +710,9 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
}
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else
- ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
+ ath_tx_complete_aggr(sc, txq, bf, bf_head, sta, tid, ts, txok);
if (!flush)
ath_txq_schedule(sc, txq);
@@ -923,7 +948,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
list_add(&bf->list, &bf_head);
__skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
continue;
}
@@ -1832,6 +1857,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
+ rcu_read_lock();
ath_txq_lock(sc, txq);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -1850,6 +1876,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq)
ath_drain_txq_list(sc, txq, &txq->axq_q);
ath_txq_unlock_complete(sc, txq);
+ rcu_read_unlock();
}
bool ath_drain_all_txq(struct ath_softc *sc)
@@ -2472,7 +2499,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- int tx_flags, struct ath_txq *txq)
+ int tx_flags, struct ath_txq *txq,
+ struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2492,15 +2520,17 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- padpos = ieee80211_hdrlen(hdr->frame_control);
- padsize = padpos & 3;
- if (padsize && skb->len>padpos+padsize) {
- /*
- * Remove MAC header padding before giving the frame back to
- * mac80211.
- */
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
+ if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ padpos = ieee80211_hdrlen(hdr->frame_control);
+ padsize = padpos & 3;
+ if (padsize && skb->len > padpos + padsize) {
+ /*
+ * Remove MAC header padding before giving the frame back to
+ * mac80211.
+ */
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
}
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -2515,12 +2545,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- __skb_queue_tail(&txq->complete_q, skb);
ath_txq_skb_done(sc, txq, skb);
+ tx_info->status.status_driver_data[0] = sta;
+ __skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
+ struct ieee80211_sta *sta,
struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -2548,7 +2580,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
complete(&sc->paprd_complete);
} else {
ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
- ath_tx_complete(sc, skb, tx_flags, txq);
+ ath_tx_complete(sc, skb, tx_flags, txq, sta);
}
skip_tx_complete:
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -2700,10 +2732,12 @@ void ath_tx_tasklet(struct ath_softc *sc)
u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1) & ah->intr_txqs;
int i;
+ rcu_read_lock();
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
ath_tx_processq(sc, &sc->tx.txq[i]);
}
+ rcu_read_unlock();
}
void ath_tx_edma_tasklet(struct ath_softc *sc)
@@ -2717,6 +2751,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head *fifo_list;
int status;
+ rcu_read_lock();
for (;;) {
if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
break;
@@ -2787,6 +2822,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
ath_txq_unlock_complete(sc, txq);
}
+ rcu_read_unlock();
}
/*****************/
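The xmit.c changes replace the per-aggregate station lookup with a single ieee80211_find_sta_by_ifaddr() call in ath_tx_process_buffer(), stash the result in the skb's status_driver_data, and let ath_tx_status() report frames that never asked for an explicit status via ieee80211_tx_status_noskb(). Because the looked-up pointer is RCU-protected and is dereferenced after the lookup, the completion paths (both tasklets and ath_draintxq()) now bracket the whole pass in rcu_read_lock(). A hedged sketch of that bracketing, with hypothetical stubs for the driver pieces:
void rcu_read_lock(void);
void rcu_read_unlock(void);
static void process_completions(int txq);	/* may look up and stash a sta */
static void flush_complete_queue(int txq);	/* dereferences the stashed sta */
static void tx_tasklet(int num_queues)
{
	int i;
	rcu_read_lock();	/* pins the sta pointers for the whole pass */
	for (i = 0; i < num_queues; i++) {
		process_completions(i);
		flush_complete_queue(i);
	}
	rcu_read_unlock();
}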
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 76842e6ca38e..99ab20334d21 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -670,6 +670,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
ar->readlen = outlen;
spin_unlock_bh(&ar->cmd_lock);
+ reinit_completion(&ar->cmd_wait);
err = __carl9170_exec_cmd(ar, &ar->cmd, false);
if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
@@ -778,10 +779,7 @@ void carl9170_usb_stop(struct ar9170 *ar)
spin_lock_bh(&ar->cmd_lock);
ar->readlen = 0;
spin_unlock_bh(&ar->cmd_lock);
- complete_all(&ar->cmd_wait);
-
- /* This is required to prevent an early completion on _start */
- reinit_completion(&ar->cmd_wait);
+ complete(&ar->cmd_wait);
/*
* Note:
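The carl9170 fix above re-arms cmd_wait immediately before each command is issued, and lets carl9170_usb_stop() post a single complete() instead of complete_all() followed by reinit_completion(), a window in which a concurrent waiter could slip through. A hedged sketch of the resulting request/stop pattern, with stubs for the completion API:
struct completion;
void reinit_completion(struct completion *c);
void complete(struct completion *c);
unsigned long wait_for_completion_timeout(struct completion *c,
					  unsigned long timeout);
static void issue_command(void);
static int exec_cmd(struct completion *cmd_wait, unsigned long timeout)
{
	reinit_completion(cmd_wait);	/* arm before the command goes out */
	issue_command();
	if (!wait_for_completion_timeout(cmd_wait, timeout))
		return -1;	/* timed out; stop paths wake us with complete() */
	return 0;
}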
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 2f8136d50f78..4100ffd42a43 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -338,7 +338,7 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
return true;
}
-static struct dfs_pattern_detector default_dpd = {
+static const struct dfs_pattern_detector default_dpd = {
.exit = dpd_exit,
.set_dfs_domain = dpd_set_domain,
.add_pulse = dpd_add_pulse,
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index f0e1175fb76a..d117240d9a73 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -354,10 +354,13 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
__func__, wdev, wdev->iftype);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
+ mutex_unlock(&wil->p2p_wdev_mutex);
return -EAGAIN;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
/* check we are client side */
switch (wdev->iftype) {
@@ -760,14 +763,11 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}
-static struct wil_tid_crypto_rx_single *
-wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
- enum wmi_key_usage key_usage, const u8 *mac_addr)
+static struct wil_sta_info *
+wil_find_sta_by_key_usage(struct wil6210_priv *wil,
+ enum wmi_key_usage key_usage, const u8 *mac_addr)
{
int cid = -EINVAL;
- int tid = 0;
- struct wil_sta_info *s;
- struct wil_tid_crypto_rx *c;
if (key_usage == WMI_KEY_USE_TX_GROUP)
return NULL; /* not needed */
@@ -778,18 +778,72 @@ wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
else if (key_usage == WMI_KEY_USE_RX_GROUP)
cid = wil_find_cid_by_idx(wil, 0);
if (cid < 0) {
- wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
- key_usage_str[key_usage], key_index);
+ wil_err(wil, "No CID for %pM %s\n", mac_addr,
+ key_usage_str[key_usage]);
return ERR_PTR(cid);
}
- s = &wil->sta[cid];
- if (key_usage == WMI_KEY_USE_PAIRWISE)
- c = &s->tid_crypto_rx[tid];
- else
- c = &s->group_crypto_rx;
+ return &wil->sta[cid];
+}
+
+static void wil_set_crypto_rx(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs,
+ struct key_params *params)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
- return &c->key_id[key_index];
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq,
+ IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ if (params->seq)
+ memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void wil_del_rx_key(u8 key_index, enum wmi_key_usage key_usage,
+ struct wil_sta_info *cs)
+{
+ struct wil_tid_crypto_rx_single *cc;
+ int tid;
+
+ if (!cs)
+ return;
+
+ switch (key_usage) {
+ case WMI_KEY_USE_PAIRWISE:
+ for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
+ cc = &cs->tid_crypto_rx[tid].key_id[key_index];
+ cc->key_set = false;
+ }
+ break;
+ case WMI_KEY_USE_RX_GROUP:
+ cc = &cs->group_crypto_rx.key_id[key_index];
+ cc->key_set = false;
+ break;
+ default:
+ break;
+ }
}
static int wil_cfg80211_add_key(struct wiphy *wiphy,
@@ -801,24 +855,26 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
- key_index,
- key_usage,
- mac_addr);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+ mac_addr);
+
+ if (!params) {
+ wil_err(wil, "NULL params\n");
+ return -EINVAL;
+ }
wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
- if (IS_ERR(cc)) {
+ if (IS_ERR(cs)) {
wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
__func__, mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
- if (cc)
- cc->key_set = false;
+ wil_del_rx_key(key_index, key_usage, cs);
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
@@ -831,13 +887,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
params->key, key_usage);
- if ((rc == 0) && cc) {
- if (params->seq)
- memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
- else
- memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
- cc->key_set = true;
- }
+ if (!rc)
+ wil_set_crypto_rx(key_index, key_usage, cs, params);
return rc;
}
@@ -849,20 +900,18 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
- struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
- key_index,
- key_usage,
- mac_addr);
+ struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
+ mac_addr);
wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
key_usage_str[key_usage], key_index);
- if (IS_ERR(cc))
+ if (IS_ERR(cs))
wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
mac_addr, key_usage_str[key_usage], key_index);
- if (!IS_ERR_OR_NULL(cc))
- cc->key_set = false;
+ if (!IS_ERR_OR_NULL(cs))
+ wil_del_rx_key(key_index, key_usage, cs);
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
@@ -1363,23 +1412,16 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
struct wireless_dev *wdev)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- u8 started;
+ struct wil_p2p_info *p2p = &wil->p2p;
+
+ if (!p2p->p2p_dev_started)
+ return;
wil_dbg_misc(wil, "%s: entered\n", __func__);
mutex_lock(&wil->mutex);
- started = wil_p2p_stop_discovery(wil);
- if (started && wil->scan_request) {
- struct cfg80211_scan_info info = {
- .aborted = true,
- };
-
- cfg80211_scan_done(wil->scan_request, &info);
- wil->scan_request = NULL;
- wil->radio_wdev = wil->wdev;
- }
+ wil_p2p_stop_radio_operations(wil);
+ p2p->p2p_dev_started = 0;
mutex_unlock(&wil->mutex);
-
- wil->p2p.p2p_dev_started = 0;
}
static struct cfg80211_ops wil_cfg80211_ops = {
@@ -1464,14 +1506,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev)
set_wiphy_dev(wdev->wiphy, dev);
wil_wiphy_init(wdev->wiphy);
- rc = wiphy_register(wdev->wiphy);
- if (rc < 0)
- goto out_failed_reg;
-
return wdev;
-out_failed_reg:
- wiphy_free(wdev->wiphy);
out:
kfree(wdev);
@@ -1487,7 +1523,6 @@ void wil_wdev_free(struct wil6210_priv *wil)
if (!wdev)
return;
- wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}
@@ -1498,11 +1533,11 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
mutex_lock(&wil->p2p_wdev_mutex);
p2p_wdev = wil->p2p_wdev;
+ wil->p2p_wdev = NULL;
+ wil->radio_wdev = wil_to_wdev(wil);
+ mutex_unlock(&wil->p2p_wdev_mutex);
if (p2p_wdev) {
- wil->p2p_wdev = NULL;
- wil->radio_wdev = wil_to_wdev(wil);
cfg80211_unregister_wdev(p2p_wdev);
kfree(p2p_wdev);
}
- mutex_unlock(&wil->p2p_wdev_mutex);
}
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index a8098b406cc0..5e4058a4037b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1553,6 +1553,56 @@ static const struct file_operations fops_led_blink_time = {
.open = simple_open,
};
+/*---------FW capabilities------------*/
+static int wil_fw_capabilities_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ seq_printf(s, "fw_capabilities : %*pb\n", WMI_FW_CAPABILITY_MAX,
+ wil->fw_capabilities);
+
+ return 0;
+}
+
+static int wil_fw_capabilities_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_capabilities_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_capabilities = {
+ .open = wil_fw_capabilities_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+/*---------FW version------------*/
+static int wil_fw_version_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+
+ if (wil->fw_version[0])
+ seq_printf(s, "%s\n", wil->fw_version);
+ else
+ seq_puts(s, "N/A\n");
+
+ return 0;
+}
+
+static int wil_fw_version_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_fw_version_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations fops_fw_version = {
+ .open = wil_fw_version_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1603,6 +1653,8 @@ static const struct {
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
{"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
{"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
+ {"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
+ {"fw_version", S_IRUGO, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1643,7 +1695,6 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(privacy, S_IRUGO, doff_u32),
WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
- WIL_FIELD(fw_version, S_IRUGO, doff_u32),
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index 7a2c6c129ad5..2f2b910501ba 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -58,6 +58,15 @@ struct wil_fw_record_comment { /* type == wil_fw_type_comment */
u8 data[0]; /* free-form data [data_size], see above */
} __packed;
+/* FW capabilities encoded inside a comment record */
+#define WIL_FW_CAPABILITIES_MAGIC (0xabcddcba)
+struct wil_fw_record_capabilities { /* type == wil_fw_type_comment */
+ /* identifies capabilities record */
+ __le32 magic;
+ /* capabilities (variable size), see enum wmi_fw_capability */
+ u8 capabilities[0];
+} __packed;
+
/* perform action
* data_size = @head.size - offsetof(struct wil_fw_record_action, data)
*/
@@ -93,6 +102,9 @@ struct wil_fw_record_verify { /* type == wil_fw_verify */
/* file header
* First record of every file
*/
+/* the FW version prefix in the comment */
+#define WIL_FW_VERSION_PREFIX "FW version: "
+#define WIL_FW_VERSION_PREFIX_LEN (sizeof(WIL_FW_VERSION_PREFIX) - 1)
struct wil_fw_record_file_header {
__le32 signature; /* Wilocity signature */
__le32 reserved;
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index d30657ee7e83..8f40eb301924 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -118,6 +118,12 @@ static int wil_fw_verify(struct wil6210_priv *wil, const u8 *data, size_t size)
return (int)dlen;
}
+static int fw_ignore_section(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ return 0;
+}
+
static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -126,6 +132,27 @@ static int fw_handle_comment(struct wil6210_priv *wil, const void *data,
return 0;
}
+static int
+fw_handle_capabilities(struct wil6210_priv *wil, const void *data,
+ size_t size)
+{
+ const struct wil_fw_record_capabilities *rec = data;
+ size_t capa_size;
+
+ if (size < sizeof(*rec) ||
+ le32_to_cpu(rec->magic) != WIL_FW_CAPABILITIES_MAGIC)
+ return 0;
+
+ capa_size = size - offsetof(struct wil_fw_record_capabilities,
+ capabilities);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
+ memcpy(wil->fw_capabilities, rec->capabilities,
+ min(sizeof(wil->fw_capabilities), capa_size));
+ wil_hex_dump_fw("CAPA", DUMP_PREFIX_OFFSET, 16, 1,
+ rec->capabilities, capa_size, false);
+ return 0;
+}
+
static int fw_handle_data(struct wil6210_priv *wil, const void *data,
size_t size)
{
@@ -196,6 +223,13 @@ static int fw_handle_file_header(struct wil6210_priv *wil, const void *data,
wil_hex_dump_fw("", DUMP_PREFIX_OFFSET, 16, 1, d->comment,
sizeof(d->comment), true);
+ if (!memcmp(d->comment, WIL_FW_VERSION_PREFIX,
+ WIL_FW_VERSION_PREFIX_LEN))
+ memcpy(wil->fw_version,
+ d->comment + WIL_FW_VERSION_PREFIX_LEN,
+ min(sizeof(d->comment) - WIL_FW_VERSION_PREFIX_LEN,
+ sizeof(wil->fw_version) - 1));
+
return 0;
}
@@ -383,42 +417,51 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
static const struct {
int type;
- int (*handler)(struct wil6210_priv *wil, const void *data, size_t size);
+ int (*load_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
+ int (*parse_handler)(struct wil6210_priv *wil, const void *data,
+ size_t size);
} wil_fw_handlers[] = {
- {wil_fw_type_comment, fw_handle_comment},
- {wil_fw_type_data, fw_handle_data},
- {wil_fw_type_fill, fw_handle_fill},
+ {wil_fw_type_comment, fw_handle_comment, fw_handle_capabilities},
+ {wil_fw_type_data, fw_handle_data, fw_ignore_section},
+ {wil_fw_type_fill, fw_handle_fill, fw_ignore_section},
/* wil_fw_type_action */
/* wil_fw_type_verify */
- {wil_fw_type_file_header, fw_handle_file_header},
- {wil_fw_type_direct_write, fw_handle_direct_write},
- {wil_fw_type_gateway_data, fw_handle_gateway_data},
- {wil_fw_type_gateway_data4, fw_handle_gateway_data4},
+ {wil_fw_type_file_header, fw_handle_file_header,
+ fw_handle_file_header},
+ {wil_fw_type_direct_write, fw_handle_direct_write, fw_ignore_section},
+ {wil_fw_type_gateway_data, fw_handle_gateway_data, fw_ignore_section},
+ {wil_fw_type_gateway_data4, fw_handle_gateway_data4,
+ fw_ignore_section},
};
static int wil_fw_handle_record(struct wil6210_priv *wil, int type,
- const void *data, size_t size)
+ const void *data, size_t size, bool load)
{
int i;
- for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++) {
+ for (i = 0; i < ARRAY_SIZE(wil_fw_handlers); i++)
if (wil_fw_handlers[i].type == type)
- return wil_fw_handlers[i].handler(wil, data, size);
- }
+ return load ?
+ wil_fw_handlers[i].load_handler(
+ wil, data, size) :
+ wil_fw_handlers[i].parse_handler(
+ wil, data, size);
wil_err_fw(wil, "unknown record type: %d\n", type);
return -EINVAL;
}
/**
- * wil_fw_load - load FW into device
- *
- * Load the FW and uCode code and data to the corresponding device
- * memory regions
+ * wil_fw_process - process section from FW file
+ * if load is true: Load the FW and uCode code and data to the
+ * corresponding device memory regions,
+ * otherwise only parse and look for capabilities
*
* Return error code
*/
-static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
+static int wil_fw_process(struct wil6210_priv *wil, const void *data,
+ size_t size, bool load)
{
int rc = 0;
const struct wil_fw_record_head *hdr;
@@ -437,7 +480,7 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
return -EINVAL;
}
rc = wil_fw_handle_record(wil, le16_to_cpu(hdr->type),
- &hdr[1], hdr_sz);
+ &hdr[1], hdr_sz, load);
if (rc)
return rc;
}
@@ -456,13 +499,16 @@ static int wil_fw_load(struct wil6210_priv *wil, const void *data, size_t size)
}
/**
- * wil_request_firmware - Request firmware and load to device
+ * wil_request_firmware - Request firmware
*
- * Request firmware image from the file and load it to device
+ * Request firmware image from the file
+ * If load is true, load firmware to device, otherwise
+ * only parse and extract capabilities
*
* Return error code
*/
-int wil_request_firmware(struct wil6210_priv *wil, const char *name)
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load)
{
int rc, rc1;
const struct firmware *fw;
@@ -482,7 +528,7 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name)
rc = rc1;
goto out;
}
- rc = wil_fw_load(wil, d, rc1);
+ rc = wil_fw_process(wil, d, rc1, load);
if (rc < 0)
goto out;
}
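With the fw_inc.c rework, every record type carries a pair of handlers: load_handler performs the device writes, while parse_handler is a read-only pass that only extracts capabilities (fw_ignore_section for anything that would touch hardware), so wil_set_capabilities() can scan the firmware file before it is ever loaded. A self-contained sketch of the two-handler dispatch, assuming simplified types:
#include <stddef.h>
typedef int (*handler_t)(const void *data, size_t size);
struct record_handler {
	int type;
	handler_t load_handler;		/* writes to the device */
	handler_t parse_handler;	/* read-only capability scan */
};
static int dispatch(const struct record_handler *tbl, size_t n, int type,
		    const void *data, size_t size, int load)
{
	size_t i;
	for (i = 0; i < n; i++)
		if (tbl[i].type == type)
			return load ? tbl[i].load_handler(data, size)
				    : tbl[i].parse_handler(data, size);
	return -1;	/* unknown record type */
}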
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 011e7412dcc0..64046e0bd0a2 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -101,7 +101,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
}
-static void wil6210_mask_halp(struct wil6210_priv *wil)
+void wil6210_mask_halp(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "%s()\n", __func__);
@@ -503,6 +503,13 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
offsetof(struct RGF_ICR, ICR));
u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
offsetof(struct RGF_ICR, IMV));
+
+ /* HALP interrupt can be unmasked when misc interrupts are
+ * masked
+ */
+ if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
+ return 0;
+
wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
"Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
"Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -592,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -600,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "%s()\n", __func__);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 4bc92e54984a..e7130b54d1d8 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -232,6 +232,9 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
+ if (unlikely(!ndev))
+ return;
+
might_sleep();
wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
reason_code, from_event ? "+" : "-");
@@ -849,6 +852,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
bitmap_zero(wil->status, wil_status_last);
mutex_unlock(&wil->wmi_mutex);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@@ -860,6 +864,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil_mask_irq(wil);
@@ -888,11 +893,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
WIL_FW2_NAME);
wil_halt_cpu(wil);
+ memset(wil->fw_version, 0, sizeof(wil->fw_version));
/* Loading f/w from the file */
- rc = wil_request_firmware(wil, WIL_FW_NAME);
+ rc = wil_request_firmware(wil, WIL_FW_NAME, true);
if (rc)
return rc;
- rc = wil_request_firmware(wil, WIL_FW2_NAME);
+ rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
if (rc)
return rc;
@@ -1035,10 +1041,10 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
- int rc;
-
WARN_ON(!mutex_is_locked(&wil->mutex));
+ set_bit(wil_status_resetting, wil->status);
+
if (wil->platform_ops.bus_request)
wil->platform_ops.bus_request(wil->platform_handle, 0);
@@ -1050,8 +1056,9 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
- (void)wil_p2p_stop_discovery(wil);
+ wil_p2p_stop_radio_operations(wil);
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct cfg80211_scan_info info = {
.aborted = true,
@@ -1063,18 +1070,7 @@ int __wil_down(struct wil6210_priv *wil)
cfg80211_scan_done(wil->scan_request, &info);
wil->scan_request = NULL;
}
-
- if (test_bit(wil_status_fwconnected, wil->status) ||
- test_bit(wil_status_fwconnecting, wil->status)) {
-
- mutex_unlock(&wil->mutex);
- rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0,
- WMI_DISCONNECT_EVENTID, NULL, 0,
- WIL6210_DISCONNECT_TO_MS);
- mutex_lock(&wil->mutex);
- if (rc)
- wil_err(wil, "timeout waiting for disconnect\n");
- }
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);
@@ -1118,23 +1114,26 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
- if (!rc)
+ if (!rc) {
wil_err(wil, "%s: HALP vote timed out\n", __func__);
- else
- wil_dbg_misc(wil,
- "%s: HALP vote completed after %d ms\n",
- __func__,
- jiffies_to_msecs(to_jiffies - rc));
+ /* Mask HALP as done in case the interrupt is raised */
+ wil6210_mask_halp(wil);
+ } else {
+ wil_dbg_irq(wil,
+ "%s: HALP vote completed after %d ms\n",
+ __func__,
+ jiffies_to_msecs(to_jiffies - rc));
+ }
}
- wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
@@ -1145,16 +1144,16 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
- wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
+ wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
}
- wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
- wil->halp.ref_cnt);
+ wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
}
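The HALP changes above keep the familiar voting shape: the first voter arms HALP and waits for the acknowledge interrupt (now masking HALP again if the wait times out, so a late interrupt cannot fire unexpectedly), and the last unvoter clears it. A hedged sketch of the refcounted vote/unvote pattern, with hypothetical stubs:
struct halp { int ref_cnt; };
static void lock(struct halp *h);
static void unlock(struct halp *h);
static void arm_and_wait(struct halp *h);	/* masks again on timeout */
static void clear(struct halp *h);
static void halp_vote(struct halp *h)
{
	lock(h);
	if (++h->ref_cnt == 1)
		arm_and_wait(h);	/* only the first voter pays the wait */
	unlock(h);
}
static void halp_unvote(struct halp *h)
{
	lock(h);
	if (--h->ref_cnt == 0)
		clear(h);	/* only the last voter clears HALP */
	unlock(h);
}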
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 098409753d5b..61de5e9f8ef0 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -179,13 +179,6 @@ void *wil_if_alloc(struct device *dev)
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
- netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
- WIL6210_NAPI_BUDGET);
- netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
- WIL6210_NAPI_BUDGET);
-
- netif_tx_stop_all_queues(ndev);
-
return wil;
out_priv:
@@ -216,25 +209,48 @@ void wil_if_free(struct wil6210_priv *wil)
int wil_if_add(struct wil6210_priv *wil)
{
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ struct wiphy *wiphy = wdev->wiphy;
struct net_device *ndev = wil_to_ndev(wil);
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "entered");
+
+ strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));
+
+ rc = wiphy_register(wiphy);
+ if (rc < 0) {
+ wil_err(wil, "failed to register wiphy, err %d\n", rc);
+ return rc;
+ }
+
+ netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
+ WIL6210_NAPI_BUDGET);
+ netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+ WIL6210_NAPI_BUDGET);
+
+ netif_tx_stop_all_queues(ndev);
rc = register_netdev(ndev);
if (rc < 0) {
dev_err(&ndev->dev, "Failed to register netdev: %d\n", rc);
- return rc;
+ goto out_wiphy;
}
return 0;
+
+out_wiphy:
+ wiphy_unregister(wdev->wiphy);
+ return rc;
}
void wil_if_remove(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *wdev = wil_to_wdev(wil);
wil_dbg_misc(wil, "%s()\n", __func__);
unregister_netdev(ndev);
+ wiphy_unregister(wdev->wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index e0f8aa0ebfac..4087785d3090 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -263,3 +263,49 @@ void wil_p2p_search_expired(struct work_struct *work)
mutex_unlock(&wil->p2p_wdev_mutex);
}
}
+
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ struct cfg80211_scan_info info = {
+ .aborted = true,
+ };
+
+ lockdep_assert_held(&wil->mutex);
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (wil->radio_wdev != wil->p2p_wdev)
+ goto out;
+
+ if (!p2p->discovery_started) {
+ /* Regular scan on the p2p device */
+ if (wil->scan_request &&
+ wil->scan_request->wdev == wil->p2p_wdev) {
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ }
+ goto out;
+ }
+
+ /* Search or listen on p2p device */
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ wil_p2p_stop_discovery(wil);
+ mutex_lock(&wil->p2p_wdev_mutex);
+
+ if (wil->scan_request) {
+ /* search */
+ cfg80211_scan_done(wil->scan_request, &info);
+ wil->scan_request = NULL;
+ } else {
+ /* listen */
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ }
+
+out:
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+}
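wil_p2p_stop_radio_operations() deliberately drops p2p_wdev_mutex around wil_p2p_stop_discovery(), which can block, and re-takes it to finish the scan/remain-on-channel cleanup; callers hold wil->mutex throughout, as the lockdep assertion documents. A hedged sketch of that drop-and-reacquire shape, with stubs:
struct mutex;
void mutex_lock(struct mutex *m);
void mutex_unlock(struct mutex *m);
static int discovery_started(void);
static void stop_discovery(void);	/* may block; must not hold the lock */
static void finish_cleanup(void);
static void stop_radio_ops(struct mutex *wdev_mutex)
{
	mutex_lock(wdev_mutex);
	if (discovery_started()) {
		mutex_unlock(wdev_mutex);	/* blocking call without the lock */
		stop_discovery();
		mutex_lock(wdev_mutex);		/* re-take to finish up */
	}
	finish_cleanup();
	mutex_unlock(wdev_mutex);
}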
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 7b5c4222bc33..44746ca0d2e6 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include "wil6210.h"
+#include <linux/rtnetlink.h>
static bool use_msi = true;
module_param(use_msi, bool, S_IRUGO);
@@ -38,6 +39,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
bitmap_zero(wil->hw_capabilities, hw_capability_last);
+ bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
switch (rev_id) {
case JTAG_DEV_ID_SPARROW_B0:
@@ -51,6 +53,9 @@ void wil_set_capabilities(struct wil6210_priv *wil)
}
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
+
+ /* extract FW capabilities from file without loading the FW */
+ wil_request_firmware(wil, WIL_FW_NAME, false);
}
void wil_disable_irq(struct wil6210_priv *wil)
@@ -293,6 +298,9 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM */
wil6210_debugfs_remove(wil);
+ rtnl_lock();
+ wil_p2p_wdev_free(wil);
+ rtnl_unlock();
wil_if_remove(wil);
wil_if_pcie_disable(wil);
pci_iounmap(pdev, csr);
@@ -300,7 +308,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
- wil_p2p_wdev_free(wil);
wil_if_free(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index f2f6a404d3d1..4c38520d4dd2 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -873,9 +873,12 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
rc = -EINVAL;
goto out_free;
}
- vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
+
if (txdata->dot1x_open && (agg_wsize >= 0))
wil_addba_tx_request(wil, id, agg_wsize);
@@ -950,9 +953,11 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
rc = -EINVAL;
goto out_free;
}
- vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
+ spin_lock_bh(&txdata->lock);
+ vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
txdata->enabled = 1;
+ spin_unlock_bh(&txdata->lock);
return 0;
out_free:
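Both vring-init paths above now publish the hardware tail pointer and the enabled flag as one unit under txdata->lock, so the TX path can never observe enabled == 1 with a stale hwtail. A hedged sketch of the publish side, with simplified types:
struct spinlock;
void spin_lock_bh(struct spinlock *l);
void spin_unlock_bh(struct spinlock *l);
struct txdata { struct spinlock *lock; unsigned int hwtail; int enabled; };
static void publish_vring(struct txdata *td, unsigned int tail)
{
	spin_lock_bh(td->lock);
	td->hwtail = tail;	/* visible to the TX path only together... */
	td->enabled = 1;	/* ...with the enable flag */
	spin_unlock_bh(td->lock);
}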
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ecab4af90602..a949cd62bc4e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -17,6 +17,7 @@
#ifndef __WIL6210_H__
#define __WIL6210_H__
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
@@ -576,10 +577,11 @@ struct wil6210_priv {
struct wireless_dev *wdev;
void __iomem *csr;
DECLARE_BITMAP(status, wil_status_last);
- u32 fw_version;
+ u8 fw_version[ETHTOOL_FWVERS_LEN];
u32 hw_version;
const char *hw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
+ DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
@@ -657,7 +659,7 @@ struct wil6210_priv {
/* P2P_DEVICE vif */
struct wireless_dev *p2p_wdev;
- struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+ struct mutex p2p_wdev_mutex; /* protect @p2p_wdev and @scan_request */
struct wireless_dev *radio_wdev;
/* High Access Latency Policy voting */
@@ -828,6 +830,7 @@ void wil_unmask_irq(struct wil6210_priv *wil);
void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
+void wil6210_mask_halp(struct wil6210_priv *wil);
/* P2P */
bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
@@ -840,6 +843,7 @@ u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
void wil_p2p_listen_expired(struct work_struct *work);
void wil_p2p_search_expired(struct work_struct *work);
+void wil_p2p_stop_radio_operations(struct wil6210_priv *wil);
/* WMI for P2P */
int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
@@ -893,7 +897,8 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
int wil_iftype_nl2wmi(enum nl80211_iftype type);
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
-int wil_request_firmware(struct wil6210_priv *wil, const char *name);
+int wil_request_firmware(struct wil6210_priv *wil, const char *name,
+ bool load);
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 4d92541913c0..fae4f1285d08 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -312,14 +312,14 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d;
- wil->fw_version = le32_to_cpu(evt->sw_version);
wil->n_mids = evt->numof_additional_mids;
- wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n",
+ wil->fw_version, le32_to_cpu(evt->sw_version),
evt->mac, wil->n_mids);
/* ignore MAC address, we already have it from the boot loader */
- snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
- "%d", wil->fw_version);
+ strlcpy(wdev->wiphy->fw_version, wil->fw_version,
+ sizeof(wdev->wiphy->fw_version));
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
@@ -424,6 +424,7 @@ static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
void *d, int len)
{
+ mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
struct wmi_scan_complete_event *data = d;
struct cfg80211_scan_info info = {
@@ -435,14 +436,13 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
wil->scan_request, info.aborted);
del_timer_sync(&wil->scan_timer);
- mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, &info);
wil->radio_wdev = wil->wdev;
- mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
}
+ mutex_unlock(&wil->p2p_wdev_mutex);
}
static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 685fe0ddea26..f430e8a80603 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -46,6 +46,16 @@ enum wmi_mid {
MID_BROADCAST = 0xFF,
};
+/* FW capability IDs
+ * Each ID maps to a bit in a 32-bit bitmask value provided by the FW to
+ * the host
+ */
+enum wmi_fw_capability {
+ WMI_FW_CAPABILITY_FTM = 0,
+ WMI_FW_CAPABILITY_PS_CONFIG = 1,
+ WMI_FW_CAPABILITY_MAX,
+};
+
/* WMI_CMD_HDR */
struct wmi_cmd_hdr {
u8 mid;
@@ -120,6 +130,8 @@ enum wmi_command_id {
WMI_BF_SM_MGMT_CMDID = 0x838,
WMI_BF_RXSS_MGMT_CMDID = 0x839,
WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_LINK_MAINTAIN_CFG_WRITE_CMDID = 0x842,
+ WMI_LINK_MAINTAIN_CFG_READ_CMDID = 0x843,
WMI_SET_SECTORS_CMDID = 0x849,
WMI_MAINTAIN_PAUSE_CMDID = 0x850,
WMI_MAINTAIN_RESUME_CMDID = 0x851,
@@ -134,10 +146,15 @@ enum wmi_command_id {
WMI_BF_CTRL_CMDID = 0x862,
WMI_NOTIFY_REQ_CMDID = 0x863,
WMI_GET_STATUS_CMDID = 0x864,
+ WMI_GET_RF_STATUS_CMDID = 0x866,
+ WMI_GET_BASEBAND_TYPE_CMDID = 0x867,
WMI_UNIT_TEST_CMDID = 0x900,
WMI_HICCUP_CMDID = 0x901,
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
+ /* Power management */
+ WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
+ WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
WMI_PORT_ALLOCATE_CMDID = 0x911,
@@ -150,6 +167,26 @@ enum wmi_command_id {
WMI_PCP_START_CMDID = 0x918,
WMI_PCP_STOP_CMDID = 0x919,
WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ /* Power Save Configuration Commands */
+ WMI_PS_DEV_PROFILE_CFG_CMDID = 0x91C,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_CMDID = 0x91D,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_READ_CMDID = 0x91E,
+ /* Per MAC Power Save Configuration commands
+ * Not supported yet
+ */
+ WMI_PS_MID_CFG_CMDID = 0x91F,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_READ_CMDID = 0x920,
+ WMI_RS_CFG_CMDID = 0x921,
+ WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
+ WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_TOF_SESSION_START_CMDID = 0x991,
+ WMI_TOF_GET_CAPABILITIES_CMDID = 0x992,
+ WMI_TOF_SET_LCR_CMDID = 0x993,
+ WMI_TOF_SET_LCI_CMDID = 0x994,
+ WMI_TOF_CHANNEL_INFO_CMDID = 0x995,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -291,9 +328,8 @@ enum wmi_scan_type {
/* WMI_START_SCAN_CMDID */
struct wmi_start_scan_cmd {
u8 direct_scan_mac_addr[WMI_MAC_LEN];
- /* DMG Beacon frame is transmitted during active scanning */
+ /* run scan with discovery beacon. Relevant for ACTIVE scan only. */
u8 discovery_mode;
- /* reserved */
u8 reserved;
/* Max duration in the home channel(ms) */
__le32 dwell_time;
@@ -453,6 +489,12 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;
+/* WMI_TRAFFIC_DEFERRAL_CMDID */
+struct wmi_traffic_deferral_cmd {
+ /* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
+ u8 wakeup_trigger;
+} __packed;
+
/* WMI_P2P_CFG_CMDID */
enum wmi_discovery_mode {
WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
@@ -818,85 +860,193 @@ struct wmi_pmc_cmd {
__le64 mem_base;
} __packed;
+enum wmi_aoa_meas_type {
+ WMI_AOA_PHASE_MEAS = 0x00,
+ WMI_AOA_PHASE_AMP_MEAS = 0x01,
+};
+
+/* WMI_AOA_MEAS_CMDID */
+struct wmi_aoa_meas_cmd {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channel IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ __le32 meas_rf_mask;
+} __packed;
+
+enum wmi_tof_burst_duration {
+ WMI_TOF_BURST_DURATION_250_USEC = 2,
+ WMI_TOF_BURST_DURATION_500_USEC = 3,
+ WMI_TOF_BURST_DURATION_1_MSEC = 4,
+ WMI_TOF_BURST_DURATION_2_MSEC = 5,
+ WMI_TOF_BURST_DURATION_4_MSEC = 6,
+ WMI_TOF_BURST_DURATION_8_MSEC = 7,
+ WMI_TOF_BURST_DURATION_16_MSEC = 8,
+ WMI_TOF_BURST_DURATION_32_MSEC = 9,
+ WMI_TOF_BURST_DURATION_64_MSEC = 10,
+ WMI_TOF_BURST_DURATION_128_MSEC = 11,
+ WMI_TOF_BURST_DURATION_NO_PREFERENCES = 15,
+};
+
+enum wmi_tof_session_start_flags {
+ WMI_TOF_SESSION_START_FLAG_SECURED = 0x1,
+ WMI_TOF_SESSION_START_FLAG_ASAP = 0x2,
+ WMI_TOF_SESSION_START_FLAG_LCI_REQ = 0x4,
+ WMI_TOF_SESSION_START_FLAG_LCR_REQ = 0x8,
+};
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_ftm_dest_info {
+ u8 channel;
+ /* wmi_tof_session_start_flags_e */
+ u8 flags;
+ u8 initial_token;
+ u8 num_of_ftm_per_burst;
+ u8 num_of_bursts_exp;
+ /* wmi_tof_burst_duration_e */
+ u8 burst_duration;
+ /* Burst period indicates the interval between two consecutive
+ * burst instances, in units of 100 ms
+ */
+ __le16 burst_period;
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 reserved;
+} __packed;
+
+/* WMI_TOF_SESSION_START_CMDID */
+struct wmi_tof_session_start_cmd {
+ __le32 session_id;
+ u8 num_of_aoa_measures;
+ u8 aoa_type;
+ __le16 num_of_dest;
+ u8 reserved[4];
+ struct wmi_ftm_dest_info ftm_dest_info[0];
+} __packed;
+
+enum wmi_tof_channel_info_report_type {
+ WMI_TOF_CHANNEL_INFO_TYPE_CIR = 0x1,
+ WMI_TOF_CHANNEL_INFO_TYPE_RSSI = 0x2,
+ WMI_TOF_CHANNEL_INFO_TYPE_SNR = 0x4,
+ WMI_TOF_CHANNEL_INFO_TYPE_DEBUG_DATA = 0x8,
+ WMI_TOF_CHANNEL_INFO_TYPE_VENDOR_SPECIFIC = 0x10,
+};
+
+/* WMI_TOF_CHANNEL_INFO_CMDID */
+struct wmi_tof_channel_info_cmd {
+ /* wmi_tof_channel_info_report_type_e */
+ __le32 channel_info_report_request;
+} __packed;
+
/* WMI Events
* List of Events (target to host)
*/
enum wmi_event_id {
- WMI_READY_EVENTID = 0x1001,
- WMI_CONNECT_EVENTID = 0x1002,
- WMI_DISCONNECT_EVENTID = 0x1003,
- WMI_SCAN_COMPLETE_EVENTID = 0x100A,
- WMI_REPORT_STATISTICS_EVENTID = 0x100B,
- WMI_RD_MEM_RSP_EVENTID = 0x1800,
- WMI_FW_READY_EVENTID = 0x1801,
- WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
- WMI_ECHO_RSP_EVENTID = 0x1803,
- WMI_FS_TUNE_DONE_EVENTID = 0x180A,
- WMI_CORR_MEASURE_EVENTID = 0x180B,
- WMI_READ_RSSI_EVENTID = 0x180C,
- WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
- WMI_DC_CALIB_DONE_EVENTID = 0x180F,
- WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
- WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
- WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
- WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
- WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
- WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
- WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
- WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
- WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
- WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
- WMI_VRING_CFG_DONE_EVENTID = 0x1821,
- WMI_BA_STATUS_EVENTID = 0x1823,
- WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
- WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
- WMI_DELBA_EVENTID = 0x1826,
- WMI_GET_SSID_EVENTID = 0x1828,
- WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
- WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
- WMI_READ_MAC_RXQ_EVENTID = 0x1830,
- WMI_READ_MAC_TXQ_EVENTID = 0x1831,
- WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
- WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
- WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
- WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
- WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
- WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
- WMI_RS_MGMT_DONE_EVENTID = 0x1852,
- WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
- WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
- WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
- WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
- WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
- WMI_OTP_READ_RESULT_EVENTID = 0x1856,
- WMI_LED_CFG_DONE_EVENTID = 0x1858,
+ WMI_READY_EVENTID = 0x1001,
+ WMI_CONNECT_EVENTID = 0x1002,
+ WMI_DISCONNECT_EVENTID = 0x1003,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100A,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100B,
+ WMI_RD_MEM_RSP_EVENTID = 0x1800,
+ WMI_FW_READY_EVENTID = 0x1801,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
+ WMI_ECHO_RSP_EVENTID = 0x1803,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180A,
+ WMI_CORR_MEASURE_EVENTID = 0x180B,
+ WMI_READ_RSSI_EVENTID = 0x180C,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180F,
+ WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
+ WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
+ WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
+ WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
+ WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
+ WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
+ WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
+ WMI_VRING_CFG_DONE_EVENTID = 0x1821,
+ WMI_BA_STATUS_EVENTID = 0x1823,
+ WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
+ WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
+ WMI_DELBA_EVENTID = 0x1826,
+ WMI_GET_SSID_EVENTID = 0x1828,
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
+ WMI_READ_MAC_RXQ_EVENTID = 0x1830,
+ WMI_READ_MAC_TXQ_EVENTID = 0x1831,
+ WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
+ WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
+ WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
+ WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
+ WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
+ WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_RS_MGMT_DONE_EVENTID = 0x1852,
+ WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
+ WMI_THERMAL_THROTTLING_STATUS_EVENTID = 0x1855,
+ WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
+ WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
+ WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
+ WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID = 0x1842,
+ WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID = 0x1843,
+ WMI_OTP_READ_RESULT_EVENTID = 0x1856,
+ WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
- WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
- WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
- WMI_BF_CTRL_DONE_EVENTID = 0x1862,
- WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
- WMI_GET_STATUS_DONE_EVENTID = 0x1864,
- WMI_VRING_EN_EVENTID = 0x1865,
- WMI_UNIT_TEST_EVENTID = 0x1900,
- WMI_FLASH_READ_DONE_EVENTID = 0x1902,
- WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
+ WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
+ WMI_BF_CTRL_DONE_EVENTID = 0x1862,
+ WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
+ WMI_GET_STATUS_DONE_EVENTID = 0x1864,
+ WMI_VRING_EN_EVENTID = 0x1865,
+ WMI_GET_RF_STATUS_EVENTID = 0x1866,
+ WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867,
+ WMI_UNIT_TEST_EVENTID = 0x1900,
+ WMI_FLASH_READ_DONE_EVENTID = 0x1902,
+ WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
+ /* Power management */
+ WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
+ WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
- WMI_P2P_CFG_DONE_EVENTID = 0x1910,
- WMI_PORT_ALLOCATED_EVENTID = 0x1911,
- WMI_PORT_DELETED_EVENTID = 0x1912,
- WMI_LISTEN_STARTED_EVENTID = 0x1914,
- WMI_SEARCH_STARTED_EVENTID = 0x1915,
- WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
- WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
- WMI_PCP_STARTED_EVENTID = 0x1918,
- WMI_PCP_STOPPED_EVENTID = 0x1919,
- WMI_PCP_FACTOR_EVENTID = 0x191A,
- WMI_SET_CHANNEL_EVENTID = 0x9000,
- WMI_ASSOC_REQ_EVENTID = 0x9001,
- WMI_EAPOL_RX_EVENTID = 0x9002,
- WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
- WMI_FW_VER_EVENTID = 0x9004,
- WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
+ WMI_P2P_CFG_DONE_EVENTID = 0x1910,
+ WMI_PORT_ALLOCATED_EVENTID = 0x1911,
+ WMI_PORT_DELETED_EVENTID = 0x1912,
+ WMI_LISTEN_STARTED_EVENTID = 0x1914,
+ WMI_SEARCH_STARTED_EVENTID = 0x1915,
+ WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
+ WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
+ WMI_PCP_STARTED_EVENTID = 0x1918,
+ WMI_PCP_STOPPED_EVENTID = 0x1919,
+ WMI_PCP_FACTOR_EVENTID = 0x191A,
+ /* Power Save Configuration Events */
+ WMI_PS_DEV_PROFILE_CFG_EVENTID = 0x191C,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_EVENTID = 0x191D,
+ /* Not supported yet */
+ WMI_PS_DEV_CFG_READ_EVENTID = 0x191E,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_EVENTID = 0x191F,
+ /* Not supported yet */
+ WMI_PS_MID_CFG_READ_EVENTID = 0x1920,
+ WMI_RS_CFG_DONE_EVENTID = 0x1921,
+ WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
+ WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_TOF_SESSION_END_EVENTID = 0x1991,
+ WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992,
+ WMI_TOF_SET_LCR_EVENTID = 0x1993,
+ WMI_TOF_SET_LCI_EVENTID = 0x1994,
+ WMI_TOF_FTM_PER_DEST_RES_EVENTID = 0x1995,
+ WMI_TOF_CHANNEL_INFO_EVENTID = 0x1996,
+ WMI_SET_CHANNEL_EVENTID = 0x9000,
+ WMI_ASSOC_REQ_EVENTID = 0x9001,
+ WMI_EAPOL_RX_EVENTID = 0x9002,
+ WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
+ WMI_FW_VER_EVENTID = 0x9004,
+ WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
};
/* Events data structures */
@@ -943,10 +1093,85 @@ struct wmi_get_status_done_event {
/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
- u8 major;
- u8 minor;
- __le16 subminor;
- __le16 build;
+ /* FW image version */
+ __le32 fw_major;
+ __le32 fw_minor;
+ __le32 fw_subminor;
+ __le32 fw_build;
+ /* FW image build time stamp */
+ __le32 hour;
+ __le32 minute;
+ __le32 second;
+ __le32 day;
+ __le32 month;
+ __le32 year;
+ /* Boot Loader image version */
+ __le32 bl_major;
+ __le32 bl_minor;
+ __le32 bl_subminor;
+ __le32 bl_build;
+ /* The number of entries in the FW capabilities array */
+ u8 fw_capabilities_len;
+ u8 reserved[3];
+ /* FW capabilities info
+ * Must be the last member of the struct
+ */
+ __le32 fw_capabilities[0];
+} __packed;
+
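The event above ends in a flexible fw_capabilities array sized by fw_capabilities_len. A minimal host-side walk of that trailing array, in plain C; the struct mirror and le32toh() conversion are assumptions, and in-kernel code would use le32_to_cpu() on the __le32 fields instead:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical host-side mirror of the tail of struct wmi_fw_ver_event */
struct fw_ver_tail {
	uint8_t fw_capabilities_len;	/* number of 32-bit capability words */
	uint8_t reserved[3];
	uint32_t fw_capabilities[];	/* little-endian, must stay last */
};

static void dump_fw_capabilities(const struct fw_ver_tail *tail)
{
	for (unsigned int i = 0; i < tail->fw_capabilities_len; i++)
		printf("fw capability word %u: 0x%08x\n",
		       i, (unsigned int)le32toh(tail->fw_capabilities[i]));
}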
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_type {
+ RF_UNKNOWN = 0x00,
+ RF_MARLON = 0x01,
+ RF_SPARROW = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum board_file_rf_type {
+ BF_RF_MARLON = 0x00,
+ BF_RF_SPARROW = 0x01,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+enum rf_status {
+ RF_OK = 0x00,
+ RF_NO_COMM = 0x01,
+ RF_WRONG_BOARD_FILE = 0x02,
+};
+
+/* WMI_GET_RF_STATUS_EVENTID */
+struct wmi_get_rf_status_event {
+ /* enum rf_type */
+ __le32 rf_type;
+ /* attached RFs bit vector */
+ __le32 attached_rf_vector;
+ /* enabled RFs bit vector */
+ __le32 enabled_rf_vector;
+ /* enum rf_status, refers to enabled RFs */
+ u8 rf_status[32];
+ /* enum board_file_rf_type */
+ __le32 board_file_rf_type;
+ /* board file platform type */
+ __le32 board_file_platform_type;
+ /* board file version */
+ __le32 board_file_version;
+ __le32 reserved[2];
+} __packed;
+
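The two bit vectors and the per-RF status bytes combine as follows; a small sketch in plain C, assuming the __le32 fields were already byte-swapped:

#include <stdint.h>
#include <stdio.h>

/* Status bytes are only meaningful for RFs that are both attached and
 * enabled, per the field comments in wmi_get_rf_status_event.
 */
static void dump_rf_vectors(uint32_t attached, uint32_t enabled,
			    const uint8_t rf_status[32])
{
	for (unsigned int rf = 0; rf < 32; rf++) {
		if (!(attached & (1u << rf)))
			continue;
		if (enabled & (1u << rf))
			printf("RF %u: enabled, status %u\n", rf, rf_status[rf]);
		else
			printf("RF %u: attached but disabled\n", rf);
	}
}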
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+enum baseband_type {
+ BASEBAND_UNKNOWN = 0x00,
+ BASEBAND_SPARROW_M_A0 = 0x03,
+ BASEBAND_SPARROW_M_A1 = 0x04,
+ BASEBAND_SPARROW_M_B0 = 0x05,
+ BASEBAND_SPARROW_M_C0 = 0x06,
+ BASEBAND_SPARROW_M_D0 = 0x07,
+};
+
+/* WMI_GET_BASEBAND_TYPE_EVENTID */
+struct wmi_get_baseband_type_event {
+ /* enum baseband_type */
+ __le32 baseband_type;
} __packed;
/* WMI_MAC_ADDR_RESP_EVENTID */
@@ -1410,4 +1635,553 @@ struct wmi_led_cfg_done_event {
__le32 status;
} __packed;
+#define WMI_NUM_MCS (13)
+
+/* Rate search parameters configuration per connection */
+struct wmi_rs_cfg {
+ /* The maximal allowed PER for each MCS
+ * an MCS is considered failed if its PER during RS is higher
+ */
+ u8 per_threshold[WMI_NUM_MCS];
+ /* Number of MPDUs for each MCS
+ * this is the minimal statistic required to make an educated
+ * decision
+ */
+ u8 min_frame_cnt[WMI_NUM_MCS];
+ /* stop threshold [0-100] */
+ u8 stop_th;
+ /* MCS1 stop threshold [0-100] */
+ u8 mcs1_fail_th;
+ u8 max_back_failure_th;
+ /* Debug feature for disabling internal RS trigger (which is
+ * currently triggered by BF Done)
+ */
+ u8 dbg_disable_internal_trigger;
+ __le32 back_failure_mask;
+ __le32 mcs_en_vec;
+} __packed;
+
+/* WMI_RS_CFG_CMDID */
+struct wmi_rs_cfg_cmd {
+ /* connection id */
+ u8 cid;
+ /* enable or disable rate search */
+ u8 rs_enable;
+ /* rate search configuration */
+ struct wmi_rs_cfg rs_cfg;
+} __packed;
+
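For illustration, a hedged sketch of filling the command above with uniform per-MCS thresholds; wil_fill_rs_cfg() is a hypothetical helper, while cpu_to_le32() and GENMASK() are the usual kernel helpers already used around these structs:

/* Hypothetical helper - enable rate search on connection 'cid' with one
 * PER threshold and minimum frame count for every MCS, all MCS allowed.
 */
static void wil_fill_rs_cfg(struct wmi_rs_cfg_cmd *cmd, u8 cid,
			    u8 per_threshold, u8 min_frames)
{
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cid = cid;
	cmd->rs_enable = 1;
	for (i = 0; i < WMI_NUM_MCS; i++) {
		cmd->rs_cfg.per_threshold[i] = per_threshold;
		cmd->rs_cfg.min_frame_cnt[i] = min_frames;
	}
	/* allow every MCS 0..WMI_NUM_MCS-1 in the search */
	cmd->rs_cfg.mcs_en_vec = cpu_to_le32(GENMASK(WMI_NUM_MCS - 1, 0));
}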
+/* WMI_RS_CFG_DONE_EVENTID */
+struct wmi_rs_cfg_done_event {
+ u8 cid;
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[2];
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_CMDID */
+struct wmi_get_detailed_rs_res_cmd {
+ /* connection id */
+ u8 cid;
+ u8 reserved[3];
+} __packed;
+
+/* RS results status */
+enum wmi_rs_results_status {
+ WMI_RS_RES_VALID = 0x00,
+ WMI_RS_RES_INVALID = 0x01,
+};
+
+/* Rate search results */
+struct wmi_rs_results {
+ /* number of sent MPDUs */
+ u8 num_of_tx_pkt[WMI_NUM_MCS];
+ /* number of non-acked MPDUs */
+ u8 num_of_non_acked_pkt[WMI_NUM_MCS];
+ /* RS timestamp */
+ __le32 tsf;
+ /* RS selected MCS */
+ u8 mcs;
+} __packed;
+
+/* WMI_GET_DETAILED_RS_RES_EVENTID */
+struct wmi_get_detailed_rs_res_event {
+ u8 cid;
+ /* enum wmi_rs_results_status */
+ u8 status;
+ /* detailed rs results */
+ struct wmi_rs_results rs_results;
+ u8 reserved[3];
+} __packed;
+
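The two counter arrays above yield a per-MCS packet error rate directly; a one-line sketch:

/* PER in percent for one MCS from the wmi_rs_results counters;
 * returns 0 when nothing was sent on that MCS.
 */
static unsigned int rs_per_percent(uint8_t num_tx, uint8_t num_non_acked)
{
	return num_tx ? (100u * num_non_acked) / num_tx : 0;
}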
+/* broadcast connection ID */
+#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
+
+/* Types of wmi_link_maintain_cfg presets for WMI_LINK_MAINTAIN_CFG_WRITE_CMD */
+enum wmi_link_maintain_cfg_type {
+ /* AP/PCP default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP = 0x00,
+ /* AP/PCP default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP = 0x01,
+ /* STA default normal (non-FST) configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA = 0x02,
+ /* STA default FST configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA = 0x03,
+ /* custom configuration settings */
+ WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM = 0x04,
+ /* number of defined configuration types */
+ WMI_LINK_MAINTAIN_CFG_TYPES_NUM = 0x05,
+};
+
+/* Response status codes for WMI_LINK_MAINTAIN_CFG_WRITE/READ commands */
+enum wmi_link_maintain_cfg_response_status {
+ /* WMI_LINK_MAINTAIN_CFG_WRITE/READ command completed successfully
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_OK = 0x00,
+ /* ERROR due to bad argument in WMI_LINK_MAINTAIN_CFG_WRITE/READ
+ * command request
+ */
+ WMI_LINK_MAINTAIN_CFG_RESPONSE_STATUS_BAD_ARGUMENT = 0x01,
+};
+
+/* Link Loss and Keep Alive configuration */
+struct wmi_link_maintain_cfg {
+ /* link_loss_enable_detectors_vec */
+ __le32 link_loss_enable_detectors_vec;
+ /* detectors check period usec */
+ __le32 check_link_loss_period_usec;
+ /* max allowed tx ageing */
+ __le32 tx_ageing_threshold_usec;
+ /* keep alive period for high SNR */
+ __le32 keep_alive_period_usec_high_snr;
+ /* keep alive period for low SNR */
+ __le32 keep_alive_period_usec_low_snr;
+ /* lower snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_low_db;
+ /* upper snr limit for keep alive period update */
+ __le32 keep_alive_snr_threshold_high_db;
+ /* number of successive bad beacons causing link loss */
+ __le32 bad_beacons_num_threshold;
+ /* SNR limit for bad_beacons_detector */
+ __le32 bad_beacons_snr_threshold_db;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */
+struct wmi_link_maintain_cfg_write_cmd {
+ /* enum wmi_link_maintain_cfg_type_e - type of requested default
+ * configuration to be applied
+ */
+ __le32 cfg_type;
+ /* requested connection ID or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
+ __le32 cid;
+ /* custom configuration settings to be applied (relevant only if
+ * cfg_type==WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM)
+ */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
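A sketch of requesting one of the built-in presets with the command above; fill_lm_preset() is hypothetical, and lm_cfg stays zeroed because the firmware only reads it for WMI_LINK_MAINTAIN_CFG_TYPE_CUSTOM:

/* Hypothetical helper - apply a default link-maintenance preset to one
 * connection, or to all of them via the broadcast CID.
 */
static void fill_lm_preset(struct wmi_link_maintain_cfg_write_cmd *cmd,
			   u32 cid, enum wmi_link_maintain_cfg_type type)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cfg_type = cpu_to_le32(type);
	cmd->cid = cpu_to_le32(cid);	/* or WMI_LINK_MAINTAIN_CFG_CID_BROADCAST */
}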
+/* WMI_LINK_MAINTAIN_CFG_READ_CMDID */
+struct wmi_link_maintain_cfg_read_cmd {
+ /* connection ID which configuration settings are requested */
+ __le32 cid;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID */
+struct wmi_link_maintain_cfg_write_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - write status */
+ __le32 status;
+} __packed;
+
+/* WMI_LINK_MAINTAIN_CFG_READ_DONE_EVENTID */
+struct wmi_link_maintain_cfg_read_done_event {
+ /* requested connection ID */
+ __le32 cid;
+ /* wmi_link_maintain_cfg_response_status_e - read status */
+ __le32 status;
+ /* Retrieved configuration settings */
+ struct wmi_link_maintain_cfg lm_cfg;
+} __packed;
+
+enum wmi_traffic_deferral_status {
+ WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
+ WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
+};
+
+/* WMI_TRAFFIC_DEFERRAL_EVENTID */
+struct wmi_traffic_deferral_event {
+ /* enum wmi_traffic_deferral_status_e */
+ u8 status;
+} __packed;
+
+enum wmi_traffic_resume_status {
+ WMI_TRAFFIC_RESUME_SUCCESS = 0x0,
+ WMI_TRAFFIC_RESUME_FAILED = 0x1,
+};
+
+/* WMI_TRAFFIC_RESUME_EVENTID */
+struct wmi_traffic_resume_event {
+ /* enum wmi_traffic_resume_status_e */
+ u8 status;
+} __packed;
+
+/* Power Save command completion status codes */
+enum wmi_ps_cfg_cmd_status {
+ WMI_PS_CFG_CMD_STATUS_SUCCESS = 0x00,
+ WMI_PS_CFG_CMD_STATUS_BAD_PARAM = 0x01,
+ /* other error */
+ WMI_PS_CFG_CMD_STATUS_ERROR = 0x02,
+};
+
+/* Device Power Save Profiles */
+enum wmi_ps_profile_type {
+ WMI_PS_PROFILE_TYPE_DEFAULT = 0x00,
+ WMI_PS_PROFILE_TYPE_PS_DISABLED = 0x01,
+ WMI_PS_PROFILE_TYPE_MAX_PS = 0x02,
+ WMI_PS_PROFILE_TYPE_LOW_LATENCY_PS = 0x03,
+};
+
+/* WMI_PS_DEV_PROFILE_CFG_CMDID
+ *
+ * Power save profile to be used by the device
+ *
+ * Returned event:
+ * - WMI_PS_DEV_PROFILE_CFG_EVENTID
+ */
+struct wmi_ps_dev_profile_cfg_cmd {
+ /* wmi_ps_profile_type_e */
+ u8 ps_profile;
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_DEV_PROFILE_CFG_EVENTID */
+struct wmi_ps_dev_profile_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
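The command/event pair is a plain request/status handshake; a minimal sketch of checking the returned status:

/* True if the firmware accepted the requested power save profile */
static bool ps_profile_cfg_ok(const struct wmi_ps_dev_profile_cfg_event *evt)
{
	return le32_to_cpu(evt->status) == WMI_PS_CFG_CMD_STATUS_SUCCESS;
}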
+enum wmi_ps_level {
+ WMI_PS_LEVEL_DEEP_SLEEP = 0x00,
+ WMI_PS_LEVEL_SHALLOW_SLEEP = 0x01,
+ /* awake = all PS mechanisms are disabled */
+ WMI_PS_LEVEL_AWAKE = 0x02,
+};
+
+enum wmi_ps_deep_sleep_clk_level {
+ /* 33k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC = 0x00,
+ /* 10k */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_OSC = 0x01,
+ /* RTC, low latency */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_RTC_LT = 0x02,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_XTAL = 0x03,
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_SYSCLK = 0x04,
+ /* Not Applicable */
+ WMI_PS_DEEP_SLEEP_CLK_LEVEL_N_A = 0xFF,
+};
+
+/* Response by the FW to a D3 entry request */
+enum wmi_ps_d3_resp_policy {
+ WMI_PS_D3_RESP_POLICY_DEFAULT = 0x00,
+ /* debug - D3 request is always denied */
+ WMI_PS_D3_RESP_POLICY_DENIED = 0x01,
+ /* debug - D3 request is always approved */
+ WMI_PS_D3_RESP_POLICY_APPROVED = 0x02,
+};
+
+/* Device common power save configurations */
+struct wmi_ps_dev_cfg {
+ /* lowest level of PS allowed while unassociated, enum wmi_ps_level_e
+ */
+ u8 ps_unassoc_min_level;
+ /* lowest deep sleep clock level while nonassoc, enum
+ * wmi_ps_deep_sleep_clk_level_e
+ */
+ u8 ps_unassoc_deep_sleep_min_level;
+ /* lowest level of PS allowed while associated, enum wmi_ps_level_e */
+ u8 ps_assoc_min_level;
+ /* lowest deep sleep clock level while assoc, enum
+ * wmi_ps_deep_sleep_clk_level_e
+ */
+ u8 ps_assoc_deep_sleep_min_level;
+ /* enum wmi_ps_deep_sleep_clk_level_e */
+ u8 ps_assoc_low_latency_ds_min_level;
+ /* enum wmi_ps_d3_resp_policy_e */
+ u8 ps_D3_response_policy;
+ /* BOOL */
+ u8 ps_D3_pm_pme_enabled;
+ /* BOOL */
+ u8 ps_halp_enable;
+ u8 ps_deep_sleep_enter_thresh_msec;
+ /* BOOL */
+ u8 ps_voltage_scaling_en;
+} __packed;
+
+/* WMI_PS_DEV_CFG_CMDID
+ *
+ * Configure common Power Save parameters of the device and all MIDs.
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_EVENTID
+ */
+struct wmi_ps_dev_cfg_cmd {
+ /* Device Power Save configuration to be applied */
+ struct wmi_ps_dev_cfg ps_dev_cfg;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* WMI_PS_DEV_CFG_EVENTID */
+struct wmi_ps_dev_cfg_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_CMDID
+ *
+ * request to retrieve device Power Save configuration
+ * (WMI_PS_DEV_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_DEV_CFG_READ_EVENTID
+ */
+struct wmi_ps_dev_cfg_read_cmd {
+ __le32 reserved;
+} __packed;
+
+/* WMI_PS_DEV_CFG_READ_EVENTID */
+struct wmi_ps_dev_cfg_read_event {
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+ /* Retrieved device Power Save configuration (WMI_PS_DEV_CFG_CMD
+ * params)
+ */
+ struct wmi_ps_dev_cfg dev_ps_cfg;
+ /* alignment to 32b */
+ u8 reserved[2];
+} __packed;
+
+/* Per Mac Power Save configurations */
+struct wmi_ps_mid_cfg {
+ /* Low power RX in BTI is enabled, BOOL */
+ u8 beacon_lprx_enable;
+ /* Sync to sector ID enabled, BOOL */
+ u8 beacon_sync_to_sectorId_enable;
+ /* Low power RX in DTI is enabled, BOOL */
+ u8 frame_exchange_lprx_enable;
+ /* Sleep Cycle while in scheduled PS, 1-31 */
+ u8 scheduled_sleep_cycle_pow2;
+ /* Stay Awake for k BIs every (sleep_cycle - k) BIs, 1-31 */
+ u8 scheduled_num_of_awake_bis;
+ u8 am_to_traffic_load_thresh_mbp;
+ u8 traffic_to_am_load_thresh_mbps;
+ u8 traffic_to_am_num_of_no_traffic_bis;
+ /* BOOL */
+ u8 continuous_traffic_psm;
+ __le16 no_traffic_to_min_usec;
+ __le16 no_traffic_to_max_usec;
+ __le16 snoozing_sleep_interval_milisec;
+ u8 max_no_data_awake_events;
+ /* Trigger WEB after k failed beacons */
+ u8 num_of_failed_beacons_rx_to_trigger_web;
+ /* Trigger BF after k failed beacons */
+ u8 num_of_failed_beacons_rx_to_trigger_bf;
+ /* Trigger SOB after k successful beacons */
+ u8 num_of_successful_beacons_rx_to_trigger_sob;
+} __packed;
+
+/* WMI_PS_MID_CFG_CMDID
+ *
+ * Configure Power Save parameters of a specific MID.
+ * These parameters are relevant for the specific BSS this MID belongs to.
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_EVENTID
+ */
+struct wmi_ps_mid_cfg_cmd {
+ /* MAC ID */
+ u8 mid;
+ /* mid PS configuration to be applied */
+ struct wmi_ps_mid_cfg ps_mid_cfg;
+} __packed;
+
+/* WMI_PS_MID_CFG_EVENTID */
+struct wmi_ps_mid_cfg_event {
+ /* MAC ID */
+ u8 mid;
+ /* alignment to 32b */
+ u8 reserved[3];
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_CMDID
+ *
+ * request to retrieve Power Save configuration of mid
+ * (WMI_PS_MID_CFG_CMD params)
+ *
+ * Returned event:
+ * - WMI_PS_MID_CFG_READ_EVENTID
+ */
+struct wmi_ps_mid_cfg_read_cmd {
+ /* MAC ID */
+ u8 mid;
+ /* alignment to 32b */
+ u8 reserved[3];
+} __packed;
+
+/* WMI_PS_MID_CFG_READ_EVENTID */
+struct wmi_ps_mid_cfg_read_event {
+ /* MAC ID */
+ u8 mid;
+ /* Retrieved MID Power Save configuration(WMI_PS_MID_CFG_CMD params) */
+ struct wmi_ps_mid_cfg mid_ps_cfg;
+ /* wmi_ps_cfg_cmd_status_e */
+ __le32 status;
+} __packed;
+
+#define WMI_AOA_MAX_DATA_SIZE (128)
+
+enum wmi_aoa_meas_status {
+ WMI_AOA_MEAS_SUCCESS = 0x00,
+ WMI_AOA_MEAS_PEER_INCAPABLE = 0x01,
+ WMI_AOA_MEAS_FAILURE = 0x02,
+};
+
+/* WMI_AOA_MEAS_EVENTID */
+struct wmi_aoa_meas_event {
+ u8 mac_addr[WMI_MAC_LEN];
+ /* channels IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
+ u8 channel;
+ /* enum wmi_aoa_meas_type */
+ u8 aoa_meas_type;
+ /* Measurements are from RFs, defined by the mask */
+ __le32 meas_rf_mask;
+ /* enum wmi_aoa_meas_status */
+ u8 meas_status;
+ u8 reserved;
+ /* Length of meas_data in bytes */
+ __le16 length;
+ u8 meas_data[WMI_AOA_MAX_DATA_SIZE];
+} __packed;
+
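Because length is firmware-supplied while meas_data is a fixed buffer, a defensive parser clamps it; a sketch:

/* Usable AoA payload length, clamped to the fixed buffer; negative on a
 * failed measurement.
 */
static int aoa_payload_len(const struct wmi_aoa_meas_event *evt)
{
	unsigned int len = le16_to_cpu(evt->length);

	if (evt->meas_status != WMI_AOA_MEAS_SUCCESS)
		return -1;
	return len > WMI_AOA_MAX_DATA_SIZE ? WMI_AOA_MAX_DATA_SIZE : (int)len;
}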
+/* WMI_TOF_GET_CAPABILITIES_EVENTID */
+struct wmi_tof_get_capabilities_event {
+ u8 ftm_capability;
+ /* maximum supported number of destinations to start TOF */
+ u8 max_num_of_dest;
+ /* maximum supported number of measurements per burst */
+ u8 max_num_of_meas_per_burst;
+ u8 reserved;
+ /* maximum supported multi-burst sessions */
+ __le16 max_multi_bursts_sessions;
+ /* maximum supported FTM burst duration, wmi_tof_burst_duration_e */
+ __le16 max_ftm_burst_duration;
+ /* AOA supported types */
+ __le32 aoa_supported_types;
+} __packed;
+
+enum wmi_tof_session_end_status {
+ WMI_TOF_SESSION_END_NO_ERROR = 0x00,
+ WMI_TOF_SESSION_END_FAIL = 0x01,
+ WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02,
+ WMI_TOF_SESSION_END_ABORTED = 0x03,
+};
+
+/* WMI_TOF_SESSION_END_EVENTID */
+struct wmi_tof_session_end_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* wmi_tof_session_end_status_e */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
+/* Responder FTM Results */
+struct wmi_responder_ftm_res {
+ u8 t1[6];
+ u8 t2[6];
+ u8 t3[6];
+ u8 t4[6];
+ __le16 tod_err;
+ __le16 toa_err;
+ __le16 tod_err_initiator;
+ __le16 toa_err_initiator;
+} __packed;
+
+enum wmi_tof_ftm_per_dest_res_status {
+ WMI_PER_DEST_RES_NO_ERROR = 0x00,
+ WMI_PER_DEST_RES_TX_RX_FAIL = 0x01,
+ WMI_PER_DEST_RES_PARAM_DONT_MATCH = 0x02,
+};
+
+enum wmi_tof_ftm_per_dest_res_flags {
+ WMI_PER_DEST_RES_REQ_START = 0x01,
+ WMI_PER_DEST_RES_BURST_REPORT_END = 0x02,
+ WMI_PER_DEST_RES_REQ_END = 0x04,
+ WMI_PER_DEST_RES_PARAM_UPDATE = 0x08,
+};
+
+/* WMI_TOF_FTM_PER_DEST_RES_EVENTID */
+struct wmi_tof_ftm_per_dest_res_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_ftm_per_dest_res_flags_e */
+ u8 flags;
+ /* wmi_tof_ftm_per_dest_res_status_e */
+ u8 status;
+ /* responder ASAP */
+ u8 responder_asap;
+ /* responder number of FTM per burst */
+ u8 responder_num_ftm_per_burst;
+ /* responder number of FTM burst exponent */
+ u8 responder_num_ftm_bursts_exp;
+ /* responder burst duration, wmi_tof_burst_duration_e */
+ u8 responder_burst_duration;
+ /* responder burst period, indicates the interval between two
+ * consecutive burst instances, in units of 100 ms
+ */
+ __le16 responder_burst_period;
+ /* receive burst counter */
+ __le16 bursts_cnt;
+ /* tsf of responder start burst */
+ __le32 tsf_sync;
+ /* actual received ftm per burst */
+ u8 actual_ftm_per_burst;
+ u8 reserved0[7];
+ struct wmi_responder_ftm_res responder_ftm_res[0];
+} __packed;
+
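The trailing responder_ftm_res[] has no explicit count field, so one plausible way to size it is from the overall event length; a hedged sketch (the firmware interface spec would be authoritative here):

/* Number of responder FTM result entries that fit in an event of
 * 'event_len' bytes, assuming the entries fill the remainder.
 */
static size_t ftm_res_entries(size_t event_len)
{
	size_t hdr = sizeof(struct wmi_tof_ftm_per_dest_res_event);

	if (event_len < hdr)
		return 0;
	return (event_len - hdr) / sizeof(struct wmi_responder_ftm_res);
}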
+enum wmi_tof_channel_info_type {
+ WMI_TOF_CHANNEL_INFO_AOA = 0x00,
+ WMI_TOF_CHANNEL_INFO_LCI = 0x01,
+ WMI_TOF_CHANNEL_INFO_LCR = 0x02,
+ WMI_TOF_CHANNEL_INFO_VENDOR_SPECIFIC = 0x03,
+ WMI_TOF_CHANNEL_INFO_CIR = 0x04,
+ WMI_TOF_CHANNEL_INFO_RSSI = 0x05,
+ WMI_TOF_CHANNEL_INFO_SNR = 0x06,
+ WMI_TOF_CHANNEL_INFO_DEBUG = 0x07,
+};
+
+/* WMI_TOF_CHANNEL_INFO_EVENTID */
+struct wmi_tof_channel_info_event {
+ /* FTM session ID */
+ __le32 session_id;
+ /* destination MAC address */
+ u8 dst_mac[WMI_MAC_LEN];
+ /* wmi_tof_channel_info_type_e */
+ u8 type;
+ /* data report length */
+ u8 len;
+ /* data report payload */
+ u8 report[0];
+} __packed;
+
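A sketch of dispatching the report by type, with 'len' bytes of payload following the fixed header; the handler shape is an assumption:

/* Route a TOF channel-info report by type; unknown and debug types are
 * simply ignored in this sketch.
 */
static void handle_tof_channel_info(const struct wmi_tof_channel_info_event *e)
{
	switch (e->type) {
	case WMI_TOF_CHANNEL_INFO_LCI:
	case WMI_TOF_CHANNEL_INFO_LCR:
		/* e->report[0 .. e->len - 1] holds the raw report */
		break;
	default:
		break;
	}
}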
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index f549c25608d6..03404cbe9237 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1101,6 +1101,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 2628d5e12c64..748eaa68cf07 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -3884,11 +3884,11 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", &pmksa->bssid);
+ brcmf_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid);
npmk = le32_to_cpu(cfg->pmk_list.npmk);
for (i = 0; i < npmk; i++)
- if (!memcmp(&pmksa->bssid, &pmk[i].bssid, ETH_ALEN))
+ if (!memcmp(pmksa->bssid, pmk[i].bssid, ETH_ALEN))
break;
if ((npmk > 0) && (i < npmk)) {
@@ -4527,7 +4527,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
(u8 *)&settings->beacon.head[ie_offset],
settings->beacon.head_len - ie_offset,
WLAN_EID_SSID);
- if (!ssid_ie)
+ if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN)
return -EINVAL;
memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
@@ -5635,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
ifevent->action, ifevent->flags, ifevent->ifidx,
ifevent->bsscfgidx);
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
event->action = ifevent->action;
vif = event->vif;
@@ -5643,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
case BRCMF_E_IF_ADD:
/* waiting process may have timed out */
if (!cfg->vif_event.vif) {
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
return -EBADF;
}
@@ -5654,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp,
ifp->ndev->ieee80211_ptr = &vif->wdev;
SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy));
}
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
wake_up(&event->vif_wq);
return 0;
case BRCMF_E_IF_DEL:
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
/* event may not be upon user request */
if (brcmf_cfg80211_vif_event_armed(cfg))
wake_up(&event->vif_wq);
return 0;
case BRCMF_E_IF_CHANGE:
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
wake_up(&event->vif_wq);
return 0;
default:
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
break;
}
return -EINVAL;
@@ -5792,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
static void init_vif_event(struct brcmf_cfg80211_vif_event *event)
{
init_waitqueue_head(&event->vif_wq);
- mutex_init(&event->vif_event_lock);
+ spin_lock_init(&event->vif_event_lock);
}
static s32 brcmf_dongle_roam(struct brcmf_if *ifp)
@@ -6691,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event,
{
u8 evt_action;
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
evt_action = event->action;
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
return evt_action == action;
}
@@ -6702,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg,
{
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
event->vif = vif;
event->action = 0;
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
}
bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
@@ -6713,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg)
struct brcmf_cfg80211_vif_event *event = &cfg->vif_event;
bool armed;
- mutex_lock(&event->vif_event_lock);
+ spin_lock(&event->vif_event_lock);
armed = event->vif != NULL;
- mutex_unlock(&event->vif_event_lock);
+ spin_unlock(&event->vif_event_lock);
return armed;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 7d77f869b7f1..8889832c17e0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -227,7 +227,7 @@ struct escan_info {
*/
struct brcmf_cfg80211_vif_event {
wait_queue_head_t vif_wq;
- struct mutex vif_event_lock;
+ spinlock_t vif_event_lock;
u8 action;
struct brcmf_cfg80211_vif *vif;
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 8d16f0204985..65e8c8766441 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -743,7 +743,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
* serious troublesome side effects. The p2p module will clean
* up the ifp if needed.
*/
- brcmf_p2p_ifp_removed(ifp);
+ brcmf_p2p_ifp_removed(ifp, rtnl_locked);
kfree(ifp);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 66f942f7448e..de19c7c92bc6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2297,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
return err;
}
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked)
{
struct brcmf_cfg80211_info *cfg;
struct brcmf_cfg80211_vif *vif;
@@ -2306,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp)
vif = ifp->vif;
cfg = wdev_to_cfg(&vif->wdev);
cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
- rtnl_lock();
+ if (!rtnl_locked)
+ rtnl_lock();
cfg80211_unregister_wdev(&vif->wdev);
- rtnl_unlock();
+ if (!rtnl_locked)
+ rtnl_unlock();
brcmf_free_vif(vif);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
index a3bd18c2360b..8ce9447533ef 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h
@@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name,
int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
enum brcmf_fil_p2p_if_types if_type);
-void brcmf_p2p_ifp_removed(struct brcmf_if *ifp);
+void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked);
int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
int brcmf_p2p_scan_prep(struct wiphy *wiphy,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 68ab3ac15650..b892dac70f4b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -313,6 +313,7 @@ struct rte_console {
#define KSO_WAIT_US 50
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
+#define BRCMF_SDIO_MAX_ACCESS_ERRORS 5
/*
* Conversion of 802.1D priority to precedence level
@@ -677,6 +678,7 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
u8 wr_val = 0, rd_val, cmp_val, bmask;
int err = 0;
+ int err_cnt = 0;
int try_cnt = 0;
brcmf_dbg(TRACE, "Enter: on=%d\n", on);
@@ -712,9 +714,14 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
*/
rd_val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
&err);
- if (((rd_val & bmask) == cmp_val) && !err)
+ if (!err) {
+ if ((rd_val & bmask) == cmp_val)
+ break;
+ err_cnt = 0;
+ }
+ /* bail out upon subsequent access errors */
+ if (err && (err_cnt++ > BRCMF_SDIO_MAX_ACCESS_ERRORS))
break;
-
udelay(KSO_WAIT_US);
brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
wr_val, &err);
@@ -3757,7 +3764,8 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
u32 val, rev;
val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
+ sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
index a10f35c5eb3d..fe6755944b7b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c
@@ -19,6 +19,7 @@
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "tracepoint.h"
+#include "debug.h"
void __brcmf_err(const char *func, const char *fmt, ...)
{
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index fa26619a7945..2f978a39b58a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1458,11 +1458,15 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf)
#define BRCMF_USB_DEVICE(dev_id) \
{ USB_DEVICE(BRCM_USB_VENDOR_ID_BROADCOM, dev_id) }
+#define LINKSYS_USB_DEVICE(dev_id) \
+ { USB_DEVICE(BRCM_USB_VENDOR_ID_LINKSYS, dev_id) }
+
static struct usb_device_id brcmf_usb_devid_table[] = {
BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID),
BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID),
BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID),
BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID),
+ LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID),
{ USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) },
/* special entry for device with firmware loaded and running */
BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 3cc42bef6245..d0407d9ad782 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -22,6 +22,7 @@
#define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c
#define BRCM_USB_VENDOR_ID_LG 0x043e
+#define BRCM_USB_VENDOR_ID_LINKSYS 0x13b1
#define BRCM_PCIE_VENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
/* Chipcommon Core Chip IDs */
@@ -58,6 +59,7 @@
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
+#define BRCM_USB_43235_LINKSYS_DEVICE_ID 0x0039
#define BRCM_USB_43236_DEVICE_ID 0xbd17
#define BRCM_USB_43242_DEVICE_ID 0xbd1f
#define BRCM_USB_43242_LG_DEVICE_ID 0x3101
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 209dc9988455..4db327a95414 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -2671,7 +2671,7 @@ const struct il_ops il3945_ops = {
.send_led_cmd = il3945_send_led_cmd,
};
-static struct il_cfg il3945_bg_cfg = {
+static const struct il_cfg il3945_bg_cfg = {
.name = "3945BG",
.fw_name_pre = IL3945_FW_PRE,
.ucode_api_max = IL3945_UCODE_API_MAX,
@@ -2700,7 +2700,7 @@ static struct il_cfg il3945_bg_cfg = {
},
};
-static struct il_cfg il3945_abg_cfg = {
+static const struct il_cfg il3945_abg_cfg = {
.name = "3945ABG",
.fw_name_pre = IL3945_FW_PRE,
.ucode_api_max = IL3945_UCODE_API_MAX,
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index 726ede391cb9..3bba521d2cd9 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -1320,7 +1320,7 @@ struct il_priv {
u64 timestamp;
union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+#if IS_ENABLED(CONFIG_IWL3945)
struct {
void *shared_virt;
dma_addr_t shared_phys;
@@ -1351,7 +1351,7 @@ struct il_priv {
} _3945;
#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+#if IS_ENABLED(CONFIG_IWL4965)
struct {
struct il_rx_phy_res last_phy_res;
bool last_phy_res_valid;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index fbaf705f3fa7..1fec6afbe041 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -72,15 +72,15 @@
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
+#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
#define IWL9260_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
-#define IWL9260LC_FW_PRE "iwlwifi-9260-th-a0-lc-a0-"
+#define IWL9000LC_FW_PRE "iwlwifi-9000-pu-a0-lc-a0-"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
#define IWL9260_MODULE_FIRMWARE(api) \
IWL9260_FW_PRE "-" __stringify(api) ".ucode"
-#define IWL9260LC_MODULE_FIRMWARE(api) \
- IWL9260LC_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000LC_MODULE_FIRMWARE(api) \
+ IWL9000LC_FW_PRE "-" __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
@@ -146,41 +146,62 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.mac_addr_from_csr = true, \
.rf_id = true
+const struct iwl_cfg iwl9160_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9160",
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
const struct iwl_cfg iwl9260_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .name = "Intel(R) Dual Band Wireless AC 9260",
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9270_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9270",
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
};
/*
* TODO the struct below is for internal testing only this should be
* removed by EO 2016~
*/
-const struct iwl_cfg iwl9260lc_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9260LC_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
-};
-
-const struct iwl_cfg iwl5165_2ac_cfg = {
- .name = "Intel(R) Dual Band Wireless AC 5165",
- .fw_name_pre = IWL9000_FW_PRE,
- IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
+const struct iwl_cfg iwl9000lc_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9000",
+ .fw_name_pre = IWL9000LC_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
};
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL9260LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000LC_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 423b23320d4f..7008319532ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -449,9 +449,11 @@ extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl9000lc_2ac_cfg;
+extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
-extern const struct iwl_cfg iwl9260lc_2ac_cfg;
-extern const struct iwl_cfg iwl5165_2ac_cfg;
+extern const struct iwl_cfg iwl9270_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 1d6f5d21a663..dd75ea7c936e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -77,7 +77,6 @@
*/
#define FH_MEM_LOWER_BOUND (0x1000)
#define FH_MEM_UPPER_BOUND (0x2000)
-#define TFH_MEM_LOWER_BOUND (0xA06000)
/**
* Keep-Warm (KW) buffer base address.
@@ -120,7 +119,7 @@
#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
/* a000 TFD table address, 64 bit */
-#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00)
+#define TFH_TFDQ_CBB_TABLE (0x1C00)
/* Find TFD CB base pointer for given queue */
static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
@@ -156,7 +155,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* In case of DRAM read address which is not aligned to 128B, the TFH will
* enable transfer size which doesn't cross 64B DRAM address boundary.
*/
-#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40)
+#define TFH_TRANSFER_MODE (0x1F40)
#define TFH_TRANSFER_MAX_PENDING_REQ 0xc
#define TFH_CHUNK_SIZE_128 BIT(8)
#define TFH_CHUNK_SPLIT_MODE BIT(10)
@@ -167,7 +166,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* the start of the TFD first TB.
* In case of a DRAM Tx CMD update the TFH will update PN and Key ID
*/
-#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48)
+#define TFH_TXCMD_UPDATE_CFG (0x1F48)
/*
* Controls TX DMA operation
*
@@ -181,22 +180,22 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* set to 1 - interrupt is sent to the driver
* Bit 0: Indicates the snoop configuration
*/
-#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60)
+#define TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
#define TFH_SRV_DMA_SNOOP BIT(0)
#define TFH_SRV_DMA_TO_DRIVER BIT(24)
#define TFH_SRV_DMA_START BIT(31)
/* Defines the DMA SRAM write start address to transfer a data block */
-#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64)
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
/* Defines the 64bits DRAM start address to read the DMA data block from */
-#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68)
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
/*
* Defines the number of bytes to transfer from DRAM to SRAM.
* Note that this register may be configured with non-dword aligned size.
*/
-#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70)
+#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
/**
* Rx SRAM Control and Status Registers (RSCSR)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 459bf736fd5b..406ef301b8ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -302,22 +302,17 @@
#define OSC_CLK_FORCE_CONTROL (0x8)
#define FH_UCODE_LOAD_STATUS (0x1AF0)
-#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
-enum secure_load_status_reg {
- LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
- LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
- LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
- LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
-};
-#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
-#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
+/*
+ * Replacing FH_UCODE_LOAD_STATUS
+ * This register is written by the driver and is read by uCode during boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define UREG_UCODE_LOAD_STATUS (0xa05c40)
+
#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
-#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
-#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index d1c4fb849111..6c8e3ca79323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -433,25 +433,42 @@ struct iwl_mvm_rm_sta_cmd {
} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwl_sta_key_flag
+ * @igtk:
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that support IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+ __le32 ctrl_flags;
+ u8 igtk[16];
+ u8 k1[16];
+ u8 k2[16];
+ __le32 key_id;
+ __le32 sta_id;
+ __le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/**
* struct iwl_mvm_mgmt_mcast_key_cmd
* ( MGMT_MCAST_KEY = 0x1f )
* @ctrl_flags: %iwl_sta_key_flag
- * @IGTK:
- * @K1: unused
- * @K2: unused
+ * @igtk: IGTK master key
* @sta_id: station ID that support IGTK
* @key_id:
* @receive_seq_cnt: initial RSC/PN needed for replay check
*/
struct iwl_mvm_mgmt_mcast_key_cmd {
__le32 ctrl_flags;
- u8 IGTK[16];
- u8 K1[16];
- u8 K2[16];
+ u8 igtk[32];
__le32 key_id;
__le32 sta_id;
__le64 receive_seq_cnt;
-} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
struct iwl_mvm_wep_key {
u8 key_index;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index 4144623e1616..6b4c63a5e625 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -675,13 +675,21 @@ static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm_tx_resp *tx_resp)
tx_resp->frame_count) & 0xfff;
}
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+ SCD_CFG_DISABLE_QUEUE = 0x0,
+ SCD_CFG_ENABLE_QUEUE = 0x1,
+ SCD_CFG_UPDATE_QUEUE_TID = 0x2,
+};
+
/**
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
* @token:
* @sta_id: station id
* @tid:
 * @scd_queue: scheduler queue to configure
- * @enable: 1 queue enable, 0 queue disable
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ * Value is one of %iwl_scd_cfg_actions options
* @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: %enum iwl_mvm_tx_fifo
* @window: BA window size
@@ -692,7 +700,7 @@ struct iwl_scd_txq_cfg_cmd {
u8 sta_id;
u8 tid;
u8 scd_queue;
- u8 enable;
+ u8 action;
u8 aggregate;
u8 tx_fifo;
u8 window;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 71076f02796e..57b574b2a092 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -482,13 +482,17 @@ struct iwl_nvm_access_cmd {
* @block_size: the block size in powers of 2
* @block_num: number of blocks specified in the command.
* @device_phy_addr: virtual addresses from device side
+ * 32 bit address for API version 1, 64 bit address for API version 2.
*/
struct iwl_fw_paging_cmd {
__le32 flags;
__le32 block_size;
__le32 block_num;
- __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
-} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+ union {
+ __le32 addr32[NUM_OF_FW_PAGING_BLOCKS];
+ __le64 addr64[NUM_OF_FW_PAGING_BLOCKS];
+ } device_phy_addr;
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
/*
* Fw items ID's
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 1abcabb9b6cd..46b52bf705fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -960,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
}
mvm->fw_dbg_conf = conf_id;
- return ret;
+
+ return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
index f7dff7612c9c..e9f1be9da7d4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h
@@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
{
u32 trig_vif = le32_to_cpu(trig->vif_type);
- return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif;
+ return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+ ieee80211_vif_type_p2p(vif) == trig_vif;
}
static inline bool
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 7e0cdbf8bf74..47e8e70dcfac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -385,9 +385,7 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
- int blk_idx;
- __le32 dev_phy_addr;
- struct iwl_fw_paging_cmd fw_paging_cmd = {
+ struct iwl_fw_paging_cmd paging_cmd = {
.flags =
cpu_to_le32(PAGING_CMD_IS_SECURED |
PAGING_CMD_IS_ENABLED |
@@ -396,18 +394,32 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
.block_num = cpu_to_le32(mvm->num_of_paging_blk),
};
+ int blk_idx, size = sizeof(paging_cmd);
+
+ /* A bit hard coded - but this is the old API and will be deprecated */
+ if (!iwl_mvm_has_new_tx_api(mvm))
+ size -= NUM_OF_FW_PAGING_BLOCKS * 4;
	/* loop for all paging blocks + CSS block */
for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
- dev_phy_addr =
- cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
- PAGE_2_EXP_SIZE);
- fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+ dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
+
+ addr = addr >> PAGE_2_EXP_SIZE;
+
+ if (iwl_mvm_has_new_tx_api(mvm)) {
+ __le64 phy_addr = cpu_to_le64(addr);
+
+ paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
+ } else {
+ __le32 phy_addr = cpu_to_le32(addr);
+
+ paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
+ }
}
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
IWL_ALWAYS_LONG_GROUP, 0),
- 0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+ 0, size, &paging_cmd);
}
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6d6064534d59..9506e658abd3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -465,7 +465,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4);
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
hw->wiphy->cipher_suites = mvm->ciphers;
@@ -490,6 +490,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->ciphers[hw->wiphy->n_cipher_suites] =
WLAN_CIPHER_SUITE_AES_CMAC;
hw->wiphy->n_cipher_suites++;
+ if (iwl_mvm_has_new_rx_api(mvm)) {
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_128;
+ hw->wiphy->n_cipher_suites++;
+ mvm->ciphers[hw->wiphy->n_cipher_suites] =
+ WLAN_CIPHER_SUITE_BIP_GMAC_256;
+ hw->wiphy->n_cipher_suites++;
+ }
}
/* currently FW API supports only one optional cipher scheme */
@@ -624,6 +632,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_DYNAMIC_SMPS |
NL80211_FEATURE_STATIC_SMPS |
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
@@ -2746,6 +2755,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
break;
case WLAN_CIPHER_SUITE_WEP40:
@@ -2779,9 +2790,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
* GTK on AP interface is a TX-only key, return 0;
* on IBSS they're per-station and because we're lazy
* we don't support them for RX, so do the same.
- * CMAC in AP/IBSS modes must be done in software.
+ * CMAC/GMAC in AP/IBSS modes must be done in software.
*/
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
ret = -EOPNOTSUPP;
else
ret = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index b4fc86d5d7ef..2e30990c717e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -467,6 +467,8 @@ struct iwl_mvm_vif {
static inline struct iwl_mvm_vif *
iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
{
+ if (!vif)
+ return NULL;
return (void *)vif->drv_priv;
}
@@ -697,6 +699,10 @@ struct iwl_mvm_baid_data {
* it. In this state, when a new queue is needed to be allocated but no
* such free queue exists, an inactive queue might be freed and given to
* the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ * This is the state of a queue that has had traffic pass through it, but
+ * needs to be reconfigured for some reason, e.g. the queue needs to
+ * become unshared and have aggregation re-enabled on it.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
@@ -704,10 +710,11 @@ enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
IWL_MVM_QUEUE_INACTIVE,
+ IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
-#define IWL_MVM_NUM_CIPHERS 8
+#define IWL_MVM_NUM_CIPHERS 10
struct iwl_mvm {
/* for logger access */
@@ -767,6 +774,7 @@ struct iwl_mvm {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+ u8 txq_tid; /* The TID "owner" of this queue */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
@@ -1122,6 +1130,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
}
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+ return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+ (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
{
bool nvm_lar = mvm->nvm_data->lar_enabled;
@@ -1192,6 +1212,12 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
}
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+ /* TODO - replace with TLV once defined */
+ return mvm->trans->cfg->use_tfh;
+}
+
static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
{
#ifdef CONFIG_THERMAL
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index df6c32caa5f0..08d8a8abb918 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -132,7 +132,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
IEEE80211_CCMP_PN_LEN) <= 0)
return -1;
- memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+ if (!(stats->flag & RX_FLAG_AMSDU_MORE))
+ memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
stats->flag |= RX_FLAG_PN_VALIDATED;
return 0;
@@ -883,6 +884,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u8 *qc = ieee80211_get_qos_ctl(hdr);
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ if (!(desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_LAST_SUBFRAME))
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
}
if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
iwl_mvm_agg_rx_received(mvm, baid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 3130b9c68a74..30fc3afe5241 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
continue;
+ /* Don't try to take queues being reconfigured */
+ if (mvm->queue_info[i].status ==
+ IWL_MVM_QUEUE_RECONFIGURING)
+ continue;
+
ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
}
@@ -501,31 +506,37 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
queue = ac_to_queue[IEEE80211_AC_VO];
/* Make sure queue found (or not) is legal */
- if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
- queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
- (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
- queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
- (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+ if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+ !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
IWL_ERR(mvm, "No DATA queues available to share\n");
- queue = -ENOSPC;
+ return -ENOSPC;
+ }
+
+ /* Make sure the queue isn't in the middle of being reconfigured */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+ IWL_ERR(mvm,
+ "TXQ %d is in the middle of re-config - try again\n",
+ queue);
+ return -EBUSY;
}
return queue;
}
/*
- * If a given queue has a higher AC than the TID stream that is being added to
- * it, the queue needs to be redirected to the lower AC. This function does that
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does that
* in such a case, otherwise - if no redirection required - it does nothing,
* unless the %force param is true.
*/
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
- int ac, int ssn, unsigned int wdg_timeout,
- bool force)
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 0,
+ .action = SCD_CFG_DISABLE_QUEUE,
};
bool shared_queue;
unsigned long mq;
@@ -551,11 +562,12 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+ cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->queue_info[queue].hw_queue_to_mac80211;
shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
spin_unlock_bh(&mvm->queue_info_lock);
- IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+ IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
/* Stop MAC queues and wait for this queue to empty */
@@ -580,6 +592,11 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
ssn, wdg_timeout);
+ /* Update the TID "owner" of the queue */
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].txq_tid = tid;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
/* Redirect to lower AC */
@@ -709,7 +726,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
tid, cfg.sta_id);
- return -ENOSPC;
+ return queue;
}
/*
@@ -728,7 +745,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (using_inactive_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 0,
+ .action = SCD_CFG_DISABLE_QUEUE,
};
u8 ac;
@@ -738,11 +755,13 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
ac = mvm->queue_info[queue].mac80211_ac;
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+ cmd.tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);
/* Disable the queue */
- iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
- true);
+ if (disable_agg_tids)
+ iwl_mvm_invalidate_sta_queue(mvm, queue,
+ disable_agg_tids, false);
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
&cmd);
@@ -758,6 +777,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return ret;
}
+
+ /* If TXQ is allocated to another STA, update removal in FW */
+ if (cmd.sta_id != mvmsta->sta_id)
+ iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
}
IWL_DEBUG_TX_QUEUES(mvm,
@@ -827,6 +850,119 @@ out_err:
return ret;
}
+static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .action = SCD_CFG_UPDATE_QUEUE_TID,
+ };
+ s8 sta_id;
+ int tid;
+ unsigned long tid_bitmap;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+ return;
+
+ /* Find any TID for queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ cmd.tid = tid;
+ cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+ queue, ret);
+ else
+ IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+ queue, tid);
+}
+
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ s8 sta_id;
+ int tid = -1;
+ unsigned long tid_bitmap;
+ unsigned int wdg_timeout;
+ int ssn;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ sta_id = mvm->queue_info[queue].ra_sta_id;
+ tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* Find TID for queue, and make sure it is the only one on the queue */
+ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+ if (tid_bitmap != BIT(tid)) {
+ IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+ queue, tid_bitmap);
+ return;
+ }
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+ tid);
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+ ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+ tid_to_mac80211_ac[tid], ssn,
+ wdg_timeout, true);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+ return;
+ }
+
+ /* If aggs should be turned back on - do it */
+ if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+ struct iwl_mvm_add_sta_cmd cmd = {0};
+
+ mvmsta->tid_disable_agg &= ~BIT(tid);
+
+ cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+ cmd.sta_id = mvmsta->sta_id;
+ cmd.add_modify = STA_MODE_MODIFY;
+ cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+ cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+ iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+ if (!ret) {
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "TXQ #%d is now aggregated again\n",
+ queue);
+
+ /* Mark queue internally as aggregating again */
+ iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+ }
+ }
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+ spin_unlock_bh(&mvm->queue_info_lock);
+}
+
static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +1030,42 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
unsigned long deferred_tid_traffic;
- int sta_id, tid;
+ int queue, sta_id, tid;
/* Check inactivity of queues */
iwl_mvm_inactivity_check(mvm);
mutex_lock(&mvm->mutex);
+ /* Reconfigure queues requiring reconfiguration */
+ for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+ bool reconfig;
+ bool change_owner;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ reconfig = (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_RECONFIGURING);
+
+ /*
+ * We need to take into account a situation in which a TXQ was
+ * allocated to TID x, and then turned shared by adding TIDs y
+ * and z. If TID x becomes inactive and is removed from the TXQ,
+ * ownership must be given to one of the remaining TIDs.
+ * This is mainly because if TID x continues - a new queue can't
+ * be allocated for it as long as it is an owner of another TXQ.
+ */
+ change_owner = !(mvm->queue_info[queue].tid_bitmap &
+ BIT(mvm->queue_info[queue].txq_tid)) &&
+ (mvm->queue_info[queue].status ==
+ IWL_MVM_QUEUE_SHARED);
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ if (reconfig)
+ iwl_mvm_unshare_queue(mvm, queue);
+ else if (change_owner)
+ iwl_mvm_change_queue_owner(mvm, queue);
+ }
+
/* Go over all stations with deferred traffic */
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
IWL_MVM_STATION_COUNT) {
@@ -963,6 +1128,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
return 0;
}
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+ int i;
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .sta_id = mvm_sta->sta_id,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+ /* Make sure reserved queue is still marked as such (or allocated) */
+ mvm->queue_info[mvm_sta->reserved_queue].status =
+ IWL_MVM_QUEUE_RESERVED;
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+ int txq_id = tid_data->txq_id;
+ int ac;
+ u8 mac_queue;
+
+ if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+ continue;
+
+ skb_queue_head_init(&tid_data->deferred_tx_frames);
+
+ ac = tid_to_mac80211_ac[i];
+ mac_queue = mvm_sta->vif->hw_queue[ac];
+
+ cfg.tid = i;
+ cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
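+ /* Queues in the DQA data range and the BSS client queue may carry aggregations */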
+ cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Re-mapping sta %d tid %d to queue %d\n",
+ mvm_sta->sta_id, i, txq_id);
+
+ iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
+ IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+ &cfg, wdg_timeout);
+
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ }
+
+ atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
+}
+
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -985,6 +1205,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
spin_lock_init(&mvm_sta->lock);
+ /* In DQA mode, if this is a HW restart, re-alloc existing queues */
+ if (iwl_mvm_is_dqa_supported(mvm) &&
+ test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+ goto update_fw;
+ }
+
mvm_sta->sta_id = sta_id;
mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color);
@@ -1048,6 +1275,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
goto err;
}
+update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
if (ret)
goto err;
@@ -1956,7 +2184,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
- spin_lock_bh(&mvm->queue_info_lock);
+ spin_lock(&mvm->queue_info_lock);
/*
* Note the possible cases:
@@ -1967,14 +2195,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* non-DQA mode, since the TXQ hasn't yet been allocated
*/
txq_id = mvmsta->tid_data[tid].txq_id;
- if (!iwl_mvm_is_dqa_supported(mvm) ||
+ if (iwl_mvm_is_dqa_supported(mvm) &&
+ unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+ ret = -ENXIO;
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Can't start tid %d agg on shared queue!\n",
+ tid);
+ goto release_locks;
+ } else if (!iwl_mvm_is_dqa_supported(mvm) ||
mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
mvm->first_agg_queue,
mvm->last_agg_queue);
if (txq_id < 0) {
ret = txq_id;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
}
@@ -1982,7 +2216,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* TXQ hasn't yet been enabled, so mark it only as reserved */
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
}
- spin_unlock_bh(&mvm->queue_info_lock);
+
+ spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n",
@@ -2006,8 +2241,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
ret = 0;
+ goto out;
release_locks:
+ spin_unlock(&mvm->queue_info_lock);
+out:
spin_unlock_bh(&mvmsta->lock);
return ret;
@@ -2023,6 +2261,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, ret;
bool alloc_queue = true;
+ enum iwl_mvm_queue_status queue_status;
u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2287,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+ spin_lock_bh(&mvm->queue_info_lock);
+ queue_status = mvm->queue_info[queue].status;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
/* In DQA mode, the existing queue might need to be reconfigured */
if (iwl_mvm_is_dqa_supported(mvm)) {
- spin_lock_bh(&mvm->queue_info_lock);
/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
alloc_queue = false;
- spin_unlock_bh(&mvm->queue_info_lock);
/*
* Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2330,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
&cfg, wdg_timeout);
- ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
- if (ret)
- return -EIO;
+ /* Send ADD_STA command to enable aggs only if the queue isn't shared */
+ if (queue_status != IWL_MVM_QUEUE_SHARED) {
+ ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+ if (ret)
+ return -EIO;
+ }
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2367,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u16 txq_id;
int err;
-
/*
* If mac80211 is cleaning its state, then say that we finished since
* our state has been cleared anyway.
@@ -2152,6 +2395,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) {
@@ -2412,9 +2656,15 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
/* verify the key details match the required command's expectations */
- if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
- (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
- (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
+ if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+ (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+ (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
+ return -EINVAL;
+
+ if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
+ keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
return -EINVAL;
igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
@@ -2430,11 +2680,18 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_AES_CMAC:
igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+ break;
default:
return -EINVAL;
}
- memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
+ memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
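+ /* BIP-GMAC-256 uses a 32-byte key, so flag the longer key for the firmware */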
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ igtk_cmd.ctrl_flags |=
+ cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -2449,6 +2706,19 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
remove_key ? "removing" : "installing",
igtk_cmd.sta_id);
+ if (!iwl_mvm_has_new_rx_api(mvm)) {
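+ /* Firmware without the new RX API only understands the v1 command layout */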
+ struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+ .ctrl_flags = igtk_cmd.ctrl_flags,
+ .key_id = igtk_cmd.key_id,
+ .sta_id = igtk_cmd.sta_id,
+ .receive_seq_cnt = igtk_cmd.receive_seq_cnt
+ };
+
+ memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
+ sizeof(igtk_cmd_v1.igtk));
+ return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+ sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+ }
return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
sizeof(igtk_cmd), &igtk_cmd);
}
@@ -2573,7 +2843,9 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
}
sta_id = mvm_sta->sta_id;
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
goto end;
}
@@ -2659,7 +2931,9 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
keyconf->keyidx, sta_id);
- if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index bbc1cab2c3bf..709542bbfce5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+ int ac, int ssn, unsigned int wdg_timeout,
+ bool force);
+
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index c6585ab48df3..f9150249af76 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -513,6 +513,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
+ /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
+ * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
+ * queue. STATION (HS2.0) uses the auxiliary context of the FW,
+ * and hence needs to be sent on the aux queue
+ */
+ if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+ IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+
memcpy(&info, skb->cb, sizeof(info));
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
@@ -526,16 +535,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)0;
- /*
- * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
- * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
- * queue. STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue
- */
- if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
- info.control.vif->type == NL80211_IFTYPE_STATION)
- IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
-
queue = info.hw_queue;
/*
@@ -838,6 +837,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
}
}
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+ unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+ unsigned long now = jiffies;
+ int tid;
+
+ for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
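+ /* A TID has timed out once its last frame is older than the DQA queue timeout */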
+ if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+ IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+ return true;
+ }
+
+ return false;
+}
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -940,7 +955,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
-
}
/* If we are here - TXQ exists and needs to be re-activated */
@@ -953,8 +967,25 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id);
}
- /* Keep track of the time of the last frame for this RA/TID */
- mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ /* Keep track of the time of the last frame for this RA/TID */
+ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+ /*
+ * If we have timed-out TIDs - schedule the worker that will
+ * reconfig the queues and update them
+ *
+ * Note that the mvm->queue_info_lock isn't being taken here in
+ * order to not serialize the TX flow. This isn't dangerous
+ * because scheduling mvm->add_stream_wk can't ruin the state,
+ * and if we DON'T schedule it due to some race condition then
+ * next TX we get here we will.
+ */
+ if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED &&
+ iwl_mvm_txq_should_update(mvm, txq_id)))
+ schedule_work(&mvm->add_stream_wk);
+ }
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 68f4e7fdfc11..7c138fedcb19 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -610,7 +610,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 1,
+ .action = SCD_CFG_ENABLE_QUEUE,
.window = frame_limit,
.sta_id = sta_id,
.ssn = cpu_to_le16(ssn),
@@ -669,6 +669,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
tid_to_mac80211_ac[cfg->tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
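+ /* Record the TID that owns this queue; later SCD config commands need it */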
+ mvm->queue_info[queue].txq_tid = cfg->tid;
}
IWL_DEBUG_TX_QUEUES(mvm,
@@ -682,7 +684,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
if (enable_queue) {
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 1,
+ .action = SCD_CFG_ENABLE_QUEUE,
.window = cfg->frame_limit,
.sta_id = cfg->sta_id,
.ssn = cpu_to_le16(ssn),
@@ -709,7 +711,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
- .enable = 0,
+ .action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = true;
int ret;
@@ -744,8 +746,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--;
- cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
- if (!cmd.enable)
+ cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
+ SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+ if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
@@ -755,12 +758,13 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_to_mac80211);
/* If the queue is still enabled - nothing left to do in this func */
- if (cmd.enable) {
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock);
return;
}
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+ cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
@@ -1131,7 +1135,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
- /* TODO: if queue was shared - need to re-enable AGGs */
+ /* If the queue is marked as shared - "unshare" it */
+ if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+ mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+ IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+ queue);
+ }
}
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 78cf9a7f3eac..c6e24fb286be 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -502,20 +502,27 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
/* 9000 Series */
+ {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
@@ -608,7 +615,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
- const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
int ret;
@@ -637,11 +643,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (iwl_trans->cfg->rf_id) {
- if (cfg == &iwl9260_2ac_cfg)
- cfg_9260lc = &iwl9260lc_2ac_cfg;
- if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
- cfg = cfg_9260lc;
- iwl_trans->cfg = cfg_9260lc;
+ if (cfg == &iwl9460_2ac_cfg &&
+ iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+ cfg = &iwl9000lc_2ac_cfg;
+ iwl_trans->cfg = cfg;
}
}
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 74f2f035bd28..2f46eedd7c4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -827,10 +827,16 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
if (ret)
return ret;
- /* Notify the ucode of the loaded section number and status */
- val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
- val = val | (sec_num << shift_param);
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ /* Notify ucode of loaded section number and status */
+ if (trans->cfg->use_tfh) {
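+ /* TFH devices report load status via the UREG prph register, not the FH register space */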
+ val = iwl_read_prph(trans, UREG_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, val);
+ } else {
+ val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+ }
sec_num = (sec_num << 1) | 0x1;
}
@@ -838,10 +844,21 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
- if (cpu == 1)
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
- else
- iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+ if (trans->cfg->use_tfh) {
+ if (cpu == 1)
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ } else {
+ if (cpu == 1)
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFF);
+ else
+ iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+ 0xFFFFFFFF);
+ }
return 0;
}
@@ -886,14 +903,6 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return ret;
}
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- iwl_set_bits_prph(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- (LMPM_CPU_UCODE_LOADING_COMPLETED |
- LMPM_CPU_HDRS_LOADING_COMPLETED |
- LMPM_CPU_UCODE_LOADING_STARTED) <<
- shift_param);
-
*first_ucode_section = last_read_idx;
return 0;
@@ -1960,6 +1969,10 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
+ if (trans->cfg->use_tfh)
+ /* TODO: access new SCD registers and dump them */
+ return;
+
scd_sram_addr = trans_pcie->scd_base_addr +
SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 18650dccdb58..9636dc89f6bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -703,6 +703,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
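+ /* The legacy SCD setup below does not apply to TFH-based devices */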
+ if (trans->cfg->use_tfh)
+ return;
+
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -970,11 +973,13 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
}
}
- if (trans->cfg->use_tfh)
+ if (trans->cfg->use_tfh) {
iwl_write_direct32(trans, TFH_TRANSFER_MODE,
TFH_TRANSFER_MAX_PENDING_REQ |
TFH_CHUNK_SIZE_128 |
TFH_CHUNK_SPLIT_MODE);
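+ /* The remaining SCD-specific setup does not apply to TFH devices */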
+ return 0;
+ }
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
if (trans->cfg->base_params->num_of_queues > 20)
@@ -1249,6 +1254,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+ if (cfg && trans->cfg->use_tfh)
+ WARN_ONCE(1, "Expected no calls to SCD configuration");
+
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
if (cfg) {
@@ -1366,6 +1374,9 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
return;
}
+ if (configure_scd && trans->cfg->use_tfh)
+ WARN_ONCE(1, "Expected no calls to SCD configuration");
+
if (configure_scd) {
iwl_scd_txq_set_inactive(trans, txq_id);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 3e5fa7872b64..a5656bc0e6aa 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -3041,13 +3041,9 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
p->length > 1024 || !p->pointer)
return -EINVAL;
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ return PTR_ERR(param);
}
if (p->length < sizeof(struct prism2_download_param) +
@@ -3803,13 +3799,9 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
return -EINVAL;
- param = kmalloc(p->length, GFP_KERNEL);
- if (param == NULL)
- return -ENOMEM;
-
- if (copy_from_user(param, p->pointer, p->length)) {
- ret = -EFAULT;
- goto out;
+ param = memdup_user(p->pointer, p->length);
+ if (IS_ERR(param)) {
+ return PTR_ERR(param);
}
switch (param->cmd) {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8c35ac838fce..431f13b4faf6 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -487,7 +487,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
};
static spinlock_t hwsim_radio_lock;
-static struct list_head hwsim_radios;
+static LIST_HEAD(hwsim_radios);
static int hwsim_radio_idx;
static struct platform_driver mac80211_hwsim_driver = {
@@ -3376,7 +3376,6 @@ static int __init init_mac80211_hwsim(void)
mac80211_hwsim_unassign_vif_chanctx;
spin_lock_init(&hwsim_radio_lock);
- INIT_LIST_HEAD(&hwsim_radios);
err = register_pernet_device(&hwsim_net_ops);
if (err)
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 81c60d0a1bda..43dccd5b0291 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -260,22 +260,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
rdr_event = (void *)(skb->data + sizeof(u32));
- if (le32_to_cpu(rdr_event->passed)) {
- mwifiex_dbg(priv->adapter, MSG,
- "radar detected; indicating kernel\n");
- if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
- mwifiex_dbg(priv->adapter, ERROR,
- "Failed to stop CAC in FW\n");
- cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
- GFP_KERNEL);
- mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
- rdr_event->reg_domain);
- mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
- rdr_event->det_type);
- } else {
- mwifiex_dbg(priv->adapter, MSG,
- "false radar detection event!\n");
- }
+ mwifiex_dbg(priv->adapter, MSG,
+ "radar detected; indicating kernel\n");
+ if (mwifiex_stop_radar_detection(priv, &priv->dfs_chandef))
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Failed to stop CAC in FW\n");
+ cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
+ GFP_KERNEL);
+ mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+ rdr_event->reg_domain);
+ mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+ rdr_event->det_type);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.h b/drivers/net/wireless/marvell/mwifiex/11n.h
index afdd58aa90de..ea0fa68b9913 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n.h
@@ -171,9 +171,10 @@ mwifiex_find_stream_to_delete(struct mwifiex_private *priv, int ptr_tid,
static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
struct mwifiex_sta_node *node)
{
-
- if (!node || (priv->bss_role != MWIFIEX_BSS_ROLE_UAP) ||
- !priv->ap_11n_enabled)
+ if (!node || ((priv->bss_role == MWIFIEX_BSS_ROLE_UAP) &&
+ !priv->ap_11n_enabled) ||
+ ((priv->bss_mode == NL80211_IFTYPE_ADHOC) &&
+ !priv->adapter->adhoc_11n_enabled))
return 0;
return node->is_11n_enabled;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
index dc49c3de1f25..c47d6366875d 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c
@@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
do {
/* Check if AMSDU can accommodate this MSDU */
- if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN))
+ if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) >
+ adapter->tx_buf_size)
break;
skb_src = skb_dequeue(&pra_list->skb_head);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index a74cc43b1953..94480123efa3 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -78,8 +78,15 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
*/
static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
{
- int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
+ int ret;
+
+ if (!payload) {
+ mwifiex_dbg(priv->adapter, INFO, "info: fw drop data\n");
+ return 0;
+ }
+
+ ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
if (!ret)
return 0;
@@ -921,3 +928,72 @@ void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
else
mwifiex_update_ampdu_rxwinsize(adapter, false);
}
+
+/* This function handles rxba_sync event
+ */
+void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ u8 *event_buf, u16 len)
+{
+ struct mwifiex_ie_types_rxba_sync *tlv_rxba = (void *)event_buf;
+ u16 tlv_type, tlv_len;
+ struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
+ u8 i, j;
+ u16 seq_num, tlv_seq_num, tlv_bitmap_len;
+ int tlv_buf_left = len;
+ int ret;
+ u8 *tmp;
+
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
+ event_buf, len);
+ while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ tlv_type = le16_to_cpu(tlv_rxba->header.type);
+ tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (tlv_type != TLV_TYPE_RXBA_SYNC) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Wrong TLV id=0x%x\n", tlv_type);
+ return;
+ }
+
+ tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
+ tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ mwifiex_dbg(priv->adapter, INFO,
+ "%pM tid=%d seq_num=%d bitmap_len=%d\n",
+ tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
+ tlv_bitmap_len);
+
+ rx_reor_tbl_ptr =
+ mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
+ tlv_rxba->mac);
+ if (!rx_reor_tbl_ptr) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Can not find rx_reorder_tbl!");
+ return;
+ }
+
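+ /* Each set bit marks a frame dropped by the firmware: byte i, bit j
+ * maps to sequence number (tlv_seq_num + i * 8 + j), masked to the
+ * valid sequence range.
+ */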
+ for (i = 0; i < tlv_bitmap_len; i++) {
+ for (j = 0; j < 8; j++) {
+ if (tlv_rxba->bitmap[i] & (1 << j)) {
+ seq_num = (MAX_TID_VALUE - 1) &
+ (tlv_seq_num + i * 8 + j);
+
+ mwifiex_dbg(priv->adapter, ERROR,
+ "drop packet, seq=%d\n",
+ seq_num);
+
+ ret = mwifiex_11n_rx_reorder_pkt
+ (priv, seq_num, tlv_rxba->tid,
+ tlv_rxba->mac, 0, NULL);
+
+ if (ret)
+ mwifiex_dbg(priv->adapter,
+ ERROR,
+ "Failed to drop packet\n");
+ }
+ }
+ }
+
+ tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
+ tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
+ }
+}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
index 63ecea89b4ab..22d991f514c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h
@@ -81,5 +81,6 @@ struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta);
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta);
void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags);
-
+void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
+ u8 *event_buf, u16 len);
#endif /* _MWIFIEX_11N_RXREORDER_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index a8ff969c95c2..c7f2faa81921 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2012,10 +2012,6 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- mwifiex_dbg(priv->adapter, MSG,
- "info: successfully disconnected from %pM:\t"
- "reason code %d\n", priv->cfg_bssid, reason_code);
-
eth_zero_addr(priv->cfg_bssid);
priv->hs2_enabled = false;
@@ -2485,6 +2481,16 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
priv->scan_request = request;
+ if (request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ ether_addr_copy(priv->random_mac, request->mac_addr);
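+ /* Address bits set in mac_addr_mask are kept; the remaining bits are randomized */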
+ for (i = 0; i < ETH_ALEN; i++) {
+ priv->random_mac[i] &= request->mac_addr_mask[i];
+ priv->random_mac[i] |= get_random_int() &
+ ~(request->mac_addr_mask[i]);
+ }
+ }
+
+ ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
user_scan_cfg->num_ssids = request->n_ssids;
user_scan_cfg->ssid_list = request->ssids;
@@ -2726,7 +2732,7 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40;
if (adapter->user_dev_mcs_support == HT_STREAM_2X2)
- ht_info->cap |= 3 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ ht_info->cap |= 2 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
else
ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
@@ -3913,6 +3919,88 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
return ret;
}
+#ifdef CONFIG_NL80211_TESTMODE
+
+enum mwifiex_tm_attr {
+ __MWIFIEX_TM_ATTR_INVALID = 0,
+ MWIFIEX_TM_ATTR_CMD = 1,
+ MWIFIEX_TM_ATTR_DATA = 2,
+
+ /* keep last */
+ __MWIFIEX_TM_ATTR_AFTER_LAST,
+ MWIFIEX_TM_ATTR_MAX = __MWIFIEX_TM_ATTR_AFTER_LAST - 1,
+};
+
+static const struct nla_policy mwifiex_tm_policy[MWIFIEX_TM_ATTR_MAX + 1] = {
+ [MWIFIEX_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [MWIFIEX_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = MWIFIEX_SIZE_OF_CMD_BUFFER },
+};
+
+enum mwifiex_tm_command {
+ MWIFIEX_TM_CMD_HOSTCMD = 0,
+};
+
+static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ struct mwifiex_ds_misc_cmd *hostcmd;
+ struct nlattr *tb[MWIFIEX_TM_ATTR_MAX + 1];
+ struct mwifiex_adapter *adapter;
+ struct sk_buff *skb;
+ int err;
+
+ if (!priv)
+ return -EINVAL;
+ adapter = priv->adapter;
+
+ err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len,
+ mwifiex_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[MWIFIEX_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[MWIFIEX_TM_ATTR_CMD])) {
+ case MWIFIEX_TM_CMD_HOSTCMD:
+ if (!tb[MWIFIEX_TM_ATTR_DATA])
+ return -EINVAL;
+
+ hostcmd = kzalloc(sizeof(*hostcmd), GFP_KERNEL);
+ if (!hostcmd)
+ return -ENOMEM;
+
+ hostcmd->len = nla_len(tb[MWIFIEX_TM_ATTR_DATA]);
+ memcpy(hostcmd->cmd, nla_data(tb[MWIFIEX_TM_ATTR_DATA]),
+ hostcmd->len);
+
+ if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
+ dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+ kfree(hostcmd);
+ return -EFAULT;
+ }
+
+ /* process hostcmd response */
+ skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
+ if (!skb) {
+ kfree(hostcmd);
+ return -ENOMEM;
+ }
+ err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
+ hostcmd->len, hostcmd->cmd);
+ if (err) {
+ kfree_skb(skb);
+ kfree(hostcmd);
+ return -EMSGSIZE;
+ }
+
+ err = cfg80211_testmode_reply(skb);
+ kfree(hostcmd);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+#endif
+
static int
mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
struct net_device *dev,
@@ -4025,6 +4113,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
.add_station = mwifiex_cfg80211_add_station,
.change_station = mwifiex_cfg80211_change_station,
+ CFG80211_TESTMODE_CMD(mwifiex_tm_cmd)
.get_channel = mwifiex_cfg80211_get_channel,
.start_radar_detection = mwifiex_cfg80211_start_radar_detection,
.channel_switch = mwifiex_cfg80211_channel_switch,
@@ -4135,9 +4224,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->cipher_suites = mwifiex_cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites);
- if (adapter->region_code)
- wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
+ if (adapter->regd) {
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_DISABLE_BEACON_HINTS |
REGULATORY_COUNTRY_IE_IGNORE;
+ wiphy_apply_custom_regulatory(wiphy, adapter->regd);
+ }
ether_addr_copy(wiphy->perm_addr, adapter->perm_addr);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
@@ -4173,7 +4265,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
- NL80211_FEATURE_NEED_OBSS_SCAN;
+ NL80211_FEATURE_NEED_OBSS_SCAN |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
@@ -4200,19 +4295,27 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
return ret;
}
- if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
- mwifiex_dbg(adapter, INFO,
- "driver hint alpha2: %2.2s\n", reg_alpha2);
- regulatory_hint(wiphy, reg_alpha2);
- } else {
- if (adapter->region_code == 0x00) {
- mwifiex_dbg(adapter, WARN, "Ignore world regulatory domain\n");
+ if (!adapter->regd) {
+ if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
+ mwifiex_dbg(adapter, INFO,
+ "driver hint alpha2: %2.2s\n", reg_alpha2);
+ regulatory_hint(wiphy, reg_alpha2);
} else {
- country_code =
- mwifiex_11d_code_2_region(adapter->region_code);
- if (country_code &&
- regulatory_hint(wiphy, country_code))
- mwifiex_dbg(priv->adapter, ERROR, "regulatory_hint() failed\n");
+ if (adapter->region_code == 0x00) {
+ mwifiex_dbg(adapter, WARN,
+ "Ignore world regulatory domain\n");
+ } else {
+ wiphy->regulatory_flags |=
+ REGULATORY_DISABLE_BEACON_HINTS |
+ REGULATORY_COUNTRY_IE_IGNORE;
+ country_code =
+ mwifiex_11d_code_2_region(
+ adapter->region_code);
+ if (country_code &&
+ regulatory_hint(wiphy, country_code))
+ mwifiex_dbg(priv->adapter, ERROR,
+ "regulatory_hint() failed\n");
+ }
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index c29f26d8baf2..53477280f39c 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -480,13 +480,27 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
*/
int mwifiex_process_event(struct mwifiex_adapter *adapter)
{
- int ret;
+ int ret, i;
struct mwifiex_private *priv =
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
struct sk_buff *skb = adapter->event_skb;
- u32 eventcause = adapter->event_cause;
+ u32 eventcause;
struct mwifiex_rxinfo *rx_info;
+ if ((adapter->event_cause & EVENT_ID_MASK) == EVENT_RADAR_DETECTED) {
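+ /* Tag the event with the 11h-active interface: bss_num goes into
+ * bits 16-23 and bss_type into bits 24-31 of the event cause.
+ */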
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv && mwifiex_is_11h_active(priv)) {
+ adapter->event_cause |=
+ ((priv->bss_num & 0xff) << 16) |
+ ((priv->bss_type & 0xff) << 24);
+ break;
+ }
+ }
+ }
+
+ eventcause = adapter->event_cause;
+
/* Save the last event to debug log */
adapter->dbg.last_event_index =
(adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
@@ -581,6 +595,14 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
}
+ /* We don't expect commands in manufacturing mode. They are prepared
+ * in the application, and a ready-to-download buffer is passed to
+ * the driver.
+ */
+ if (adapter->mfg_mode && cmd_no) {
+ dev_dbg(adapter->dev, "Ignoring commands in manufacturing mode\n");
+ return -1;
+ }
+
/* Get a new command node */
cmd_node = mwifiex_get_cmd_node(adapter);
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index bccf17ad588e..b9284b533294 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -118,6 +118,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
+ p += sprintf(p, "region_code=\"0x%x\"\n",
+ priv->adapter->region_code);
netdev_for_each_mc_addr(ha, netdev)
p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 5596b6be1898..18aa5256e88b 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -176,6 +176,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148)
+#define TLV_TYPE_RXBA_SYNC (PROPRIETARY_TLV_BASE_ID + 153)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176)
@@ -188,6 +189,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_BTCOEX_WL_AGGR_WINSIZE (PROPRIETARY_TLV_BASE_ID + 202)
#define TLV_BTCOEX_WL_SCANTIME (PROPRIETARY_TLV_BASE_ID + 203)
#define TLV_TYPE_BSS_MODE (PROPRIETARY_TLV_BASE_ID + 206)
+#define TLV_TYPE_RANDOM_MAC (PROPRIETARY_TLV_BASE_ID + 236)
+#define TLV_TYPE_CHAN_ATTR_CFG (PROPRIETARY_TLV_BASE_ID + 237)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -208,6 +211,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MWIFIEX_TX_DATA_BUF_SIZE_4K 4096
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
+#define MWIFIEX_TX_DATA_BUF_SIZE_12K 12288
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
@@ -379,6 +383,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
+#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -411,6 +416,14 @@ enum P2P_MODES {
P2P_MODE_CLIENT = 3,
};
+enum mwifiex_channel_flags {
+ MWIFIEX_CHANNEL_PASSIVE = BIT(0),
+ MWIFIEX_CHANNEL_DFS = BIT(1),
+ MWIFIEX_CHANNEL_NOHT40 = BIT(2),
+ MWIFIEX_CHANNEL_NOHT80 = BIT(3),
+ MWIFIEX_CHANNEL_DISABLED = BIT(7),
+};
+
#define HostCmd_RET_BIT 0x8000
#define HostCmd_ACT_GEN_GET 0x0000
#define HostCmd_ACT_GEN_SET 0x0001
@@ -504,6 +517,8 @@ enum P2P_MODES {
#define EVENT_RSSI_HIGH 0x0000001c
#define EVENT_SNR_HIGH 0x0000001d
#define EVENT_IBSS_COALESCED 0x0000001e
+#define EVENT_IBSS_STA_CONNECT 0x00000020
+#define EVENT_IBSS_STA_DISCONNECT 0x00000021
#define EVENT_DATA_RSSI_LOW 0x00000024
#define EVENT_DATA_SNR_LOW 0x00000025
#define EVENT_DATA_RSSI_HIGH 0x00000026
@@ -531,6 +546,7 @@ enum P2P_MODES {
#define EVENT_CHANNEL_REPORT_RDY 0x00000054
#define EVENT_TX_DATA_PAUSE 0x00000055
#define EVENT_EXT_SCAN_REPORT 0x00000058
+#define EVENT_RXBA_SYNC 0x00000059
#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
@@ -734,6 +750,16 @@ struct mwifiex_ie_types_chan_list_param_set {
struct mwifiex_chan_scan_param_set chan_scan_param[1];
} __packed;
+struct mwifiex_ie_types_rxba_sync {
+ struct mwifiex_ie_types_header header;
+ u8 mac[ETH_ALEN];
+ u8 tid;
+ u8 reserved;
+ __le16 seq_num;
+ __le16 bitmap_len;
+ u8 bitmap[1];
+} __packed;
+
struct chan_band_param_set {
u8 radio_type;
u8 chan_number;
@@ -780,6 +806,11 @@ struct mwifiex_ie_types_scan_chan_gap {
__le16 chan_gap;
} __packed;
+struct mwifiex_ie_types_random_mac {
+ struct mwifiex_ie_types_header header;
+ u8 mac[ETH_ALEN];
+} __packed;
+
struct mwifiex_ietypes_chanstats {
struct mwifiex_ie_types_header header;
struct mwifiex_fw_chan_stats chanstats[0];
@@ -1464,6 +1495,7 @@ struct mwifiex_user_scan_cfg {
/* Variable number (fixed maximum) of channels to scan up */
struct mwifiex_user_scan_chan chan_list[MWIFIEX_USER_SCAN_CHAN_MAX];
u16 scan_chan_gap;
+ u8 random_mac[ETH_ALEN];
} __packed;
#define MWIFIEX_BG_SCAN_CHAN_MAX 38
@@ -1646,7 +1678,7 @@ struct mwifiex_ie_types_sta_info {
};
struct host_cmd_ds_sta_list {
- u16 sta_count;
+ __le16 sta_count;
u8 tlv[0];
} __packed;
@@ -1667,6 +1699,12 @@ struct mwifiex_ie_types_wmm_param_set {
u8 wmm_ie[1];
};
+struct mwifiex_ie_types_mgmt_frame {
+ struct mwifiex_ie_types_header header;
+ __le16 frame_control;
+ u8 frame_contents[0];
+};
+
struct mwifiex_ie_types_wmm_queue_status {
struct mwifiex_ie_types_header header;
u8 queue_index;
@@ -2034,26 +2072,26 @@ struct host_cmd_ds_set_bss_mode {
struct host_cmd_ds_pcie_details {
/* TX buffer descriptor ring address */
- u32 txbd_addr_lo;
- u32 txbd_addr_hi;
+ __le32 txbd_addr_lo;
+ __le32 txbd_addr_hi;
/* TX buffer descriptor ring count */
- u32 txbd_count;
+ __le32 txbd_count;
/* RX buffer descriptor ring address */
- u32 rxbd_addr_lo;
- u32 rxbd_addr_hi;
+ __le32 rxbd_addr_lo;
+ __le32 rxbd_addr_hi;
/* RX buffer descriptor ring count */
- u32 rxbd_count;
+ __le32 rxbd_count;
/* Event buffer descriptor ring address */
- u32 evtbd_addr_lo;
- u32 evtbd_addr_hi;
+ __le32 evtbd_addr_lo;
+ __le32 evtbd_addr_hi;
/* Event buffer descriptor ring count */
- u32 evtbd_count;
+ __le32 evtbd_count;
/* Sleep cookie buffer physical address */
- u32 sleep_cookie_addr_lo;
- u32 sleep_cookie_addr_hi;
+ __le32 sleep_cookie_addr_lo;
+ __le32 sleep_cookie_addr_hi;
} __packed;
struct mwifiex_ie_types_rssi_threshold {
@@ -2093,8 +2131,8 @@ struct mwifiex_ie_types_mc_group_info {
u8 chan_buf_weight;
u8 band_config;
u8 chan_num;
- u32 chan_time;
- u32 reserved;
+ __le32 chan_time;
+ __le32 reserved;
union {
u8 sdio_func_num;
u8 usb_ep_num;
@@ -2185,7 +2223,7 @@ struct host_cmd_ds_robust_coex {
} __packed;
struct host_cmd_ds_wakeup_reason {
- u16 wakeup_reason;
+ __le16 wakeup_reason;
} __packed;
struct host_cmd_ds_gtk_rekey_params {
@@ -2196,6 +2234,10 @@ struct host_cmd_ds_gtk_rekey_params {
__le32 replay_ctr_high;
} __packed;
+struct host_cmd_ds_chan_region_cfg {
+ __le16 action;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2270,6 +2312,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_robust_coex coex;
struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
struct host_cmd_ds_gtk_rekey_params rekey;
+ struct host_cmd_ds_chan_region_cfg reg_cfg;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 1489c90192bd..82839d9f079f 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -298,6 +298,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
+ adapter->mfg_mode = mfg_mode;
adapter->key_api_major_ver = 0;
adapter->key_api_minor_ver = 0;
eth_broadcast_addr(adapter->perm_addr);
@@ -553,15 +554,22 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
return -1;
}
}
+ if (adapter->mfg_mode) {
+ adapter->hw_status = MWIFIEX_HW_STATUS_READY;
+ ret = -EINPROGRESS;
+ } else {
+ for (i = 0; i < adapter->priv_num; i++) {
+ if (adapter->priv[i]) {
+ ret = mwifiex_sta_init_cmd(adapter->priv[i],
+ first_sta, true);
+ if (ret == -1)
+ return -1;
+
+ first_sta = false;
+ }
+
- for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i]) {
- ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
- true);
- if (ret == -1)
- return -1;
- first_sta = false;
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c
index 1c7b00630b90..b89596c18b41 100644
--- a/drivers/net/wireless/marvell/mwifiex/join.c
+++ b/drivers/net/wireless/marvell/mwifiex/join.c
@@ -669,9 +669,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
sizeof(priv->assoc_rsp_buf));
- memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
-
assoc_rsp->a_id = cpu_to_le16(aid);
+ memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
if (status_code) {
priv->adapter->dbg.num_cmd_assoc_failure++;
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index db4925db39aa..9b2e98cfe090 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -23,6 +23,7 @@
#include "11n.h"
#define VERSION "1.0"
+#define MFG_FIRMWARE "mwifiex_mfg.bin"
static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
module_param(debug_mask, uint, 0);
@@ -37,6 +38,10 @@ module_param(driver_mode, ushort, 0);
MODULE_PARM_DESC(driver_mode,
"station=0x1(default), ap-sta=0x3, station-p2p=0x5, ap-sta-p2p=0x7");
+bool mfg_mode;
+module_param(mfg_mode, bool, 0);
+MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
+
/*
* This function registers the device and performs all the necessary
* initializations.
@@ -139,6 +144,8 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
adapter->nd_info = NULL;
}
+ kfree(adapter->regd);
+
vfree(adapter->chan_stats);
kfree(adapter);
return 0;
@@ -486,9 +493,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
*/
static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
{
- flush_workqueue(adapter->workqueue);
- destroy_workqueue(adapter->workqueue);
- adapter->workqueue = NULL;
+ if (adapter->workqueue) {
+ flush_workqueue(adapter->workqueue);
+ destroy_workqueue(adapter->workqueue);
+ adapter->workqueue = NULL;
+ }
if (adapter->rx_workqueue) {
flush_workqueue(adapter->rx_workqueue);
@@ -559,16 +568,21 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
}
/* Wait for mwifiex_init to complete */
- wait_event_interruptible(adapter->init_wait_q,
- adapter->init_wait_q_woken);
- if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
- goto err_init_fw;
+ if (!adapter->mfg_mode) {
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
+ goto err_init_fw;
+ }
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
- if (mwifiex_register_cfg80211(adapter)) {
- mwifiex_dbg(adapter, ERROR,
- "cannot register with cfg80211\n");
- goto err_init_fw;
+
+ if (!adapter->wiphy) {
+ if (mwifiex_register_cfg80211(adapter)) {
+ mwifiex_dbg(adapter, ERROR,
+ "cannot register with cfg80211\n");
+ goto err_init_fw;
+ }
}
if (mwifiex_init_channel_scan_gap(adapter)) {
@@ -662,16 +676,41 @@ done:
/*
* This function initializes the hardware and gets firmware.
*/
-static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
+static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter,
+ bool req_fw_nowait)
{
int ret;
- ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
- adapter->dev, GFP_KERNEL, adapter,
- mwifiex_fw_dpc);
- if (ret < 0)
- mwifiex_dbg(adapter, ERROR,
- "request_firmware_nowait error %d\n", ret);
+ /* Override default firmware with manufacturing one if
+ * manufacturing mode is enabled
+ */
+ if (mfg_mode) {
+ if (strlcpy(adapter->fw_name, MFG_FIRMWARE,
+ sizeof(adapter->fw_name)) >=
+ sizeof(adapter->fw_name)) {
+ pr_err("%s: fw_name too long!\n", __func__);
+ return -1;
+ }
+ }
+
+ if (req_fw_nowait) {
+ ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
+ adapter->dev, GFP_KERNEL, adapter,
+ mwifiex_fw_dpc);
+ if (ret < 0)
+ mwifiex_dbg(adapter, ERROR,
+ "request_firmware_nowait error %d\n", ret);
+ } else {
+ ret = request_firmware(&adapter->firmware,
+ adapter->fw_name,
+ adapter->dev);
+ if (ret < 0)
+ mwifiex_dbg(adapter, ERROR,
+ "request_firmware error %d\n", ret);
+ else
+ mwifiex_fw_dpc(adapter->firmware, (void *)adapter);
+ }
+
return ret;
}
@@ -1321,6 +1360,199 @@ static void mwifiex_main_work_queue(struct work_struct *work)
}
/*
+ * This function gets called during PCIe function level reset. Required
+ * code is extracted from mwifiex_remove_card()
+ */
+static int
+mwifiex_shutdown_sw(struct mwifiex_adapter *adapter, struct semaphore *sem)
+{
+ struct mwifiex_private *priv;
+ int i;
+
+ if (down_interruptible(sem))
+ goto exit_sem_err;
+
+ if (!adapter)
+ goto exit_remove;
+
+ priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
+ mwifiex_deauthenticate(priv, NULL);
+
+ /* We can no longer handle interrupts once we start doing the teardown
+ * below.
+ */
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
+
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+
+ /* Stop data */
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (priv && priv->netdev) {
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
+ netif_device_detach(priv->netdev);
+ }
+ }
+
+ mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
+ adapter->init_wait_q_woken = false;
+
+ if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ if (adapter->if_ops.down_dev)
+ adapter->if_ops.down_dev(adapter);
+
+ mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n");
+ if (atomic_read(&adapter->rx_pending) ||
+ atomic_read(&adapter->tx_pending) ||
+ atomic_read(&adapter->cmd_pending)) {
+ mwifiex_dbg(adapter, ERROR,
+ "rx_pending=%d, tx_pending=%d,\t"
+ "cmd_pending=%d\n",
+ atomic_read(&adapter->rx_pending),
+ atomic_read(&adapter->tx_pending),
+ atomic_read(&adapter->cmd_pending));
+ }
+
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+ if (!priv)
+ continue;
+ rtnl_lock();
+ if (priv->netdev &&
+ priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+ mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
+ rtnl_unlock();
+ }
+
+exit_remove:
+ up(sem);
+exit_sem_err:
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ return 0;
+}
+
+/* This function is called during a PCIe function level reset. The required
+ * re-initialization code is extracted from mwifiex_add_card().
+ */
+static int
+mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct semaphore *sem,
+ struct mwifiex_if_ops *if_ops, u8 iface_type)
+{
+ char fw_name[32];
+ struct pcie_service_card *card = adapter->card;
+
+ if (down_interruptible(sem))
+ goto exit_sem_err;
+
+ mwifiex_init_lock_list(adapter);
+ if (adapter->if_ops.up_dev)
+ adapter->if_ops.up_dev(adapter);
+
+ adapter->iface_type = iface_type;
+ adapter->card_sem = sem;
+
+ adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
+ adapter->surprise_removed = false;
+ init_waitqueue_head(&adapter->init_wait_q);
+ adapter->is_suspended = false;
+ adapter->hs_activated = false;
+ init_waitqueue_head(&adapter->hs_activate_wait_q);
+ init_waitqueue_head(&adapter->cmd_wait_q.wait);
+ adapter->cmd_wait_q.status = 0;
+ adapter->scan_wait_q_woken = false;
+
+ if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB)
+ adapter->rx_work_enabled = true;
+
+ adapter->workqueue =
+ alloc_workqueue("MWIFIEX_WORK_QUEUE",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+ if (!adapter->workqueue)
+ goto err_kmalloc;
+
+ INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
+
+ if (adapter->rx_work_enabled) {
+ adapter->rx_workqueue = alloc_workqueue("MWIFIEX_RX_WORK_QUEUE",
+ WQ_HIGHPRI |
+ WQ_MEM_RECLAIM |
+ WQ_UNBOUND, 1);
+ if (!adapter->rx_workqueue)
+ goto err_kmalloc;
+ INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue);
+ }
+
+ /* Register the device. Fill in the private data structure with
+ * relevant information from the card. Some code is extracted from
+ * mwifiex_register_dev().
+ */
+ mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__);
+ strcpy(fw_name, adapter->fw_name);
+ strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
+
+ adapter->tx_buf_size = card->pcie.tx_buf_size;
+ adapter->ext_scan = card->pcie.can_ext_scan;
+ if (mwifiex_init_hw_fw(adapter, false)) {
+ strcpy(adapter->fw_name, fw_name);
+ mwifiex_dbg(adapter, ERROR,
+ "%s: firmware init failed\n", __func__);
+ goto err_init_fw;
+ }
+ strcpy(adapter->fw_name, fw_name);
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+ up(sem);
+ return 0;
+
+err_init_fw:
+ mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__);
+ if (adapter->if_ops.unregister_dev)
+ adapter->if_ops.unregister_dev(adapter);
+ if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
+ mwifiex_dbg(adapter, ERROR,
+ "info: %s: shutdown mwifiex\n", __func__);
+ adapter->init_wait_q_woken = false;
+
+ if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ }
+
+err_kmalloc:
+ mwifiex_terminate_workqueue(adapter);
+ adapter->surprise_removed = true;
+ up(sem);
+exit_sem_err:
+ mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
+
+ return -1;
+}
+
+/* This function handles the pre- and post-FLR (PCIe function level reset)
+ * paths. It performs software cleanup without touching PCIe-specific code,
+ * and PCIe-specific initialization is likewise skipped on re-init.
+ */
+void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
+{
+ struct mwifiex_if_ops if_ops;
+
+ if (!prepare) {
+ mwifiex_reinit_sw(adapter, adapter->card_sem, &if_ops,
+ adapter->iface_type);
+ } else {
+ memcpy(&if_ops, &adapter->if_ops,
+ sizeof(struct mwifiex_if_ops));
+ mwifiex_shutdown_sw(adapter, adapter->card_sem);
+ }
+}
+EXPORT_SYMBOL_GPL(mwifiex_do_flr);
+
+/*
* This function adds the card.
*
* This function follows the following major steps to set up the device -
@@ -1391,7 +1623,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
goto err_registerdev;
}
- if (mwifiex_init_hw_fw(adapter)) {
+ if (mwifiex_init_hw_fw(adapter, true)) {
pr_err("%s: firmware init failed\n", __func__);
goto err_init_fw;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 9f6bb400bdae..26df28f4bfb2 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -58,6 +58,7 @@
#include "sdio.h"
extern const char driver_version[];
+extern bool mfg_mode;
struct mwifiex_adapter;
struct mwifiex_private;
@@ -675,6 +676,7 @@ struct mwifiex_private {
struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
u8 assoc_resp_ht_param;
bool ht_param_present;
+ u8 random_mac[ETH_ALEN];
};
@@ -827,6 +829,8 @@ struct mwifiex_if_ops {
void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *);
void (*multi_port_resync)(struct mwifiex_adapter *);
bool (*is_port_ready)(struct mwifiex_private *);
+ void (*down_dev)(struct mwifiex_adapter *);
+ void (*up_dev)(struct mwifiex_adapter *);
};
struct mwifiex_adapter {
@@ -989,6 +993,7 @@ struct mwifiex_adapter {
u32 drv_info_size;
bool scan_chan_gap_enabled;
struct sk_buff_head rx_data_q;
+ bool mfg_mode;
struct mwifiex_chan_stats *chan_stats;
u32 num_in_chan_stats;
int survey_idx;
@@ -1004,6 +1009,7 @@ struct mwifiex_adapter {
bool usb_mc_status;
bool usb_mc_setup;
struct cfg80211_wowlan_nd_info *nd_info;
+ struct ieee80211_regdomain *regd;
};
void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1625,4 +1631,5 @@ void mwifiex_debugfs_remove(void);
void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
#endif
+void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare);
#endif /* !_MWIFIEX_MAIN_H_ */
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 453ab6ad4784..3c3c4f197da8 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -225,7 +225,7 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
if (!adapter || !adapter->priv_num)
return;
- if (user_rmmod) {
+ if (user_rmmod && !adapter->mfg_mode) {
#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
mwifiex_pcie_resume(&pdev->dev);
@@ -277,6 +277,52 @@ static const struct pci_device_id mwifiex_ids[] = {
MODULE_DEVICE_TABLE(pci, mwifiex_ids);
+static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+ struct mwifiex_adapter *adapter;
+ struct pcie_service_card *card;
+
+ if (!pdev) {
+ pr_err("%s: PCIe device is not specified\n", __func__);
+ return;
+ }
+
+ card = (struct pcie_service_card *)pci_get_drvdata(pdev);
+ if (!card || !card->adapter) {
+ pr_err("%s: Card or adapter structure is not valid (%ld)\n",
+ __func__, (long)card);
+ return;
+ }
+
+ adapter = card->adapter;
+ mwifiex_dbg(adapter, INFO,
+ "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
+ __func__, pdev->vendor, pdev->device,
+ pdev->revision,
+ prepare ? "Pre-FLR" : "Post-FLR");
+
+ if (prepare) {
+ /* The kernel will perform the FLR after this notification.
+ * Clean up all software state without touching anything related
+ * to PCIe or the hardware.
+ */
+ mwifiex_do_flr(adapter, prepare);
+ adapter->surprise_removed = true;
+ } else {
+ /* The kernel saves and restores the PCIe function context before
+ * and after performing the FLR respectively. Reconfigure the
+ * software and firmware, including a firmware re-download.
+ */
+ adapter->surprise_removed = false;
+ mwifiex_do_flr(adapter, prepare);
+ }
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+}
+
+static const struct pci_error_handlers mwifiex_pcie_err_handler = {
+ .reset_notify = mwifiex_pcie_reset_notify,
+};
+
#ifdef CONFIG_PM_SLEEP
/* Power Management Hooks */
static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
@@ -295,6 +341,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
},
#endif
.shutdown = mwifiex_pcie_shutdown,
+ .err_handler = &mwifiex_pcie_err_handler,
};
/*
@@ -1956,8 +2003,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
if (firmware_len - offset < txlen)
txlen = firmware_len - offset;
- mwifiex_dbg(adapter, INFO, ".");
-
tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
card->pcie.blksz_fw_dl;
@@ -2043,6 +2088,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
ret = -1;
else
ret = 0;
+
+ mwifiex_dbg(adapter, INFO, "Try %d if FW is ready <%d,%#x>",
+ tries, ret, firmware_stat);
+
if (ret)
continue;
if (firmware_stat == FIRMWARE_READY_PCIE) {
@@ -2074,8 +2123,7 @@ mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
adapter->winner = 1;
} else {
mwifiex_dbg(adapter, ERROR,
- "PCI-E is not the winner <%#x,%d>, exit dnld\n",
- ret, adapter->winner);
+ "PCI-E is not the winner <%#x>", winner);
}
return ret;
@@ -2863,7 +2911,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
{
int revision_id = 0;
- int version;
+ int version, magic;
struct pcie_service_card *card = adapter->card;
switch (card->dev->device) {
@@ -2888,30 +2936,19 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
}
break;
case PCIE_DEVICE_ID_MARVELL_88W8997:
- mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+ mwifiex_read_reg(adapter, 0x8, &revision_id);
mwifiex_read_reg(adapter, 0x0cd0, &version);
+ mwifiex_read_reg(adapter, 0x0cd4, &magic);
+ revision_id &= 0xff;
version &= 0x7;
- switch (revision_id) {
- case PCIE8997_V2:
- if (version == CHIP_VER_PCIEUART)
- strcpy(adapter->fw_name,
- PCIEUART8997_FW_NAME_V2);
- else
- strcpy(adapter->fw_name,
- PCIEUSB8997_FW_NAME_V2);
- break;
- case PCIE8997_Z:
- if (version == CHIP_VER_PCIEUART)
- strcpy(adapter->fw_name,
- PCIEUART8997_FW_NAME_Z);
- else
- strcpy(adapter->fw_name,
- PCIEUSB8997_FW_NAME_Z);
- break;
- default:
- strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
- break;
- }
+ magic &= 0xff;
+ if (revision_id == PCIE8997_A1 &&
+ magic == CHIP_MAGIC_VALUE &&
+ version == CHIP_VER_PCIEUART)
+ strcpy(adapter->fw_name, PCIEUART8997_FW_NAME_V4);
+ else
+ strcpy(adapter->fw_name, PCIEUSB8997_FW_NAME_V4);
+ break;
default:
break;
}
@@ -2952,7 +2989,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
- const struct mwifiex_pcie_card_reg *reg;
struct pci_dev *pdev;
int i;
@@ -2976,8 +3012,90 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
if (card->msi_enable)
pci_disable_msi(pdev);
}
+ }
+}
+
+/* This function initializes the PCI-E host memory space, WCB rings, etc.
+ *
+ * The following initialization steps are performed -
+ * - Allocate TXBD ring buffers
+ * - Allocate RXBD ring buffers
+ * - Allocate event BD ring buffers
+ * - Allocate command response ring buffer
+ * - Allocate sleep cookie buffer
+ * This is part of mwifiex_pcie_init(); it does not reset the PCIe registers.
+ */
+static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ int ret;
+ struct pci_dev *pdev = card->dev;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
- reg = card->pcie.reg;
+ card->cmdrsp_buf = NULL;
+ ret = mwifiex_pcie_create_txbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create txbd ring\n");
+ goto err_cre_txbd;
+ }
+
+ ret = mwifiex_pcie_create_rxbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create rxbd ring\n");
+ goto err_cre_rxbd;
+ }
+
+ ret = mwifiex_pcie_create_evtbd_ring(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to create evtbd ring\n");
+ goto err_cre_evtbd;
+ }
+
+ ret = mwifiex_pcie_alloc_cmdrsp_buf(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to allocate cmdbuf buffer\n");
+ goto err_alloc_cmdbuf;
+ }
+
+ if (reg->sleep_cookie) {
+ ret = mwifiex_pcie_alloc_sleep_cookie_buf(adapter);
+ if (ret) {
+ mwifiex_dbg(adapter, ERROR, "Failed to allocate sleep_cookie buffer\n");
+ goto err_alloc_cookie;
+ }
+ } else {
+ card->sleep_cookie_vbase = NULL;
+ }
+ return;
+
+err_alloc_cookie:
+ mwifiex_pcie_delete_cmdrsp_buf(adapter);
+err_alloc_cmdbuf:
+ mwifiex_pcie_delete_evtbd_ring(adapter);
+err_cre_evtbd:
+ mwifiex_pcie_delete_rxbd_ring(adapter);
+err_cre_rxbd:
+ mwifiex_pcie_delete_txbd_ring(adapter);
+err_cre_txbd:
+ pci_iounmap(pdev, card->pci_mmap1);
+}
+
+/* This function cleans up the PCI-E host memory space.
+ * Some code is extracted from mwifiex_unregister_dev().
+ */
+static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
+{
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+ if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
+ mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
+
+ adapter->seq_num = 0;
+ adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+ if (card) {
if (reg->sleep_cookie)
mwifiex_pcie_delete_sleep_cookie_buf(adapter);
@@ -2987,6 +3105,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
mwifiex_pcie_delete_txbd_ring(adapter);
card->cmdrsp_buf = NULL;
}
}
static struct mwifiex_if_ops pcie_ops = {
@@ -3013,6 +3133,8 @@ static struct mwifiex_if_ops pcie_ops = {
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
.reg_dump = mwifiex_pcie_reg_dump,
.device_dump = mwifiex_pcie_device_dump,
+ .down_dev = mwifiex_pcie_down_dev,
+ .up_dev = mwifiex_pcie_up_dev,
};
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index f05061cea5cd..46f99cae9399 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -32,11 +32,9 @@
#define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
#define PCIE8897_A0_FW_NAME "mrvl/pcie8897_uapsta_a0.bin"
#define PCIE8897_B0_FW_NAME "mrvl/pcie8897_uapsta.bin"
-#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcieusb8997_combo_v2.bin"
-#define PCIEUART8997_FW_NAME_Z "mrvl/pcieuart8997_combo.bin"
-#define PCIEUART8997_FW_NAME_V2 "mrvl/pcieuart8997_combo_v2.bin"
-#define PCIEUSB8997_FW_NAME_Z "mrvl/pcieusb8997_combo.bin"
-#define PCIEUSB8997_FW_NAME_V2 "mrvl/pcieusb8997_combo_v2.bin"
+#define PCIEUART8997_FW_NAME_V4 "mrvl/pcieuart8997_combo_v4.bin"
+#define PCIEUSB8997_FW_NAME_V4 "mrvl/pcieusb8997_combo_v4.bin"
+#define PCIE8997_DEFAULT_WIFIFW_NAME "mrvl/pcie8997_wlan_v4.bin"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
@@ -46,9 +44,10 @@
#define PCIE8897_A0 0x1100
#define PCIE8897_B0 0x1200
-#define PCIE8997_Z 0x0
-#define PCIE8997_V2 0x471
+#define PCIE8997_A0 0x10
+#define PCIE8997_A1 0x11
#define CHIP_VER_PCIEUART 0x3
+#define CHIP_MAGIC_VALUE 0x24
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 21ec84794d0c..97c9765b5bc6 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -820,6 +820,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_scan_chan_gap *chan_gap_tlv;
+ struct mwifiex_ie_types_random_mac *random_mac_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
struct mwifiex_ie_types_bssid_list *bssid_tlv;
u8 *tlv_pos;
@@ -835,6 +836,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u8 ssid_filter;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_bss_mode *bss_mode;
+ const u8 zero_mac[ETH_ALEN] = {};
/* The tlv_buf_len is calculated for each scan command. The TLVs added
in this routine will be preserved since the routine that sends the
@@ -967,6 +969,18 @@ mwifiex_config_scan(struct mwifiex_private *priv,
tlv_pos +=
sizeof(struct mwifiex_ie_types_scan_chan_gap);
}
+
+ if (!ether_addr_equal(user_scan_in->random_mac, zero_mac)) {
+ random_mac_tlv = (void *)tlv_pos;
+ random_mac_tlv->header.type =
+ cpu_to_le16(TLV_TYPE_RANDOM_MAC);
+ random_mac_tlv->header.len =
+ cpu_to_le16(sizeof(random_mac_tlv->mac));
+ ether_addr_copy(random_mac_tlv->mac,
+ user_scan_in->random_mac);
+ tlv_pos +=
+ sizeof(struct mwifiex_ie_types_random_mac);
+ }
} else {
scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
num_probes = adapter->scan_probes;
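
[Editor's note] The random-MAC TLV appended above follows the usual type/length/value layout: a little-endian u16 type, a little-endian u16 length covering only the payload, then the payload itself. A self-contained illustration of that layout (the struct and the type value here are demo stand-ins, not the driver's definitions):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative layout only; the driver's real struct is
 * mwifiex_ie_types_random_mac, shown in the hunk above. */
struct tlv_random_mac {
	uint16_t type;   /* little-endian on the wire */
	uint16_t len;    /* payload length, excludes this header */
	uint8_t  mac[6];
} __attribute__((packed));

#define DEMO_TLV_TYPE_RANDOM_MAC 0x01ec /* hypothetical value for the demo */

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct tlv_random_mac tlv;

	tlv.type = htole16(DEMO_TLV_TYPE_RANDOM_MAC);
	tlv.len  = htole16(sizeof(tlv.mac)); /* 6: only the payload */
	memcpy(tlv.mac, mac, sizeof(tlv.mac));

	printf("TLV is %zu bytes: 4-byte header + %zu-byte MAC\n",
	       sizeof(tlv), sizeof(tlv.mac));
	return 0;
}
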
@@ -1922,6 +1936,7 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
}
adapter->active_scan_triggered = true;
+ ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
user_scan_cfg->ssid_list = priv->scan_request->ssids;
@@ -2179,18 +2194,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
if (chan_band_tlv && adapter->nd_info) {
adapter->nd_info->matches[idx] =
- kzalloc(sizeof(*pmatch) +
- sizeof(u32), GFP_ATOMIC);
+ kzalloc(sizeof(*pmatch) + sizeof(u32),
+ GFP_ATOMIC);
pmatch = adapter->nd_info->matches[idx];
if (pmatch) {
- memset(pmatch, 0, sizeof(*pmatch));
- if (chan_band_tlv) {
- pmatch->n_channels = 1;
- pmatch->channels[0] =
- chan_band->chan_number;
- }
+ pmatch->n_channels = 1;
+ pmatch->channels[0] = chan_band->chan_number;
}
}
@@ -2761,6 +2772,7 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
if (!scan_cfg)
return -ENOMEM;
+ ether_addr_copy(scan_cfg->random_mac, priv->random_mac);
scan_cfg->ssid_list = req_ssid;
scan_cfg->num_ssids = 1;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index d3e1561ca075..8718950004f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -122,9 +122,11 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
IRQF_TRIGGER_LOW,
"wifi_wake", cfg);
if (ret) {
- dev_err(dev,
+ dev_dbg(dev,
"Failed to request irq_wifi %d (%d)\n",
cfg->irq_wifi, ret);
+ card->plt_wake_cfg = NULL;
+ return 0;
}
disable_irq(cfg->irq_wifi);
}
@@ -289,7 +291,7 @@ mwifiex_sdio_remove(struct sdio_func *func)
mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
- if (user_rmmod) {
+ if (user_rmmod && !adapter->mfg_mode) {
if (adapter->is_suspended)
mwifiex_sdio_resume(adapter->dev);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 7897037b0992..49048b41fd36 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -706,15 +706,10 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
(priv->wep_key_curr_index & KEY_INDEX_MASK))
key_info |= KEY_DEFAULT;
} else {
- if (mac) {
- if (is_broadcast_ether_addr(mac))
- key_info |= KEY_MCAST;
- else
- key_info |= KEY_UNICAST |
- KEY_DEFAULT;
- } else {
+ if (is_broadcast_ether_addr(mac))
key_info |= KEY_MCAST;
- }
+ else
+ key_info |= KEY_UNICAST | KEY_DEFAULT;
}
}
km->key_param_set.key_info = cpu_to_le16(key_info);
@@ -1244,20 +1239,23 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
return 0;
/* Send the ring base addresses and count to firmware */
- host_spec->txbd_addr_lo = (u32)(card->txbd_ring_pbase);
- host_spec->txbd_addr_hi = (u32)(((u64)card->txbd_ring_pbase)>>32);
- host_spec->txbd_count = MWIFIEX_MAX_TXRX_BD;
- host_spec->rxbd_addr_lo = (u32)(card->rxbd_ring_pbase);
- host_spec->rxbd_addr_hi = (u32)(((u64)card->rxbd_ring_pbase)>>32);
- host_spec->rxbd_count = MWIFIEX_MAX_TXRX_BD;
- host_spec->evtbd_addr_lo = (u32)(card->evtbd_ring_pbase);
- host_spec->evtbd_addr_hi = (u32)(((u64)card->evtbd_ring_pbase)>>32);
- host_spec->evtbd_count = MWIFIEX_MAX_EVT_BD;
+ host_spec->txbd_addr_lo = cpu_to_le32((u32)(card->txbd_ring_pbase));
+ host_spec->txbd_addr_hi =
+ cpu_to_le32((u32)(((u64)card->txbd_ring_pbase) >> 32));
+ host_spec->txbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
+ host_spec->rxbd_addr_lo = cpu_to_le32((u32)(card->rxbd_ring_pbase));
+ host_spec->rxbd_addr_hi =
+ cpu_to_le32((u32)(((u64)card->rxbd_ring_pbase) >> 32));
+ host_spec->rxbd_count = cpu_to_le32(MWIFIEX_MAX_TXRX_BD);
+ host_spec->evtbd_addr_lo = cpu_to_le32((u32)(card->evtbd_ring_pbase));
+ host_spec->evtbd_addr_hi =
+ cpu_to_le32((u32)(((u64)card->evtbd_ring_pbase) >> 32));
+ host_spec->evtbd_count = cpu_to_le32(MWIFIEX_MAX_EVT_BD);
if (card->sleep_cookie_vbase) {
host_spec->sleep_cookie_addr_lo =
- (u32)(card->sleep_cookie_pbase);
- host_spec->sleep_cookie_addr_hi =
- (u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
+ cpu_to_le32((u32)(card->sleep_cookie_pbase));
+ host_spec->sleep_cookie_addr_hi = cpu_to_le32((u32)(((u64)
+ (card->sleep_cookie_pbase)) >> 32));
mwifiex_dbg(priv->adapter, INFO,
"sleep_cook_lo phy addr: 0x%x\n",
host_spec->sleep_cookie_addr_lo);
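
[Editor's note] The cpu_to_le32() conversions above make the endianness contract explicit: each ring base is a 64-bit bus address that the firmware expects as separate little-endian low and high 32-bit words. A standalone sketch of the split (in kernel code each half would additionally pass through cpu_to_le32(); the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit DMA bus address into the low/high 32-bit halves that
 * firmware interfaces like the one above typically expect. */
static void split_bus_addr(uint64_t pbase, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)pbase;
	*hi = (uint32_t)(pbase >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_bus_addr(0x0000000123456789ULL, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* lo=0x23456789 hi=0x00000001 */
	return 0;
}
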
@@ -1482,7 +1480,7 @@ int mwifiex_dnld_dt_cfgdata(struct mwifiex_private *priv,
continue;
/* property header is 6 bytes, data must fit in cmd buffer */
- if (prop && prop->value && prop->length > 6 &&
+ if (prop->value && prop->length > 6 &&
prop->length <= MWIFIEX_SIZE_OF_CMD_BUFFER - S_DS_GEN) {
ret = mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA,
HostCmd_ACT_GEN_SET, 0,
@@ -1596,6 +1594,21 @@ static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_cmd_chan_region_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action)
+{
+ struct host_cmd_ds_chan_region_cfg *reg = &cmd->params.reg_cfg;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_CHAN_REGION_CFG);
+ cmd->size = cpu_to_le16(sizeof(*reg) + S_DS_GEN);
+
+ if (cmd_action == HostCmd_ACT_GEN_GET)
+ reg->action = cpu_to_le16(cmd_action);
+
+ return 0;
+}
+
static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
@@ -2136,6 +2149,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_CHAN_REGION_CFG:
+ ret = mwifiex_cmd_chan_region_cfg(priv, cmd_ptr, cmd_action);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2273,6 +2289,9 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
if (ret)
return -1;
}
+
+ mwifiex_send_cmd(priv, HostCmd_CMD_CHAN_REGION_CFG,
+ HostCmd_ACT_GEN_GET, 0, NULL, true);
}
/* get tx rate */
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index ccf54932e321..3344a26bed03 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -962,7 +962,7 @@ static int mwifiex_ret_uap_sta_list(struct mwifiex_private *priv,
int i;
struct mwifiex_sta_node *sta_node;
- for (i = 0; i < sta_list->sta_count; i++) {
+ for (i = 0; i < le16_to_cpu(sta_list->sta_count); i++) {
sta_node = mwifiex_get_sta_entry(priv, sta_info->mac);
if (unlikely(!sta_node))
continue;
@@ -1022,6 +1022,135 @@ static int mwifiex_ret_robust_coex(struct mwifiex_private *priv,
return 0;
}
+static struct ieee80211_regdomain *
+mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
+ u8 *buf, u16 buf_len)
+{
+ u16 num_chan = buf_len / 2;
+ struct ieee80211_regdomain *regd;
+ struct ieee80211_reg_rule *rule;
+ bool new_rule;
+ int regd_size, idx, freq, prev_freq = 0;
+ u32 bw, prev_bw = 0;
+ u8 chflags, prev_chflags = 0, valid_rules = 0;
+
+ if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
+ return ERR_PTR(-EINVAL);
+
+ regd_size = sizeof(struct ieee80211_regdomain) +
+ num_chan * sizeof(struct ieee80211_reg_rule);
+
+ regd = kzalloc(regd_size, GFP_KERNEL);
+ if (!regd)
+ return ERR_PTR(-ENOMEM);
+
+ for (idx = 0; idx < num_chan; idx++) {
+ u8 chan;
+ enum nl80211_band band;
+
+ chan = *buf++;
+ if (!chan) {
+ kfree(regd);
+ return NULL;
+ }
+ chflags = *buf++;
+ band = (chan <= 14) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ freq = ieee80211_channel_to_frequency(chan, band);
+ new_rule = false;
+
+ if (chflags & MWIFIEX_CHANNEL_DISABLED)
+ continue;
+
+ if (band == NL80211_BAND_5GHZ) {
+ if (!(chflags & MWIFIEX_CHANNEL_NOHT80))
+ bw = MHZ_TO_KHZ(80);
+ else if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
+ bw = MHZ_TO_KHZ(40);
+ else
+ bw = MHZ_TO_KHZ(20);
+ } else {
+ if (!(chflags & MWIFIEX_CHANNEL_NOHT40))
+ bw = MHZ_TO_KHZ(40);
+ else
+ bw = MHZ_TO_KHZ(20);
+ }
+
+ if (idx == 0 || prev_chflags != chflags || prev_bw != bw ||
+ freq - prev_freq > 20) {
+ valid_rules++;
+ new_rule = true;
+ }
+
+ rule = &regd->reg_rules[valid_rules - 1];
+
+ rule->freq_range.end_freq_khz = MHZ_TO_KHZ(freq + 10);
+
+ prev_chflags = chflags;
+ prev_freq = freq;
+ prev_bw = bw;
+
+ if (!new_rule)
+ continue;
+
+ rule->freq_range.start_freq_khz = MHZ_TO_KHZ(freq - 10);
+ rule->power_rule.max_eirp = DBM_TO_MBM(19);
+
+ if (chflags & MWIFIEX_CHANNEL_PASSIVE)
+ rule->flags = NL80211_RRF_NO_IR;
+
+ if (chflags & MWIFIEX_CHANNEL_DFS)
+ rule->flags = NL80211_RRF_DFS;
+
+ rule->freq_range.max_bandwidth_khz = bw;
+ }
+
+ regd->n_reg_rules = valid_rules;
+ regd->alpha2[0] = '9';
+ regd->alpha2[1] = '9';
+
+ return regd;
+}
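
[Editor's note] mwifiex_create_custom_regdomain() above coalesces consecutive channels with identical flags and bandwidth into a single regulatory rule spanning from the first channel's center frequency minus 10 MHz to the last channel's center frequency plus 10 MHz. A standalone worked example of that arithmetic for 2.4 GHz channels 1-11 (center frequency is 2407 + 5 * chan MHz for channels 1-13):

#include <stdio.h>

/* Merge 2.4 GHz channels 1..11 (identical flags assumed) into one rule,
 * mirroring the start/end frequency arithmetic in the function above. */
int main(void)
{
	int first_chan = 1, last_chan = 11;
	int first_freq = 2407 + 5 * first_chan; /* 2412 MHz */
	int last_freq  = 2407 + 5 * last_chan;  /* 2462 MHz */

	/* Each 20 MHz channel spans its center +/- 10 MHz, so the merged
	 * rule covers 2402..2472 MHz. */
	printf("rule: %d kHz - %d kHz\n",
	       (first_freq - 10) * 1000, (last_freq + 10) * 1000);
	return 0;
}
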
+
+static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_chan_region_cfg *reg = &resp->params.reg_cfg;
+ u16 action = le16_to_cpu(reg->action);
+ u16 tlv, tlv_buf_len, tlv_buf_left;
+ struct mwifiex_ie_types_header *head;
+ u8 *tlv_buf;
+
+ if (action != HostCmd_ACT_GEN_GET)
+ return 0;
+
+ tlv_buf = (u8 *)reg + sizeof(*reg);
+ tlv_buf_left = le16_to_cpu(resp->size) - S_DS_GEN - sizeof(*reg);
+
+ while (tlv_buf_left >= sizeof(*head)) {
+ head = (struct mwifiex_ie_types_header *)tlv_buf;
+ tlv = le16_to_cpu(head->type);
+ tlv_buf_len = le16_to_cpu(head->len);
+
+ if (tlv_buf_left < (sizeof(*head) + tlv_buf_len))
+ break;
+
+ switch (tlv) {
+ case TLV_TYPE_CHAN_ATTR_CFG:
+ mwifiex_dbg_dump(priv->adapter, CMD_D, "CHAN:",
+ (u8 *)head + sizeof(*head),
+ tlv_buf_len);
+ priv->adapter->regd =
+ mwifiex_create_custom_regdomain(priv,
+ (u8 *)head +
+ sizeof(*head), tlv_buf_len);
+ break;
+ }
+
+ tlv_buf += (sizeof(*head) + tlv_buf_len);
+ tlv_buf_left -= (sizeof(*head) + tlv_buf_len);
+ }
+
+ return 0;
+}
+
/*
* This function handles the command responses.
*
@@ -1239,6 +1368,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
break;
+ case HostCmd_CMD_CHAN_REGION_CFG:
+ ret = mwifiex_ret_chan_region_cfg(priv, resp);
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index a422f3306d4d..9df0c4dc06ed 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -25,6 +25,99 @@
#include "wmm.h"
#include "11n.h"
+#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE 12
+
+static int mwifiex_check_ibss_peer_capabilities(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *sta_ptr,
+ struct sk_buff *event)
+{
+ int evt_len, ele_len;
+ u8 *curr;
+ struct ieee_types_header *ele_hdr;
+ struct mwifiex_ie_types_mgmt_frame *tlv_mgmt_frame;
+ const struct ieee80211_ht_cap *ht_cap;
+ const struct ieee80211_vht_cap *vht_cap;
+
+ skb_pull(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
+ evt_len = event->len;
+ curr = event->data;
+
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilities:",
+ event->data, event->len);
+
+ skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
+
+ tlv_mgmt_frame = (void *)curr;
+ if (evt_len >= sizeof(*tlv_mgmt_frame) &&
+ le16_to_cpu(tlv_mgmt_frame->header.type) ==
+ TLV_TYPE_UAP_MGMT_FRAME) {
+ /* Advance curr past the fixed beacon fields to the first IE:
+ * timestamp 8 bytes, beacon interval 2 bytes and capability
+ * info 2 bytes, a 12-byte beacon header in total.
+ */
+ evt_len = le16_to_cpu(tlv_mgmt_frame->header.len);
+ curr += (sizeof(*tlv_mgmt_frame) + 12);
+ } else {
+ mwifiex_dbg(priv->adapter, MSG,
+ "management frame tlv not found!\n");
+ return 0;
+ }
+
+ while (evt_len >= sizeof(*ele_hdr)) {
+ ele_hdr = (struct ieee_types_header *)curr;
+ ele_len = ele_hdr->len;
+
+ if (evt_len < ele_len + sizeof(*ele_hdr))
+ break;
+
+ switch (ele_hdr->element_id) {
+ case WLAN_EID_HT_CAPABILITY:
+ sta_ptr->is_11n_enabled = true;
+ ht_cap = (void *)(ele_hdr + 2);
+ sta_ptr->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ mwifiex_dbg(priv->adapter, INFO,
+ "11n enabled!, max_amsdu : %d\n",
+ sta_ptr->max_amsdu);
+ break;
+
+ case WLAN_EID_VHT_CAPABILITY:
+ sta_ptr->is_11ac_enabled = true;
+ vht_cap = (void *)(ele_hdr + 2);
+ /* check VHT MAXMPDU capability */
+ switch (le32_to_cpu(vht_cap->vht_cap_info) & 0x3) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_12K;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_8K;
+ break;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ break;
+ default:
+ break;
+ }
+
+ mwifiex_dbg(priv->adapter, INFO,
+ "11ac enabled!, max_amsdu : %d\n",
+ sta_ptr->max_amsdu);
+ break;
+ default:
+ break;
+ }
+
+ curr += (ele_len + sizeof(*ele_hdr));
+ evt_len -= (ele_len + sizeof(*ele_hdr));
+ }
+
+ return 0;
+}
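
[Editor's note] Note how mwifiex_check_ibss_peer_capabilities() above only borrows the event skb: skb_pull() temporarily advances the data pointer past the 12-byte fixed event header so the IE walk sees the payload, and the matching skb_push() restores it before the skb is handed back. A sketch of that borrow pattern, assuming a generic handler (peek_event_payload and inspect_payload are hypothetical):

#include <linux/skbuff.h>

#define FIXED_HDR_LEN 12 /* matches MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE above */

static void peek_event_payload(struct sk_buff *skb,
			       void (*inspect_payload)(const u8 *, int))
{
	/* Temporarily advance skb->data past the fixed header ... */
	skb_pull(skb, FIXED_HDR_LEN);
	inspect_payload(skb->data, skb->len);
	/* ... and restore it so later consumers see the full event. */
	skb_push(skb, FIXED_HDR_LEN);
}
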
+
/*
* This function resets the connection state.
*
@@ -519,6 +612,8 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
* - EVENT_LINK_QUALITY
* - EVENT_PRE_BEACON_LOST
* - EVENT_IBSS_COALESCED
+ * - EVENT_IBSS_STA_CONNECT
+ * - EVENT_IBSS_STA_DISCONNECT
* - EVENT_WEP_ICV_ERR
* - EVENT_BW_CHANGE
* - EVENT_HOSTWAKE_STAIE
@@ -547,9 +642,11 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv,
int mwifiex_process_sta_event(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- int ret = 0;
+ int ret = 0, i;
u32 eventcause = adapter->event_cause;
u16 ctrl, reason_code;
+ u8 ibss_sta_addr[ETH_ALEN];
+ struct mwifiex_sta_node *sta_ptr;
switch (eventcause) {
case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
@@ -708,7 +805,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_EXT_SCAN_REPORT:
mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
- if (adapter->ext_scan && !priv->scan_aborting)
+ /* We intend to skip this event during suspend, but still
+ * handle it when the interface is down.
+ */
+ if (adapter->ext_scan && (!priv->scan_aborting ||
+ !netif_running(priv->netdev)))
ret = mwifiex_handle_event_ext_scan_report(priv,
adapter->event_skb->data);
@@ -771,6 +872,39 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
HostCmd_ACT_GEN_GET, 0, NULL, false);
break;
+ case EVENT_IBSS_STA_CONNECT:
+ ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
+ mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_CONNECT %pM\n",
+ ibss_sta_addr);
+ sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
+ if (sta_ptr && adapter->adhoc_11n_enabled) {
+ mwifiex_check_ibss_peer_capabilities(priv, sta_ptr,
+ adapter->event_skb);
+ if (sta_ptr->is_11n_enabled)
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ else
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ BA_STREAM_NOT_ALLOWED;
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ }
+
+ break;
+ case EVENT_IBSS_STA_DISCONNECT:
+ ether_addr_copy(ibss_sta_addr, adapter->event_body + 2);
+ mwifiex_dbg(adapter, EVENT, "event: IBSS_STA_DISCONNECT %pM\n",
+ ibss_sta_addr);
+ sta_ptr = mwifiex_get_sta_entry(priv, ibss_sta_addr);
+ if (sta_ptr && sta_ptr->is_11n_enabled) {
+ mwifiex_11n_del_rx_reorder_tbl_by_ta(priv,
+ ibss_sta_addr);
+ mwifiex_del_tx_ba_stream_tbl_by_ra(priv, ibss_sta_addr);
+ }
+ mwifiex_wmm_del_peer_ra_list(priv, ibss_sta_addr);
+ mwifiex_del_sta_entry(priv, ibss_sta_addr);
+ break;
case EVENT_ADDBA:
mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
@@ -869,6 +1003,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
mwifiex_bt_coex_wlan_param_update_event(priv,
adapter->event_skb);
break;
+ case EVENT_RXBA_SYNC:
+ dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
+ mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
+ adapter->event_skb->len -
+ sizeof(eventcause));
+ break;
default:
mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
eventcause);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index e06647a327b6..644f3a248741 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -574,7 +574,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
adapter->hs_activate_wait_q_woken = false;
- memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
+ memset(&hscfg, 0, sizeof(hscfg));
hscfg.is_invoke_hostcmd = true;
adapter->hs_enabling = true;
@@ -1138,7 +1138,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
{
struct mwifiex_ds_encrypt_key encrypt_key;
- memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
+ memset(&encrypt_key, 0, sizeof(encrypt_key));
encrypt_key.key_len = key_len;
encrypt_key.key_index = key_index;
@@ -1180,7 +1180,7 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
{
struct mwifiex_ver_ext ver_ext;
- memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
+ memset(&ver_ext, 0, sizeof(ver_ext));
ver_ext.version_str_sel = version_str_sel;
if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 86ff54296f39..d24eca34ac11 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -306,7 +306,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
mwifiex_process_multi_chan_event(priv, adapter->event_skb);
break;
-
+ case EVENT_RXBA_SYNC:
+ dev_dbg(adapter->dev, "EVENT: RXBA_SYNC\n");
+ mwifiex_11n_rxba_sync_event(priv, adapter->event_body,
+ adapter->event_skb->len -
+ sizeof(eventcause));
+ break;
default:
mwifiex_dbg(adapter, EVENT,
"event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 3bd04f52f369..8a20620fa119 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -611,7 +611,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
if (!adapter->priv_num)
return;
- if (user_rmmod) {
+ if (user_rmmod && !adapter->mfg_mode) {
#ifdef CONFIG_PM
if (adapter->is_suspended)
mwifiex_usb_resume(intf);
@@ -1026,6 +1026,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
tlen += sizeof(struct fw_header);
+ /* Command 7 doesn't have a data length field */
+ if (dnld_cmd == FW_CMD_7)
+ dlen = 0;
+
memcpy(fwdata->data, &firmware[tlen], dlen);
fwdata->seq_num = cpu_to_le32(fw_seqnum);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.h b/drivers/net/wireless/marvell/mwifiex/usb.h
index b4e9246bbcdc..30e8eb8c259d 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.h
+++ b/drivers/net/wireless/marvell/mwifiex/usb.h
@@ -46,11 +46,12 @@
#define USB8766_DEFAULT_FW_NAME "mrvl/usb8766_uapsta.bin"
#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
#define USB8801_DEFAULT_FW_NAME "mrvl/usb8801_uapsta.bin"
-#define USB8997_DEFAULT_FW_NAME "mrvl/usb8997_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME "mrvl/usbusb8997_combo_v4.bin"
#define FW_DNLD_TX_BUF_SIZE 620
#define FW_DNLD_RX_BUF_SIZE 2048
#define FW_HAS_LAST_BLOCK 0x00000004
+#define FW_CMD_7 0x00000007
#define FW_DATA_XMIT_SIZE \
(sizeof(struct fw_header) + dlen + sizeof(u32))
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 6681be0511c7..18fbb96a46e9 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -386,6 +386,7 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
"unknown public action frame category %d\n",
category);
}
+ break;
default:
mwifiex_dbg(priv->adapter, INFO,
"unknown mgmt frame subtype %#x\n", stype);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 57a80cfa39b1..a8bc064bc14f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -103,7 +103,7 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
- if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+ if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
trace_mt_rx(dev, rxwi, fce_info);
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
index 978e8a90b87f..270d126880c0 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.h
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.h
@@ -18,8 +18,6 @@
#include <asm/unaligned.h>
#include <linux/skbuff.h>
-#include "util.h"
-
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
@@ -79,9 +77,9 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
*/
info = flags |
- MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
- MT76_SET(MT_TXD_INFO_TYPE, type);
+ FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
+ FIELD_PREP(MT_TXD_INFO_TYPE, type);
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
return skb_put_padto(skb, round_up(skb->len, 4) + 4);
@@ -90,7 +88,7 @@ static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
static inline int
mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
{
- flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+ flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
}
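
[Editor's note] The mt7601u changes in this series replace the driver-private MT76_SET()/MT76_GET() helpers with the generic FIELD_PREP()/FIELD_GET() macros from <linux/bitfield.h>, which pack and extract a value according to a contiguous bitmask. A userspace re-creation of the semantics for illustration (the kernel macros additionally validate the mask at compile time; the DEMO_* masks are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's FIELD_PREP()/FIELD_GET() for a
 * non-zero contiguous mask; __builtin_ctz() finds the mask's shift. */
#define FIELD_PREP(mask, val) ((((uint32_t)(val)) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)  ((((uint32_t)(reg)) & (mask)) >> __builtin_ctz(mask))

#define DEMO_INFO_LEN  0x0000ffffU
#define DEMO_INFO_TYPE 0xc0000000U

int main(void)
{
	uint32_t info = FIELD_PREP(DEMO_INFO_LEN, 0x123) |
			FIELD_PREP(DEMO_INFO_TYPE, 2);

	printf("packed=0x%08x len=0x%x type=%u\n", info,
	       FIELD_GET(DEMO_INFO_LEN, info),
	       (unsigned)FIELD_GET(DEMO_INFO_TYPE, info));
	return 0;
}

This is also why the series can delete util.h at the end: the open-coded compile_ffs32()-based helpers duplicated what <linux/bitfield.h> already provides.
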
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
index 8d8ee0344f7b..da6faea092d6 100644
--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
@@ -45,8 +45,8 @@ mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
- val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
- MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+ FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) |
MT_EFUSE_CTRL_KICK;
mt76_wr(dev, MT_EFUSE_CTRL, val);
@@ -128,8 +128,8 @@ mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
if (!field_valid(nic_conf0 >> 8))
return;
- if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
- MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
dev_err(dev->dev,
"Error: device has more than 1 RX/TX stream!\n");
}
@@ -150,7 +150,7 @@ mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
- MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+ FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
return 0;
}
@@ -176,7 +176,7 @@ mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
u8 max_pwr;
val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
- max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+ max_pwr = FIELD_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
if (mt7601u_has_tssi(dev, eeprom)) {
mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 8fa78d7156be..44d46e25db80 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -108,8 +108,9 @@ static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
{
u32 val;
- val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
- MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+ val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+ FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT,
+ MT_USB_AGGR_SIZE_LIMIT) |
MT_USB_DMA_CFG_RX_BULK_EN |
MT_USB_DMA_CFG_TX_BULK_EN;
if (dev->in_max_packet == 512)
@@ -396,8 +397,9 @@ int mt7601u_init_hardware(struct mt7601u_dev *dev)
mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
- mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
- MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+ mt7601u_wr(dev, MT_TXOP_CTRL_CFG,
+ FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
+ FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58));
ret = mt7601u_eeprom_init(dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index e21c53ed09fb..3c576392ed89 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -19,13 +19,13 @@
static void
mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
{
- u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+ u8 idx = FIELD_GET(MT_TXWI_RATE_MCS, rate);
txrate->idx = 0;
txrate->flags = 0;
txrate->count = 1;
- switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+ switch (FIELD_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
case MT_PHY_TYPE_OFDM:
txrate->idx = idx + 4;
return;
@@ -47,7 +47,7 @@ mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
return;
}
- if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+ if (FIELD_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
if (rate & MT_TXWI_RATE_SGI)
@@ -125,9 +125,9 @@ u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
bw = 0;
}
- rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
- rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
- rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+ rateval = FIELD_PREP(MT_RXWI_RATE_MCS, rate_idx);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
+ rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
rateval |= MT_RXWI_RATE_SGI;
@@ -156,9 +156,9 @@ struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
- stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
- stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
- stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+ stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+ stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, val);
+ stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, val);
return stat;
}
@@ -270,7 +270,7 @@ void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
}
val &= ~MT_BEACON_TIME_CFG_INTVAL;
- val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+ val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
MT_BEACON_TIME_CFG_TIMER_EN |
MT_BEACON_TIME_CFG_SYNC_MODE |
MT_BEACON_TIME_CFG_TBTT_EN;
@@ -349,8 +349,8 @@ mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
u8 zmac[ETH_ALEN] = {};
u32 attr;
- attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
- MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+ attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+ FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
mt76_wr(dev, MT_WCID_ATTR(idx), attr);
@@ -382,15 +382,15 @@ void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
rcu_read_unlock();
mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
- MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+ FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
}
static void
mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
{
- u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+ u8 idx = FIELD_GET(MT_RXWI_RATE_MCS, rate);
- switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+ switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
case MT_PHY_TYPE_OFDM:
if (WARN_ON(idx >= 8))
idx = 0;
@@ -436,7 +436,7 @@ mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
u16 rate, int rssi)
{
dev->bcn_freq_off = rxwi->freq_off;
- dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+ dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate);
dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
}
@@ -458,7 +458,7 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
u16 rate = le16_to_cpu(rxwi->rate);
int rssi;
- len = MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+ len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
if (len < 10)
return 0;
@@ -542,8 +542,8 @@ int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
- val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
- MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+ val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+ FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
val &= ~MT_WCID_ATTR_PAIRWISE;
val |= MT_WCID_ATTR_PAIRWISE *
!!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index e70dd9523911..43ebd460ba86 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -15,7 +15,6 @@
#include "mt7601u.h"
#include "mac.h"
#include <linux/etherdevice.h>
-#include <linux/version.h>
static int mt7601u_start(struct ieee80211_hw *hw)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
index 91c4b3427965..dbdfb3f5c507 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mcu.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c
@@ -43,8 +43,8 @@ static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
u8 seq, enum mcu_cmd cmd)
{
WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
- MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
- MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+ FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) |
+ FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd)));
}
static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
@@ -100,13 +100,13 @@ static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
urb_status);
- if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
- MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+ if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
return 0;
- dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
- MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
- seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+ dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n",
+ FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+ seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
}
dev_err(dev->dev, "Error: %s timed out\n", __func__);
@@ -291,9 +291,9 @@ static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
u32 val;
int ret;
- reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
- MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
- MT76_SET(MT_TXD_INFO_LEN, len));
+ reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) |
+ FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+ FIELD_PREP(MT_TXD_INFO_LEN, len));
memcpy(buf.buf, &reg, sizeof(reg));
memcpy(buf.buf + sizeof(reg), data, len);
memset(buf.buf + sizeof(reg) + len, 0, 8);
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 428bd2f10b7b..c7ec40475a5f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -15,6 +15,7 @@
#ifndef MT7601U_H
#define MT7601U_H
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -24,7 +25,6 @@
#include <linux/debugfs.h>
#include "regs.h"
-#include "util.h"
#define MT_CALIBRATE_INTERVAL (4 * HZ)
@@ -299,7 +299,7 @@ bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
/* Compatibility with mt76 */
#define mt76_rmw_field(_dev, _reg, _field, _val) \
- mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
+ mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 1908af6add87..ca09a5d4305e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -41,11 +41,12 @@ mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
goto out;
}
- mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
- MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
- MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
- MT_RF_CSR_CFG_WR |
- MT_RF_CSR_CFG_KICK);
+ mt7601u_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
trace_rf_write(dev, bank, offset, value);
out:
mutex_unlock(&dev->reg_atomic_mutex);
@@ -74,17 +75,18 @@ mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
- mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
- MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
- MT_RF_CSR_CFG_KICK);
+ mt7601u_wr(dev, MT_RF_CSR_CFG,
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) |
+ MT_RF_CSR_CFG_KICK);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
val = mt7601u_rr(dev, MT_RF_CSR_CFG);
- if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
- MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
- ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+ if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
trace_rf_read(dev, bank, offset, ret);
}
out:
@@ -139,8 +141,8 @@ static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
}
mt7601u_wr(dev, MT_BBP_CSR_CFG,
- MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
- MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ FIELD_PREP(MT_BBP_CSR_CFG_VAL, val) |
+ FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
trace_bbp_write(dev, offset, val);
out:
@@ -163,7 +165,7 @@ static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
goto out;
mt7601u_wr(dev, MT_BBP_CSR_CFG,
- MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+ FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) |
MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
MT_BBP_CSR_CFG_READ);
@@ -171,8 +173,8 @@ static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
goto out;
val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
- if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
- ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+ if (FIELD_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+ ret = FIELD_GET(MT_BBP_CSR_CFG_VAL, val);
trace_bbp_read(dev, offset, ret);
}
out:
@@ -249,9 +251,9 @@ int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
/* bw40 */ { -2, 16, 34 }
}
};
- int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
- int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
- int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+ int bw = FIELD_GET(MT_RXWI_RATE_BW, rate);
+ int aux_lna = FIELD_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+ int lna_id = FIELD_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
int val;
if (lna_id) /* LNA id can be 0, 2, 3. */
@@ -259,7 +261,7 @@ int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
val = 8;
val -= lna[aux_lna][bw][lna_id];
- val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+ val -= FIELD_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
val -= dev->ee->lna_gain;
val -= dev->ee->rssi_offset[0];
@@ -939,7 +941,7 @@ static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
- curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+ curr_pwr = s6_to_int(FIELD_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
diff_pwr += curr_pwr;
val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
index afd8978e83fa..27a429d90cec 100644
--- a/drivers/net/wireless/mediatek/mt7601u/regs.h
+++ b/drivers/net/wireless/mediatek/mt7601u/regs.h
@@ -17,10 +17,6 @@
#include <linux/bitops.h>
-#ifndef GENMASK
-#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
-#endif
-
#define MT_ASIC_VERSION 0x0000
#define MT76XX_REV_E3 0x22
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index a0a33dc8f6bc..ad77bec1ba0f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -175,11 +175,12 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
ba_size = min_t(int, 63, ba_size);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
- txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+ txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
- txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
- MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
- sta->ht_cap.ampdu_density));
+ txwi->flags =
+ cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+ FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
+ sta->ht_cap.ampdu_density));
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->flags = 0;
}
@@ -188,7 +189,7 @@ mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
- pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+ pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id);
txwi->len_ctl = cpu_to_le16(pkt_len);
return txwi;
@@ -285,9 +286,9 @@ int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
WARN_ON(cw_min > 0xf);
WARN_ON(cw_max > 0xf);
- val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
- MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
- MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+ val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
+ FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
+ FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
/* TODO: based on user-controlled EnableTxBurst var vendor drv sets
* a really long txop on AC0 (see connect.c:2009) but only on
* connect? When not connected should be 0.
@@ -295,7 +296,7 @@ int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (!hw_q)
val |= 0x60;
else
- val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+ val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop);
mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
deleted file mode 100644
index b89140bf1210..000000000000
--- a/drivers/net/wireless/mediatek/mt7601u/util.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MT76_UTIL_H
-#define __MT76_UTIL_H
-
-/*
- * Power of two check: this will check
- * if the mask that has been given contains a contiguous set of bits.
- * Note that we cannot use the is_power_of_2() function since this
- * check must be done at compile-time.
- */
-#define is_power_of_two(x) ( !((x) & ((x)-1)) )
-#define low_bit_mask(x) ( ((x)-1) & ~(x) )
-#define is_valid_mask(x) is_power_of_two(1LU + (x) + low_bit_mask(x))
-
-/*
- * Macros to find first set bit in a variable.
- * These macros behave the same as the __ffs() functions but
- * the most important difference is that this is done at
- * compile-time rather than run-time.
- */
-#define compile_ffs2(__x) \
- __builtin_choose_expr(((__x) & 0x1), 0, 1)
-
-#define compile_ffs4(__x) \
- __builtin_choose_expr(((__x) & 0x3), \
- (compile_ffs2((__x))), \
- (compile_ffs2((__x) >> 2) + 2))
-
-#define compile_ffs8(__x) \
- __builtin_choose_expr(((__x) & 0xf), \
- (compile_ffs4((__x))), \
- (compile_ffs4((__x) >> 4) + 4))
-
-#define compile_ffs16(__x) \
- __builtin_choose_expr(((__x) & 0xff), \
- (compile_ffs8((__x))), \
- (compile_ffs8((__x) >> 8) + 8))
-
-#define compile_ffs32(__x) \
- __builtin_choose_expr(((__x) & 0xffff), \
- (compile_ffs16((__x))), \
- (compile_ffs16((__x) >> 16) + 16))
-
-/*
- * This macro will check the requirements for the FIELD{8,16,32} macros
- * The mask should be a constant non-zero contiguous set of bits which
- * does not exceed the given typelimit.
- */
-#define FIELD_CHECK(__mask) \
- BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
-
-#define MT76_SET(_mask, _val) \
- ({ \
- FIELD_CHECK(_mask); \
- (((u32) (_val)) << compile_ffs32(_mask)) & _mask; \
- })
-
-#define MT76_GET(_mask, _val) \
- ({ \
- FIELD_CHECK(_mask); \
- (u32) (((_val) & _mask) >> compile_ffs32(_mask)); \
- })
-
-#endif
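The deleted MT76_SET()/MT76_GET() helpers are superseded by the generic FIELD_PREP()/FIELD_GET() macros from <linux/bitfield.h>, which perform the same compile-time mask validation and shift computation, so the driver-local compile_ffs*() machinery can go. A minimal sketch of the equivalence, using a hypothetical field (EXAMPLE_FIELD is not from the patch):

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define EXAMPLE_FIELD	GENMASK(11, 4)	/* hypothetical contiguous 8-bit field */

/* Equivalent of the old MT76_SET(): shift a value into its field. */
static inline u32 example_pack(u32 reg, u32 val)
{
	return (reg & ~EXAMPLE_FIELD) | FIELD_PREP(EXAMPLE_FIELD, val);
}

/* Equivalent of the old MT76_GET(): mask a field out and shift it down. */
static inline u32 example_unpack(u32 reg)
{
	return FIELD_GET(EXAMPLE_FIELD, reg);
}

Both macros require the mask to be a compile-time constant, just as the old FIELD_CHECK() did.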
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7cf26c6124d1..6005e14213ca 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -831,8 +831,10 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
sizeof(struct usb_anchor),
GFP_KERNEL);
- if (!rt2x00dev->anchor)
+ if (!rt2x00dev->anchor) {
+ retval = -ENOMEM;
goto exit_free_reg;
+ }
init_usb_anchor(rt2x00dev->anchor);
return 0;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 4341d56805f8..1f54b8928d3c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -43,6 +43,7 @@
#define TX_TOTAL_PAGE_NUM 0xf8
#define TX_TOTAL_PAGE_NUM_8192E 0xf3
+#define TX_TOTAL_PAGE_NUM_8723B 0xf7
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
#define TX_PAGE_NUM_PUBQ 0xe7
#define TX_PAGE_NUM_HI_PQ 0x0c
@@ -54,6 +55,11 @@
#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
#define TX_PAGE_NUM_NORM_PQ_8192E 0x00
+#define TX_PAGE_NUM_PUBQ_8723B 0xe7
+#define TX_PAGE_NUM_HI_PQ_8723B 0x0c
+#define TX_PAGE_NUM_LO_PQ_8723B 0x02
+#define TX_PAGE_NUM_NORM_PQ_8723B 0x02
+
#define RTL_FW_PAGE_SIZE 4096
#define RTL8XXXU_FIRMWARE_POLL_MAX 1000
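As the comment above says, the per-queue reservations must add up to the total, and the new 8723B values do: 0x0c + 0x02 + 0x02 + 0xe7 = 0xf7 = TX_TOTAL_PAGE_NUM_8723B. A hypothetical compile-time check of that invariant (not part of the patch) could live in any init function:

/* Hypothetical sanity check: HPQ + LPQ + NPQ + PUBQ must equal the total. */
BUILD_BUG_ON(TX_PAGE_NUM_HI_PQ_8723B + TX_PAGE_NUM_LO_PQ_8723B +
	     TX_PAGE_NUM_NORM_PQ_8723B + TX_PAGE_NUM_PUBQ_8723B !=
	     TX_TOTAL_PAGE_NUM_8723B);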
@@ -1330,11 +1336,17 @@ struct rtl8xxxu_fileops {
u32 ramask, int sgi);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+ void (*fill_txdesc) (struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
char rx_desc_size;
- char has_s0s1;
+ u8 has_s0s1:1;
+ u8 has_tx_report:1;
+ u8 gen2_thermal_meter:1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
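Turning has_s0s1 into a 1-bit field lets the two new capability flags (has_tx_report, gen2_thermal_meter) share the same byte instead of adding a char each. A standalone sketch of the layout (names chosen for illustration):

struct example_caps {
	u8 has_s0s1:1;
	u8 has_tx_report:1;
	u8 gen2_thermal_meter:1;
	/* five bits to spare for future quirk flags */
};
/* sizeof(struct example_caps) == 1 on the usual ABIs */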
@@ -1421,6 +1433,14 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
+void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable);
+void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable);
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index 69d1a1453ede..f9e2050812ab 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -567,6 +567,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.writeN_block_size = 128,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -579,5 +580,9 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
.mactable = rtl8xxxu_gen1_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ,
};
#endif
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index 9a1994f49b7b..841522ee6379 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1501,10 +1501,12 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.set_tx_power = rtl8192e_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.writeN_block_size = 128,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 0,
+ .gen2_thermal_meter = 1,
.adda_1t_init = 0x0fc01616,
.adda_1t_path_on = 0x0fc01616,
.adda_2t_path_on_a = 0x0fc01616,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 686c551581b1..aef373028155 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -384,6 +384,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.writeN_block_size = 1024,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -396,4 +397,8 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
.mactable = rtl8xxxu_gen1_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ,
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 9d45afb0e3fd..6c086b5657e9 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1662,10 +1662,13 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.set_tx_power = rtl8723b_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.writeN_block_size = 1024,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
.has_s0s1 = 1,
+ .has_tx_report = 1,
+ .gen2_thermal_meter = 1,
.adda_1t_init = 0x01c00014,
.adda_1t_path_on = 0x01c00014,
.adda_2t_path_on_a = 0x01c00014,
@@ -1674,4 +1677,8 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.pbp_rx = PBP_PAGE_SIZE_256,
.pbp_tx = PBP_PAGE_SIZE_256,
.mactable = rtl8723b_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8723B,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8723B,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8723B,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8723B,
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 77048db3b32a..ca92022f9d50 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -894,7 +894,7 @@ int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
return retval;
}
-int
+static int
rtl8xxxu_gen1_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
{
struct device *dev = &priv->udev->dev;
@@ -3847,28 +3847,6 @@ void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
}
-static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
-
- if (priv->ep_tx_normal_queue)
- val8 = TX_PAGE_NUM_NORM_PQ;
- else
- val8 = 0;
-
- rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
- val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
-
- if (priv->ep_tx_high_queue)
- val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
- if (priv->ep_tx_low_queue)
- val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
- rtl8xxxu_write32(priv, REG_RQPN, val32);
-}
-
static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
{
struct rtl8xxxu_fileops *fops = priv->fops;
@@ -3929,12 +3907,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
goto exit;
}
- if (!macpower) {
- if (priv->fops->total_page_num)
- rtl8xxxu_init_queue_reserved_page(priv);
- else
- rtl8xxxu_old_init_queue_reserved_page(priv);
- }
+ if (!macpower)
+ rtl8xxxu_init_queue_reserved_page(priv);
ret = rtl8xxxu_init_queue_priority(priv);
dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
@@ -3947,11 +3921,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
ret = rtl8xxxu_download_firmware(priv);
- dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
+ dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret);
if (ret)
goto exit;
ret = rtl8xxxu_start_firmware(priv);
- dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret);
+ dev_dbg(dev, "%s: start_firmware %i\n", __func__, ret);
if (ret)
goto exit;
@@ -3994,13 +3968,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Set TX buffer boundary
*/
- if (priv->rtl_chip == RTL8192E)
- val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
- else
- val8 = TX_TOTAL_PAGE_NUM + 1;
-
- if (priv->rtl_chip == RTL8723B)
- val8 -= 1;
+ val8 = priv->fops->total_page_num + 1;
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8);
@@ -4032,10 +4000,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
priv->fops->usb_quirks(priv);
/*
- * Presumably this is for 8188EU as well
- * Enable TX report and TX report timer
+ * Enable TX report and TX report timer for 8723bu/8188eu/...
*/
- if (priv->rtl_chip == RTL8723B) {
+ if (priv->fops->has_tx_report) {
val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
@@ -4228,7 +4195,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* This should enable thermal meter
*/
- if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
+ if (priv->fops->gen2_thermal_meter)
rtl8xxxu_write_rfreg(priv,
RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
else
@@ -4783,6 +4750,113 @@ static void rtl8xxxu_dump_action(struct device *dev,
}
}
+/*
+ * Fill in v1 (gen1) specific TX descriptor bits.
+ * This format is used on 8188cu/8192cu/8723au
+ */
+void
+rtl8xxxu_fill_txdesc_v1(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable)
+{
+ u16 seq_number;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ tx_desc->txdw5 = cpu_to_le32(rate);
+
+ if (ieee80211_is_data(hdr->frame_control))
+ tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+
+ tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
+
+ if (ampdu_enable)
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
+ else
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc->txdw5 = cpu_to_le32(rate);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+ tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+
+ if (short_preamble)
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+
+ if (sgi)
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Use RTS rate 24M - does mac80211 tell
+ * us which to use?
+ */
+ tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
+ TXDESC32_RTS_RATE_SHIFT);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ }
+}
+
+/*
+ * Fill in v2 (gen2) specific TX descriptor bits.
+ * This format is used on 8192eu/8723bu
+ */
+void
+rtl8xxxu_fill_txdesc_v2(struct ieee80211_hdr *hdr,
+ struct rtl8xxxu_txdesc32 *tx_desc32, u32 rate,
+ u16 rate_flag, bool sgi, bool short_preamble,
+ bool ampdu_enable)
+{
+ struct rtl8xxxu_txdesc40 *tx_desc40;
+ u16 seq_number;
+
+ tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc32;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ tx_desc40->txdw4 = cpu_to_le32(rate);
+ if (ieee80211_is_data(hdr->frame_control)) {
+ tx_desc40->txdw4 |= cpu_to_le32(0x1f <<
+ TXDESC40_DATA_RATE_FB_SHIFT);
+ }
+
+ tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
+
+ if (ampdu_enable)
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
+ else
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc40->txdw4 = cpu_to_le32(rate);
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
+ tx_desc40->txdw4 |=
+ cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
+ tx_desc40->txdw4 |= cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
+ }
+
+ if (short_preamble)
+ tx_desc40->txdw5 |= cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
+
+ if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Use RTS rate 24M - does mac80211 tell
+ * us which to use?
+ */
+ tx_desc40->txdw4 |= cpu_to_le32(DESC_RATE_24M <<
+ TXDESC40_RTS_RATE_SHIFT);
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+ tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
+ }
+}
+
static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -4792,7 +4866,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_txdesc32 *tx_desc;
- struct rtl8xxxu_txdesc40 *tx_desc40;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
@@ -4803,7 +4876,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
u16 rate_flag = tx_info->control.rates[0].flags;
int tx_desc_size = priv->fops->tx_desc_size;
int ret;
- bool usedesc40, ampdu_enable;
+ bool usedesc40, ampdu_enable, sgi = false, short_preamble = false;
if (skb_headroom(skb) < tx_desc_size) {
dev_warn(dev,
@@ -4881,107 +4954,26 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
}
}
- if (rate_flag & IEEE80211_TX_RC_MCS)
+ if (rate_flag & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_mgmt(hdr->frame_control))
rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
else
rate = tx_rate->hw_value;
- seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- if (!usedesc40) {
- tx_desc->txdw5 = cpu_to_le32(rate);
-
- if (ieee80211_is_data(hdr->frame_control))
- tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
-
- tx_desc->txdw3 =
- cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
-
- if (ampdu_enable)
- tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
- else
- tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
-
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
- tx_desc->txdw5 |=
- cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
- tx_desc->txdw5 |=
- cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
- }
-
- if (ieee80211_is_data_qos(hdr->frame_control))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+ if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
+ (ieee80211_is_data_qos(hdr->frame_control) &&
+ sta && sta->ht_cap.cap &
+ (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20)))
+ sgi = true;
- if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
- (sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+ if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
+ (sta && vif && vif->bss_conf.use_short_preamble))
+ short_preamble = true;
- if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
- (ieee80211_is_data_qos(hdr->frame_control) &&
- sta && sta->ht_cap.cap &
- (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
- tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
- }
-
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /*
- * Use RTS rate 24M - does the mac80211 tell
- * us which to use?
- */
- tx_desc->txdw4 |=
- cpu_to_le32(DESC_RATE_24M <<
- TXDESC32_RTS_RATE_SHIFT);
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
- tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
- }
- } else {
- tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
-
- tx_desc40->txdw4 = cpu_to_le32(rate);
- if (ieee80211_is_data(hdr->frame_control)) {
- tx_desc->txdw4 |=
- cpu_to_le32(0x1f <<
- TXDESC40_DATA_RATE_FB_SHIFT);
- }
-
- tx_desc40->txdw9 =
- cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
-
- if (ampdu_enable)
- tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
- else
- tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
-
- if (ieee80211_is_mgmt(hdr->frame_control)) {
- tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
- tx_desc40->txdw3 |=
- cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
- tx_desc40->txdw4 |=
- cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
- tx_desc40->txdw4 |=
- cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
- }
-
- if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
- (sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc40->txdw5 |=
- cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
- /*
- * Use RTS rate 24M - does the mac80211 tell
- * us which to use?
- */
- tx_desc->txdw4 |=
- cpu_to_le32(DESC_RATE_24M <<
- TXDESC40_RTS_RATE_SHIFT);
- tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
- tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
- }
- }
+ priv->fops->fill_txdesc(hdr, tx_desc, rate, rate_flag,
+ sgi, short_preamble, ampdu_enable);
rtl8xxxu_calc_tx_desc_csum(tx_desc);
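The indirect call above replaces the old in-line usedesc40 branching: each chip registers the handler matching its descriptor format, so the TX hot path no longer tests the descriptor size itself. A stripped-down sketch of the dispatch pattern, with hypothetical names:

/* Hypothetical minimal form of the per-chip dispatch used here. */
struct chip_ops {
	void (*fill_txdesc)(u32 rate);	/* one slot per descriptor format */
};

static void fill_v1(u32 rate) { /* 32-byte descriptor layout */ }
static void fill_v2(u32 rate) { /* 40-byte descriptor layout */ }

static const struct chip_ops gen1_ops = { .fill_txdesc = fill_v1 };
static const struct chip_ops gen2_ops = { .fill_txdesc = fill_v2 };

/* hot path: ops->fill_txdesc(rate); -- no format test needed */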
@@ -5704,7 +5696,7 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- dev_info(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_START\n", __func__);
ampdu_factor = sta->ht_cap.ampdu_factor;
ampdu_density = sta->ht_cap.ampdu_density;
rtl8xxxu_set_ampdu_factor(priv, ampdu_factor);
@@ -5714,21 +5706,21 @@ rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ampdu_factor, ampdu_density);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
- dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH\n", __func__);
rtl8xxxu_set_ampdu_factor(priv, 0);
rtl8xxxu_set_ampdu_min_space(priv, 0);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- dev_info(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_TX_STOP_FLUSH_CONT\n",
__func__);
rtl8xxxu_set_ampdu_factor(priv, 0);
rtl8xxxu_set_ampdu_min_space(priv, 0);
break;
case IEEE80211_AMPDU_RX_START:
- dev_info(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_START\n", __func__);
break;
case IEEE80211_AMPDU_RX_STOP:
- dev_info(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
+ dev_dbg(dev, "%s: IEEE80211_AMPDU_RX_STOP\n", __func__);
break;
default:
break;
@@ -5947,7 +5939,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
struct ieee80211_hw *hw;
struct usb_device *udev;
struct ieee80211_supported_band *sband;
- int ret = 0;
+ int ret;
int untested = 1;
udev = usb_get_dev(interface_to_usbdev(interface));
@@ -5971,6 +5963,18 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (id->idProduct == 0x1004)
untested = 0;
break;
+ case 0x20f4:
+ if (id->idProduct == 0x648b)
+ untested = 0;
+ break;
+ case 0x2001:
+ if (id->idProduct == 0x3308)
+ untested = 0;
+ break;
+ case 0x2357:
+ if (id->idProduct == 0x0109)
+ untested = 0;
+ break;
default:
break;
}
@@ -5987,6 +5991,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw = ieee80211_alloc_hw(sizeof(struct rtl8xxxu_priv), &rtl8xxxu_ops);
if (!hw) {
ret = -ENOMEM;
+ priv = NULL;
goto exit;
}
@@ -6035,6 +6040,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
}
ret = rtl8xxxu_init_device(hw);
+ if (ret)
+ goto exit;
hw->wiphy->max_scan_ssids = 1;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -6085,9 +6092,20 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto exit;
}
+ return 0;
+
exit:
- if (ret < 0)
- usb_put_dev(udev);
+ usb_set_intfdata(interface, NULL);
+
+ if (priv) {
+ kfree(priv->fw_data);
+ mutex_destroy(&priv->usb_buf_mutex);
+ mutex_destroy(&priv->h2c_mutex);
+ }
+ usb_put_dev(udev);
+
+ ieee80211_free_hw(hw);
+
return ret;
}
@@ -6111,6 +6129,11 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
mutex_destroy(&priv->usb_buf_mutex);
mutex_destroy(&priv->h2c_mutex);
+ if (priv->udev->state != USB_STATE_NOTATTACHED) {
+ dev_info(&priv->udev->dev,
+ "Device still attached, trying to reset\n");
+ usb_reset_device(priv->udev);
+ }
usb_put_dev(priv->udev);
ieee80211_free_hw(hw);
}
@@ -6124,6 +6147,9 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8723au_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192eu_fops},
+/* Tested by Myckel Habets */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192eu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723bu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
@@ -6140,6 +6166,12 @@ static struct usb_device_id dev_table[] = {
/* Tested by Andrea Merello */
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Jocelyn Mayer */
+{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Stefano Bravi */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
/* Currently untested 8188 series devices */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
@@ -6187,8 +6219,6 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x13d3, 0x3357, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3308, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330b, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0x4902, 0xff, 0xff, 0xff),
@@ -6199,8 +6229,6 @@ static struct usb_device_id dev_table[] = {
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xed17, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x648b, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4855, 0x0090, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x4856, 0x0091, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 921c5653fff2..3555a2f24285 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -213,10 +213,66 @@
#define REG_HMBOX_EXT_1 0x008a
#define REG_HMBOX_EXT_2 0x008c
#define REG_HMBOX_EXT_3 0x008e
+
/* Interrupt registers for 8192e/8723bu/8812 */
#define REG_HIMR0 0x00b0
+#define IMR0_TXCCK BIT(30) /* TXRPT interrupt when CCX bit
+ of the packet is set */
+#define IMR0_PSTIMEOUT BIT(29) /* Power Save Time Out Int */
+#define IMR0_GTINT4 BIT(28) /* Set when GTIMER4 expires */
+#define IMR0_GTINT3 BIT(27) /* Set when GTIMER3 expires */
+#define IMR0_TBDER BIT(26) /* Transmit Beacon0 Error */
+#define IMR0_TBDOK BIT(25) /* Transmit Beacon0 OK */
+#define IMR0_TSF_BIT32_TOGGLE BIT(24) /* TSF Timer BIT32 toggle
+ indication interrupt */
+#define IMR0_BCNDMAINT0 BIT(20) /* Beacon DMA Interrupt 0 */
+#define IMR0_BCNDERR0 BIT(16) /* Beacon Queue DMA Error 0 */
+#define IMR0_HSISR_IND_ON_INT BIT(15) /* HSISR Indicator (HSIMR &
+ HSISR is true) */
+#define IMR0_BCNDMAINT_E BIT(14) /* Beacon DMA Interrupt
+ Extension for Win7 */
+#define IMR0_ATIMEND BIT(12) /* CTWindow End or
+ ATIM Window End */
+#define IMR0_HISR1_IND_INT BIT(11) /* HISR1 Indicator
+ (HISR1 & HIMR1 is true) */
+#define IMR0_C2HCMD BIT(10) /* CPU to Host Command INT
+ Status, Write 1 to clear */
+#define IMR0_CPWM2 BIT(9) /* CPU power Mode exchange INT
+ Status, Write 1 to clear */
+#define IMR0_CPWM BIT(8) /* CPU power Mode exchange INT
+ Status, Write 1 to clear */
+#define IMR0_HIGHDOK BIT(7) /* High Queue DMA OK */
+#define IMR0_MGNTDOK BIT(6) /* Management Queue DMA OK */
+#define IMR0_BKDOK BIT(5) /* AC_BK DMA OK */
+#define IMR0_BEDOK BIT(4) /* AC_BE DMA OK */
+#define IMR0_VIDOK BIT(3) /* AC_VI DMA OK */
+#define IMR0_VODOK BIT(2) /* AC_VO DMA OK */
+#define IMR0_RDU BIT(1) /* Rx Descriptor Unavailable */
+#define IMR0_ROK BIT(0) /* Receive DMA OK */
#define REG_HISR0 0x00b4
#define REG_HIMR1 0x00b8
+#define IMR1_BCNDMAINT7 BIT(27) /* Beacon DMA Interrupt 7 */
+#define IMR1_BCNDMAINT6 BIT(26) /* Beacon DMA Interrupt 6 */
+#define IMR1_BCNDMAINT5 BIT(25) /* Beacon DMA Interrupt 5 */
+#define IMR1_BCNDMAINT4 BIT(24) /* Beacon DMA Interrupt 4 */
+#define IMR1_BCNDMAINT3 BIT(23) /* Beacon DMA Interrupt 3 */
+#define IMR1_BCNDMAINT2 BIT(22) /* Beacon DMA Interrupt 2 */
+#define IMR1_BCNDMAINT1 BIT(21) /* Beacon DMA Interrupt 1 */
+#define IMR1_BCNDERR7 BIT(20) /* Beacon Queue DMA Err Int 7 */
+#define IMR1_BCNDERR6 BIT(19) /* Beacon Queue DMA Err Int 6 */
+#define IMR1_BCNDERR5 BIT(18) /* Beacon Queue DMA Err Int 5 */
+#define IMR1_BCNDERR4 BIT(17) /* Beacon Queue DMA Err Int 4 */
+#define IMR1_BCNDERR3 BIT(16) /* Beacon Queue DMA Err Int 3 */
+#define IMR1_BCNDERR2 BIT(15) /* Beacon Queue DMA Err Int 2 */
+#define IMR1_BCNDERR1 BIT(14) /* Beacon Queue DMA Err Int 1 */
+#define IMR1_ATIMEND_E BIT(13) /* ATIM Window End Extension
+ for Win7 */
+#define IMR1_TXERR BIT(11) /* Tx Error Flag Int Status,
+ write 1 to clear */
+#define IMR1_RXERR BIT(10) /* Rx Error Flag Int Status,
+ write 1 to clear */
+#define IMR1_TXFOVW BIT(9) /* Transmit FIFO Overflow */
+#define IMR1_RXFOVW BIT(8) /* Receive FIFO Overflow */
#define REG_HISR1 0x00bc
/* Host suspend counter on FPGA platform */
@@ -780,6 +836,10 @@
#define FPGA_RF_MODE_OFDM BIT(25)
#define REG_FPGA0_TX_INFO 0x0804
+#define FPGA0_TX_INFO_OFDM_PATH_A BIT(0)
+#define FPGA0_TX_INFO_OFDM_PATH_B BIT(1)
+#define FPGA0_TX_INFO_OFDM_PATH_C BIT(2)
+#define FPGA0_TX_INFO_OFDM_PATH_D BIT(3)
#define REG_FPGA0_PSD_FUNC 0x0808
#define REG_FPGA0_TX_GAIN 0x080c
#define REG_FPGA0_RF_TIMING1 0x0810
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 41f77f8a309e..7aee5ebb147d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -1135,7 +1135,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->mode = WIRELESS_MODE_AC_24G;
}
- if (vif->type == NL80211_IFTYPE_STATION && sta)
+ if (vif->type == NL80211_IFTYPE_STATION)
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0);
rcu_read_unlock();
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 3524441fd516..6ee6bf8e7eaf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -345,9 +345,9 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
return &rtl_regdom_no_midband;
case COUNTRY_CODE_IC:
return &rtl_regdom_11;
- case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_TELEC_NETGEAR:
return &rtl_regdom_60_64;
+ case COUNTRY_CODE_ETSI:
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
@@ -406,6 +406,8 @@ static u8 channel_plan_to_country_code(u8 channelplan)
return COUNTRY_CODE_WORLD_WIDE_13;
case 0x22:
return COUNTRY_CODE_IC;
+ case 0x25:
+ return COUNTRY_CODE_ETSI;
case 0x32:
return COUNTRY_CODE_TELEC_NETGEAR;
case 0x41:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 47e32cb0ec1a..e7b11b40e68d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -280,7 +280,7 @@ static struct rtl_mod_params rtl88ee_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl88ee_hal_cfg = {
+static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 4780bdc63b2b..87aa209ae325 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -258,7 +258,7 @@ static struct rtl_mod_params rtl92ce_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl92ce_hal_cfg = {
+static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index b0f632462335..57205514801c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -1757,7 +1757,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
return;
if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
- return;
+ goto exit;
_rtl92de_efuse_update_chip_version(hw);
_rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
@@ -1790,6 +1790,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
break;
}
rtlefuse->txpwr_fromeprom = true;
+exit:
kfree(hwinfo);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d334d2a5ea63..2a4810d32182 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -588,7 +588,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
agctab_array_table[i],
agctab_array_table[i + 1]);
}
@@ -604,7 +604,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
agctab_array_table[i],
agctab_array_table[i + 1]);
}
@@ -620,7 +620,7 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
* setting. */
udelay(1);
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_5GArray_Table[0] is %ul Rtl819XPHY_REGArray[1] is %ul\n",
+ "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
agctab_5garray_table[i],
agctab_5garray_table[i + 1]);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index c6e09a19de1a..0538a4d09568 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -262,7 +262,7 @@ static struct rtl_mod_params rtl92de_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl92de_hal_cfg = {
+static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index c31c6bfb536d..ac299cbe59b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -262,7 +262,7 @@ static struct rtl_mod_params rtl92ee_mod_params = {
.debug = DBG_EMERG,
};
-static struct rtl_hal_cfg rtl92ee_hal_cfg = {
+static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 31baca41ac2f..5e8e02d5de8a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -306,7 +306,7 @@ static struct rtl_mod_params rtl92se_mod_params = {
/* Memory R/W bursting will cause a system hang/crash
* for 92se, so we don't read back after every write action */
-static struct rtl_hal_cfg rtl92se_hal_cfg = {
+static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index b88c7ee72dbf..ba30efc2d195 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1654,7 +1654,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
rtlefuse->autoload_failflag, hwinfo);
if (rtlhal->oem_id != RT_CID_DEFAULT)
- return;
+ goto exit;
switch (rtlefuse->eeprom_oemid) {
case EEPROM_CID_DEFAULT:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index ff49a8c0ff61..89c828ad89f4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -276,7 +276,7 @@ static struct rtl_mod_params rtl8723e_mod_params = {
.disable_watchdog = false,
};
-static struct rtl_hal_cfg rtl8723e_hal_cfg = {
+static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 2101793438ed..20b53f035483 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -276,7 +276,7 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.ant_sel = 0,
};
-static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 4159f9b14db6..22f687b1f133 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -316,7 +316,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
.disable_watchdog = 0,
};
-static struct rtl_hal_cfg rtl8821ae_hal_cfg = {
+static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 4be0409308cb..b5525a38264b 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -309,3 +309,32 @@ out:
kfree(acx);
return ret;
}
+
+int wl18xx_acx_time_sync_cfg(struct wl1271 *wl)
+{
+ struct acx_time_sync_cfg *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx time sync cfg: mode %d, addr: %pM",
+ wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC],
+ wl->zone_master_mac_addr);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->sync_mode = wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC];
+ memcpy(acx->zone_mac_addr, wl->zone_master_mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_configure(wl, ACX_TIME_SYNC_CFG,
+ acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx time sync cfg failed: %d", ret);
+ goto out;
+ }
+out:
+ kfree(acx);
+ return ret;
+}
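A hypothetical caller would record the zone master address (and have WL18XX_CONF_SG_TIME_SYNC configured) before issuing the command; a sketch under that assumption, where zone_mac is an illustrative variable:

/* Hypothetical usage: record the zone master, then push the config. */
memcpy(wl->zone_master_mac_addr, zone_mac, ETH_ALEN);
ret = wl18xx_acx_time_sync_cfg(wl);
if (ret < 0)
	wl1271_warning("time sync setup failed: %d", ret);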
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 342a2993ef98..2edbbbfd8421 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -37,6 +37,7 @@ enum {
ACX_RX_BA_FILTER = 0x0058,
ACX_AP_SLEEP_CFG = 0x0059,
ACX_DYNAMIC_TRACES_CFG = 0x005A,
+ ACX_TIME_SYNC_CFG = 0x005B,
};
/* numbers of bits the length field takes (add 1 for the actual number) */
@@ -388,6 +389,17 @@ struct acx_dynamic_fw_traces_cfg {
__le32 dynamic_fw_traces;
} __packed;
+/*
+ * ACX_TIME_SYNC_CFG
+ * configure the time sync parameters
+ */
+struct acx_time_sync_cfg {
+ struct acx_header header;
+ u8 sync_mode;
+ u8 zone_mac_addr[ETH_ALEN];
+ u8 padding[1];
+} __packed;
+
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size);
@@ -402,5 +414,6 @@ int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
int wl18xx_acx_ap_sleep(struct wl1271 *wl);
int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
+int wl18xx_acx_time_sync_cfg(struct wl1271 *wl);
#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 2c5df43b8ed9..b36ce185c9f2 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -22,6 +22,7 @@
#include <net/genetlink.h>
#include "event.h"
#include "scan.h"
+#include "conf.h"
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/vendor_cmd.h"
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 9e1f2d9c9865..ef6c15b952cc 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -4986,7 +4986,6 @@ static int wl12xx_sta_add(struct wl1271 *wl,
return ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
- wl_sta->wl = wl;
hlid = wl_sta->hlid;
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 6d24040889b8..0ed526e4c0df 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -84,7 +84,7 @@ struct wilink_familiy_data {
char name[8];
};
-const struct wilink_familiy_data *wilink_data;
+static const struct wilink_familiy_data *wilink_data;
static const struct wilink_familiy_data wl18xx_data = {
.name = "wl18xx",
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 8f28aa02230c..1827546ba807 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -501,6 +501,9 @@ struct wl1271 {
/* dynamic fw traces */
u32 dynamic_fw_traces;
+
+ /* time sync zone master */
+ u8 zone_master_mac_addr[ETH_ALEN];
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 242b4e37b94c..0277ae508b8a 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -347,7 +347,6 @@ struct wl1271_station {
* Used in both AP and STA mode.
*/
u64 total_freed_pkts;
- struct wl1271 *wl;
};
struct wl12xx_vif {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 82d94f83b6b4..932f3f81e8cf 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1258,7 +1258,9 @@ static int wl3501_reset(struct net_device *dev)
{
struct wl3501_card *this = netdev_priv(dev);
int rc = -ENODEV;
+ unsigned long flags;
+ spin_lock_irqsave(&this->lock, flags);
wl3501_block_interrupt(this);
if (wl3501_init_firmware(this)) {
@@ -1280,20 +1282,17 @@ static int wl3501_reset(struct net_device *dev)
pr_debug("%s: device reset", dev->name);
rc = 0;
out:
+ spin_unlock_irqrestore(&this->lock, flags);
return rc;
}
static void wl3501_tx_timeout(struct net_device *dev)
{
- struct wl3501_card *this = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
- unsigned long flags;
int rc;
stats->tx_errors++;
- spin_lock_irqsave(&this->lock, flags);
rc = wl3501_reset(dev);
- spin_unlock_irqrestore(&this->lock, flags);
if (rc)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index a912dc051111..c5effd6c6be9 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -193,7 +193,7 @@ static int upload_code(struct usb_device *udev,
0, 0, p, sizeof(ret), 5000 /* ms */);
if (r != sizeof(ret)) {
dev_err(&udev->dev,
- "control request firmeware confirmation failed."
+ "control request firmware confirmation failed."
" Return value %d\n", r);
if (r >= 0)
r = -ENODEV;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 3a562683603c..b38fb2cf3364 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -292,8 +292,6 @@ struct xenvif {
#endif
struct xen_netif_ctrl_back_ring ctrl;
- struct task_struct *ctrl_task;
- wait_queue_head_t ctrl_wq;
unsigned int ctrl_irq;
/* Miscellaneous private stuff. */
@@ -359,7 +357,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
-int xenvif_ctrl_kthread(void *data);
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
@@ -412,8 +410,4 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
-#ifdef CONFIG_DEBUG_FS
-void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
-#endif
-
#endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index e8c5dddc54ba..613bac057650 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -360,74 +360,6 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
-#ifdef CONFIG_DEBUG_FS
-void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
-{
- unsigned int i;
-
- switch (vif->hash.alg) {
- case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
- seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
- break;
-
- case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
- seq_puts(m, "Hash Algorithm: NONE\n");
- /* FALLTHRU */
- default:
- return;
- }
-
- if (vif->hash.flags) {
- seq_puts(m, "\nHash Flags:\n");
-
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
- seq_puts(m, "- IPv4\n");
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
- seq_puts(m, "- IPv4 + TCP\n");
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
- seq_puts(m, "- IPv6\n");
- if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
- seq_puts(m, "- IPv6 + TCP\n");
- }
-
- seq_puts(m, "\nHash Key:\n");
-
- for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
- unsigned int j, n;
-
- n = 8;
- if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
- n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
-
- seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
-
- for (j = 0; j < n; j++, i++)
- seq_printf(m, "%02x ", vif->hash.key[i]);
-
- seq_puts(m, "\n");
- }
-
- if (vif->hash.size != 0) {
- seq_puts(m, "\nHash Mapping:\n");
-
- for (i = 0; i < vif->hash.size; ) {
- unsigned int j, n;
-
- n = 8;
- if (i + n >= vif->hash.size)
- n = vif->hash.size - i;
-
- seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
-
- for (j = 0; j < n; j++, i++)
- seq_printf(m, "%4u ", vif->hash.mapping[i]);
-
- seq_puts(m, "\n");
- }
- }
-}
-#endif /* CONFIG_DEBUG_FS */
-
void xenvif_init_hash(struct xenvif *vif)
{
if (xenvif_hash_cache_size == 0)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 83deeebfc2d1..fb50c6d5f6c3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,15 +128,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
-{
- struct xenvif *vif = dev_id;
-
- wake_up(&vif->ctrl_wq);
-
- return IRQ_HANDLED;
-}
-
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
@@ -570,8 +561,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
struct net_device *dev = vif->dev;
void *addr;
struct xen_netif_ctrl_sring *shared;
- struct task_struct *task;
- int err = -ENOMEM;
+ int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
&ring_ref, 1, &addr);
@@ -581,11 +571,7 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
shared = (struct xen_netif_ctrl_sring *)addr;
BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
- init_waitqueue_head(&vif->ctrl_wq);
-
- err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
- xenvif_ctrl_interrupt,
- 0, dev->name, vif);
+ err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
if (err < 0)
goto err_unmap;
@@ -593,19 +579,13 @@ int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
xenvif_init_hash(vif);
- task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
- "%s-control", dev->name);
- if (IS_ERR(task)) {
- pr_warn("Could not allocate kthread for %s\n", dev->name);
- err = PTR_ERR(task);
+ err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
+ IRQF_ONESHOT, "xen-netback-ctrl", vif);
+ if (err) {
+ pr_warn("Could not setup irq handler for %s\n", dev->name);
goto err_deinit;
}
- get_task_struct(task);
- vif->ctrl_task = task;
-
- wake_up_process(vif->ctrl_task);
-
return 0;
err_deinit:
@@ -774,12 +754,6 @@ void xenvif_disconnect_data(struct xenvif *vif)
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
- if (vif->ctrl_task) {
- kthread_stop(vif->ctrl_task);
- put_task_struct(vif->ctrl_task);
- vif->ctrl_task = NULL;
- }
-
if (vif->ctrl_irq) {
xenvif_deinit_hash(vif);
unbind_from_irqhandler(vif->ctrl_irq, vif);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index edbae0b1e8f0..3d0c989384b5 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -2359,24 +2359,14 @@ static bool xenvif_ctrl_work_todo(struct xenvif *vif)
return 0;
}
-int xenvif_ctrl_kthread(void *data)
+irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
struct xenvif *vif = data;
- for (;;) {
- wait_event_interruptible(vif->ctrl_wq,
- xenvif_ctrl_work_todo(vif) ||
- kthread_should_stop());
- if (kthread_should_stop())
- break;
-
- while (xenvif_ctrl_work_todo(vif))
- xenvif_ctrl_action(vif);
+ while (xenvif_ctrl_work_todo(vif))
+ xenvif_ctrl_action(vif);
- cond_resched();
- }
-
- return 0;
+ return IRQ_HANDLED;
}
static int __init netback_init(void)
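Replacing the control kthread with request_threaded_irq() delegates the wait/wake plumbing to the IRQ core: a NULL primary handler makes the core simply wake the handler thread (IRQF_ONESHOT is mandatory in that case), and the thread runs the work in process context, just as the kthread did. The general shape of the pattern, as a sketch with hypothetical helpers have_work()/do_work():

/* Threaded-IRQ pattern: all real work happens in the thread handler. */
static irqreturn_t example_thread_fn(int irq, void *data)
{
	/* runs in a kernel thread; drain everything before returning */
	while (have_work(data))
		do_work(data);
	return IRQ_HANDLED;
}

/* NULL primary handler: the IRQ core just wakes example_thread_fn. */
err = request_threaded_irq(irq, NULL, example_thread_fn,
			   IRQF_ONESHOT, "example-ctrl", data);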
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bacf6e0c12b9..daf4c7867102 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
return count;
}
-static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
+static int xenvif_dump_open(struct inode *inode, struct file *filp)
{
int ret;
void *queue = NULL;
@@ -179,35 +179,13 @@ static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.owner = THIS_MODULE,
- .open = xenvif_io_ring_open,
+ .open = xenvif_dump_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = xenvif_write_io_ring,
};
-static int xenvif_read_ctrl(struct seq_file *m, void *v)
-{
- struct xenvif *vif = m->private;
-
- xenvif_dump_hash_info(vif, m);
-
- return 0;
-}
-
-static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, xenvif_read_ctrl, inode->i_private);
-}
-
-static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
- .owner = THIS_MODULE,
- .open = xenvif_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static void xenvif_debugfs_addif(struct xenvif *vif)
{
struct dentry *pfile;
@@ -232,17 +210,6 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
pr_warn("Creation of io_ring file returned %ld!\n",
PTR_ERR(pfile));
}
-
- if (vif->ctrl_task) {
- pfile = debugfs_create_file("ctrl",
- S_IRUSR,
- vif->xenvif_dbg_root,
- vif,
- &xenvif_dbg_ctrl_ops_fops);
- if (IS_ERR_OR_NULL(pfile))
- pr_warn("Creation of ctrl file returned %ld!\n",
- PTR_ERR(pfile));
- }
} else
netdev_warn(vif->dev,
"Creation of vif debugfs dir returned %ld!\n",
@@ -304,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
be->dev = dev;
dev_set_drvdata(&dev->dev, be);
+ be->state = XenbusStateInitialising;
+ err = xenbus_switch_state(dev, XenbusStateInitialising);
+ if (err)
+ goto fail;
+
sg = 1;
do {
@@ -416,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
be->hotplug_script = script;
- err = xenbus_switch_state(dev, XenbusStateInitWait);
- if (err)
- goto fail;
-
- be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */
err = backend_create_xenvif(be);
@@ -525,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
/* Handle backend state transitions:
*
- * The backend state starts in InitWait and the following transitions are
+ * The backend state starts in Initialising and the following transitions are
* allowed.
*
- * InitWait -> Connected
- *
- *    ^    \         |
- *    |     \        |
- *    |      \       |
- *    |       \      |
- *    |        \     |
- *    |         \    |
- *    |          V   V
+ * Initialising -> InitWait -> Connected
+ *          \
+ *           \        ^    \         |
+ *            \       |     \        |
+ *             \      |      \       |
+ *              \     |       \      |
+ *               \    |        \     |
+ *                \   |         \    |
+ *                 V  |          V   V
+ *
- *  Closed  <-> Closing
+ *                  Closed  <-> Closing
*
* The state argument specifies the eventual state of the backend and the
* function transitions to that state via the shortest path.
@@ -548,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
{
while (be->state != state) {
switch (be->state) {
+ case XenbusStateInitialising:
+ switch (state) {
+ case XenbusStateInitWait:
+ case XenbusStateConnected:
+ case XenbusStateClosing:
+ backend_switch_state(be, XenbusStateInitWait);
+ break;
+ case XenbusStateClosed:
+ backend_switch_state(be, XenbusStateClosed);
+ break;
+ default:
+ BUG();
+ }
+ break;
case XenbusStateClosed:
switch (state) {
case XenbusStateInitWait:
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 96ccd4e943db..e17879dd5d5a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -565,6 +565,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netfront_queue *queue = NULL;
unsigned int num_queues = dev->real_num_tx_queues;
u16 queue_index;
+ struct sk_buff *nskb;
/* Drop the packet if no queues are set up */
if (num_queues < 1)
@@ -593,6 +594,20 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
+
+ /* The first req should be at least ETH_HLEN size or the packet will be
+ * dropped by netback.
+ */
+ if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ if (!nskb)
+ goto drop;
+ dev_kfree_skb_any(skb);
+ skb = nskb;
+ page = virt_to_page(skb->data);
+ offset = offset_in_page(skb->data);
+ }
+
len = skb_headlen(skb);
spin_lock_irqsave(&queue->tx_lock, flags);
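The copy path only triggers when the linear head sits so close to the end of its page that the 14-byte Ethernet header cannot fit in the first grant; with 4 KiB pages that means an offset above 4082. Spelled out with example numbers, as a sketch:

/* Worked example: PAGE_SIZE = 4096, ETH_HLEN = 14. */
static bool first_slot_too_small(const struct sk_buff *skb)
{
	unsigned long offset = offset_in_page(skb->data);

	/* e.g. offset 4090 leaves 4096 - 4090 = 6 bytes, less than the
	 * 14-byte Ethernet header, so netback would drop the packet;
	 * skb_copy() relinearizes the head into fresh pages instead. */
	return PAGE_SIZE - offset < ETH_HLEN;
}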
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 458daf927336..935866fe5ec2 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -185,8 +185,12 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
return -ENXIO;
nd_desc = nvdimm_bus->nd_desc;
+ /*
+ * If ndctl does not exist, it's PMEM_LEGACY and
+ * we just pretend everything is handled.
+ */
if (!nd_desc->ndctl)
- return -ENXIO;
+ return len;
memset(&ars_cap, 0, sizeof(ars_cap));
ars_cap.address = phys;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index db39d53cdfb9..f7d37a62f874 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -30,8 +30,8 @@ config NVME_FABRICS
config NVME_RDMA
tristate "NVM Express over Fabrics RDMA host driver"
- depends on INFINIBAND
- depends on BLK_DEV_NVME
+ depends on INFINIBAND && BLOCK
+ select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index dc996761042f..4eff49174466 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -47,8 +47,10 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
mutex_lock(&nvmf_hosts_mutex);
host = __nvmf_host_find(hostnqn);
- if (host)
+ if (host) {
+ kref_get(&host->ref);
goto out_unlock;
+ }
host = kmalloc(sizeof(*host), GFP_KERNEL);
if (!host)
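The added kref_get() fixes the lookup path of a classic get-or-create pattern: without it, a second controller reusing an existing host shared the object with no reference of its own, so the first teardown could free it underneath the survivor. The canonical shape, sketched with hypothetical names (lookup, key, table_mutex):

/* Get-or-create under a lock; every user must hold its own reference. */
mutex_lock(&table_mutex);
obj = lookup(key);
if (obj) {
	kref_get(&obj->ref);	/* found: take a reference for this user */
	goto out_unlock;
}
obj = kmalloc(sizeof(*obj), GFP_KERNEL);
if (obj)
	kref_init(&obj->ref);	/* created: refcount starts at 1 */
out_unlock:
mutex_unlock(&table_mutex);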
@@ -56,7 +58,7 @@ static struct nvmf_host *nvmf_host_add(const char *hostnqn)
kref_init(&host->ref);
memcpy(host->nqn, hostnqn, NVMF_NQN_SIZE);
- uuid_le_gen(&host->id);
+ uuid_be_gen(&host->id);
list_add_tail(&host->list, &nvmf_hosts);
out_unlock:
@@ -73,9 +75,9 @@ static struct nvmf_host *nvmf_host_default(void)
return NULL;
kref_init(&host->ref);
- uuid_le_gen(&host->id);
+ uuid_be_gen(&host->id);
snprintf(host->nqn, NVMF_NQN_SIZE,
- "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUl", &host->id);
+ "nqn.2014-08.org.nvmexpress:NVMf:uuid:%pUb", &host->id);
mutex_lock(&nvmf_hosts_mutex);
list_add_tail(&host->list, &nvmf_hosts);
@@ -363,7 +365,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = 0;
- cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+
+ /*
+ * The fabrics spec sets a minimum depth of 32 for the admin queue,
+ * so always set the queue to this depth until there is
+ * justification to do otherwise.
+ */
+ cmd.connect.sqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+
/*
* Set keep-alive timeout in seconds granularity (ms * 1000)
* and add a grace period for controller kato enforcement
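SQSIZE in the connect command is a zero-based queue-depth field per the fabrics spec, so NVMF_AQ_DEPTH - 1 (NVMF_AQ_DEPTH is 32) encodes exactly the 32-entry admin queue the comment refers to. Spelled out:

/* SQSIZE is a 0-based field: encoded = depth - 1, depth = encoded + 1. */
u16 encoded = NVMF_AQ_DEPTH - 1;	/* 32 - 1 = 31 on the wire */
u16 depth   = encoded + 1;		/* controller sees a 32-entry queue */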
@@ -375,7 +384,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
if (!data)
return -ENOMEM;
- memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
data->cntlid = cpu_to_le16(0xffff);
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
@@ -434,7 +443,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
if (!data)
return -ENOMEM;
- memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_le));
+ memcpy(&data->hostid, &ctrl->opts->host->id, sizeof(uuid_be));
data->cntlid = cpu_to_le16(ctrl->cntlid);
strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 89df52c8be97..46e460aee52d 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -34,7 +34,7 @@ struct nvmf_host {
struct kref ref;
struct list_head list;
char nqn[NVMF_NQN_SIZE];
- uuid_le id;
+ uuid_be id;
};
/**
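
The fabrics hunks in this file rely on sqsize being 0's-based while
opts->queue_size is 1's-based. A minimal sketch of the arithmetic, assuming a
caller-supplied queue_size (names are illustrative, not the driver's):

	#define NVMF_AQ_DEPTH 32	/* admin queue depth per the fabrics spec */

	struct qsizes {
		unsigned int sqsize;	/* 0's-based, as in ctrl->ctrl.sqsize */
		unsigned int hsqsize;	/* host send queue size at connect */
		unsigned int hrqsize;	/* host recv queue size at connect */
	};

	/* Illustrative only: derive connect-time sizes from queue_size. */
	static struct qsizes fabrics_qsizes(unsigned int queue_size, int admin)
	{
		struct qsizes s;

		if (admin) {
			/* the admin queue always uses the spec minimum */
			s.sqsize  = NVMF_AQ_DEPTH - 1;	/* 31 */
			s.hsqsize = NVMF_AQ_DEPTH - 1;
			s.hrqsize = NVMF_AQ_DEPTH;
		} else {
			/* e.g. queue_size 128 -> sqsize 127, hrqsize 128 */
			s.sqsize  = queue_size - 1;
			s.hsqsize = s.sqsize;
			s.hrqsize = s.sqsize + 1;
		}
		return s;
	}
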
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8dcf5a960951..60f7eab11865 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1693,7 +1693,12 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_suspend_queue(dev->queues[i]);
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
- nvme_suspend_queue(dev->queues[0]);
+ /* A device might become IO incapable very soon during
+ * probe, before the admin queue is configured. Thus,
+ * queue_count can be 0 here.
+ */
+ if (dev->queue_count)
+ nvme_suspend_queue(dev->queues[0]);
} else {
nvme_disable_io_queues(dev);
nvme_disable_admin_queue(dev, shutdown);
@@ -2112,6 +2117,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ 0, }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 8d2875b4c56d..c2c2c28e6eb5 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -43,10 +43,6 @@
#define NVME_RDMA_MAX_INLINE_SEGMENTS 1
-#define NVME_RDMA_MAX_PAGES_PER_MR 512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY 20
-
/*
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
u32 num_sge;
int nents;
bool inline_data;
- bool need_inval;
struct ib_reg_wr reg_wr;
struct ib_cqe reg_cqe;
struct nvme_rdma_queue *queue;
@@ -87,6 +82,8 @@ struct nvme_rdma_request {
enum nvme_rdma_queue_flags {
NVME_RDMA_Q_CONNECTED = (1 << 0),
+ NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
+ NVME_RDMA_Q_DELETING = (1 << 2),
};
struct nvme_rdma_queue {
@@ -286,7 +283,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int ret = 0;
- if (!req->need_inval)
+ if (!req->mr->need_inval)
goto out;
ib_dereg_mr(req->mr);
@@ -296,9 +293,10 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
if (IS_ERR(req->mr)) {
ret = PTR_ERR(req->mr);
req->mr = NULL;
+ goto out;
}
- req->need_inval = false;
+ req->mr->need_inval = false;
out:
return ret;
@@ -485,9 +483,14 @@ out_err:
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
+
+ if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
+ return;
+ dev = queue->device;
+ ibdev = dev->dev;
rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->ib_cq);
@@ -538,6 +541,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
ret = -ENOMEM;
goto out_destroy_qp;
}
+ set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
return 0;
@@ -557,6 +561,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
queue = &ctrl->queues[idx];
queue->ctrl = ctrl;
+ queue->flags = 0;
init_completion(&queue->cm_done);
if (idx > 0)
@@ -595,6 +600,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
return 0;
out_destroy_cm_id:
+ nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
return ret;
}
@@ -613,7 +619,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
{
- if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
+ if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
return;
nvme_rdma_stop_queue(queue);
nvme_rdma_free_queue(queue);
@@ -645,7 +651,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->queue_count; i++) {
- ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+ ret = nvme_rdma_init_queue(ctrl, i,
+ ctrl->ctrl.opts->queue_size);
if (ret) {
dev_info(ctrl->ctrl.device,
"failed to initialize i/o queue: %d\n", ret);
@@ -656,7 +663,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0;
out_free_queues:
- for (; i >= 1; i--)
+ for (i--; i >= 1; i--)
nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
return ret;
@@ -765,8 +772,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
{
struct nvme_rdma_ctrl *ctrl = container_of(work,
struct nvme_rdma_ctrl, err_work);
+ int i;
nvme_stop_keep_alive(&ctrl->ctrl);
+
+ for (i = 0; i < ctrl->queue_count; i++)
+ clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+
if (ctrl->queue_count > 1)
nvme_stop_queues(&ctrl->ctrl);
blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -849,7 +861,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
if (!blk_rq_bytes(rq))
return;
- if (req->need_inval) {
+ if (req->mr->need_inval) {
res = nvme_rdma_inv_rkey(queue, req);
if (res < 0) {
dev_err(ctrl->ctrl.device,
@@ -935,7 +947,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
- req->need_inval = true;
+ req->mr->need_inval = true;
sg->addr = cpu_to_le64(req->mr->iova);
put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +970,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
req->num_sge = 1;
req->inline_data = false;
- req->need_inval = false;
+ req->mr->need_inval = false;
c->common.flags |= NVME_CMD_SGL_METABUF;
@@ -1145,7 +1157,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
wc->ex.invalidate_rkey == req->mr->rkey)
- req->need_inval = false;
+ req->mr->need_inval = false;
blk_mq_complete_request(rq, status);
@@ -1278,8 +1290,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
- priv.hrqsize = cpu_to_le16(queue->queue_size);
- priv.hsqsize = cpu_to_le16(queue->queue_size);
+ /*
+ * set the admin queue depth to the minimum size
+ * specified by the Fabrics standard.
+ */
+ if (priv.qid == 0) {
+ priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+ priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+ } else {
+ /*
+ * The current interpretation of the fabrics spec is that,
+ * at minimum, hrqsize is sqsize + 1, i.e. the 1's-based
+ * representation of sqsize.
+ */
+ priv.hrqsize = cpu_to_le16(queue->queue_size);
+ priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ }
ret = rdma_connect(queue->cm_id, &param);
if (ret) {
@@ -1295,58 +1321,6 @@ out_destroy_queue_ib:
return ret;
}
-/**
- * nvme_rdma_device_unplug() - Handle RDMA device unplug
- * @queue: Queue that owns the cm_id that caught the event
- *
- * DEVICE_REMOVAL event notifies us that the RDMA device is about
- * to unplug so we should take care of destroying our RDMA resources.
- * This event will be generated for each allocated cm_id.
- *
- * In our case, the RDMA resources are managed per controller and not
- * only per queue. So the way we handle this is we trigger an implicit
- * controller deletion upon the first DEVICE_REMOVAL event we see, and
- * hold the event inflight until the controller deletion is completed.
- *
- * One exception that we need to handle is the destruction of the cm_id
- * that caught the event. Since we hold the callout until the controller
- * deletion is completed, we'll deadlock if the controller deletion will
- * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources,
- * then queue the controller deletion which won't destroy this queue and
- * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
- */
-static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
-{
- struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- int ret;
-
- /* Own the controller deletion */
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
- return 0;
-
- dev_warn(ctrl->ctrl.device,
- "Got rdma device removal event, deleting ctrl\n");
-
- /* Get rid of reconnect work if its running */
- cancel_delayed_work_sync(&ctrl->reconnect_work);
-
- /* Disable the queue so ctrl delete won't free it */
- if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
- /* Free this queue ourselves */
- nvme_rdma_stop_queue(queue);
- nvme_rdma_destroy_queue_ib(queue);
-
- /* Return non-zero so the cm_id will destroy implicitly */
- ret = 1;
- }
-
- /* Queue controller deletion */
- queue_work(nvme_rdma_wq, &ctrl->delete_work);
- flush_work(&ctrl->delete_work);
- return ret;
-}
-
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
@@ -1388,8 +1362,8 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
nvme_rdma_error_recovery(queue->ctrl);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
- /* return 1 means impliciy CM ID destroy */
- return nvme_rdma_device_unplug(queue);
+ /* device removal is handled via the ib_client API */
+ break;
default:
dev_err(queue->ctrl->ctrl.device,
"Unexpected RDMA CM event (%d)\n", ev->event);
@@ -1461,7 +1435,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
flush = true;
ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
- req->need_inval ? &req->reg_wr.wr : NULL, flush);
+ req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
if (ret) {
nvme_rdma_unmap_data(queue, rq);
goto err;
@@ -1690,15 +1664,19 @@ static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_del_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
- int ret;
+ int ret = 0;
+ /*
+ * Keep a reference until all work is flushed since
+ * __nvme_rdma_del_ctrl can free the ctrl mem
+ */
+ if (!kref_get_unless_zero(&ctrl->ctrl.kref))
+ return -EBUSY;
ret = __nvme_rdma_del_ctrl(ctrl);
- if (ret)
- return ret;
-
- flush_work(&ctrl->delete_work);
-
- return 0;
+ if (!ret)
+ flush_work(&ctrl->delete_work);
+ nvme_put_ctrl(&ctrl->ctrl);
+ return ret;
}
static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
@@ -1816,7 +1794,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
ctrl->tag_set.ops = &nvme_rdma_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
ctrl->tag_set.numa_node = NUMA_NO_NODE;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1892,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
spin_lock_init(&ctrl->lock);
ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
- ctrl->ctrl.sqsize = opts->queue_size;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ret = -ENOMEM;
@@ -1995,27 +1973,57 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
.create_ctrl = nvme_rdma_create_ctrl,
};
+static void nvme_rdma_add_one(struct ib_device *ib_device)
+{
+}
+
+static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
+{
+ struct nvme_rdma_ctrl *ctrl;
+
+ /* Delete all controllers using this device */
+ mutex_lock(&nvme_rdma_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
+ if (ctrl->device->dev != ib_device)
+ continue;
+ dev_info(ctrl->ctrl.device,
+ "Removing ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ __nvme_rdma_del_ctrl(ctrl);
+ }
+ mutex_unlock(&nvme_rdma_ctrl_mutex);
+
+ flush_workqueue(nvme_rdma_wq);
+}
+
+static struct ib_client nvme_rdma_ib_client = {
+ .name = "nvme_rdma",
+ .add = nvme_rdma_add_one,
+ .remove = nvme_rdma_remove_one
+};
+
static int __init nvme_rdma_init_module(void)
{
+ int ret;
+
nvme_rdma_wq = create_workqueue("nvme_rdma_wq");
if (!nvme_rdma_wq)
return -ENOMEM;
+ ret = ib_register_client(&nvme_rdma_ib_client);
+ if (ret) {
+ destroy_workqueue(nvme_rdma_wq);
+ return ret;
+ }
+
nvmf_register_transport(&nvme_rdma_transport);
return 0;
}
static void __exit nvme_rdma_cleanup_module(void)
{
- struct nvme_rdma_ctrl *ctrl;
-
nvmf_unregister_transport(&nvme_rdma_transport);
-
- mutex_lock(&nvme_rdma_ctrl_mutex);
- list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
- __nvme_rdma_del_ctrl(ctrl);
- mutex_unlock(&nvme_rdma_ctrl_mutex);
-
+ ib_unregister_client(&nvme_rdma_ib_client);
destroy_workqueue(nvme_rdma_wq);
}
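
The init/exit hunks above move device-removal handling into the IB core's
client mechanism instead of the CM event handler. A minimal sketch of the
ib_client registration pattern, with invented names:

	#include <rdma/ib_verbs.h>

	static void foo_add_one(struct ib_device *ib_device)
	{
		/* nothing to do until a controller uses this device */
	}

	static void foo_remove_one(struct ib_device *ib_device, void *client_data)
	{
		/* delete every controller bound to ib_device, then flush work */
	}

	static struct ib_client foo_ib_client = {
		.name	= "foo_rdma",
		.add	= foo_add_one,
		.remove	= foo_remove_one,
	};

	/* module init: ib_register_client(&foo_ib_client);
	 * module exit: ib_unregister_client(&foo_ib_client);
	 */
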
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index a5c31cbeb481..3a5b9d0576cb 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -15,8 +15,8 @@ config NVME_TARGET
config NVME_TARGET_LOOP
tristate "NVMe loopback device support"
- depends on BLK_DEV_NVME
depends on NVME_TARGET
+ select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 7affd40a6b33..395e60dad835 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -556,7 +556,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
ctrl->tag_set.ops = &nvme_loop_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+ ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
ctrl->tag_set.numa_node = NUMA_NO_NODE;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -620,7 +620,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = -ENOMEM;
- ctrl->ctrl.sqsize = opts->queue_size;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index b4d648536c3e..1cbe6e053b5b 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -978,10 +978,11 @@ static void nvmet_rdma_release_queue_work(struct work_struct *w)
container_of(w, struct nvmet_rdma_queue, release_work);
struct rdma_cm_id *cm_id = queue->cm_id;
struct nvmet_rdma_device *dev = queue->dev;
+ enum nvmet_rdma_queue_state state = queue->state;
nvmet_rdma_free_queue(queue);
- if (queue->state != NVMET_RDMA_IN_DEVICE_REMOVAL)
+ if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
rdma_destroy_id(cm_id);
kref_put(&dev->ref, nvmet_rdma_free_dev);
@@ -1003,10 +1004,10 @@ nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
queue->host_qid = le16_to_cpu(req->qid);
/*
- * req->hsqsize corresponds to our recv queue size
+ * req->hsqsize corresponds to our recv queue size plus 1
* req->hrqsize corresponds to our send queue size
*/
- queue->recv_queue_size = le16_to_cpu(req->hsqsize);
+ queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
queue->send_queue_size = le16_to_cpu(req->hrqsize);
if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
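
A worked example of the hsqsize adjustment above: with a host-side queue_size
of 128, the host connects with hsqsize = sqsize = 127 (0's-based), so the
target must size its receive queue as hsqsize + 1 = 128 entries, matching the
host's 1's-based queue size.
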
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 37ff0158e45f..44e0ff37480b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3327,9 +3327,9 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
if (nhi->vendor != PCI_VENDOR_ID_INTEL
|| (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+ nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
- || nhi->subsystem_vendor != 0x2222
- || nhi->subsystem_device != 0x1111)
+ || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
goto out;
dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3344,6 +3344,9 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
+ quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
#endif
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index d1ef7acf6930..f9357e09e9b3 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -40,6 +40,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pci_bridge_d3_device_removed(dev);
pci_free_resources(dev);
put_device(&dev->dev);
}
@@ -96,8 +97,6 @@ static void pci_remove_bus_device(struct pci_dev *dev)
dev->subordinate = NULL;
}
- pci_bridge_d3_device_removed(dev);
-
pci_destroy_dev(dev);
}
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 489ea1098c96..69b5e811ea2b 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -977,7 +977,7 @@ static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
/************************ runtime PM support ***************************/
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state);
+static int pcmcia_dev_suspend(struct device *dev);
static int pcmcia_dev_resume(struct device *dev);
static int runtime_suspend(struct device *dev)
@@ -985,7 +985,7 @@ static int runtime_suspend(struct device *dev)
int rc;
device_lock(dev);
- rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
+ rc = pcmcia_dev_suspend(dev);
device_unlock(dev);
return rc;
}
@@ -1135,7 +1135,7 @@ ATTRIBUTE_GROUPS(pcmcia_dev);
/* PM support, also needed for reset */
-static int pcmcia_dev_suspend(struct device *dev, pm_message_t state)
+static int pcmcia_dev_suspend(struct device *dev)
{
struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
struct pcmcia_driver *p_drv = NULL;
@@ -1410,6 +1410,9 @@ static struct class_interface pcmcia_bus_interface __refdata = {
.remove_dev = &pcmcia_bus_remove_socket,
};
+static const struct dev_pm_ops pcmcia_bus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pcmcia_dev_suspend, pcmcia_dev_resume)
+};
struct bus_type pcmcia_bus_type = {
.name = "pcmcia",
@@ -1418,8 +1421,7 @@ struct bus_type pcmcia_bus_type = {
.dev_groups = pcmcia_dev_groups,
.probe = pcmcia_device_probe,
.remove = pcmcia_device_remove,
- .suspend = pcmcia_dev_suspend,
- .resume = pcmcia_dev_resume,
+ .pm = &pcmcia_bus_pm_ops,
};
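
The ds.c hunks above are the standard conversion from legacy bus_type
.suspend/.resume callbacks to dev_pm_ops; note that the pm_message_t argument
drops out of the suspend callback. A minimal sketch of the resulting shape,
with invented names:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int foo_dev_suspend(struct device *dev)
	{
		return 0;	/* quiesce the device */
	}

	static int foo_dev_resume(struct device *dev)
	{
		return 0;	/* bring the device back up */
	}

	static const struct dev_pm_ops foo_bus_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(foo_dev_suspend, foo_dev_resume)
	};

	struct bus_type foo_bus_type = {
		.name	= "foo",
		.pm	= &foo_bus_pm_ops,	/* replaces .suspend/.resume */
	};
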
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 483f919e0d2e..91b5f5724cba 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -214,9 +214,8 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
{
- struct pcmcia_low_level *ops = dev->platform_data;
/*
* We have at least one socket, so set MECR:CIT
* (Card Is There)
@@ -322,7 +321,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
goto err1;
}
- pxa2xx_configure_sockets(&dev->dev);
+ pxa2xx_configure_sockets(&dev->dev, ops);
dev_set_drvdata(&dev->dev, sinfo);
return 0;
@@ -348,7 +347,9 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
static int pxa2xx_drv_pcmcia_resume(struct device *dev)
{
- pxa2xx_configure_sockets(dev);
+ struct pcmcia_low_level *ops = (struct pcmcia_low_level *)dev->platform_data;
+
+ pxa2xx_configure_sockets(dev, ops);
return 0;
}
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index b609b45469ed..e58c7a415418 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,4 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
-void pxa2xx_configure_sockets(struct device *dev);
+void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops);
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 12f0dd091477..2f490930430d 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -134,20 +134,14 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
int pcmcia_badge4_init(struct sa1111_dev *dev)
{
- int ret = -ENODEV;
-
- if (machine_is_badge4()) {
- printk(KERN_INFO
- "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
- __func__,
- badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
- sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
- ret = sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ printk(KERN_INFO
+ "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
+ __func__,
+ badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
+
+ sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
+ return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
static int __init pcmv_setup(char *s)
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index a1531feb8460..3d95dffcff7a 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -18,6 +18,7 @@
#include <mach/hardware.h>
#include <asm/hardware/sa1111.h>
+#include <asm/mach-types.h>
#include <asm/irq.h>
#include "sa1111_generic.h"
@@ -203,19 +204,30 @@ static int pcmcia_probe(struct sa1111_dev *dev)
sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR);
sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
+ ret = -ENODEV;
#ifdef CONFIG_SA1100_BADGE4
- pcmcia_badge4_init(dev);
+ if (machine_is_badge4())
+ ret = pcmcia_badge4_init(dev);
#endif
#ifdef CONFIG_SA1100_JORNADA720
- pcmcia_jornada720_init(dev);
+ if (machine_is_jornada720())
+ ret = pcmcia_jornada720_init(dev);
#endif
#ifdef CONFIG_ARCH_LUBBOCK
- pcmcia_lubbock_init(dev);
+ if (machine_is_lubbock())
+ ret = pcmcia_lubbock_init(dev);
#endif
#ifdef CONFIG_ASSABET_NEPONSET
- pcmcia_neponset_init(dev);
+ if (machine_is_assabet())
+ ret = pcmcia_neponset_init(dev);
#endif
- return 0;
+
+ if (ret) {
+ release_mem_region(dev->res.start, 512);
+ sa1111_disable_device(dev);
+ }
+
+ return ret;
}
static int pcmcia_remove(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index c2c30580c83f..480a3ede27c8 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -94,22 +94,17 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
int pcmcia_jornada720_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
+ unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
- if (machine_is_jornada720()) {
- unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
+ /* Fixme: why messing around with SA11x0's GPIO1? */
+ GRER |= 0x00000002;
- GRER |= 0x00000002;
+ /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
+ sa1111_set_io_dir(sadev, pin, 0, 0);
+ sa1111_set_io(sadev, pin, 0);
+ sa1111_set_sleep_io(sadev, pin, 0);
- /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */
- sa1111_set_io_dir(sadev, pin, 0, 0);
- sa1111_set_io(sadev, pin, 0);
- sa1111_set_sleep_io(sadev, pin, 0);
-
- sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
- ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
index c5caf5790451..e741f499c875 100644
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ b/drivers/pcmcia/sa1111_lubbock.c
@@ -210,27 +210,21 @@ static struct pcmcia_low_level lubbock_pcmcia_ops = {
int pcmcia_lubbock_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
-
- if (machine_is_lubbock()) {
- /*
- * Set GPIO_A<3:0> to be outputs for the MAX1600,
- * and switch to standby mode.
- */
- sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
-
- /* Set CF Socket 1 power to standby mode. */
- lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
+ /*
+ * Set GPIO_A<3:0> to be outputs for the MAX1600,
+ * and switch to standby mode.
+ */
+ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+ sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
- pxa2xx_configure_sockets(&sadev->dev);
- ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
- pxa2xx_drv_pcmcia_add_one);
- }
+ /* Set CF Socket 1 power to standby mode. */
+ lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
- return ret;
+ pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
+ pxa2xx_drv_pcmcia_add_one);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1111_neponset.c b/drivers/pcmcia/sa1111_neponset.c
index 1d78739c4c07..019c395eb4bf 100644
--- a/drivers/pcmcia/sa1111_neponset.c
+++ b/drivers/pcmcia/sa1111_neponset.c
@@ -110,20 +110,14 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
int pcmcia_neponset_init(struct sa1111_dev *sadev)
{
- int ret = -ENODEV;
-
- if (machine_is_assabet()) {
- /*
- * Set GPIO_A<3:0> to be outputs for the MAX1600,
- * and switch to standby mode.
- */
- sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
- sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
- sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
- ret = sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
- }
-
- return ret;
+ /*
+ * Set GPIO_A<3:0> to be outputs for the MAX1600,
+ * and switch to standby mode.
+ */
+ sa1111_set_io_dir(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0, 0);
+ sa1111_set_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa1111_set_sleep_io(sadev, GPIO_A0|GPIO_A1|GPIO_A2|GPIO_A3, 0);
+ sa11xx_drv_pcmcia_ops(&neponset_pcmcia_ops);
+ return sa1111_pcmcia_add(sadev, &neponset_pcmcia_ops,
+ sa11xx_drv_pcmcia_add_one);
}
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 9f6ec87b9f9e..48140ac73ed6 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -144,19 +144,19 @@ static int
sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
{
struct soc_pcmcia_timing timing;
- unsigned int clock = clk_get_rate(skt->clk);
+ unsigned int clock = clk_get_rate(skt->clk) / 1000;
unsigned long mecr = MECR;
char *p = buf;
soc_common_pcmcia_get_timing(skt, &timing);
- p+=sprintf(p, "I/O : %u (%u)\n", timing.io,
+ p+=sprintf(p, "I/O : %uns (%uns)\n", timing.io,
sa1100_pcmcia_cmd_time(clock, MECR_BSIO_GET(mecr, skt->nr)));
- p+=sprintf(p, "attribute: %u (%u)\n", timing.attr,
+ p+=sprintf(p, "attribute: %uns (%uns)\n", timing.attr,
sa1100_pcmcia_cmd_time(clock, MECR_BSA_GET(mecr, skt->nr)));
- p+=sprintf(p, "common : %u (%u)\n", timing.mem,
+ p+=sprintf(p, "common : %uns (%uns)\n", timing.mem,
sa1100_pcmcia_cmd_time(clock, MECR_BSM_GET(mecr, skt->nr)));
return p - buf;
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index eed5e9c05353..d5ca760c4eb2 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -235,7 +235,7 @@ static unsigned int soc_common_pcmcia_skt_state(struct soc_pcmcia_socket *skt)
stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
if (skt->cs_state.flags & SS_IOCARD)
- stat |= state.bvd1 ? SS_STSCHG : 0;
+ stat |= state.bvd1 ? 0 : SS_STSCHG;
else {
if (state.bvd1 == 0)
stat |= SS_BATDEAD;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index c494613c1909..f5e1008a223d 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -925,6 +925,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (i > 0 && spi != using_spi) {
pr_err("PPI/SPI IRQ type mismatch for %s!\n",
dn->name);
+ of_node_put(dn);
kfree(irqs);
return -EINVAL;
}
@@ -969,7 +970,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (cpumask_weight(&pmu->supported_cpus) == 0) {
int irq = platform_get_irq(pdev, 0);
- if (irq_is_percpu(irq)) {
+ if (irq >= 0 && irq_is_percpu(irq)) {
/* If using PPIs, check the affinity of the partition */
int ret;
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
index 18d662610075..8ffc44afdb75 100644
--- a/drivers/phy/phy-brcm-sata.c
+++ b/drivers/phy/phy-brcm-sata.c
@@ -367,7 +367,7 @@ static int brcm_sata_phy_init(struct phy *phy)
rc = -ENODEV;
};
- return 0;
+ return rc;
}
static const struct phy_ops phy_ops = {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index 0a45bc6088ae..8c7eb335622e 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -40,6 +40,7 @@
#include <linux/power_supply.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
+#include <linux/usb/of.h>
#include <linux/workqueue.h>
#define REG_ISCR 0x00
@@ -110,6 +111,7 @@ struct sun4i_usb_phy_cfg {
struct sun4i_usb_phy_data {
void __iomem *base;
const struct sun4i_usb_phy_cfg *cfg;
+ enum usb_dr_mode dr_mode;
struct mutex mutex;
struct sun4i_usb_phy {
struct phy *phy;
@@ -120,6 +122,7 @@ struct sun4i_usb_phy_data {
bool regulator_on;
int index;
} phys[MAX_PHYS];
+ int first_phy;
/* phy0 / otg related variables */
struct extcon_dev *extcon;
bool phy0_init;
@@ -285,16 +288,10 @@ static int sun4i_usb_phy_init(struct phy *_phy)
sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
- if (data->id_det_gpio) {
- /* OTG mode, force ISCR and cable state updates */
- data->id_det = -1;
- data->vbus_det = -1;
- queue_delayed_work(system_wq, &data->detect, 0);
- } else {
- /* Host only mode */
- sun4i_usb_phy0_set_id_detect(_phy, 0);
- sun4i_usb_phy0_set_vbus_detect(_phy, 1);
- }
+ /* Force ISCR and cable state updates */
+ data->id_det = -1;
+ data->vbus_det = -1;
+ queue_delayed_work(system_wq, &data->detect, 0);
}
return 0;
@@ -319,6 +316,19 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
return 0;
}
+static int sun4i_usb_phy0_get_id_det(struct sun4i_usb_phy_data *data)
+{
+ switch (data->dr_mode) {
+ case USB_DR_MODE_OTG:
+ return gpiod_get_value_cansleep(data->id_det_gpio);
+ case USB_DR_MODE_HOST:
+ return 0;
+ case USB_DR_MODE_PERIPHERAL:
+ default:
+ return 1;
+ }
+}
+
static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
{
if (data->vbus_det_gpio)
@@ -432,7 +442,10 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct phy *phy0 = data->phys[0].phy;
int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
- id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+ if (phy0 == NULL)
+ return;
+
+ id_det = sun4i_usb_phy0_get_id_det(data);
vbus_det = sun4i_usb_phy0_get_vbus_det(data);
mutex_lock(&phy0->mutex);
@@ -448,7 +461,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
* without vbus detection report vbus low for long enough for
* the musb-ip to end the current device session.
*/
- if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+ if (data->dr_mode == USB_DR_MODE_OTG &&
+ !sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(200);
sun4i_usb_phy0_set_vbus_detect(phy0, 1);
@@ -474,7 +488,8 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
* without vbus detection report vbus low for long enough to
* the musb-ip to end the current host session.
*/
- if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+ if (data->dr_mode == USB_DR_MODE_OTG &&
+ !sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
mutex_lock(&phy0->mutex);
sun4i_usb_phy0_set_vbus_detect(phy0, 0);
msleep(1000);
@@ -519,7 +534,8 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
- if (args->args[0] >= data->cfg->num_phys)
+ if (args->args[0] < data->first_phy ||
+ args->args[0] >= data->cfg->num_phys)
return ERR_PTR(-ENODEV);
return data->phys[args->args[0]].phy;
@@ -593,13 +609,17 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- /* vbus_det without id_det makes no sense, and is not supported */
- if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
- dev_err(dev, "usb0_id_det missing or invalid\n");
- return -ENODEV;
- }
-
- if (data->id_det_gpio) {
+ data->dr_mode = of_usb_get_dr_mode_by_phy(np, 0);
+ switch (data->dr_mode) {
+ case USB_DR_MODE_OTG:
+ /* otg without id_det makes no sense, and is not supported */
+ if (!data->id_det_gpio) {
+ dev_err(dev, "usb0_id_det missing or invalid\n");
+ return -ENODEV;
+ }
+ /* fall through */
+ case USB_DR_MODE_HOST:
+ case USB_DR_MODE_PERIPHERAL:
data->extcon = devm_extcon_dev_allocate(dev,
sun4i_usb_phy0_cable);
if (IS_ERR(data->extcon))
@@ -610,9 +630,13 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to register extcon: %d\n", ret);
return ret;
}
+ break;
+ default:
+ dev_info(dev, "dr_mode unknown, not registering usb phy0\n");
+ data->first_phy = 1;
}
- for (i = 0; i < data->cfg->num_phys; i++) {
+ for (i = data->first_phy; i < data->cfg->num_phys; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/phy-sun9i-usb.c
index ac4f31abefe3..28fce4bce638 100644
--- a/drivers/phy/phy-sun9i-usb.c
+++ b/drivers/phy/phy-sun9i-usb.c
@@ -141,9 +141,9 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev)
}
phy->hsic_clk = devm_clk_get(dev, "hsic_12M");
- if (IS_ERR(phy->clk)) {
+ if (IS_ERR(phy->hsic_clk)) {
dev_err(dev, "failed to get hsic_12M clock\n");
- return PTR_ERR(phy->clk);
+ return PTR_ERR(phy->hsic_clk);
}
phy->reset = devm_reset_control_get(dev, "hsic");
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 5749a4eee746..0fe8fad25e4d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1539,12 +1539,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
offset += range->npins;
}
- /* Mask and clear all interrupts */
- chv_writel(0, pctrl->regs + CHV_INTMASK);
+ /* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
goto fail;
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 7bad200bd67c..55375b1b3cc8 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -809,17 +809,17 @@ static const struct pistachio_pin_group pistachio_groups[] = {
PADS_FUNCTION_SELECT2, 12, 0x3),
MFIO_MUX_PIN_GROUP(83, MIPS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
PADS_FUNCTION_SELECT2, 14, 0x3),
- MFIO_MUX_PIN_GROUP(84, SYS_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
+ MFIO_MUX_PIN_GROUP(84, AUDIO_PLL_LOCK, MIPS_TRACE_DATA, USB_DEBUG,
PADS_FUNCTION_SELECT2, 16, 0x3),
- MFIO_MUX_PIN_GROUP(85, WIFI_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+ MFIO_MUX_PIN_GROUP(85, RPU_V_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
PADS_FUNCTION_SELECT2, 18, 0x3),
- MFIO_MUX_PIN_GROUP(86, BT_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
+ MFIO_MUX_PIN_GROUP(86, RPU_L_PLL_LOCK, MIPS_TRACE_DATA, SDHOST_DEBUG,
PADS_FUNCTION_SELECT2, 20, 0x3),
- MFIO_MUX_PIN_GROUP(87, RPU_V_PLL_LOCK, DREQ2, SOCIF_DEBUG,
+ MFIO_MUX_PIN_GROUP(87, SYS_PLL_LOCK, DREQ2, SOCIF_DEBUG,
PADS_FUNCTION_SELECT2, 22, 0x3),
- MFIO_MUX_PIN_GROUP(88, RPU_L_PLL_LOCK, DREQ3, SOCIF_DEBUG,
+ MFIO_MUX_PIN_GROUP(88, WIFI_PLL_LOCK, DREQ3, SOCIF_DEBUG,
PADS_FUNCTION_SELECT2, 24, 0x3),
- MFIO_MUX_PIN_GROUP(89, AUDIO_PLL_LOCK, DREQ4, DREQ5,
+ MFIO_MUX_PIN_GROUP(89, BT_PLL_LOCK, DREQ4, DREQ5,
PADS_FUNCTION_SELECT2, 26, 0x3),
PIN_GROUP(TCK, "tck"),
PIN_GROUP(TRSTN, "trstn"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
index ce483b03a263..f9d661e5c14a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -485,12 +485,12 @@ static const struct sunxi_desc_pin sun8i_a23_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
index 3040abe6f73a..3131cac2b76f 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c
@@ -407,12 +407,12 @@ static const struct sunxi_desc_pin sun8i_a33_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x2, "uart1"), /* RTS */
SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 8)), /* PG_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x2, "uart1"), /* CTS */
SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 9)), /* PG_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 32f0f014a067..9d19b9a62011 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1161,7 +1161,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
} else if (ibw_start < (ib_win->rstart + ib_win->size) &&
(ibw_start + ibw_size) > ib_win->rstart) {
/* Return error if address translation involved */
- if (direct && ib_win->xlat) {
+ if (!direct || ib_win->xlat) {
ret = -EFAULT;
break;
}
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 3fa17ac8df54..cebc296463ad 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2247,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
{
struct rio_channel *ch;
unsigned int i;
+ LIST_HEAD(list);
riocm_debug(EXIT, ".");
+ /*
+ * If there are any channels left in the connected state, send
+ * a close notification to the connection partner.
+ * First build a list of channels that require a closing
+ * notification, because riocm_send_close() must be called
+ * outside of spinlock-protected code.
+ */
spin_lock_bh(&idr_lock);
idr_for_each_entry(&ch_idr, ch, i) {
- riocm_debug(EXIT, "close ch %d", ch->id);
- if (ch->state == RIO_CM_CONNECTED)
- riocm_send_close(ch);
+ if (ch->state == RIO_CM_CONNECTED) {
+ riocm_debug(EXIT, "close ch %d", ch->id);
+ idr_remove(&ch_idr, ch->id);
+ list_add(&ch->ch_node, &list);
+ }
}
spin_unlock_bh(&idr_lock);
+ list_for_each_entry(ch, &list, ch_node)
+ riocm_send_close(ch);
+
return NOTIFY_DONE;
}
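
The shutdown hunk above is the usual "collect under the lock, act outside it"
pattern for per-item work that may sleep. A minimal sketch with invented
names:

	#include <linux/list.h>
	#include <linux/printk.h>
	#include <linux/spinlock.h>

	struct chan {
		int id;
		struct list_head node;
	};

	static void close_all(spinlock_t *lock, struct list_head *live)
	{
		struct chan *ch;
		LIST_HEAD(pending);

		spin_lock_bh(lock);
		/* move candidates to a private list; nothing may sleep here */
		list_splice_init(live, &pending);
		spin_unlock_bh(lock);

		/* outside the lock it is safe to call sleeping functions */
		list_for_each_entry(ch, &pending, node)
			pr_info("closing channel %d\n", ch->id);
	}
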
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index b2daa6641417..c9ff26199711 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -2,7 +2,7 @@
* max14577.c - Regulator driver for the Maxim 14577/77836
*
* Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -331,7 +331,7 @@ static void __exit max14577_regulator_exit(void)
}
module_exit(max14577_regulator_exit);
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max14577-regulator");
diff --git a/drivers/regulator/max77693-regulator.c b/drivers/regulator/max77693-regulator.c
index de730fd3f8a5..cfbb9512e486 100644
--- a/drivers/regulator/max77693-regulator.c
+++ b/drivers/regulator/max77693-regulator.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2013-2015 Samsung Electronics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
- * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
+ * Krzysztof Kozlowski <krzk@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -314,5 +314,5 @@ module_exit(max77693_pmic_cleanup);
MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
-MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 5022fa8d10c6..8ed46a9a55c8 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -178,20 +178,21 @@ static const struct regulator_desc pma8084_hfsmps = {
static const struct regulator_desc pma8084_ftsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
- REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+ REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
- .n_voltages = 340,
+ .n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pma8084_pldo = {
.linear_ranges = (struct regulator_linear_range[]) {
- REGULATOR_LINEAR_RANGE(750000, 0, 30, 25000),
- REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
- .n_linear_ranges = 2,
- .n_voltages = 100,
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
@@ -221,29 +222,30 @@ static const struct regulator_desc pm8x41_hfsmps = {
static const struct regulator_desc pm8841_ftsmps = {
.linear_ranges = (struct regulator_linear_range[]) {
REGULATOR_LINEAR_RANGE(350000, 0, 184, 5000),
- REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+ REGULATOR_LINEAR_RANGE(1280000, 185, 261, 10000),
},
.n_linear_ranges = 2,
- .n_voltages = 340,
+ .n_voltages = 262,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_boost = {
.linear_ranges = (struct regulator_linear_range[]) {
- REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+ REGULATOR_LINEAR_RANGE(4000000, 0, 30, 50000),
},
.n_linear_ranges = 1,
- .n_voltages = 16,
+ .n_voltages = 31,
.ops = &rpm_smps_ldo_ops,
};
static const struct regulator_desc pm8941_pldo = {
.linear_ranges = (struct regulator_linear_range[]) {
- REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000),
- REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ REGULATOR_LINEAR_RANGE( 750000, 0, 63, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 64, 126, 25000),
+ REGULATOR_LINEAR_RANGE(3100000, 127, 163, 50000),
},
- .n_linear_ranges = 2,
- .n_voltages = 100,
+ .n_linear_ranges = 3,
+ .n_voltages = 164,
.ops = &rpm_smps_ldo_ops,
};
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bf40063de202..6d4b68c483f3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
__u16, __u16,
enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
+int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
/* exports for OSN */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7dba6c8537a1..20cf29613043 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
int e;
e = 0;
- while (buffer->element[e].addr) {
+ while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
+ buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
@@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
return rc;
}
+/* try to restore a device's offload features after recovery */
+int qeth_recover_features(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+ netdev_features_t recover = dev->features;
+
+ if (recover & NETIF_F_IP_CSUM) {
+ if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
+ recover ^= NETIF_F_IP_CSUM;
+ }
+ if (recover & NETIF_F_RXCSUM) {
+ if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
+ recover ^= NETIF_F_RXCSUM;
+ }
+ if (recover & NETIF_F_TSO) {
+ if (qeth_set_ipa_tso(card, 1))
+ recover ^= NETIF_F_TSO;
+ }
+
+ if (recover == dev->features)
+ return 0;
+
+ dev_warn(&card->gdev->dev,
+ "Device recovery failed to restore all offload features\n");
+ dev->features = recover;
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(qeth_recover_features);
+
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7bc20c5188bc..bb27058fa9f0 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
- /* Turn on SG per default */
- card->dev->features |= NETIF_F_SG;
}
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
- card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);
@@ -1246,6 +1243,9 @@ contin:
}
/* this also sets saved unicast addresses */
qeth_l2_set_rx_mode(card->dev);
+ rtnl_lock();
+ qeth_recover_features(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 72934666fedf..272d9e7419be 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
if (addr->in_progress)
return -EINPROGRESS;
+ if (!qeth_card_hw_is_reachable(card)) {
+ addr->disp_flag = QETH_DISP_ADDR_DELETE;
+ return 0;
+ }
+
rc = qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
@@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
hash_add(card->ip_htable, &addr->hnode,
qeth_l3_ipaddr_hash(addr));
+ if (!qeth_card_hw_is_reachable(card)) {
+ addr->disp_flag = QETH_DISP_ADDR_ADD;
+ return 0;
+ }
+
/* qeth_l3_register_addr_entry can go to sleep
* if we add an IPV4 addr. This is because the SETIP
* ipa cmd starts ARP stuff for the IPV4 addr.
@@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
int i;
int rc;
- QETH_CARD_TEXT(card, 4, "recoverip");
+ QETH_CARD_TEXT(card, 4, "recovrip");
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
- if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+ if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
+ qeth_l3_deregister_addr_entry(card, addr);
+ hash_del(&addr->hnode);
+ kfree(addr);
+ } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
@@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
- if (addr->ref_counter < 1) {
+ if (addr->ref_counter < 1)
qeth_l3_delete_ip(card, addr);
- kfree(addr);
- }
} else {
hash_del(&addr->hnode);
kfree(addr);
@@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
spin_lock_bh(&card->ip_lock);
- if (!qeth_l3_ip_from_hash(card, ipaddr))
+ if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);
@@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
spin_lock_bh(&card->ip_lock);
- if (!qeth_l3_ip_from_hash(card, ipaddr))
+ if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);
@@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
- card->dev->features = NETIF_F_SG;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_keep_dst(card->dev);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
- card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@@ -3269,6 +3279,7 @@ contin:
else
dev_open(card->dev);
qeth_l3_set_multicast_list(card->dev);
+ qeth_recover_features(card->dev);
rtnl_unlock();
}
qeth_trace_features(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 65645b11fc19..0e00a5ce0f00 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->u.a6.pfxlen = 0;
addr->type = QETH_IP_TYPE_NORMAL;
+ spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
kfree(addr);
}
@@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->type = QETH_IP_TYPE_NORMAL;
} else
return -ENOMEM;
+
+ spin_lock_bh(&card->ip_lock);
qeth_l3_add_ip(card, addr);
+ spin_unlock_bh(&card->ip_lock);
kfree(addr);
return count;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index 83458f7a2824..6dc96c8dfe75 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -361,8 +361,9 @@ static const char * const snstext[] = {
/* Get sense key string or NULL if not available */
const char *
-scsi_sense_key_string(unsigned char key) {
- if (key <= 0xE)
+scsi_sense_key_string(unsigned char key)
+{
+ if (key < ARRAY_SIZE(snstext))
return snstext[key];
return NULL;
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e4ba2d2616cd..7c0d7af0d3b7 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -84,6 +84,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *);
static const struct cxgb4_uld_info cxgb4i_uld_info = {
.name = DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = false,
.add = t4_uld_add,
.rx_handler = t4_uld_rx_handler,
.state_change = t4_uld_state_change,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index eaccd651ccda..246456925335 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -246,6 +246,10 @@ static struct {
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"STK", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 3f0ff072184b..60b651bfaa01 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -341,22 +341,6 @@ static int do_sas_phy_delete(struct device *dev, void *data)
}
/**
- * is_sas_attached - check if device is SAS attached
- * @sdev: scsi device to check
- *
- * returns true if the device is SAS attached
- */
-int is_sas_attached(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost = sdev->host;
-
- return shost->transportt->host_attrs.ac.class ==
- &sas_host_class.class;
-}
-EXPORT_SYMBOL(is_sas_attached);
-
-
-/**
* sas_remove_children - tear down a devices SAS data structures
* @dev: device belonging to the sas object
*
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0e8601aa877a..8c9a35c91705 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- if (is_sas_attached(sdev))
+ if (scsi_is_sas_rphy(&sdev->sdev_gendev))
efd.addr = sas_get_address(sdev);
if (efd.addr) {
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index e3da1a2fdb66..2a9da2e0ea6b 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -962,7 +962,7 @@ static void wd719x_pci_remove(struct pci_dev *pdev)
scsi_host_put(sh);
}
-static DEFINE_PCI_DEVICE_TABLE(wd719x_pci_table) = {
+static const struct pci_device_id wd719x_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_WD, 0x3296) },
{}
};
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 823cbc92d1e7..7a37090dabbe 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -720,8 +720,6 @@ static int img_spfi_remove(struct platform_device *pdev)
clk_disable_unprepare(spfi->sys_clk);
}
- spi_master_put(master);
-
return 0;
}
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 0be89e052428..899d7a8f0889 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -685,7 +685,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
mtk_spi_reset(mdata);
- spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index f3df522db93b..58d2d48e16a5 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -214,6 +214,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
return PTR_ERR(ssp->clk);
memset(&pi, 0, sizeof(pi));
+ pi.fwnode = dev->dev.fwnode;
pi.parent = &dev->dev;
pi.name = "pxa2xx-spi";
pi.id = ssp->port_id;
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index c338ef1136f6..7f1555621f8e 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1030,7 +1030,6 @@ static int spi_qup_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- spi_master_put(master);
return 0;
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0f83ad1d5a58..1de3a772eb7d 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -262,6 +262,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+ /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */
+ if (sh_msiof_spi_div_table[k].div == 1 && brps > 2)
+ continue;
if (brps <= 32) /* max of brdv is 32 */
break;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51ad42fad567..200ca228d885 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -960,7 +960,7 @@ static int spi_transfer_one_message(struct spi_master *master,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- unsigned long ms = 1;
+ unsigned long long ms = 1;
struct spi_statistics *statm = &master->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
@@ -991,9 +991,13 @@ static int spi_transfer_one_message(struct spi_master *master,
if (ret > 0) {
ret = 0;
- ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms = 8LL * 1000LL * xfer->len;
+ do_div(ms, xfer->speed_hz);
ms += ms + 100; /* some tolerance */
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
ms = wait_for_completion_timeout(&master->xfer_completion,
msecs_to_jiffies(ms));
}
@@ -1159,6 +1163,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
if (ret < 0) {
dev_err(&master->dev, "Failed to power device: %d\n",
ret);
+ mutex_unlock(&master->io_mutex);
return;
}
}
@@ -1174,6 +1179,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
if (master->auto_runtime_pm)
pm_runtime_put(master->dev.parent);
+ mutex_unlock(&master->io_mutex);
return;
}
}
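[Editor's note - an illustrative sketch, not part of the patch above. On a 32-bit kernel "unsigned long" is 32 bits, so the old timeout expression "len * 8 * 1000 / speed_hz" overflows for large, slow transfers: len = 1 MiB at 100 kHz gives 1048576 * 8000 = 8388608000, well past UINT_MAX (4294967295). The helper below, whose name is hypothetical, restates the new 64-bit computation in isolation:]

#include <linux/kernel.h>
#include <asm/div64.h>

static unsigned int spi_xfer_timeout_ms(unsigned int len, unsigned int speed_hz)
{
	unsigned long long ms = 8ULL * 1000ULL * len;	/* bits to send, scaled to ms numerator */

	do_div(ms, speed_hz);		/* 64-by-32 divide, as in the patch */
	ms += ms + 100;			/* double it and add 100 ms of tolerance */
	if (ms > UINT_MAX)		/* clamp before msecs_to_jiffies() */
		ms = UINT_MAX;
	return ms;
}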
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c
index d7dd1e55e347..9f525ff7290c 100644
--- a/drivers/staging/comedi/drivers/adv_pci1760.c
+++ b/drivers/staging/comedi/drivers/adv_pci1760.c
@@ -196,6 +196,7 @@ static int pci1760_pwm_ns_to_div(unsigned int flags, unsigned int ns)
break;
case CMDF_ROUND_DOWN:
divisor = ns / PCI1760_PWM_TIMEBASE;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 4ab186669f0c..ec5b9a23494d 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -56,11 +56,6 @@
#define N_CHANS 8
-enum waveform_state_bits {
- WAVEFORM_AI_RUNNING,
- WAVEFORM_AO_RUNNING
-};
-
/* Data unique to this driver */
struct waveform_private {
struct timer_list ai_timer; /* timer for AI commands */
@@ -68,7 +63,6 @@ struct waveform_private {
unsigned int wf_amplitude; /* waveform amplitude in microvolts */
unsigned int wf_period; /* waveform period in microseconds */
unsigned int wf_current; /* current time in waveform period */
- unsigned long state_bits;
unsigned int ai_scan_period; /* AI scan period in usec */
unsigned int ai_convert_period; /* AI conversion period in usec */
struct timer_list ao_timer; /* timer for AO commands */
@@ -191,10 +185,6 @@ static void waveform_ai_timer(unsigned long arg)
unsigned int nsamples;
unsigned int time_increment;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
- return;
-
now = ktime_to_us(ktime_get());
nsamples = comedi_nsamples_left(s, UINT_MAX);
@@ -386,11 +376,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
*/
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ai_timer);
return 0;
}
@@ -400,11 +385,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ai_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ai_timer);
+ } else {
+ del_timer_sync(&devpriv->ai_timer);
+ }
return 0;
}
@@ -436,10 +422,6 @@ static void waveform_ao_timer(unsigned long arg)
u64 scans_since;
unsigned int scans_avail = 0;
- /* check command is still active */
- if (!test_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits))
- return;
-
/* determine number of scan periods since last time */
now = ktime_to_us(ktime_get());
scans_since = now - devpriv->ao_last_scan_time;
@@ -518,11 +500,6 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
-
- /* mark command as active */
- smp_mb__before_atomic();
- set_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
add_timer(&devpriv->ao_timer);
return 1;
@@ -608,11 +585,12 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
- /* mark command as no longer active */
- clear_bit(WAVEFORM_AO_RUNNING, &devpriv->state_bits);
- smp_mb__after_atomic();
- /* cannot call del_timer_sync() as may be called from timer routine */
- del_timer(&devpriv->ao_timer);
+ if (in_softirq()) {
+ /* Assume we were called from the timer routine itself. */
+ del_timer(&devpriv->ao_timer);
+ } else {
+ del_timer_sync(&devpriv->ao_timer);
+ }
return 0;
}
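[Editor's note - the cancel pattern adopted above, restated as a stand-alone sketch; the helper name is hypothetical. del_timer_sync() waits for a running handler to finish, so calling it from the timer callback itself would deadlock. in_softirq() serves as a heuristic for "we are running in the timer callback" and selects the non-blocking del_timer() instead:]

#include <linux/timer.h>
#include <linux/preempt.h>

static void waveform_cancel_timer(struct timer_list *timer)
{
	if (in_softirq())
		del_timer(timer);	/* possibly our own callback: just dequeue */
	else
		del_timer_sync(timer);	/* safe to wait for the handler to finish */
}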
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 65daef0c00d5..0f4eb954aa80 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -634,7 +634,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
const struct daq200_boardtype *board;
int i;
- if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
+ if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
return NULL;
for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 904f637797b6..8bbd93814340 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -588,8 +588,8 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE |
- (it->options[2] == 1) ? SDF_DIFF :
- (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND;
+ ((it->options[2] == 1) ? SDF_DIFF :
+ (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
s->n_chan = (it->options[2] == 1) ? 8 : 16;
s->maxdata = 0x0fff;
s->range_table = board->is_pgh ? &dt2811_pgh_ai_ranges
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 8dabb19519a5..0f97d7b611d7 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2772,7 +2772,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
int i;
static const int timeout = 1000;
- if (trig_num != cmd->start_arg)
+ /*
+ * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
+ * For backwards compatibility, also allow trig_num == 0 when
+ * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
+ * in that case, the internal trigger is being used as a pre-trigger
+ * before the external trigger.
+ */
+ if (!(trig_num == cmd->start_arg ||
+ (trig_num == 0 && cmd->start_src != TRIG_INT)))
return -EINVAL;
/*
@@ -5480,7 +5488,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = (devpriv->is_m_series) ? 0xffffffff
: 0x00ffffff;
s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
s->insn_config = ni_tio_insn_config;
#ifdef PCIDMA
if (dev->irq && devpriv->mite) {
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 170ac980abcb..24c348d2f5bb 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -419,6 +419,7 @@ static ssize_t ad5933_store(struct device *dev,
mutex_lock(&indio_dev->mlock);
switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
+ ret = -EINVAL;
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
@@ -426,7 +427,6 @@ static ssize_t ad5933_store(struct device *dev,
ret = ad5933_cmd(st, 0);
break;
}
- ret = -EINVAL;
break;
case AD5933_IN_PGA_GAIN:
if (sysfs_streq(buf, "1")) {
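[Editor's note - the bug pattern fixed in the AD5933_OUT_RANGE case above, in isolation: the error default was assigned after the search loop, clobbering any success status set inside it. Moving the default before the loop preserves the loop's result. A sketch with hypothetical names:]

#include <linux/errno.h>

static int select_range(unsigned int val, const unsigned int *avail, int n)
{
	int ret = -EINVAL;	/* default set up front: no matching range */
	int i;

	for (i = 0; i < n; i++)
		if (val == avail[i]) {
			ret = i;	/* success recorded inside the loop */
			break;
		}
	return ret;		/* nothing after the loop can clobber it */
}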
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 3664bfd0178b..2c4dc69731e8 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -388,6 +388,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
struct inode *inode = NULL;
__u64 bits = 0;
int rc = 0;
+ struct dentry *alias;
/* NB 1 request reference will be taken away by ll_intent_lock()
* when I return
@@ -412,26 +413,12 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
*/
}
- /* Only hash *de if it is unhashed (new dentry).
- * Atoimc_open may passing hashed dentries for open.
- */
- if (d_unhashed(*de)) {
- struct dentry *alias;
-
- alias = ll_splice_alias(inode, *de);
- if (IS_ERR(alias)) {
- rc = PTR_ERR(alias);
- goto out;
- }
- *de = alias;
- } else if (!it_disposition(it, DISP_LOOKUP_NEG) &&
- !it_disposition(it, DISP_OPEN_CREATE)) {
- /* With DISP_OPEN_CREATE dentry will be
- * instantiated in ll_create_it.
- */
- LASSERT(!d_inode(*de));
- d_instantiate(*de, inode);
+ alias = ll_splice_alias(inode, *de);
+ if (IS_ERR(alias)) {
+ rc = PTR_ERR(alias);
+ goto out;
}
+ *de = alias;
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
/* we have the lookup lock - unhide dentry */
@@ -587,6 +574,24 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
*opened);
+ /* Only negative dentries enter here */
+ LASSERT(!d_inode(dentry));
+
+ if (!d_in_lookup(dentry)) {
+ /* A valid negative dentry that just passed revalidation,
+	 * there's little point in trying to open it server-side,
+	 * even though there's a minuscule chance it might succeed.
+ * Either way it's a valid race to just return -ENOENT here.
+ */
+ if (!(open_flags & O_CREAT))
+ return -ENOENT;
+
+ /* Otherwise we just unhash it to be rehashed afresh via
+ * lookup if necessary
+ */
+ d_drop(dentry);
+ }
+
it = kzalloc(sizeof(*it), GFP_NOFS);
if (!it)
return -ENOMEM;
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
index a10d4f82b954..13224694a8ae 100644
--- a/drivers/staging/media/cec/TODO
+++ b/drivers/staging/media/cec/TODO
@@ -12,6 +12,7 @@ Hopefully this will happen later in 2016.
Other TODOs:
+- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
Applications should be able to choose this when calling S_LOG_ADDRS.
- If the reply field of cec_msg is set then when the reply arrives it
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index b2393bbacb26..946986f3ac0d 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap,
u64 ts = ktime_get_ns();
struct cec_fh *fh;
- mutex_lock(&adap->devnode.fhs_lock);
+ mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, ev, ts);
- mutex_unlock(&adap->devnode.fhs_lock);
+ mutex_unlock(&adap->devnode.lock);
}
/*
@@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
CEC_MODE_MONITOR_ALL;
- mutex_lock(&adap->devnode.fhs_lock);
+ mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower >= monitor_mode)
cec_queue_msg_fh(fh, msg);
}
- mutex_unlock(&adap->devnode.fhs_lock);
+ mutex_unlock(&adap->devnode.lock);
}
/*
@@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
{
struct cec_fh *fh;
- mutex_lock(&adap->devnode.fhs_lock);
+ mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_FOLLOWER)
cec_queue_msg_fh(fh, msg);
}
- mutex_unlock(&adap->devnode.fhs_lock);
+ mutex_unlock(&adap->devnode.lock);
}
/* Notify userspace of an adapter state change. */
@@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
if (!valid_la || msg->len <= 1)
return;
+ if (adap->log_addrs.log_addr_mask == 0)
+ return;
+
/*
* Process the message on the protocol level. If is_reply is true,
* then cec_receive_notify() won't pass on the reply to the listener(s)
@@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg)
dprintk(1, "could not claim LA %d\n", i);
}
+ if (adap->log_addrs.log_addr_mask == 0 &&
+ !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
+ goto unconfigure;
+
configured:
if (adap->log_addrs.log_addr_mask == 0) {
/* Fall back to unregistered */
las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
las->log_addr_mask = 1 << las->log_addr[0];
+ for (i = 1; i < las->num_log_addrs; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
adap->is_configured = true;
adap->is_configuring = false;
@@ -1070,6 +1079,8 @@ configured:
cec_report_features(adap, i);
cec_report_phys_addr(adap, i);
}
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
mutex_lock(&adap->lock);
adap->kthread_config = NULL;
mutex_unlock(&adap->lock);
@@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
u8 init_laddr = cec_msg_initiator(msg);
u8 devtype = cec_log_addr2dev(adap, dest_laddr);
int la_idx = cec_log_addr2idx(adap, dest_laddr);
- bool is_directed = la_idx >= 0;
bool from_unregistered = init_laddr == 0xf;
struct cec_msg tx_cec_msg = { };
@@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Unprocessed messages are aborted if userspace isn't doing
* any processing either.
*/
- if (is_directed && !is_reply && !adap->follower_cnt &&
+ if (!is_broadcast && !is_reply && !adap->follower_cnt &&
!adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
return cec_feature_abort(adap, msg);
break;
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c
index 7be7615a0fdf..e274e2f22398 100644
--- a/drivers/staging/media/cec/cec-api.c
+++ b/drivers/staging/media/cec/cec-api.c
@@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
return -EFAULT;
- log_addrs.flags = 0;
+ log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
mutex_lock(&adap->lock);
if (!adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
@@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
void __user *parg = (void __user *)arg;
if (!devnode->registered)
- return -EIO;
+ return -ENODEV;
switch (cmd) {
case CEC_ADAP_G_CAPS:
@@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp)
filp->private_data = fh;
- mutex_lock(&devnode->fhs_lock);
+ mutex_lock(&devnode->lock);
/* Queue up initial state events */
ev_state.state_change.phys_addr = adap->phys_addr;
ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
cec_queue_event_fh(fh, &ev_state, 0);
list_add(&fh->list, &devnode->fhs);
- mutex_unlock(&devnode->fhs_lock);
+ mutex_unlock(&devnode->lock);
return 0;
}
@@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp)
cec_monitor_all_cnt_dec(adap);
mutex_unlock(&adap->lock);
- mutex_lock(&devnode->fhs_lock);
+ mutex_lock(&devnode->lock);
list_del(&fh->list);
- mutex_unlock(&devnode->fhs_lock);
+ mutex_unlock(&devnode->lock);
/* Unhook pending transmits from this filehandle. */
mutex_lock(&adap->lock);
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
index 112a5fae12f5..3b1e4d2b190d 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/staging/media/cec/cec-core.c
@@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode)
{
/*
* Check if the cec device is available. This needs to be done with
- * the cec_devnode_lock held to prevent an open/unregister race:
+ * the devnode->lock held to prevent an open/unregister race:
* without the lock, the device could be unregistered and freed between
* the devnode->registered check and get_device() calls, leading to
* a crash.
*/
- mutex_lock(&cec_devnode_lock);
+ mutex_lock(&devnode->lock);
/*
* return ENXIO if the cec device has been removed
* already or if it is not registered anymore.
*/
if (!devnode->registered) {
- mutex_unlock(&cec_devnode_lock);
+ mutex_unlock(&devnode->lock);
return -ENXIO;
}
/* and increase the device refcount */
get_device(&devnode->dev);
- mutex_unlock(&cec_devnode_lock);
+ mutex_unlock(&devnode->lock);
return 0;
}
void cec_put_device(struct cec_devnode *devnode)
{
- mutex_lock(&cec_devnode_lock);
put_device(&devnode->dev);
- mutex_unlock(&cec_devnode_lock);
}
/* Called when the last user of the cec device exits. */
@@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd)
struct cec_devnode *devnode = to_cec_devnode(cd);
mutex_lock(&cec_devnode_lock);
-
/* Mark device node number as free */
clear_bit(devnode->minor, cec_devnode_nums);
-
mutex_unlock(&cec_devnode_lock);
+
cec_delete_adapter(to_cec_adapter(devnode));
}
@@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Initialization */
INIT_LIST_HEAD(&devnode->fhs);
- mutex_init(&devnode->fhs_lock);
+ mutex_init(&devnode->lock);
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
@@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
cdev_del:
cdev_del(&devnode->cdev);
clr_bit:
+ mutex_lock(&cec_devnode_lock);
clear_bit(devnode->minor, cec_devnode_nums);
+ mutex_unlock(&cec_devnode_lock);
return ret;
}
@@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
{
struct cec_fh *fh;
+ mutex_lock(&devnode->lock);
+
/* Check if devnode was never registered or already unregistered */
- if (!devnode->registered || devnode->unregistered)
+ if (!devnode->registered || devnode->unregistered) {
+ mutex_unlock(&devnode->lock);
return;
+ }
- mutex_lock(&devnode->fhs_lock);
list_for_each_entry(fh, &devnode->fhs, list)
wake_up_interruptible(&fh->wait);
- mutex_unlock(&devnode->fhs_lock);
devnode->registered = false;
devnode->unregistered = true;
+ mutex_unlock(&devnode->lock);
+
device_del(&devnode->dev);
cdev_del(&devnode->cdev);
put_device(&devnode->dev);
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
index 94f8590492dc..ed8bd95ad6d0 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work)
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
break;
- case MSGCODE_TRANSMIT_FAILED_LINE:
- cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST,
- 1, 0, 0, 0);
- break;
case MSGCODE_TRANSMIT_FAILED_ACK:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
0, 1, 0, 0);
break;
+ case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
@@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
schedule_work(&pulse8->work);
break;
+ case MSGCODE_HIGH_ERROR:
+ case MSGCODE_LOW_ERROR:
+ case MSGCODE_RECEIVE_FAILED:
case MSGCODE_TIMEOUT_ERROR:
break;
case MSGCODE_COMMAND_ACCEPTED:
@@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
int err;
cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
- cmd[1] = 3;
+ cmd[1] = signal_free_time;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0b1760cba6e3..78f524fcd214 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3363,7 +3363,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
if (!hif_workqueue) {
netdev_err(vif->ndev, "Failed to create workqueue\n");
result = -ENOMEM;
- goto _fail_mq_;
+ goto _fail_;
}
setup_timer(&periodic_rssi, GetPeriodicRSSI,
@@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
-_fail_mq_:
destroy_workqueue(hif_workqueue);
_fail_:
return result;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 3a66255f14fc..32215110d597 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -648,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
mutex_unlock(&wl->hif_cs);
}
if (&wl->txq_event)
- wait_for_completion(&wl->txq_event);
+ complete(&wl->txq_event);
wlan_deinitialize_threads(dev);
deinit_irq(dev);
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 9092600a1794..2c2e8aca8305 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -1191,7 +1191,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
struct wilc_priv *priv;
struct wilc_vif *vif;
u32 i = 0;
- u32 associatedsta = 0;
+ u32 associatedsta = ~0;
u32 inactive_time = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(dev);
@@ -1204,7 +1204,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
}
}
- if (associatedsta == -1) {
+ if (associatedsta == ~0) {
netdev_err(dev, "sta required is not associated\n");
return -ENOENT;
}
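[Editor's note - a sketch of the sentinel fix above; the helper and table layout are hypothetical. The old code initialised the index to 0 but tested it against -1, so the "not associated" branch could never fire when no entry matched. Initialising to ~0 (the all-ones u32, numerically equal to (u32)-1) makes the failed-search test reachable and keeps the comparison unsigned:]

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/if_ether.h>

static int find_assoc_sta(const u8 *mac, const u8 (*table)[ETH_ALEN], u32 n)
{
	u32 sta = ~0;	/* sentinel: no match yet */
	u32 i;

	for (i = 0; i < n; i++)
		if (!memcmp(mac, table[i], ETH_ALEN))
			sta = i;

	if (sta == ~0)
		return -ENOENT;	/* sentinel untouched: search failed */
	return sta;
}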
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 0ae0b131abfc..2fb1bf1a26c5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -24,6 +24,7 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
+#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"
@@ -72,15 +73,6 @@ out:
return wr_waitp->ret;
}
-/* Returns whether a CPL status conveys negative advice.
- */
-static int cxgbit_is_neg_adv(unsigned int status)
-{
- return status == CPL_ERR_RTX_NEG_ADVICE ||
- status == CPL_ERR_PERSIST_NEG_ADVICE ||
- status == CPL_ERR_KEEPALV_NEG_ADVICE;
-}
-
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
@@ -623,21 +615,14 @@ void cxgbit_free_np(struct iscsi_np *np)
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
- struct cpl_close_con_req *req;
- unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+ u32 len = roundup(sizeof(struct cpl_close_con_req), 16);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
- req = (struct cpl_close_con_req *)__skb_put(skb, len);
- memset(req, 0, len);
-
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- csk->tid));
- req->rsvd = 0;
+ cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
+ NULL, NULL);
cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
__skb_queue_tail(&csk->txq, skb);
@@ -662,9 +647,8 @@ static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
- struct cpl_abort_req *req;
- unsigned int len = roundup(sizeof(*req), 16);
struct sk_buff *skb;
+ u32 len = roundup(sizeof(struct cpl_abort_req), 16);
pr_debug("%s: csk %p tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
@@ -675,15 +659,9 @@ static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
cxgbit_send_tx_flowc_wr(csk);
skb = __skb_dequeue(&csk->skbq);
- req = (struct cpl_abort_req *)__skb_put(skb, len);
- memset(req, 0, len);
+ cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
+ csk->com.cdev, cxgbit_abort_arp_failure);
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
- t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
- csk->tid));
- req->cmd = CPL_ABORT_SEND_RST;
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
@@ -789,109 +767,6 @@ void _cxgbit_free_csk(struct kref *kref)
kfree(csk);
}
-static void
-cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
- __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
- __be16 *peer_port)
-{
- u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
- struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
- struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
- struct tcphdr *tcp = (struct tcphdr *)
- ((u8 *)(req + 1) + eth_len + ip_len);
-
- if (ip->version == 4) {
- pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
- __func__,
- ntohl(ip->saddr), ntohl(ip->daddr),
- ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 4;
- memcpy(peer_ip, &ip->saddr, 4);
- memcpy(local_ip, &ip->daddr, 4);
- } else {
- pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
- __func__,
- ip6->saddr.s6_addr, ip6->daddr.s6_addr,
- ntohs(tcp->source),
- ntohs(tcp->dest));
- *iptype = 6;
- memcpy(peer_ip, ip6->saddr.s6_addr, 16);
- memcpy(local_ip, ip6->daddr.s6_addr, 16);
- }
-
- *peer_port = tcp->source;
- *local_port = tcp->dest;
-}
-
-static int
-cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
-{
- u8 i;
-
- egress_dev = cxgbit_get_real_dev(egress_dev);
- for (i = 0; i < cdev->lldi.nports; i++)
- if (cdev->lldi.ports[i] == egress_dev)
- return 1;
- return 0;
-}
-
-static struct dst_entry *
-cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
- __be16 local_port, __be16 peer_port, u8 tos,
- __u32 sin6_scope_id)
-{
- struct dst_entry *dst = NULL;
-
- if (IS_ENABLED(CONFIG_IPV6)) {
- struct flowi6 fl6;
-
- memset(&fl6, 0, sizeof(fl6));
- memcpy(&fl6.daddr, peer_ip, 16);
- memcpy(&fl6.saddr, local_ip, 16);
- if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
- fl6.flowi6_oif = sin6_scope_id;
- dst = ip6_route_output(&init_net, NULL, &fl6);
- if (!dst)
- goto out;
- if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
- !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
- dst_release(dst);
- dst = NULL;
- }
- }
-out:
- return dst;
-}
-
-static struct dst_entry *
-cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
- __be16 local_port, __be16 peer_port, u8 tos)
-{
- struct rtable *rt;
- struct flowi4 fl4;
- struct neighbour *n;
-
- rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
- local_ip,
- peer_port, local_port, IPPROTO_TCP,
- tos, 0);
- if (IS_ERR(rt))
- return NULL;
- n = dst_neigh_lookup(&rt->dst, &peer_ip);
- if (!n)
- return NULL;
- if (!cxgbit_our_interface(cdev, n->dev) &&
- !(n->dev->flags & IFF_LOOPBACK)) {
- neigh_release(n);
- dst_release(&rt->dst);
- return NULL;
- }
- neigh_release(n);
- return &rt->dst;
-}
-
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
unsigned int linkspeed;
@@ -1072,21 +947,14 @@ int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
- struct cpl_tid_release *req;
- unsigned int len = roundup(sizeof(*req), 16);
+ u32 len = roundup(sizeof(struct cpl_tid_release), 16);
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb)
return;
- req = (struct cpl_tid_release *)__skb_put(skb, len);
- memset(req, 0, len);
-
- INIT_TP_WR(req, tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
- CPL_TID_RELEASE, tid));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ cxgb_mk_tid_release(skb, len, tid, 0);
cxgbit_ofld_send(cdev, skb);
}
@@ -1108,20 +976,6 @@ cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
return ret < 0 ? ret : 0;
}
-static void
-cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx, int use_ts, int ipv6)
-{
- unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
- sizeof(struct iphdr)) +
- sizeof(struct tcphdr) +
- (use_ts ? round_up(TCPOLEN_TIMESTAMP,
- 4) : 0);
- unsigned short data_size = mtu - hdr_size;
-
- cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
-}
-
static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
if (csk->com.state != CSK_STATE_ESTABLISHED) {
@@ -1140,22 +994,18 @@ static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
struct sk_buff *skb;
- struct cpl_rx_data_ack *req;
- unsigned int len = roundup(sizeof(*req), 16);
+ u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
+ u32 credit_dack;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -1;
- req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
- memset(req, 0, len);
+ credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+ RX_CREDITS_V(csk->rx_credits);
- set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
- INIT_TP_WR(req, csk->tid);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
- csk->tid));
- req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
- RX_CREDITS_V(csk->rx_credits));
+ cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
+ credit_dack);
csk->rx_credits = 0;
@@ -1210,15 +1060,6 @@ out:
return -ENOMEM;
}
-static u32 cxgbit_compute_wscale(u32 win)
-{
- u32 wscale = 0;
-
- while (wscale < 14 && (65535 << wscale) < win)
- wscale++;
- return wscale;
-}
-
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
@@ -1246,10 +1087,10 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
INIT_TP_WR(rpl5, csk->tid);
OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
csk->tid));
- cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
- req->tcpopt.tstamp,
- (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
- wscale = cxgbit_compute_wscale(csk->rcv_win);
+ cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+ req->tcpopt.tstamp,
+ (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgb_compute_wscale(csk->rcv_win);
/*
* Specify the largest window that will fit in opt0. The
* remainder will be specified in the rx_data_ack.
@@ -1340,8 +1181,8 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
goto rel_skb;
}
- cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
- &local_port, &peer_port);
+ cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
+ peer_ip, &local_port, &peer_port);
/* Find output route */
if (iptype == 4) {
@@ -1350,21 +1191,23 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
- *(__be32 *)peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
+ *(__be32 *)local_ip,
+ *(__be32 *)peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
} else {
pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
"lport %d rport %d peer_mss %d\n"
, __func__, cnp, tid,
local_ip, peer_ip, ntohs(local_port),
ntohs(peer_port), peer_mss);
- dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
- local_port, peer_port,
- PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
- ((struct sockaddr_in6 *)
- &cnp->com.local_addr)->sin6_scope_id);
+ dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
+ local_ip, peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &cnp->com.local_addr)->sin6_scope_id);
}
if (!dst) {
pr_err("%s - failed to find dst entry!\n",
@@ -1795,16 +1638,15 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
struct cpl_abort_req_rss *hdr = cplhdr(skb);
unsigned int tid = GET_TID(hdr);
- struct cpl_abort_rpl *rpl;
struct sk_buff *rpl_skb;
bool release = false;
bool wakeup_thread = false;
- unsigned int len = roundup(sizeof(*rpl), 16);
+ u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, tid, csk->com.state);
- if (cxgbit_is_neg_adv(hdr->status)) {
+ if (cxgb_is_neg_adv(hdr->status)) {
pr_err("%s: got neg advise %d on tid %u\n",
__func__, hdr->status, tid);
goto rel_skb;
@@ -1839,14 +1681,8 @@ static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
cxgbit_send_tx_flowc_wr(csk);
rpl_skb = __skb_dequeue(&csk->skbq);
- set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
-
- rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
- memset(rpl, 0, len);
- INIT_TP_WR(rpl, csk->tid);
- OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
- rpl->cmd = CPL_ABORT_NO_RST;
+ cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
cxgbit_ofld_send(csk->com.cdev, rpl_skb);
if (wakeup_thread) {
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 27dd11aff934..ad26b9372f10 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -652,6 +652,9 @@ static struct iscsit_transport cxgbit_transport = {
static struct cxgb4_uld_info cxgbit_uld_info = {
.name = DRV_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ .rxq_size = 1024,
+ .lro = true,
.add = cxgbit_uld_add,
.state_change = cxgbit_uld_state_change,
.lro_rx_handler = cxgbit_uld_lro_rx_handler,
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 71a339271fa5..5f817923f374 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -504,6 +504,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->zone)) {
dev_err(dev, "can't register thermal zone\n");
ret = PTR_ERR(priv->zone);
+ priv->zone = NULL;
goto error_unregister;
}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 9c15344b657a..a8c20413dbda 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -651,6 +651,12 @@ static struct pci_device_id nhi_ids[] = {
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1e116f53d6dd..9840fdecb73b 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -372,7 +372,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
- sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE &&
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE &&
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE)
tb_sw_warn(sw, "unsupported switch device id %#x\n",
sw->config.device_id);
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 122e0e4029fe..1a16feac9a36 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -15,8 +15,6 @@
#include <linux/serial_reg.h>
#include <linux/dmaengine.h>
-#include "../serial_mctrl_gpio.h"
-
struct uart_8250_dma {
int (*tx_dma)(struct uart_8250_port *p);
int (*rx_dma)(struct uart_8250_port *p);
@@ -133,43 +131,12 @@ void serial8250_em485_destroy(struct uart_8250_port *p);
static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
{
- int mctrl_gpio = 0;
-
serial_out(up, UART_MCR, value);
-
- if (value & UART_MCR_RTS)
- mctrl_gpio |= TIOCM_RTS;
- if (value & UART_MCR_DTR)
- mctrl_gpio |= TIOCM_DTR;
-
- mctrl_gpio_set(up->gpios, mctrl_gpio);
}
static inline int serial8250_in_MCR(struct uart_8250_port *up)
{
- int mctrl, mctrl_gpio = 0;
-
- mctrl = serial_in(up, UART_MCR);
-
- /* save current MCR values */
- if (mctrl & UART_MCR_RTS)
- mctrl_gpio |= TIOCM_RTS;
- if (mctrl & UART_MCR_DTR)
- mctrl_gpio |= TIOCM_DTR;
-
- mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
-
- if (mctrl_gpio & TIOCM_RTS)
- mctrl |= UART_MCR_RTS;
- else
- mctrl &= ~UART_MCR_RTS;
-
- if (mctrl_gpio & TIOCM_DTR)
- mctrl |= UART_MCR_DTR;
- else
- mctrl &= ~UART_MCR_DTR;
-
- return mctrl;
+ return serial_in(up, UART_MCR);
}
#if defined(__alpha__) && !defined(CONFIG_PCI)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 13ad5c3d2e68..dcf43f66404f 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -974,8 +974,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart = serial8250_find_match_or_unused(&up->port);
if (uart && uart->port.type != PORT_8250_CIR) {
- struct mctrl_gpios *gpios;
-
if (uart->port.dev)
uart_remove_one_port(&serial8250_reg, &uart->port);
@@ -1013,13 +1011,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
if (up->port.flags & UPF_FIXED_TYPE)
uart->port.type = up->port.type;
- gpios = mctrl_gpio_init(&uart->port, 0);
- if (IS_ERR(gpios)) {
- if (PTR_ERR(gpios) != -ENOSYS)
- return PTR_ERR(gpios);
- } else
- uart->gpios = gpios;
-
serial8250_set_defaults(uart);
/* Possibly override default I/O functions. */
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 737b4b3957b0..0facc789fe7d 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -31,7 +31,7 @@
#define IO_ADDR2 0x60
#define LDN 0x7
-#define IRQ_MODE 0x70
+#define FINTEK_IRQ_MODE 0x70
#define IRQ_SHARE BIT(4)
#define IRQ_MODE_MASK (BIT(6) | BIT(5))
#define IRQ_LEVEL_LOW 0
@@ -195,7 +195,7 @@ static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
outb(LDN, pdata->base_port + ADDR_PORT);
outb(pdata->index, pdata->base_port + DATA_PORT);
- outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+ outb(FINTEK_IRQ_MODE, pdata->base_port + ADDR_PORT);
tmp = inb(pdata->base_port + DATA_PORT);
tmp &= ~IRQ_MODE_MASK;
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index 339de9cd0866..20c5db2f4264 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -168,6 +168,9 @@ static void mid8250_set_termios(struct uart_port *p,
unsigned long w = BIT(24) - 1;
unsigned long mul, div;
+ /* Gracefully handle the B0 case: fall back to B9600 */
+ fuart = fuart ? fuart : 9600 * 16;
+
if (mid->board->freq < fuart) {
/* Find prescaler value that satisfies Fuart < Fref */
if (mid->board->freq > baud)
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index e14982f36a04..61ad6c3b20a0 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -134,21 +134,18 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
serial8250_do_set_mctrl(port, mctrl);
- if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
- UART_GPIO_RTS))) {
- /*
- * Turn off autoRTS if RTS is lowered and restore autoRTS
- * setting if RTS is raised
- */
- lcr = serial_in(up, UART_LCR);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
- priv->efr |= UART_EFR_RTS;
- else
- priv->efr &= ~UART_EFR_RTS;
- serial_out(up, UART_EFR, priv->efr);
- serial_out(up, UART_LCR, lcr);
- }
+ /*
+ * Turn off autoRTS if RTS is lowered and restore autoRTS setting
+ * if RTS is raised
+ */
+ lcr = serial_in(up, UART_LCR);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+ if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ priv->efr |= UART_EFR_RTS;
+ else
+ priv->efr &= ~UART_EFR_RTS;
+ serial_out(up, UART_EFR, priv->efr);
+ serial_out(up, UART_LCR, lcr);
}
/*
@@ -449,9 +446,7 @@ static void omap_8250_set_termios(struct uart_port *port,
priv->efr = 0;
up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
- if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW
- && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
- UART_GPIO_RTS))) {
+ if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
/* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
priv->efr |= UART_EFR_CTS;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 20ebaea5c414..bc51b32b2774 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1950,6 +1950,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954
#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958
+#define PCI_VENDOR_ID_ACCESIO 0x494f
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198
+#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
+#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
+
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
@@ -5113,6 +5150,108 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
0, pbn_pericom_PI7C9X7958 },
/*
+ * ACCES I/O Products quad
+ */
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7954 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_pericom_PI7C9X7958 },
+ /*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
{ PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 7481b95c6d84..bdfa659b9606 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1618,8 +1618,6 @@ static void serial8250_disable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
- mctrl_gpio_disable_ms(up->gpios);
-
up->ier &= ~UART_IER_MSI;
serial_port_out(port, UART_IER, up->ier);
}
@@ -1632,8 +1630,6 @@ static void serial8250_enable_ms(struct uart_port *port)
if (up->bugs & UART_BUG_NOMSR)
return;
- mctrl_gpio_enable_ms(up->gpios);
-
up->ier |= UART_IER_MSI;
serial8250_rpm_get(up);
@@ -1917,8 +1913,7 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
ret |= TIOCM_DSR;
if (status & UART_MSR_CTS)
ret |= TIOCM_CTS;
-
- return mctrl_gpio_get(up->gpios, &ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index c9ec839a5ddf..7c6f7afca5dd 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -6,7 +6,6 @@
config SERIAL_8250
tristate "8250/16550 and compatible serial support"
select SERIAL_CORE
- select SERIAL_MCTRL_GPIO if GPIOLIB
---help---
This selects whether you want to include the driver for the standard
serial ports. The standard answer is Y. People who might say N
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 065f5d97aa67..b93356834bb5 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -949,6 +949,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
int retval;
struct ci_hw_ep *hwep;
+ /*
+ * Unexpected USB controller behavior, caused by bad signal integrity
+ * or ground reference problems, can lead to isr_setup_status_phase
+ * being called with ci->status equal to NULL.
+ * If this situation occurs, you should review your USB hardware design.
+ */
+ if (WARN_ON_ONCE(!ci->status))
+ return -EPIPE;
+
hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
ci->status->context = ci;
ci->status->complete = isr_setup_status_complete;
@@ -1596,8 +1605,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
- /* Data+ pullup controlled by OTG state machine in OTG fsm mode */
- if (ci_otg_is_fsm_mode(ci))
+ /*
+ * Data+ pullup controlled by OTG state machine in OTG fsm mode;
+ * and don't touch Data+ in host mode for dual role config.
+ */
+ if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
return 0;
pm_runtime_get_sync(&ci->gadget.dev);
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 051163189810..a2d90aca779f 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -187,7 +187,7 @@ static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
[USB_ENDPOINT_XFER_BULK] = 512,
- [USB_ENDPOINT_XFER_INT] = 1023,
+ [USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 512,
@@ -240,8 +240,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
memcpy(&endpoint->desc, d, n);
INIT_LIST_HEAD(&endpoint->urb_list);
- /* Fix up bInterval values outside the legal range. Use 32 ms if no
- * proper value can be guessed. */
+ /*
+ * Fix up bInterval values outside the legal range.
+ * Use 10 or 8 ms if no proper value can be guessed.
+ */
i = 0; /* i = min, j = max, n = default */
j = 255;
if (usb_endpoint_xfer_int(d)) {
@@ -250,13 +252,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
- /* Many device manufacturers are using full-speed
+ /*
+ * Many device manufacturers are using full-speed
* bInterval values in high-speed interrupt endpoint
- * descriptors. Try to fix those and fall back to a
- * 32 ms default value otherwise. */
+ * descriptors. Try to fix those and fall back to an
+ * 8-ms default value otherwise.
+ */
n = fls(d->bInterval*8);
if (n == 0)
- n = 9; /* 32 ms = 2^(9-1) uframes */
+ n = 7; /* 8 ms = 2^(7-1) uframes */
j = 16;
/*
@@ -271,10 +275,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
}
break;
default: /* USB_SPEED_FULL or _LOW */
- /* For low-speed, 10 ms is the official minimum.
+ /*
+ * For low-speed, 10 ms is the official minimum.
* But some "overclocked" devices might want faster
- * polling so we'll allow it. */
- n = 32;
+ * polling so we'll allow it.
+ */
+ n = 10;
break;
}
} else if (usb_endpoint_xfer_isoc(d)) {
@@ -282,10 +288,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
j = 16;
switch (to_usb_device(ddev)->speed) {
case USB_SPEED_HIGH:
- n = 9; /* 32 ms = 2^(9-1) uframes */
+ n = 7; /* 8 ms = 2^(7-1) uframes */
break;
default: /* USB_SPEED_FULL */
- n = 6; /* 32 ms = 2^(6-1) frames */
+ n = 4; /* 8 ms = 2^(4-1) frames */
break;
}
}
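[Editor's note - a worked example of the bInterval fixup above. Some high-speed devices report bInterval in milliseconds (full-speed style) instead of the 2^(n-1)-microframe exponent the USB spec requires; fls(bInterval * 8) recovers a plausible exponent, and a zero bInterval now falls back to 8 ms (n = 7) rather than the old 32 ms. For bInterval = 10 meant as 10 ms: 10 * 8 = 80 microframes, fls(80) = 7, giving a period of 2^(7-1) = 64 microframes = 8 ms. The helper name below is hypothetical:]

#include <linux/bitops.h>

static int hs_int_interval_exponent(unsigned char bInterval)
{
	int n = fls(bInterval * 8);	/* treat the bogus value as milliseconds */

	return n ? n : 7;		/* 8 ms default when bInterval == 0 */
}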
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e6a6d67c8705..09c8d9ca61ae 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1709,11 +1709,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = number_of_packets;
as->urb->stream_id = stream_id;
- if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
- ps->dev->speed == USB_SPEED_HIGH)
- as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
- else
- as->urb->interval = ep->desc.bInterval;
+
+ if (ep->desc.bInterval) {
+ if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+ ps->dev->speed == USB_SPEED_HIGH ||
+ ps->dev->speed >= USB_SPEED_SUPER)
+ as->urb->interval = 1 <<
+ min(15, ep->desc.bInterval - 1);
+ else
+ as->urb->interval = ep->desc.bInterval;
+ }
+
as->urb->context = as;
as->urb->complete = async_completed;
for (totlen = u = 0; u < number_of_packets; u++) {
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9fae0291cd69..d64551243789 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -868,6 +868,7 @@ struct dwc2_hsotg {
void *priv;
int irq;
struct clk *clk;
+ struct reset_control *reset;
unsigned int queuing_high_bandwidth:1;
unsigned int srp_success:1;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index fc6f5251de5d..530959a8a6d1 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -45,6 +45,7 @@
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/s3c-hsotg.h>
+#include <linux/reset.h>
#include <linux/usb/of.h>
@@ -337,6 +338,24 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
{
int i, ret;
+ hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
+ if (IS_ERR(hsotg->reset)) {
+ ret = PTR_ERR(hsotg->reset);
+ switch (ret) {
+ case -ENOENT:
+ case -ENOTSUPP:
+ hsotg->reset = NULL;
+ break;
+ default:
+ dev_err(hsotg->dev, "error getting reset control %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (hsotg->reset)
+ reset_control_deassert(hsotg->reset);
+
/* Set default UTMI width */
hsotg->phyif = GUSBCFG_PHYIF16;
@@ -434,6 +453,9 @@ static int dwc2_driver_remove(struct platform_device *dev)
if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
+ if (hsotg->reset)
+ reset_control_assert(hsotg->reset);
+
return 0;
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 946643157b78..35d092456bec 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1192,6 +1192,7 @@ static int dwc3_runtime_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
+ pm_runtime_put(dev);
return 0;
}
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 22dfc3dd6a13..33ab2a203c1b 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -192,7 +192,7 @@ dwc3_ep_event_string(const struct dwc3_event_depevt *event)
int ret;
ret = sprintf(str, "ep%d%s: ", epnum >> 1,
- (epnum & 1) ? "in" : "in");
+ (epnum & 1) ? "in" : "out");
if (ret < 0)
return "UNKNOWN";
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 2eb84d6c24a6..6df0f5dad9a4 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -243,6 +243,15 @@ static int dwc3_pci_runtime_suspend(struct device *dev)
return -EBUSY;
}
+static int dwc3_pci_runtime_resume(struct device *dev)
+{
+ struct platform_device *dwc3 = dev_get_drvdata(dev);
+
+ return pm_runtime_get(&dwc3->dev);
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
static int dwc3_pci_pm_dummy(struct device *dev)
{
/*
@@ -255,11 +264,11 @@ static int dwc3_pci_pm_dummy(struct device *dev)
*/
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
- SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy,
+ SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_runtime_resume,
NULL)
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1f5597ef945d..122e64df2f4d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1433,7 +1433,7 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
- unsigned long timeout;
+ int retries;
int ret;
u32 reg;
@@ -1484,9 +1484,9 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
}
/* poll until Link State changes to ON */
- timeout = jiffies + msecs_to_jiffies(100);
+ retries = 20000;
- while (!time_after(jiffies, timeout)) {
+ while (retries--) {
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
/* in HS, means ON */
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index d58bfc32be9e..007ec6e4a5d4 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -341,11 +341,15 @@ static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
{
struct sk_buff *skb2 = NULL;
struct usb_ep *in = port->in_ep;
- int padlen = 0;
- u16 len = skb->len;
+ int headroom, tailroom, padlen = 0;
+ u16 len;
- int headroom = skb_headroom(skb);
- int tailroom = skb_tailroom(skb);
+ if (!skb)
+ return NULL;
+
+ len = skb->len;
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
/* When ((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
* stick two bytes of zero-length EEM packet on the end.
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c8005823b190..16562e461121 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -374,6 +374,9 @@ static struct sk_buff *rndis_add_header(struct gether *port,
{
struct sk_buff *skb2;
+ if (!skb)
+ return NULL;
+
skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
rndis_add_hdr(skb2);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 6ded6345cd09..e0cd1e4c8892 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -375,10 +375,15 @@ __acquires(&port->port_lock)
*/
{
struct list_head *pool = &port->write_pool;
- struct usb_ep *in = port->port_usb->in;
+ struct usb_ep *in;
int status = 0;
bool do_tty_wake = false;
+ if (!port->port_usb)
+ return status;
+
+ in = port->port_usb->in;
+
while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 934f83881c30..40c04bb25f2f 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -827,7 +827,7 @@ void usb_gadget_unmap_request_by_dev(struct device *dev,
return;
if (req->num_mapped_sgs) {
- dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
+ dma_unmap_sg(dev, req->sg, req->num_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index cf8819a5c5b2..8bb011ea78f7 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1878,11 +1878,8 @@ static int qe_get_frame(struct usb_gadget *gadget)
tmp = in_be16(&udc->usb_param->frame_n);
if (tmp & 0x8000)
- tmp = tmp & 0x07ff;
- else
- tmp = -EINVAL;
-
- return (int)tmp;
+ return tmp & 0x07ff;
+ return -EINVAL;
}
static int fsl_qe_start(struct usb_gadget *gadget,
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 93a3bec81df7..fb8fc34827ab 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -106,6 +106,7 @@
/* DRD_CON */
#define DRD_CON_PERI_CON BIT(24)
+#define DRD_CON_VBOUT BIT(0)
/* USB_INT_ENA_1 and USB_INT_STA_1 */
#define USB_INT_1_B3_PLLWKUP BIT(31)
@@ -363,6 +364,7 @@ static void usb3_init_epc_registers(struct renesas_usb3 *usb3)
{
/* FIXME: How to change host / peripheral mode as well? */
usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON);
+ usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON);
usb3_write(usb3, ~0, USB3_USB_INT_STA_1);
usb3_enable_irq_1(usb3, USB_INT_1_VBUS_CNG);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fd9fd12e4861..797137e26549 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -850,6 +850,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
+ if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ return;
+ }
if (xhci->xhc_state & XHCI_STATE_DYING) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but another timer marked "
@@ -903,7 +907,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Calling usb_hc_died()");
- usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ usb_hc_died(xhci_to_hcd(xhci));
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"xHCI host controller is dead.");
}
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 886526b5fcdd..73cfa13fc0dc 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -87,7 +87,7 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on (ARCH_OMAP2PLUS || COMPILE_TEST) && !BLACKFIN
depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
config USB_MUSB_OMAP2PLUS
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 192248f974ec..fe08e776fec3 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -290,6 +290,7 @@ int musb_hub_control(
u32 temp;
int retval = 0;
unsigned long flags;
+ bool start_musb = false;
spin_lock_irqsave(&musb->lock, flags);
@@ -390,7 +391,7 @@ int musb_hub_control(
* logic relating to VBUS power-up.
*/
if (!hcd->self.is_b_host && musb_has_gadget(musb))
- musb_start(musb);
+ start_musb = true;
break;
case USB_PORT_FEAT_RESET:
musb_port_reset(musb, true);
@@ -451,5 +452,9 @@ error:
retval = -EPIPE;
}
spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (start_musb)
+ musb_start(musb);
+
return retval;
}
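
The musb change is the classic lock-scope fix: record the decision under the spinlock and act on it only after the lock is dropped, because musb_start() is not safe to call with musb->lock held. In outline, with placeholder names (dev, needs_start, do_start):

	bool start_pending = false;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (needs_start)		/* only record what must happen later */
		start_pending = true;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (start_pending)
		do_start(dev);		/* may sleep or re-take dev->lock safely */
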
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 980c9dee09eb..427efb5eebae 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -144,14 +144,18 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
int usb_gen_phy_init(struct usb_phy *phy)
{
struct usb_phy_generic *nop = dev_get_drvdata(phy->dev);
+ int ret;
if (!IS_ERR(nop->vcc)) {
if (regulator_enable(nop->vcc))
dev_err(phy->dev, "Failed to enable power\n");
}
- if (!IS_ERR(nop->clk))
- clk_prepare_enable(nop->clk);
+ if (!IS_ERR(nop->clk)) {
+ ret = clk_prepare_enable(nop->clk);
+ if (ret)
+ return ret;
+ }
nop_reset(nop);
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index d4be5d594896..28965ef4f824 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -282,9 +282,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
if (usbhs_mod_is_host(priv))
usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
- usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+ /*
+ * The driver should not clear the xxxSTS after the line of
+ * "call irq callback functions" because each "if" statement is
+ * possible to call the callback function for avoiding any side effects.
+ */
+ if (irq_state.intsts0 & BRDY)
+ usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
- usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+ if (irq_state.intsts0 & BEMP)
+ usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
/*
* call irq callback functions
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 92bc83b92d10..c4c64740a3e7 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1076,7 +1076,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
- gpriv->transceiver ? "" : "no ");
+ !IS_ERR(gpriv->transceiver) ? "" : "no ");
/*
* CAUTION
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 5608af4a369d..de9992b492b0 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -1252,7 +1252,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
if (urb->transfer_buffer == NULL) {
urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ed378fb232e7..57426d703a09 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
}
if (urb->transfer_buffer == NULL) {
- urb->transfer_buffer =
- kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
if (!urb->transfer_buffer)
goto exit;
}
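
Both mos7720 and mos7840 perform these allocations on the tty write path while a spinlock is held, where sleeping is forbidden; GFP_KERNEL may sleep on memory reclaim, so GFP_ATOMIC is the correct flag even though it fails more readily. The rule in miniature, with placeholder names (port_lock, BUF_SIZE):

	unsigned long flags;
	char *buf;

	spin_lock_irqsave(&port_lock, flags);
	buf = kmalloc(BUF_SIZE, GFP_ATOMIC);	/* no sleeping allowed here */
	if (!buf) {
		spin_unlock_irqrestore(&port_lock, flags);
		return -ENOMEM;
	}
	/* ... fill and submit buf ... */
	spin_unlock_irqrestore(&port_lock, flags);
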
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index bc472584a229..9894e341c6ac 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -525,6 +525,12 @@ static void option_instat_callback(struct urb *urb);
#define VIATELECOM_VENDOR_ID 0x15eb
#define VIATELECOM_PRODUCT_CDS7 0x0001
+/* WeTelecom products */
+#define WETELECOM_VENDOR_ID 0x22de
+#define WETELECOM_PRODUCT_WMD200 0x6801
+#define WETELECOM_PRODUCT_6802 0x6802
+#define WETELECOM_PRODUCT_WMD300 0x6803
+
struct option_blacklist_info {
/* bitmask of interface numbers blacklisted for send_setup */
const unsigned long sendsetup;
@@ -1991,6 +1997,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index a204782ae530..e98b6e57b703 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
/* Infineon Flashloader driver */
#define FLASHLOADER_IDS() \
{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
- { USB_DEVICE(0x8087, 0x0716) }
+ { USB_DEVICE(0x8087, 0x0716) }, \
+ { USB_DEVICE(0x8087, 0x0801) }
DEVICE(flashloader, FLASHLOADER_IDS);
/* Google Serial USB SubClass */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index e383ecdaca59..ed9c9eeedfe5 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -167,7 +167,7 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
* making all of the arch DMA ops work on the vring device itself
* is a mess. For now, we use the parent device for DMA ops.
*/
-struct device *vring_dma_dev(const struct vring_virtqueue *vq)
+static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
return vq->vq.vdev->dev.parent;
}
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 37608be52abd..59bdaa7527b6 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -18,6 +18,7 @@
struct socket *afs_socket; /* my RxRPC socket */
static struct workqueue_struct *afs_async_calls;
+static struct afs_call *afs_spare_incoming_call;
static atomic_t afs_outstanding_calls;
static void afs_free_call(struct afs_call *);
@@ -26,7 +27,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *);
static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
static int afs_dont_wait_for_call_to_complete(struct afs_call *);
static void afs_process_async_call(struct work_struct *);
-static void afs_rx_new_call(struct sock *);
+static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
+static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
static int afs_deliver_cm_op_id(struct afs_call *);
/* synchronous call management */
@@ -53,9 +55,9 @@ static const struct afs_call_type afs_RXCMxxxx = {
.abort_to_error = afs_abort_to_error,
};
-static void afs_collect_incoming_call(struct work_struct *);
+static void afs_charge_preallocation(struct work_struct *);
-static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
+static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation);
static int afs_wait_atomic_t(atomic_t *p)
{
@@ -100,13 +102,15 @@ int afs_open_socket(void)
if (ret < 0)
goto error_2;
- rxrpc_kernel_new_call_notification(socket, afs_rx_new_call);
+ rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
+ afs_rx_discard_new_call);
ret = kernel_listen(socket, INT_MAX);
if (ret < 0)
goto error_2;
afs_socket = socket;
+ afs_charge_preallocation(NULL);
_leave(" = 0");
return 0;
@@ -126,12 +130,20 @@ void afs_close_socket(void)
{
_enter("");
+ if (afs_spare_incoming_call) {
+ atomic_inc(&afs_outstanding_calls);
+ afs_free_call(afs_spare_incoming_call);
+ afs_spare_incoming_call = NULL;
+ }
+
_debug("outstanding %u", atomic_read(&afs_outstanding_calls));
wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
_debug("no outstanding calls");
flush_workqueue(afs_async_calls);
+ kernel_sock_shutdown(afs_socket, SHUT_RDWR);
+ flush_workqueue(afs_async_calls);
sock_release(afs_socket);
_debug("dework");
@@ -377,7 +389,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
return wait_mode->wait(call);
error_do_abort:
- rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT);
+ rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
error_kill_call:
afs_end_call(call);
_leave(" = %d", ret);
@@ -425,12 +437,12 @@ static void afs_deliver_to_call(struct afs_call *call)
case -ENOTCONN:
abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- abort_code);
+ abort_code, -ret, "KNC");
goto do_abort;
case -ENOTSUPP:
abort_code = RX_INVALID_OPERATION;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- abort_code);
+ abort_code, -ret, "KIV");
goto do_abort;
case -ENODATA:
case -EBADMSG:
@@ -440,7 +452,7 @@ static void afs_deliver_to_call(struct afs_call *call)
if (call->state != AFS_CALL_AWAIT_REPLY)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- abort_code);
+ abort_code, EBADMSG, "KUM");
goto do_abort;
}
}
@@ -463,6 +475,7 @@ do_abort:
*/
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
+ const char *abort_why;
int ret;
DECLARE_WAITQUEUE(myself, current);
@@ -481,9 +494,11 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
continue;
}
+ abort_why = "KWC";
ret = call->error;
if (call->state == AFS_CALL_COMPLETE)
break;
+ abort_why = "KWI";
ret = -EINTR;
if (signal_pending(current))
break;
@@ -497,7 +512,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
if (call->state < AFS_CALL_COMPLETE) {
_debug("call incomplete");
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- RX_CALL_DEAD);
+ RX_CALL_DEAD, -ret, abort_why);
}
_debug("call complete");
@@ -587,57 +602,65 @@ static void afs_process_async_call(struct work_struct *work)
_leave("");
}
+static void afs_rx_attach(struct rxrpc_call *rxcall, unsigned long user_call_ID)
+{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
+
+ call->rxcall = rxcall;
+}
+
/*
- * accept the backlog of incoming calls
+ * Charge the incoming call preallocation.
*/
-static void afs_collect_incoming_call(struct work_struct *work)
+static void afs_charge_preallocation(struct work_struct *work)
{
- struct rxrpc_call *rxcall;
- struct afs_call *call = NULL;
-
- _enter("");
+ struct afs_call *call = afs_spare_incoming_call;
- do {
+ for (;;) {
if (!call) {
call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
- if (!call) {
- rxrpc_kernel_reject_call(afs_socket);
- return;
- }
+ if (!call)
+ break;
INIT_WORK(&call->async_work, afs_process_async_call);
call->wait_mode = &afs_async_incoming_call;
call->type = &afs_RXCMxxxx;
init_waitqueue_head(&call->waitq);
call->state = AFS_CALL_AWAIT_OP_ID;
-
- _debug("CALL %p{%s} [%d]",
- call, call->type->name,
- atomic_read(&afs_outstanding_calls));
- atomic_inc(&afs_outstanding_calls);
}
- rxcall = rxrpc_kernel_accept_call(afs_socket,
- (unsigned long)call,
- afs_wake_up_async_call);
- if (!IS_ERR(rxcall)) {
- call->rxcall = rxcall;
- call->need_attention = true;
- queue_work(afs_async_calls, &call->async_work);
- call = NULL;
- }
- } while (!call);
+ if (rxrpc_kernel_charge_accept(afs_socket,
+ afs_wake_up_async_call,
+ afs_rx_attach,
+ (unsigned long)call,
+ GFP_KERNEL) < 0)
+ break;
+ call = NULL;
+ }
+ afs_spare_incoming_call = call;
+}
- if (call)
- afs_free_call(call);
+/*
+ * Discard a preallocated call when a socket is shut down.
+ */
+static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
+ unsigned long user_call_ID)
+{
+ struct afs_call *call = (struct afs_call *)user_call_ID;
+
+ atomic_inc(&afs_outstanding_calls);
+ call->rxcall = NULL;
+ afs_free_call(call);
}
/*
* Notification of an incoming call.
*/
-static void afs_rx_new_call(struct sock *sk)
+static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
+ unsigned long user_call_ID)
{
- queue_work(afs_wq, &afs_collect_incoming_call_work);
+ atomic_inc(&afs_outstanding_calls);
+ queue_work(afs_wq, &afs_charge_preallocation_work);
}
/*
@@ -695,7 +718,7 @@ void afs_send_empty_reply(struct afs_call *call)
case -ENOMEM:
_debug("oom");
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- RX_USER_ABORT);
+ RX_USER_ABORT, ENOMEM, "KOO");
default:
afs_end_call(call);
_leave(" [error]");
@@ -734,7 +757,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
if (n == -ENOMEM) {
_debug("oom");
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
- RX_USER_ABORT);
+ RX_USER_ABORT, ENOMEM, "KOO");
}
afs_end_call(call);
_leave(" [error]");
diff --git a/fs/aio.c b/fs/aio.c
index fb8e45b88cd4..4fe81d1c60f9 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,7 +239,12 @@ static struct dentry *aio_mount(struct file_system_type *fs_type,
static const struct dentry_operations ops = {
.d_dname = simple_dname,
};
- return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
+ struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, &ops,
+ AIO_RING_MAGIC);
+
+ if (!IS_ERR(root))
+ root->d_sb->s_iflags |= SB_I_NOEXEC;
+ return root;
}
/* aio_setup
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index b493909e7492..d8e6d421c27f 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -417,6 +417,7 @@ static struct dentry *should_expire(struct dentry *dentry,
}
return NULL;
}
+
/*
* Find an eligible tree to time-out
* A tree is eligible if :-
@@ -432,6 +433,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
struct dentry *root = sb->s_root;
struct dentry *dentry;
struct dentry *expired;
+ struct dentry *found;
struct autofs_info *ino;
if (!root)
@@ -442,31 +444,46 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
dentry = NULL;
while ((dentry = get_next_positive_subdir(dentry, root))) {
+ int flags = how;
+
spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(dentry);
- if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
- expired = NULL;
- else
- expired = should_expire(dentry, mnt, timeout, how);
- if (!expired) {
+ if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
spin_unlock(&sbi->fs_lock);
continue;
}
+ spin_unlock(&sbi->fs_lock);
+
+ expired = should_expire(dentry, mnt, timeout, flags);
+ if (!expired)
+ continue;
+
+ spin_lock(&sbi->fs_lock);
ino = autofs4_dentry_ino(expired);
ino->flags |= AUTOFS_INF_WANT_EXPIRE;
spin_unlock(&sbi->fs_lock);
synchronize_rcu();
- spin_lock(&sbi->fs_lock);
- if (should_expire(expired, mnt, timeout, how)) {
- if (expired != dentry)
- dput(dentry);
- goto found;
- }
+ /* Make sure a reference is not taken on found if
+ * things have changed.
+ */
+ flags &= ~AUTOFS_EXP_LEAVES;
+ found = should_expire(expired, mnt, timeout, how);
+ if (!found || found != expired)
+ /* Something has changed, continue */
+ goto next;
+
+ if (expired != dentry)
+ dput(dentry);
+
+ spin_lock(&sbi->fs_lock);
+ goto found;
+next:
+ spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+ spin_unlock(&sbi->fs_lock);
if (expired != dentry)
dput(expired);
- spin_unlock(&sbi->fs_lock);
}
return NULL;
@@ -483,6 +500,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct autofs_info *ino = autofs4_dentry_ino(dentry);
int status;
+ int state;
/* Block on any pending expire */
if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
@@ -490,8 +508,19 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
if (rcu_walk)
return -ECHILD;
+retry:
spin_lock(&sbi->fs_lock);
- if (ino->flags & AUTOFS_INF_EXPIRING) {
+ state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
+ if (state == AUTOFS_INF_WANT_EXPIRE) {
+ spin_unlock(&sbi->fs_lock);
+ /*
+ * Possibly being selected for expire, wait until
+ * it's selected or not.
+ */
+ schedule_timeout_uninterruptible(HZ/10);
+ goto retry;
+ }
+ if (state & AUTOFS_INF_EXPIRING) {
spin_unlock(&sbi->fs_lock);
pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 7f6aff3f72eb..e5495f37c6ed 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -853,6 +853,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
+ install_exec_creds(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
@@ -1044,7 +1045,6 @@ static int load_elf_binary(struct linux_binprm *bprm)
goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
- install_exec_creds(bprm);
retval = create_elf_tables(bprm, &loc->elf_ex,
load_addr, interp_load_addr);
if (retval < 0)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index eff3993c77b3..33fe03551105 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -427,6 +427,7 @@ struct btrfs_space_info {
struct list_head ro_bgs;
struct list_head priority_tickets;
struct list_head tickets;
+ u64 tickets_id;
struct rw_semaphore groups_sem;
/* for block groups in our same type */
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0450dc410533..38c2df84cabd 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4901,11 +4901,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
u64 expected;
u64 to_reclaim = 0;
- to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
- if (can_overcommit(root, space_info, to_reclaim,
- BTRFS_RESERVE_FLUSH_ALL))
- return 0;
-
list_for_each_entry(ticket, &space_info->tickets, list)
to_reclaim += ticket->bytes;
list_for_each_entry(ticket, &space_info->priority_tickets, list)
@@ -4913,6 +4908,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
if (to_reclaim)
return to_reclaim;
+ to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
+ if (can_overcommit(root, space_info, to_reclaim,
+ BTRFS_RESERVE_FLUSH_ALL))
+ return 0;
+
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
space_info->bytes_may_use;
@@ -4966,12 +4966,12 @@ static void wake_all_tickets(struct list_head *head)
*/
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
- struct reserve_ticket *last_ticket = NULL;
struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
u64 to_reclaim;
int flush_state;
int commit_cycles = 0;
+ u64 last_tickets_id;
fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
@@ -4984,8 +4984,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
spin_unlock(&space_info->lock);
return;
}
- last_ticket = list_first_entry(&space_info->tickets,
- struct reserve_ticket, list);
+ last_tickets_id = space_info->tickets_id;
spin_unlock(&space_info->lock);
flush_state = FLUSH_DELAYED_ITEMS_NR;
@@ -5005,10 +5004,10 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
space_info);
ticket = list_first_entry(&space_info->tickets,
struct reserve_ticket, list);
- if (last_ticket == ticket) {
+ if (last_tickets_id == space_info->tickets_id) {
flush_state++;
} else {
- last_ticket = ticket;
+ last_tickets_id = space_info->tickets_id;
flush_state = FLUSH_DELAYED_ITEMS_NR;
if (commit_cycles)
commit_cycles--;
@@ -5384,6 +5383,7 @@ again:
list_del_init(&ticket->list);
num_bytes -= ticket->bytes;
ticket->bytes = 0;
+ space_info->tickets_id++;
wake_up(&ticket->wait);
} else {
ticket->bytes -= num_bytes;
@@ -5426,6 +5426,7 @@ again:
num_bytes -= ticket->bytes;
space_info->bytes_may_use += ticket->bytes;
ticket->bytes = 0;
+ space_info->tickets_id++;
wake_up(&ticket->wait);
} else {
trace_btrfs_space_reservation(fs_info, "space_info",
@@ -8216,6 +8217,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
{
int ret;
struct btrfs_block_group_cache *block_group;
+ struct btrfs_space_info *space_info;
/*
* Mixed block groups will exclude before processing the log so we only
@@ -8231,9 +8233,14 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
if (!block_group)
return -EINVAL;
- ret = btrfs_add_reserved_bytes(block_group, ins->offset,
- ins->offset, 0);
- BUG_ON(ret); /* logic error */
+ space_info = block_group->space_info;
+ spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+ space_info->bytes_reserved += ins->offset;
+ block_group->reserved += ins->offset;
+ spin_unlock(&block_group->lock);
+ spin_unlock(&space_info->lock);
+
ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
0, owner, offset, ins, 1);
btrfs_put_block_group(block_group);
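
Comparing ticket pointers was unsound because a satisfied ticket can be freed and a new one allocated at the same address (the ABA problem), making the flusher believe no progress happened. The per-space_info tickets_id counter, bumped whenever a ticket completes, acts as a generation counter; progress detection becomes a snapshot-and-compare (run_one_flush_state() is a placeholder):

	u64 seen;

	spin_lock(&info->lock);
	seen = info->tickets_id;	/* snapshot the generation */
	spin_unlock(&info->lock);

	run_one_flush_state(info);

	spin_lock(&info->lock);
	if (seen == info->tickets_id)
		flush_state++;		/* no ticket completed: escalate */
	else
		flush_state = FLUSH_DELAYED_ITEMS_NR;	/* progress: restart gently */
	spin_unlock(&info->lock);
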
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 8a2c2a07987b..c0c13dc6fe12 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4200,9 +4200,11 @@ restart:
err = PTR_ERR(trans);
goto out_free;
}
- err = qgroup_fix_relocated_data_extents(trans, rc);
- if (err < 0) {
- btrfs_abort_transaction(trans, err);
+ ret = qgroup_fix_relocated_data_extents(trans, rc);
+ if (ret < 0) {
+ btrfs_abort_transaction(trans, ret);
+ if (!err)
+ err = ret;
goto out_free;
}
btrfs_commit_transaction(trans, rc->extent_root);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index efe129fe2678..a87675ffd02b 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4268,10 +4268,12 @@ static int process_all_refs(struct send_ctx *sctx,
}
btrfs_release_path(path);
+ /*
+ * We don't actually care about pending_move as we are simply
+ * re-creating this inode and will be renaming it into place once we
+ * rename the parent directory.
+ */
ret = process_recorded_refs(sctx, &pending_move);
- /* Only applicable to an incremental send. */
- ASSERT(pending_move == 0);
-
out:
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index e935035ac034..ef9c55bc7907 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2867,6 +2867,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
blk_finish_plug(&plug);
+ list_del_init(&root_log_ctx.list);
mutex_unlock(&log_root_tree->log_mutex);
ret = root_log_ctx.log_ret;
goto out;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index c64a0b794d49..df4b3e6fa563 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -597,7 +597,7 @@ static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
if (is_hash_order(new_pos)) {
/* no need to reset last_name for a forward seek when
* dentries are sorted in hash order */
- } else if (fi->frag |= fpos_frag(new_pos)) {
+ } else if (fi->frag != fpos_frag(new_pos)) {
return true;
}
rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 6bbec5e784cd..14ae4b8e1a3c 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -609,6 +609,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
char *s, *p;
char sep;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ return dget(sb->s_root);
+
full_path = cifs_build_path_to_root(vol, cifs_sb,
cifs_sb_master_tcon(cifs_sb));
if (full_path == NULL)
@@ -686,26 +689,22 @@ cifs_do_mount(struct file_system_type *fs_type,
cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
if (cifs_sb->mountdata == NULL) {
root = ERR_PTR(-ENOMEM);
- goto out_cifs_sb;
+ goto out_free;
}
- if (volume_info->prepath) {
- cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
- if (cifs_sb->prepath == NULL) {
- root = ERR_PTR(-ENOMEM);
- goto out_cifs_sb;
- }
+ rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
+ if (rc) {
+ root = ERR_PTR(rc);
+ goto out_free;
}
- cifs_setup_cifs_sb(volume_info, cifs_sb);
-
rc = cifs_mount(cifs_sb, volume_info);
if (rc) {
if (!(flags & MS_SILENT))
cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
rc);
root = ERR_PTR(rc);
- goto out_mountdata;
+ goto out_free;
}
mnt_data.vol = volume_info;
@@ -735,11 +734,7 @@ cifs_do_mount(struct file_system_type *fs_type,
sb->s_flags |= MS_ACTIVE;
}
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
- root = dget(sb->s_root);
- else
- root = cifs_get_root(volume_info, sb);
-
+ root = cifs_get_root(volume_info, sb);
if (IS_ERR(root))
goto out_super;
@@ -752,9 +747,9 @@ out:
cifs_cleanup_volume_info(volume_info);
return root;
-out_mountdata:
+out_free:
+ kfree(cifs_sb->prepath);
kfree(cifs_sb->mountdata);
-out_cifs_sb:
kfree(cifs_sb);
out_nls:
unload_nls(volume_info->local_nls);
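
Collapsing out_mountdata/out_cifs_sb into a single out_free label is safe because kfree(NULL) is a no-op, so one label can unconditionally free every resource regardless of how far setup got. The shape of the idiom, with placeholder setup steps:

	rc = setup_mountdata(cifs_sb);	/* setup_* are placeholders */
	if (rc)
		goto out_free;
	rc = setup_prepath(cifs_sb);
	if (rc)
		goto out_free;
	return root;

	out_free:
		kfree(cifs_sb->prepath);	/* kfree(NULL) is harmless */
		kfree(cifs_sb->mountdata);
		kfree(cifs_sb);
		return ERR_PTR(rc);
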
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 1243bd326591..95dab43646f0 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -184,7 +184,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read);
extern int cifs_read_page_from_socket(struct TCP_Server_Info *server,
struct page *page, unsigned int to_read);
-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb);
extern int cifs_match_super(struct super_block *, void *);
extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7ae03283bd61..2e4f4bad8b1e 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2781,6 +2781,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
return 1;
}
+static int
+match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+ struct cifs_sb_info *old = CIFS_SB(sb);
+ struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+ if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) {
+ if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH))
+ return 0;
+ /* The prepaths are null-terminated strings */
+ if (strcmp(new->prepath, old->prepath))
+ return 0;
+
+ return 1;
+ }
+ return 0;
+}
+
int
cifs_match_super(struct super_block *sb, void *data)
{
@@ -2808,7 +2826,8 @@ cifs_match_super(struct super_block *sb, void *data)
if (!match_server(tcp_srv, volume_info) ||
!match_session(ses, volume_info) ||
- !match_tcon(tcon, volume_info->UNC)) {
+ !match_tcon(tcon, volume_info->UNC) ||
+ !match_prepath(sb, mnt_data)) {
rc = 0;
goto out;
}
@@ -3222,7 +3241,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
}
}
-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
@@ -3316,6 +3335,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm))
cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
+
+ if (pvolume_info->prepath) {
+ cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
+ if (cifs_sb->prepath == NULL)
+ return -ENOMEM;
+ }
+
+ return 0;
}
static void
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index 0f9961eede1e..ed115acb5dee 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -11,6 +11,7 @@
#include <linux/random.h>
#include <linux/string.h>
#include <linux/fscrypto.h>
+#include <linux/mount.h>
static int inode_has_encryption_context(struct inode *inode)
{
@@ -92,26 +93,42 @@ static int create_encryption_context_from_policy(struct inode *inode,
return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
}
-int fscrypt_process_policy(struct inode *inode,
+int fscrypt_process_policy(struct file *filp,
const struct fscrypt_policy *policy)
{
+ struct inode *inode = file_inode(filp);
+ int ret;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
if (policy->version != 0)
return -EINVAL;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
if (!inode_has_encryption_context(inode)) {
- if (!inode->i_sb->s_cop->empty_dir)
- return -EOPNOTSUPP;
- if (!inode->i_sb->s_cop->empty_dir(inode))
- return -ENOTEMPTY;
- return create_encryption_context_from_policy(inode, policy);
+ if (!S_ISDIR(inode->i_mode))
+ ret = -EINVAL;
+ else if (!inode->i_sb->s_cop->empty_dir)
+ ret = -EOPNOTSUPP;
+ else if (!inode->i_sb->s_cop->empty_dir(inode))
+ ret = -ENOTEMPTY;
+ else
+ ret = create_encryption_context_from_policy(inode,
+ policy);
+ } else if (!is_encryption_context_consistent_with_policy(inode,
+ policy)) {
+ printk(KERN_WARNING
+ "%s: Policy inconsistent with encryption context\n",
+ __func__);
+ ret = -EINVAL;
}
- if (is_encryption_context_consistent_with_policy(inode, policy))
- return 0;
-
- printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
- __func__);
- return -EINVAL;
+ mnt_drop_write_file(filp);
+ return ret;
}
EXPORT_SYMBOL(fscrypt_process_policy);
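
Passing the struct file lets fscrypt_process_policy() perform the ownership and write-access checks itself instead of trusting every filesystem's ioctl handler to do so (ext4's was missing both). The canonical bracketing, with apply_policy() as a placeholder for the real work:

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);	/* pin write access to the mount */
	if (ret)
		return ret;

	ret = apply_policy(inode, policy);

	mnt_drop_write_file(filp);		/* balanced on every path */
	return ret;
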
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index d116453b0276..79a5941c2474 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -585,7 +585,8 @@ struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
*/
void *devpts_get_priv(struct dentry *dentry)
{
- WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+ if (dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC)
+ return NULL;
return dentry->d_fsdata;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 10686fd67fb4..1bb7df5e4536 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -776,7 +776,7 @@ resizefs_out:
(struct fscrypt_policy __user *)arg,
sizeof(policy)))
return -EFAULT;
- return fscrypt_process_policy(inode, &policy);
+ return fscrypt_process_policy(filp, &policy);
#else
return -EOPNOTSUPP;
#endif
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 47abb96098e4..28f4f4cbb8d8 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1757,21 +1757,14 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
- int ret;
if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
sizeof(policy)))
return -EFAULT;
- ret = mnt_want_write_file(filp);
- if (ret)
- return ret;
-
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- ret = fscrypt_process_policy(inode, &policy);
- mnt_drop_write_file(filp);
- return ret;
+ return fscrypt_process_policy(filp, &policy);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f394aff59c36..3988b43c2f5a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -530,13 +530,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
req->out.args[0].size = count;
}
-static void fuse_release_user_pages(struct fuse_req *req, int write)
+static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
{
unsigned i;
for (i = 0; i < req->num_pages; i++) {
struct page *page = req->pages[i];
- if (write)
+ if (should_dirty)
set_page_dirty_lock(page);
put_page(page);
}
@@ -1320,6 +1320,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
loff_t *ppos, int flags)
{
int write = flags & FUSE_DIO_WRITE;
+ bool should_dirty = !write && iter_is_iovec(iter);
int cuse = flags & FUSE_DIO_CUSE;
struct file *file = io->file;
struct inode *inode = file->f_mapping->host;
@@ -1363,7 +1364,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
nres = fuse_send_read(req, io, pos, nbytes, owner);
if (!io->async)
- fuse_release_user_pages(req, !write);
+ fuse_release_user_pages(req, should_dirty);
if (req->out.h.error) {
err = req->out.h.error;
break;
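
Pages must only be dirtied when the kernel wrote into user-supplied memory, i.e. a read whose destination is an iovec-backed iterator; kernel-backed iterators (kvec/bvec) must not have their pages dirtied this way. Hence the flag is computed once per request:

	bool should_dirty = !write && iter_is_iovec(iter);

	/* ... issue the read into pages[] ... */

	for (i = 0; i < nr_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
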
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 0f56deb24ce6..c415668c86d4 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -568,7 +568,7 @@ static int ioctl_fsthaw(struct file *filp)
return thaw_super(sb);
}
-static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
+static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
{
struct file_dedupe_range __user *argp = arg;
struct file_dedupe_range *same = NULL;
@@ -582,6 +582,10 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
}
size = offsetof(struct file_dedupe_range __user, info[count]);
+ if (size > PAGE_SIZE) {
+ ret = -ENOMEM;
+ goto out;
+ }
same = memdup_user(argp, size);
if (IS_ERR(same)) {
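
Here `count` comes straight from userspace, so the computed structure size must be bounded before it reaches the allocator; capping it at PAGE_SIZE rejects absurd requests cheaply. The guard in isolation:

	size = offsetof(struct file_dedupe_range, info[count]);
	if (size > PAGE_SIZE)
		return -ENOMEM;		/* refuse oversized user-supplied counts */

	same = memdup_user(argp, size);	/* bounded copy from userspace */
	if (IS_ERR(same))
		return PTR_ERR(same);
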
diff --git a/fs/iomap.c b/fs/iomap.c
index 0342254646e3..706270f21b35 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -428,9 +428,12 @@ static int iomap_to_fiemap(struct fiemap_extent_info *fi,
break;
}
+ if (iomap->flags & IOMAP_F_MERGED)
+ flags |= FIEMAP_EXTENT_MERGED;
+
return fiemap_fill_next_extent(fi, iomap->offset,
iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9: 0,
- iomap->length, flags | FIEMAP_EXTENT_MERGED);
+ iomap->length, flags);
}
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e1574008adc9..2bcb86e6e6ca 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -840,21 +840,35 @@ repeat:
mutex_lock(&kernfs_mutex);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
+ struct kernfs_node *parent;
struct inode *inode;
- struct dentry *dentry;
+ /*
+ * We want fsnotify_modify() on @kn but as the
+ * modifications aren't originating from userland we don't
+ * have the matching @file available. Look up the inodes
+ * and generate the events manually.
+ */
inode = ilookup(info->sb, kn->ino);
if (!inode)
continue;
- dentry = d_find_any_alias(inode);
- if (dentry) {
- fsnotify_parent(NULL, dentry, FS_MODIFY);
- fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
- NULL, 0);
- dput(dentry);
+ parent = kernfs_get_parent(kn);
+ if (parent) {
+ struct inode *p_inode;
+
+ p_inode = ilookup(info->sb, parent->ino);
+ if (p_inode) {
+ fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
+ inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
+ iput(p_inode);
+ }
+
+ kernfs_put(parent);
}
+ fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+ kn->name, 0);
iput(inode);
}
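
With no struct file on hand, the usual fsnotify_modify() helper is unusable, so the kernfs hunk looks up the inode (and the parent's inode) and raises the events directly; FS_EVENT_ON_CHILD on the parent event is what makes directory watches fire. The core of the manual emission, assuming sb, ino and name come from the sysfs node:

	struct inode *inode = ilookup(sb, ino);

	if (inode) {
		fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
			 name, 0);	/* event on the node itself */
		iput(inode);
	}
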
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index f55a4e756047..217847679f0e 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -346,7 +346,7 @@ static void bl_write_cleanup(struct work_struct *work)
PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
- (end - start) >> SECTOR_SHIFT);
+ (end - start) >> SECTOR_SHIFT, end);
}
pnfs_ld_write_done(hdr);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 18e6fd0b9506..efc007f00742 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -141,6 +141,7 @@ struct pnfs_block_layout {
struct rb_root bl_ext_ro;
spinlock_t bl_ext_lock; /* Protects list manipulation */
bool bl_scsi_layout;
+ u64 bl_lwb;
};
static inline struct pnfs_block_layout *
@@ -182,7 +183,7 @@ int ext_tree_insert(struct pnfs_block_layout *bl,
int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
sector_t end);
int ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
- sector_t len);
+ sector_t len, u64 lwb);
bool ext_tree_lookup(struct pnfs_block_layout *bl, sector_t isect,
struct pnfs_block_extent *ret, bool rw);
int ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg);
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index 992bcb19c11e..c85fbfd2d0d9 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -402,7 +402,7 @@ ext_tree_split(struct rb_root *root, struct pnfs_block_extent *be,
int
ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
- sector_t len)
+ sector_t len, u64 lwb)
{
struct rb_root *root = &bl->bl_ext_rw;
sector_t end = start + len;
@@ -471,6 +471,8 @@ ext_tree_mark_written(struct pnfs_block_layout *bl, sector_t start,
}
}
out:
+ if (bl->bl_lwb < lwb)
+ bl->bl_lwb = lwb;
spin_unlock(&bl->bl_ext_lock);
__ext_put_deviceids(&tmp);
@@ -518,7 +520,7 @@ static __be32 *encode_scsi_range(struct pnfs_block_extent *be, __be32 *p)
}
static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
- size_t buffer_size, size_t *count)
+ size_t buffer_size, size_t *count, __u64 *lastbyte)
{
struct pnfs_block_extent *be;
int ret = 0;
@@ -542,6 +544,8 @@ static int ext_tree_encode_commit(struct pnfs_block_layout *bl, __be32 *p,
p = encode_block_extent(be, p);
be->be_tag = EXTENT_COMMITTING;
}
+ *lastbyte = bl->bl_lwb - 1;
+ bl->bl_lwb = 0;
spin_unlock(&bl->bl_ext_lock);
return ret;
@@ -564,7 +568,7 @@ ext_tree_prepare_commit(struct nfs4_layoutcommit_args *arg)
arg->layoutupdate_pages = &arg->layoutupdate_page;
retry:
- ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count);
+ ret = ext_tree_encode_commit(bl, start_p + 1, buffer_size, &count, &arg->lastbytewritten);
if (unlikely(ret)) {
ext_tree_free_commitdata(arg, buffer_size);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index a7f2e6e33305..52a28311e2a4 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -275,6 +275,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
err_socks:
svc_rpcb_cleanup(serv, net);
err_bind:
+ nn->cb_users[minorversion]--;
dprintk("NFS: Couldn't create callback socket: err = %d; "
"net = %p\n", ret, net);
return ret;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index c92a75e066a6..f953ef6b2f2e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -454,11 +454,8 @@ static bool referring_call_exists(struct nfs_client *clp,
((u32 *)&rclist->rcl_sessionid.data)[3],
ref->rc_sequenceid, ref->rc_slotid);
- spin_lock(&tbl->slot_tbl_lock);
- status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
- tbl->slots[ref->rc_slotid].seq_nr ==
- ref->rc_sequenceid);
- spin_unlock(&tbl->slot_tbl_lock);
+ status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
+ ref->rc_sequenceid, HZ >> 1) < 0;
if (status)
goto out;
}
@@ -487,7 +484,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
goto out;
tbl = &clp->cl_session->bc_slot_table;
- slot = tbl->slots + args->csa_slotid;
/* Set up res before grabbing the spinlock */
memcpy(&res->csr_sessionid, &args->csa_sessionid,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 003ebce4bbc4..1e106780a237 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(nfs_mark_client_ready);
* Initialise the timeout values for a connection
*/
void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
- unsigned int timeo, unsigned int retrans)
+ int timeo, int retrans)
{
to->to_initval = timeo * HZ / 10;
to->to_retries = retrans;
@@ -434,9 +434,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
switch (proto) {
case XPRT_TRANSPORT_TCP:
case XPRT_TRANSPORT_RDMA:
- if (to->to_retries == 0)
+ if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_TCP_RETRANS;
- if (to->to_initval == 0)
+ if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
to->to_initval = NFS_MAX_TCP_TIMEOUT;
@@ -449,9 +449,9 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
to->to_exponential = 0;
break;
case XPRT_TRANSPORT_UDP:
- if (to->to_retries == 0)
+ if (retrans == NFS_UNSPEC_RETRANS)
to->to_retries = NFS_DEF_UDP_RETRANS;
- if (!to->to_initval)
+ if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
to->to_initval = NFS_MAX_UDP_TIMEOUT;
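
Zero was doing double duty as both "option not given" and a legal mount value, which is why the old `to->to_initval == 0` tests misfired; a UINT_MAX sentinel (no valid timeo/retrans can be that large) makes "unset" unambiguous. The pattern in general form, sketched with placeholder names (struct opts, DEFAULT_TIMEO):

	#define OPT_UNSPEC	UINT_MAX	/* impossible as a real option value */

	static void init_opt(struct opts *o, unsigned int timeo)
	{
		if (timeo == OPT_UNSPEC)
			o->timeo = DEFAULT_TIMEO;	/* user said nothing */
		else
			o->timeo = timeo;	/* honour 0 and everything else */
	}
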
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 7d620970f2e1..ca699ddc11c1 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -657,7 +657,10 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (result <= 0)
goto out;
- written = generic_write_sync(iocb, result);
+ result = generic_write_sync(iocb, result);
+ if (result < 0)
+ goto out;
+ written = result;
iocb->ki_pos += written;
/* Return error values */
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index e6206eaf2bdf..51b51369704c 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -37,6 +37,7 @@ ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
if (ffl) {
INIT_LIST_HEAD(&ffl->error_list);
INIT_LIST_HEAD(&ffl->mirrors);
+ ffl->last_report_time = ktime_get();
return &ffl->generic_hdr;
} else
return NULL;
@@ -640,19 +641,18 @@ nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
{
static const ktime_t notime = {0};
s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
+ struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);
nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
if (ktime_equal(mirror->start_time, notime))
mirror->start_time = now;
- if (ktime_equal(mirror->last_report_time, notime))
- mirror->last_report_time = now;
if (mirror->report_interval != 0)
report_interval = (s64)mirror->report_interval * 1000LL;
else if (layoutstats_timer != 0)
report_interval = (s64)layoutstats_timer * 1000LL;
- if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >=
+ if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
report_interval) {
- mirror->last_report_time = now;
+ ffl->last_report_time = now;
return true;
}
@@ -806,11 +806,14 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
{
struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
struct nfs4_pnfs_ds *ds;
+ bool fail_return = false;
int idx;
/* mirrors are sorted by efficiency */
for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
- ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
+ if (idx+1 == fls->mirror_array_cnt)
+ fail_return = true;
+ ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
if (ds) {
*best_idx = idx;
return ds;
@@ -859,6 +862,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs4_pnfs_ds *ds;
int ds_idx;
+retry:
/* Use full layout for now */
if (!pgio->pg_lseg)
ff_layout_pg_get_read(pgio, req, false);
@@ -871,10 +875,13 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
if (!ds) {
- if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
- goto out_pnfs;
- else
+ if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ /* Sleep for 1 second before retrying */
+ ssleep(1);
+ goto retry;
}
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
@@ -890,12 +897,6 @@ out_mds:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_read_mds(pgio);
- return;
-
-out_pnfs:
- pnfs_set_lo_fail(pgio->pg_lseg);
- pnfs_put_lseg(pgio->pg_lseg);
- pgio->pg_lseg = NULL;
}
static void
@@ -909,6 +910,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
int i;
int status;
+retry:
if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
@@ -940,10 +942,13 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
for (i = 0; i < pgio->pg_mirror_count; i++) {
ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
if (!ds) {
- if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
- goto out_pnfs;
- else
+ if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
goto out_mds;
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ /* Sleep for 1 second before retrying */
+ ssleep(1);
+ goto retry;
}
pgm = &pgio->pg_mirrors[i];
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -956,12 +961,6 @@ out_mds:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_write_mds(pgio);
- return;
-
-out_pnfs:
- pnfs_set_lo_fail(pgio->pg_lseg);
- pnfs_put_lseg(pgio->pg_lseg);
- pgio->pg_lseg = NULL;
}
static unsigned int
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index 1bcdb15d0c41..3ee0c9fcea76 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -84,7 +84,6 @@ struct nfs4_ff_layout_mirror {
struct nfs4_ff_layoutstat read_stat;
struct nfs4_ff_layoutstat write_stat;
ktime_t start_time;
- ktime_t last_report_time;
u32 report_interval;
};
@@ -101,6 +100,7 @@ struct nfs4_flexfile_layout {
struct pnfs_ds_commit_info commit_info;
struct list_head mirrors;
struct list_head error_list; /* nfs4_ff_layout_ds_err */
+ ktime_t last_report_time; /* Layoutstat report times */
};
static inline struct nfs4_flexfile_layout *
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 0aa36be71fce..f7a3f6b05369 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -17,8 +17,8 @@
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
-static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
-static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
+static unsigned int dataserver_retrans;
void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
@@ -379,7 +379,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
devid = &mirror->mirror_ds->id_node;
if (ff_layout_test_devid_unavailable(devid))
- goto out;
+ goto out_fail;
ds = mirror->mirror_ds->ds;
/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
@@ -405,15 +405,16 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
mirror->mirror_ds->ds_versions[0].rsize = max_payload;
if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
mirror->mirror_ds->ds_versions[0].wsize = max_payload;
- } else {
- ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
- mirror, lseg->pls_range.offset,
- lseg->pls_range.length, NFS4ERR_NXIO,
- OP_ILLEGAL, GFP_NOIO);
- if (fail_return || !ff_layout_has_available_ds(lseg))
- pnfs_error_mark_layout_for_return(ino, lseg);
- ds = NULL;
+ goto out;
}
+ ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, lseg->pls_range.offset,
+ lseg->pls_range.length, NFS4ERR_NXIO,
+ OP_ILLEGAL, GFP_NOIO);
+out_fail:
+ if (fail_return || !ff_layout_has_available_ds(lseg))
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ ds = NULL;
out:
return ds;
}
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7ce5e023c3c3..74935a19e4bf 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -58,6 +58,9 @@ struct nfs_clone_mount {
*/
#define NFS_UNSPEC_PORT (-1)
+#define NFS_UNSPEC_RETRANS (UINT_MAX)
+#define NFS_UNSPEC_TIMEO (UINT_MAX)
+
/*
* Maximum number of pages that readdir can use for creating
* a vmapped array of pages.
@@ -156,7 +159,7 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *,
int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *, struct nfs_fattr *);
void nfs_server_insert_lists(struct nfs_server *);
void nfs_server_remove_lists(struct nfs_server *);
-void nfs_init_timeout_values(struct rpc_timeout *, int, unsigned int, unsigned int);
+void nfs_init_timeout_values(struct rpc_timeout *to, int proto, int timeo, int retrans);
int nfs_init_server_rpcclient(struct nfs_server *, const struct rpc_timeout *t,
rpc_authflavor_t);
struct nfs_server *nfs_alloc_server(void);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 6f4752734804..64b43b4ad9dd 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -318,10 +318,22 @@ static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
struct nfs42_layoutstat_data *data = calldata;
- struct nfs_server *server = NFS_SERVER(data->args.inode);
+ struct inode *inode = data->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct pnfs_layout_hdr *lo;
+ spin_lock(&inode->i_lock);
+ lo = NFS_I(inode)->layout;
+ if (!pnfs_layout_is_valid(lo)) {
+ spin_unlock(&inode->i_lock);
+ rpc_exit(task, 0);
+ return;
+ }
+ nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
+ spin_unlock(&inode->i_lock);
nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args,
&data->res.seq_res, task);
}
static void
@@ -341,11 +353,11 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout;
- if (lo && nfs4_stateid_match(&data->args.stateid,
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match(&data->args.stateid,
&lo->plh_stateid)) {
LIST_HEAD(head);
@@ -359,11 +371,23 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
} else
spin_unlock(&inode->i_lock);
break;
+ case -NFS4ERR_OLD_STATEID:
+ spin_lock(&inode->i_lock);
+ lo = NFS_I(inode)->layout;
+ if (pnfs_layout_is_valid(lo) &&
+ nfs4_stateid_match_other(&data->args.stateid,
+ &lo->plh_stateid)) {
+ /* Do we need to delay before resending? */
+ if (!nfs4_stateid_is_newer(&lo->plh_stateid,
+ &data->args.stateid))
+ rpc_delay(task, HZ);
+ rpc_restart_call_prepare(task);
+ }
+ spin_unlock(&inode->i_lock);
+ break;
case -ENOTSUPP:
case -EOPNOTSUPP:
NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
- default:
- break;
}
dprintk("%s server returns %d\n", __func__, task->tk_status);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8d7d08d4f95f..cd3b7cfdde16 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -817,6 +817,11 @@ static int nfs4_set_client(struct nfs_server *server,
goto error;
}
+ if (server->nfs_client == clp) {
+ error = -ELOOP;
+ goto error;
+ }
+
/*
* Query for the lease time on clientid setup or renewal
*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1949bbd806eb..a9dec32ba9ba 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -634,15 +634,11 @@ out_sleep:
}
EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
-static int nfs40_sequence_done(struct rpc_task *task,
- struct nfs4_sequence_res *res)
+static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
struct nfs4_slot *slot = res->sr_slot;
struct nfs4_slot_table *tbl;
- if (slot == NULL)
- goto out;
-
tbl = slot->table;
spin_lock(&tbl->slot_tbl_lock);
if (!nfs41_wake_and_assign_slot(tbl, slot))
@@ -650,7 +646,13 @@ static int nfs40_sequence_done(struct rpc_task *task,
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
-out:
+}
+
+static int nfs40_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+{
+ if (res->sr_slot != NULL)
+ nfs40_sequence_free_slot(res);
return 1;
}
@@ -666,6 +668,11 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
tbl = slot->table;
session = tbl->session;
+ /* Bump the slot sequence number */
+ if (slot->seq_done)
+ slot->seq_nr++;
+ slot->seq_done = 0;
+
spin_lock(&tbl->slot_tbl_lock);
/* Be nice to the server: try to ensure that the last transmitted
* value for highest_user_slotid <= target_highest_slotid
@@ -686,9 +693,12 @@ out_unlock:
res->sr_slot = NULL;
if (send_new_highest_used_slotid)
nfs41_notify_server(session->clp);
+ if (waitqueue_active(&tbl->slot_waitq))
+ wake_up_all(&tbl->slot_waitq);
}
-int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+static int nfs41_sequence_process(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
{
struct nfs4_session *session;
struct nfs4_slot *slot = res->sr_slot;
@@ -714,7 +724,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
switch (res->sr_status) {
case 0:
/* Update the slot's sequence and clientid lease timer */
- ++slot->seq_nr;
+ slot->seq_done = 1;
clp = session->clp;
do_renew_lease(clp, res->sr_timestamp);
/* Check sequence flags */
@@ -769,16 +779,16 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
goto retry_nowait;
default:
/* Just update the slot sequence no. */
- ++slot->seq_nr;
+ slot->seq_done = 1;
}
out:
/* The session may be reset by one of the error handlers. */
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
- nfs41_sequence_free_slot(res);
out_noaction:
return ret;
retry_nowait:
if (rpc_restart_call_prepare(task)) {
+ nfs41_sequence_free_slot(res);
task->tk_status = 0;
ret = 0;
}
@@ -789,8 +799,37 @@ out_retry:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
return 0;
}
+
+int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+ if (!nfs41_sequence_process(task, res))
+ return 0;
+ if (res->sr_slot != NULL)
+ nfs41_sequence_free_slot(res);
+ return 1;
+}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+ if (res->sr_slot == NULL)
+ return 1;
+ if (res->sr_slot->table->session != NULL)
+ return nfs41_sequence_process(task, res);
+ return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+ if (res->sr_slot != NULL) {
+ if (res->sr_slot->table->session != NULL)
+ nfs41_sequence_free_slot(res);
+ else
+ nfs40_sequence_free_slot(res);
+ }
+}
+
int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
if (res->sr_slot == NULL)
@@ -920,6 +959,17 @@ static int nfs4_setup_sequence(const struct nfs_server *server,
args, res, task);
}
+static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+ return nfs40_sequence_done(task, res);
+}
+
+static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+ if (res->sr_slot != NULL)
+ nfs40_sequence_free_slot(res);
+}
+
int nfs4_sequence_done(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
@@ -1197,6 +1247,7 @@ static void nfs4_opendata_free(struct kref *kref)
struct super_block *sb = p->dentry->d_sb;
nfs_free_seqid(p->o_arg.seqid);
+ nfs4_sequence_free_slot(&p->o_res.seq_res);
if (p->state != NULL)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
@@ -1656,9 +1707,14 @@ err:
static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
+ struct nfs4_state *ret;
+
if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
- return _nfs4_opendata_reclaim_to_nfs4_state(data);
- return _nfs4_opendata_to_nfs4_state(data);
+ ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
+ else
+ ret = _nfs4_opendata_to_nfs4_state(data);
+ nfs4_sequence_free_slot(&data->o_res.seq_res);
+ return ret;
}
static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
@@ -2056,7 +2112,7 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
- if (!nfs4_sequence_done(task, &data->o_res.seq_res))
+ if (!nfs4_sequence_process(task, &data->o_res.seq_res))
return;
if (task->tk_status == 0) {
@@ -7514,12 +7570,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
trace_nfs4_create_session(clp, status);
+ switch (status) {
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_DELAY:
+ case -ETIMEDOUT:
+ case -EACCES:
+ case -EAGAIN:
+ goto out;
+ }
+
+ clp->cl_seqid++;
if (!status) {
/* Verify the session's negotiated channel_attrs values */
status = nfs4_verify_channel_attrs(&args, &res);
/* Increment the clientid slot sequence id */
- if (clp->cl_seqid == res.seqid)
- clp->cl_seqid++;
if (status)
goto out;
nfs4_update_session(session, &res);
@@ -7864,7 +7928,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
struct nfs4_layoutget *lgp = calldata;
dprintk("--> %s\n", __func__);
- nfs41_sequence_done(task, &lgp->res.seq_res);
+ nfs41_sequence_process(task, &lgp->res.seq_res);
dprintk("<-- %s\n", __func__);
}
@@ -8080,6 +8144,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
if (status == 0 && lgp->res.layoutp->len)
lseg = pnfs_layout_process(lgp);
+ nfs4_sequence_free_slot(&lgp->res.seq_res);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
if (status)
@@ -8106,7 +8171,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
dprintk("--> %s\n", __func__);
- if (!nfs41_sequence_done(task, &lrp->res.seq_res))
+ if (!nfs41_sequence_process(task, &lrp->res.seq_res))
return;
server = NFS_SERVER(lrp->args.inode);
@@ -8118,6 +8183,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
case -NFS4ERR_DELAY:
if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
break;
+ nfs4_sequence_free_slot(&lrp->res.seq_res);
rpc_restart_call_prepare(task);
return;
}
@@ -8132,12 +8198,16 @@ static void nfs4_layoutreturn_release(void *calldata)
dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
- pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
- be32_to_cpu(lrp->args.stateid.seqid));
- if (lrp->res.lrs_present && pnfs_layout_is_valid(lo))
+ if (lrp->res.lrs_present) {
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme,
+ &lrp->args.range,
+ be32_to_cpu(lrp->args.stateid.seqid));
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ } else
+ pnfs_mark_layout_stateid_invalid(lo, &freeme);
pnfs_clear_layoutreturn_waitbit(lo);
spin_unlock(&lo->plh_inode->i_lock);
+ nfs4_sequence_free_slot(&lrp->res.seq_res);
pnfs_free_lseg_list(&freeme);
pnfs_put_layout_hdr(lrp->args.layout);
nfs_iput_and_deactive(lrp->inode);
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index 332d06e64fa9..b62973045a3e 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -28,6 +28,7 @@ static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
tbl->highest_used_slotid = NFS4_NO_SLOT;
spin_lock_init(&tbl->slot_tbl_lock);
rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
+ init_waitqueue_head(&tbl->slot_waitq);
init_completion(&tbl->complete);
}
@@ -172,6 +173,58 @@ struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
return ERR_PTR(-E2BIG);
}
+static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
+ u32 *seq_nr)
+ __must_hold(&tbl->slot_tbl_lock)
+{
+ struct nfs4_slot *slot;
+
+ slot = nfs4_lookup_slot(tbl, slotid);
+ if (IS_ERR(slot))
+ return PTR_ERR(slot);
+ *seq_nr = slot->seq_nr;
+ return 0;
+}
+
+/*
+ * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
+ *
+ * Given a slot table, slot id and sequence number, determine if the
+ * RPC call in question is still in flight. This function is mainly
+ * intended for use by the callback channel.
+ */
+static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
+ u32 slotid, u32 seq_nr)
+{
+ u32 cur_seq;
+ bool ret = false;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
+ cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
+ ret = true;
+ spin_unlock(&tbl->slot_tbl_lock);
+ return ret;
+}
+
+/*
+ * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
+ *
+ * Given a slot table, slot id and sequence number, wait until the
+ * corresponding RPC call completes. This function is mainly
+ * intended for use by the callback channel.
+ */
+int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+ u32 slotid, u32 seq_nr,
+ unsigned long timeout)
+{
+ if (wait_event_timeout(tbl->slot_waitq,
+ !nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
+ timeout) == 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
/*
* nfs4_alloc_slot - efficiently look for a free slot
*
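nfs4_slot_wait_on_seqid() above is the standard wait-on-predicate shape: wait_event_timeout() keeps re-evaluating nfs4_slot_seqid_in_use(), which samples the slot state under slot_tbl_lock, until the predicate turns false or the timeout expires; the matching wake-up comes from nfs41_sequence_free_slot() via slot_waitq. A hedged userspace analogue of the same shape with a pthread condition variable (all names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct slot_waiter {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool in_use;			/* the predicate's state */
};

/* Wait until w->in_use turns false; negative errno on failure. */
static int wait_not_in_use(struct slot_waiter *w,
			   const struct timespec *deadline)
{
	int rc = 0;

	pthread_mutex_lock(&w->lock);
	while (w->in_use && rc == 0)
		rc = pthread_cond_timedwait(&w->cond, &w->lock, deadline);
	pthread_mutex_unlock(&w->lock);
	return rc ? -rc : 0;		/* -ETIMEDOUT on expiry */
}

/* The waker's side mirrors nfs41_sequence_free_slot(): flip the
 * state under the lock, then broadcast. */
static void mark_done(struct slot_waiter *w)
{
	pthread_mutex_lock(&w->lock);
	w->in_use = false;
	pthread_cond_broadcast(&w->cond);
	pthread_mutex_unlock(&w->lock);
}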
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 5b51298d1d03..f703b755351b 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -21,7 +21,8 @@ struct nfs4_slot {
unsigned long generation;
u32 slot_nr;
u32 seq_nr;
- unsigned int interrupted : 1;
+ unsigned int interrupted : 1,
+ seq_done : 1;
};
/* Sessions */
@@ -36,6 +37,7 @@ struct nfs4_slot_table {
unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
spinlock_t slot_tbl_lock;
struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
+ wait_queue_head_t slot_waitq; /* Completion wait on slot */
u32 max_slots; /* # slots in table */
u32 max_slotid; /* Max allowed slotid value */
u32 highest_used_slotid; /* sent to server on each SEQ.
@@ -78,6 +80,9 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
+extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
+ u32 slotid, u32 seq_nr,
+ unsigned long timeout);
extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 70806cae0d36..2c93a85eda51 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -365,7 +365,8 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
atomic_dec(&lo->plh_refcount);
if (list_empty(&lo->plh_segs)) {
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+ if (atomic_read(&lo->plh_outstanding) == 0)
+ set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}
rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
@@ -768,17 +769,32 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
pnfs_destroy_layouts_byclid(clp, false);
}
+static void
+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
+{
+ lo->plh_return_iomode = 0;
+ lo->plh_return_seq = 0;
+ clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+}
+
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
bool update_barrier)
{
u32 oldseq, newseq, new_barrier = 0;
- bool invalid = !pnfs_layout_is_valid(lo);
oldseq = be32_to_cpu(lo->plh_stateid.seqid);
newseq = be32_to_cpu(new->seqid);
- if (invalid || pnfs_seqid_is_newer(newseq, oldseq)) {
+
+ if (!pnfs_layout_is_valid(lo)) {
+ nfs4_stateid_copy(&lo->plh_stateid, new);
+ lo->plh_barrier = newseq;
+ pnfs_clear_layoutreturn_info(lo);
+ clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+ return;
+ }
+ if (pnfs_seqid_is_newer(newseq, oldseq)) {
nfs4_stateid_copy(&lo->plh_stateid, new);
/*
* Because of wraparound, we want to keep the barrier
@@ -790,7 +806,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
new_barrier = be32_to_cpu(new->seqid);
else if (new_barrier == 0)
return;
- if (invalid || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
+ if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
lo->plh_barrier = new_barrier;
}
@@ -886,19 +902,14 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}
-static void
-pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
-{
- lo->plh_return_iomode = 0;
- lo->plh_return_seq = 0;
- clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-}
-
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
nfs4_stateid *stateid,
enum pnfs_iomode *iomode)
{
+ /* Serialise LAYOUTGET/LAYOUTRETURN */
+ if (atomic_read(&lo->plh_outstanding) != 0)
+ return false;
if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
return false;
pnfs_get_layout_hdr(lo);
@@ -1555,6 +1566,7 @@ pnfs_update_layout(struct inode *ino,
}
lookup_again:
+ nfs4_client_recover_expired_lease(clp);
first = false;
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1797,16 +1809,11 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
*/
pnfs_mark_layout_stateid_invalid(lo, &free_me);
- nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
- lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
+ pnfs_set_layout_stateid(lo, &res->stateid, true);
}
pnfs_get_lseg(lseg);
pnfs_layout_insert_lseg(lo, lseg, &free_me);
- if (!pnfs_layout_is_valid(lo)) {
- pnfs_clear_layoutreturn_info(lo);
- clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- }
if (res->return_on_close)
@@ -2510,7 +2517,6 @@ pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
data->args.fh = NFS_FH(inode);
data->args.inode = inode;
- nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
status = ld->prepare_layoutstats(&data->args);
if (status)
goto out_free;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 18d446e1a82b..d39601381adf 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -923,6 +923,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data) {
+ data->timeo = NFS_UNSPEC_TIMEO;
+ data->retrans = NFS_UNSPEC_RETRANS;
data->acregmin = NFS_DEF_ACREGMIN;
data->acregmax = NFS_DEF_ACREGMAX;
data->acdirmin = NFS_DEF_ACDIRMIN;
@@ -1189,6 +1191,19 @@ static int nfs_get_option_ul(substring_t args[], unsigned long *option)
return rc;
}
+static int nfs_get_option_ul_bound(substring_t args[], unsigned long *option,
+ unsigned long l_bound, unsigned long u_bound)
+{
+ int ret;
+
+ ret = nfs_get_option_ul(args, option);
+ if (ret != 0)
+ return ret;
+ if (*option < l_bound || *option > u_bound)
+ return -ERANGE;
+ return 0;
+}
+
/*
* Error-check and convert a string of mount options from user space into
* a data structure. The whole mount string is processed; bad options are
@@ -1352,12 +1367,12 @@ static int nfs_parse_mount_options(char *raw,
mnt->bsize = option;
break;
case Opt_timeo:
- if (nfs_get_option_ul(args, &option) || option == 0)
+ if (nfs_get_option_ul_bound(args, &option, 1, INT_MAX))
goto out_invalid_value;
mnt->timeo = option;
break;
case Opt_retrans:
- if (nfs_get_option_ul(args, &option) || option == 0)
+ if (nfs_get_option_ul_bound(args, &option, 0, INT_MAX))
goto out_invalid_value;
mnt->retrans = option;
break;
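With nfs_get_option_ul_bound(), out-of-range values are rejected with -ERANGE instead of being silently accepted: timeo must now lie in [1, INT_MAX] and retrans in [0, INT_MAX]. A self-contained userspace sketch of the same bounded-parse pattern (the helper name is hypothetical):

#include <errno.h>
#include <stdlib.h>

/* Parse a decimal option, enforcing an inclusive [lo, hi] range. */
static int parse_ul_bound(const char *s, unsigned long lo, unsigned long hi,
			  unsigned long *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(s, &end, 10);
	if (errno || end == s || *end != '\0')
		return -EINVAL;
	if (v < lo || v > hi)
		return -ERANGE;
	*out = v;
	return 0;
}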
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index d2f97ecca6a5..e0e5f7c3c99f 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
- wait_event(group->fanotify_data.access_waitq, event->response ||
- atomic_read(&group->fanotify_data.bypass_perm));
-
- if (!event->response) { /* bypass_perm set */
- /*
- * Event was canceled because group is being destroyed. Remove
- * it from group's event list because we are responsible for
- * freeing the permission event.
- */
- fsnotify_remove_event(group, &event->fae.fse);
- return 0;
- }
+ wait_event(group->fanotify_data.access_waitq, event->response);
/* userspace responded, convert to something usable */
switch (event->response) {
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 8e8e6bcd1d43..a64313868d3a 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
struct fanotify_perm_event_info *event, *next;
+ struct fsnotify_event *fsn_event;
/*
- * There may be still new events arriving in the notification queue
- * but since userspace cannot use fanotify fd anymore, no event can
- * enter or leave access_list by now.
+ * Stop new events from arriving in the notification queue. Since
+ * userspace cannot use the fanotify fd anymore, no event can enter or
+ * leave access_list by now either.
*/
- spin_lock(&group->fanotify_data.access_lock);
-
- atomic_inc(&group->fanotify_data.bypass_perm);
+ fsnotify_group_stop_queueing(group);
+ /*
+ * Process all permission events on access_list and notification queue
+ * and simulate reply from userspace.
+ */
+ spin_lock(&group->fanotify_data.access_lock);
list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
fae.fse.list) {
pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
spin_unlock(&group->fanotify_data.access_lock);
/*
- * Since bypass_perm is set, newly queued events will not wait for
- * access response. Wake up the already sleeping ones now.
- * synchronize_srcu() in fsnotify_destroy_group() will wait for all
- * processes sleeping in fanotify_handle_event() waiting for access
- * response and thus also for all permission events to be freed.
+ * Destroy all non-permission events. For permission events just
+ * dequeue them and set the response. They will be freed once the
+ * response is consumed and fanotify_get_response() returns.
*/
+ mutex_lock(&group->notification_mutex);
+ while (!fsnotify_notify_queue_is_empty(group)) {
+ fsn_event = fsnotify_remove_first_event(group);
+ if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+ fsnotify_destroy_event(group, fsn_event);
+ else
+ FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+ }
+ mutex_unlock(&group->notification_mutex);
+
+ /* Response for all permission events is set, wake up waiters */
wake_up(&group->fanotify_data.access_waitq);
#endif
@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
spin_lock_init(&group->fanotify_data.access_lock);
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
- atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
switch (flags & FAN_ALL_CLASS_BITS) {
case FAN_CLASS_NOTIF:
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 3e2dd85be5dd..b47f7cfdcaa4 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -40,6 +40,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
}
/*
+ * Stop queueing new events for this group. Once this function returns
+ * fsnotify_add_event() will not add any new events to the group's queue.
+ */
+void fsnotify_group_stop_queueing(struct fsnotify_group *group)
+{
+ mutex_lock(&group->notification_mutex);
+ group->shutdown = true;
+ mutex_unlock(&group->notification_mutex);
+}
+
+/*
* Trying to get rid of a group. Remove all marks, flush all events and release
* the group reference.
* Note that another thread calling fsnotify_clear_marks_by_group() may still
@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/
void fsnotify_destroy_group(struct fsnotify_group *group)
{
+ /*
+ * Stop queueing new events. The code below is careful enough not to
+ * require this, but fanotify needs to stop queueing events even before
+ * fsnotify_destroy_group() is called, and this makes the other callers
+ * of fsnotify_destroy_group() see the same behavior.
+ */
+ fsnotify_group_stop_queueing(group);
+
/* clear all inode marks for this group, attach them to destroy_list */
fsnotify_detach_group_marks(group);
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index a95d8e037aeb..e455e83ceeeb 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. The function returns 0 if the event was
* added to the queue, 1 if the event was merged with some other queued event,
- * 2 if the queue of events has overflown.
+ * 2 if the event was not queued - either the queue of events has overflowed
+ * or the group is shutting down.
*/
int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
mutex_lock(&group->notification_mutex);
+ if (group->shutdown) {
+ mutex_unlock(&group->notification_mutex);
+ return 2;
+ }
+
if (group->q_len >= group->max_events) {
ret = 2;
/* Queue overflow event only if it isn't already queued */
@@ -126,21 +132,6 @@ queue:
}
/*
- * Remove @event from group's notification queue. It is the responsibility of
- * the caller to destroy the event.
- */
-void fsnotify_remove_event(struct fsnotify_group *group,
- struct fsnotify_event *event)
-{
- mutex_lock(&group->notification_mutex);
- if (!list_empty(&event->list)) {
- list_del_init(&event->list);
- group->q_len--;
- }
- mutex_unlock(&group->notification_mutex);
-}
-
-/*
* Remove and return the first event from the notification list. It is the
* responsibility of the caller to destroy the obtained event
*/
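The group->shutdown flag is only read and written under notification_mutex, which is what makes fsnotify_group_stop_queueing() a reliable barrier: once it returns, fsnotify_add_event() can no longer admit an event. A minimal sketch of that shutdown-aware enqueue shape (userspace, illustrative names):

#include <pthread.h>
#include <stdbool.h>

struct event_queue {
	pthread_mutex_t lock;
	bool shutdown;
	/* ... list head, queue length, etc. ... */
};

/* Returns 0 if enqueued, -1 if the queue is already shut down. */
static int queue_add(struct event_queue *q /* , item */)
{
	int ret = -1;

	pthread_mutex_lock(&q->lock);
	if (!q->shutdown) {
		/* link the item in here */
		ret = 0;
	}
	pthread_mutex_unlock(&q->lock);
	return ret;
}

static void queue_stop(struct event_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->shutdown = true;
	pthread_mutex_unlock(&q->lock);
}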
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 7dabbc31060e..f165f867f332 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5922,7 +5922,6 @@ bail:
}
static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
- handle_t *handle,
struct inode *data_alloc_inode,
struct buffer_head *data_alloc_bh)
{
@@ -5935,11 +5934,19 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
struct ocfs2_truncate_log *tl;
struct inode *tl_inode = osb->osb_tl_inode;
struct buffer_head *tl_bh = osb->osb_tl_bh;
+ handle_t *handle;
di = (struct ocfs2_dinode *) tl_bh->b_data;
tl = &di->id2.i_dealloc;
i = le16_to_cpu(tl->tl_used) - 1;
while (i >= 0) {
+ handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
+ if (IS_ERR(handle)) {
+ status = PTR_ERR(handle);
+ mlog_errno(status);
+ goto bail;
+ }
+
/* Caller has given us at least enough credits to
* update the truncate log dinode */
status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
@@ -5974,12 +5981,7 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
}
}
- status = ocfs2_extend_trans(handle,
- OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ ocfs2_commit_trans(osb, handle);
i--;
}
@@ -5994,7 +5996,6 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
{
int status;
unsigned int num_to_flush;
- handle_t *handle;
struct inode *tl_inode = osb->osb_tl_inode;
struct inode *data_alloc_inode = NULL;
struct buffer_head *tl_bh = osb->osb_tl_bh;
@@ -6038,21 +6039,11 @@ int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
goto out_mutex;
}
- handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
- if (IS_ERR(handle)) {
- status = PTR_ERR(handle);
- mlog_errno(status);
- goto out_unlock;
- }
-
- status = ocfs2_replay_truncate_records(osb, handle, data_alloc_inode,
+ status = ocfs2_replay_truncate_records(osb, data_alloc_inode,
data_alloc_bh);
if (status < 0)
mlog_errno(status);
- ocfs2_commit_trans(osb, handle);
-
-out_unlock:
brelse(data_alloc_bh);
ocfs2_inode_unlock(data_alloc_inode, 1);
@@ -6413,43 +6404,34 @@ static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
goto out_mutex;
}
- handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- mlog_errno(ret);
- goto out_unlock;
- }
-
while (head) {
if (head->free_bg)
bg_blkno = head->free_bg;
else
bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
head->free_bit);
+ handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_unlock;
+ }
+
trace_ocfs2_free_cached_blocks(
(unsigned long long)head->free_blk, head->free_bit);
ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
head->free_bit, bg_blkno, 1);
- if (ret) {
+ if (ret)
mlog_errno(ret);
- goto out_journal;
- }
- ret = ocfs2_extend_trans(handle, OCFS2_SUBALLOC_FREE);
- if (ret) {
- mlog_errno(ret);
- goto out_journal;
- }
+ ocfs2_commit_trans(osb, handle);
tmp = head;
head = head->free_next;
kfree(tmp);
}
-out_journal:
- ocfs2_commit_trans(osb, handle);
-
out_unlock:
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
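Both ocfs2 loops above move from one long-lived handle that was repeatedly grown with ocfs2_extend_trans() to a fresh start/commit pair per record, so the journal credits held at any instant stay bounded and a failure aborts only the record in flight. The restructured shape, reduced to a self-contained stub (txn_start()/txn_commit() merely stand in for ocfs2_start_trans()/ocfs2_commit_trans()):

#include <stdio.h>
#include <stdlib.h>

struct txn { int credits; };

static struct txn *txn_start(int credits)
{
	struct txn *t = malloc(sizeof(*t));

	if (t)
		t->credits = credits;
	return t;
}

static void txn_commit(struct txn *t)
{
	free(t);
}

/* One short transaction per record rather than one extended one. */
static int replay_records(int nrecs)
{
	for (int i = nrecs - 1; i >= 0; i--) {
		struct txn *t = txn_start(1);

		if (!t)
			return -1;
		printf("replaying record %d\n", i);
		txn_commit(t);		/* credits released every pass */
	}
	return 0;
}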
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 94b18369b1cc..b95e7df5b76a 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -44,9 +44,6 @@
* version here in tcp_internal.h should not need to be bumped for
* filesystem locking changes.
*
- * New in version 12
- * - Negotiate hb timeout when storage is down.
- *
* New in version 11
* - Negotiation of filesystem locking in the dlm join.
*
@@ -78,7 +75,7 @@
* - full 64 bit i_size in the metadata lock lvbs
* - introduction of "rw" lock and pushing meta/data locking down
*/
-#define O2NET_PROTOCOL_VERSION 12ULL
+#define O2NET_PROTOCOL_VERSION 11ULL
struct o2net_handshake {
__be64 protocol_version;
__be64 connector_id;
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index cdeafb4e7ed6..0bb128659d4b 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -268,7 +268,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
struct dlm_lock *lock, int flags, int type)
{
enum dlm_status status;
- u8 old_owner = res->owner;
mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
@@ -335,7 +334,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
spin_lock(&res->spinlock);
res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
- lock->convert_pending = 0;
/* if it failed, move it back to granted queue.
* if master returns DLM_NORMAL and then down before sending ast,
* it may have already been moved to granted queue, reset to
@@ -344,12 +342,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
if (status != DLM_NOTQUEUED)
dlm_error(status);
dlm_revert_pending_convert(res, lock);
- } else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
- (old_owner != res->owner)) {
- mlog(0, "res %.*s is in recovering or has been recovered.\n",
- res->lockname.len, res->lockname.name);
+ } else if (!lock->convert_pending) {
+ mlog(0, "%s: res %.*s, owner died and lock has been moved back "
+ "to granted list, retry convert.\n",
+ dlm->name, res->lockname.len, res->lockname.name);
status = DLM_RECOVERING;
}
+
+ lock->convert_pending = 0;
bail:
spin_unlock(&res->spinlock);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 4e7b0dc22450..0b055bfb8e86 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1506,7 +1506,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
u64 start, u64 len)
{
int ret = 0;
- u64 tmpend, end = start + len;
+ u64 tmpend = 0;
+ u64 end = start + len;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
unsigned int csize = osb->s_clustersize;
handle_t *handle;
@@ -1538,18 +1539,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
}
/*
- * We want to get the byte offset of the end of the 1st cluster.
+ * If start is on a cluster boundary and end is somewhere in another
+ * cluster, we have not COWed the cluster starting at start, unless
+ * end is also within the same cluster. So, in this case, we skip this
+ * first call to ocfs2_zero_range_for_truncate() and move on
+ * to the next one.
*/
- tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
- if (tmpend > end)
- tmpend = end;
+ if ((start & (csize - 1)) != 0) {
+ /*
+ * We want to get the byte offset of the end of the 1st
+ * cluster.
+ */
+ tmpend = (u64)osb->s_clustersize +
+ (start & ~(osb->s_clustersize - 1));
+ if (tmpend > end)
+ tmpend = end;
- trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
- (unsigned long long)tmpend);
+ trace_ocfs2_zero_partial_clusters_range1(
+ (unsigned long long)start,
+ (unsigned long long)tmpend);
- ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
- if (ret)
- mlog_errno(ret);
+ ret = ocfs2_zero_range_for_truncate(inode, handle, start,
+ tmpend);
+ if (ret)
+ mlog_errno(ret);
+ }
if (tmpend < end) {
/*
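The guard `(start & (csize - 1)) != 0` and the rounding expression above are ordinary power-of-two alignment arithmetic (s_clustersize is always a power of two). A tiny standalone illustration of the two expressions:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t csize = 4096;		/* cluster size, power of two */
	uint64_t start = 5000;

	/* offset within the cluster; zero would mean cluster-aligned */
	assert((start & (csize - 1)) == 904);

	/* byte offset of the end of the cluster containing 'start' */
	uint64_t tmpend = csize + (start & ~(csize - 1));
	assert(tmpend == 8192);
	return 0;
}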
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index ea47120a85ff..6ad3533940ba 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1199,14 +1199,24 @@ retry:
inode_unlock((*ac)->ac_inode);
ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
- if (ret == 1)
+ if (ret == 1) {
+ iput((*ac)->ac_inode);
+ (*ac)->ac_inode = NULL;
goto retry;
+ }
if (ret < 0)
mlog_errno(ret);
inode_lock((*ac)->ac_inode);
- ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+ ret = ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+ if (ret < 0) {
+ mlog_errno(ret);
+ inode_unlock((*ac)->ac_inode);
+ iput((*ac)->ac_inode);
+ (*ac)->ac_inode = NULL;
+ goto bail;
+ }
}
if (status < 0) {
if (status != -ENOSPC)
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 54e5d6681786..43fdc2765aea 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -80,6 +80,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
}
for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+ if (ovl_is_private_xattr(name))
+ continue;
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 12bcd07b9e32..1560fdc09a5f 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -12,6 +12,8 @@
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/cred.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
#include "overlayfs.h"
void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -186,6 +188,9 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
struct dentry *newdentry;
int err;
+ if (!hardlink && !IS_POSIXACL(udir))
+ stat->mode &= ~current_umask();
+
inode_lock_nested(udir, I_MUTEX_PARENT);
newdentry = lookup_one_len(dentry->d_name.name, upperdir,
dentry->d_name.len);
@@ -335,6 +340,32 @@ out_free:
return ret;
}
+static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
+ const struct posix_acl *acl)
+{
+ void *buffer;
+ size_t size;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
+ return 0;
+
+ size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+ err = size;
+ if (err < 0)
+ goto out_free;
+
+ err = vfs_setxattr(upperdentry, name, buffer, size, XATTR_CREATE);
+out_free:
+ kfree(buffer);
+ return err;
+}
+
static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
struct kstat *stat, const char *link,
struct dentry *hardlink)
@@ -346,10 +377,18 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
struct dentry *upper;
struct dentry *newdentry;
int err;
+ struct posix_acl *acl, *default_acl;
if (WARN_ON(!workdir))
return -EROFS;
+ if (!hardlink) {
+ err = posix_acl_create(dentry->d_parent->d_inode,
+ &stat->mode, &default_acl, &acl);
+ if (err)
+ return err;
+ }
+
err = ovl_lock_rename_workdir(workdir, upperdir);
if (err)
goto out;
@@ -384,6 +423,17 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (err)
goto out_cleanup;
}
+ if (!hardlink) {
+ err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_ACCESS,
+ acl);
+ if (err)
+ goto out_cleanup;
+
+ err = ovl_set_upper_acl(newdentry, XATTR_NAME_POSIX_ACL_DEFAULT,
+ default_acl);
+ if (err)
+ goto out_cleanup;
+ }
if (!hardlink && S_ISDIR(stat->mode)) {
err = ovl_set_opaque(newdentry);
@@ -410,6 +460,10 @@ out_dput:
out_unlock:
unlock_rename(workdir, upperdir);
out:
+ if (!hardlink) {
+ posix_acl_release(acl);
+ posix_acl_release(default_acl);
+ }
return err;
out_cleanup:
@@ -950,9 +1004,9 @@ const struct inode_operations ovl_dir_inode_operations = {
.permission = ovl_permission,
.getattr = ovl_dir_getattr,
.setxattr = generic_setxattr,
- .getxattr = ovl_getxattr,
+ .getxattr = generic_getxattr,
.listxattr = ovl_listxattr,
- .removexattr = ovl_removexattr,
+ .removexattr = generic_removexattr,
.get_acl = ovl_get_acl,
.update_time = ovl_update_time,
};
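ovl_set_upper_acl() above uses the familiar two-pass sizing idiom: call posix_acl_to_xattr() once with a NULL buffer to learn the required size, allocate, then call again to fill. The same shape is visible from userspace with listxattr(2); a hedged illustration:

#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

/* Two-pass sizing: probe with NULL, then fill. Caller frees *out. */
static ssize_t xattr_list_alloc(const char *path, char **out)
{
	ssize_t n = listxattr(path, NULL, 0);

	if (n <= 0)
		return n;
	*out = malloc(n);
	if (!*out)
		return -1;
	return listxattr(path, *out, n);	/* may race with changes */
}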
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 1b885c156028..c75625c1efa3 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/posix_acl.h>
#include "overlayfs.h"
static int ovl_copy_up_truncate(struct dentry *dentry)
@@ -191,32 +192,44 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
return err;
}
-static bool ovl_is_private_xattr(const char *name)
+bool ovl_is_private_xattr(const char *name)
{
-#define OVL_XATTR_PRE_NAME OVL_XATTR_PREFIX "."
- return strncmp(name, OVL_XATTR_PRE_NAME,
- sizeof(OVL_XATTR_PRE_NAME) - 1) == 0;
+ return strncmp(name, OVL_XATTR_PREFIX,
+ sizeof(OVL_XATTR_PREFIX) - 1) == 0;
}
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
{
int err;
- struct dentry *upperdentry;
+ struct path realpath;
+ enum ovl_path_type type = ovl_path_real(dentry, &realpath);
const struct cred *old_cred;
err = ovl_want_write(dentry);
if (err)
goto out;
+ if (!value && !OVL_TYPE_UPPER(type)) {
+ err = vfs_getxattr(realpath.dentry, name, NULL, 0);
+ if (err < 0)
+ goto out_drop_write;
+ }
+
err = ovl_copy_up(dentry);
if (err)
goto out_drop_write;
- upperdentry = ovl_dentry_upper(dentry);
+ if (!OVL_TYPE_UPPER(type))
+ ovl_path_upper(dentry, &realpath);
+
old_cred = ovl_override_creds(dentry->d_sb);
- err = vfs_setxattr(upperdentry, name, value, size, flags);
+ if (value)
+ err = vfs_setxattr(realpath.dentry, name, value, size, flags);
+ else {
+ WARN_ON(flags != XATTR_REPLACE);
+ err = vfs_removexattr(realpath.dentry, name);
+ }
revert_creds(old_cred);
out_drop_write:
@@ -225,16 +238,13 @@ out:
return err;
}
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
- const char *name, void *value, size_t size)
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+ void *value, size_t size)
{
struct dentry *realdentry = ovl_dentry_real(dentry);
ssize_t res;
const struct cred *old_cred;
- if (ovl_is_private_xattr(name))
- return -ENODATA;
-
old_cred = ovl_override_creds(dentry->d_sb);
res = vfs_getxattr(realdentry, name, value, size);
revert_creds(old_cred);
@@ -245,7 +255,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
struct dentry *realdentry = ovl_dentry_real(dentry);
ssize_t res;
- int off;
+ size_t len;
+ char *s;
const struct cred *old_cred;
old_cred = ovl_override_creds(dentry->d_sb);
@@ -255,73 +266,39 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return res;
/* filter out private xattrs */
- for (off = 0; off < res;) {
- char *s = list + off;
- size_t slen = strlen(s) + 1;
+ for (s = list, len = res; len;) {
+ size_t slen = strnlen(s, len) + 1;
- BUG_ON(off + slen > res);
+ /* underlying fs providing us with a broken xattr list? */
+ if (WARN_ON(slen > len))
+ return -EIO;
+ len -= slen;
if (ovl_is_private_xattr(s)) {
res -= slen;
- memmove(s, s + slen, res - off);
+ memmove(s, s + slen, len);
} else {
- off += slen;
+ s += slen;
}
}
return res;
}
-int ovl_removexattr(struct dentry *dentry, const char *name)
-{
- int err;
- struct path realpath;
- enum ovl_path_type type = ovl_path_real(dentry, &realpath);
- const struct cred *old_cred;
-
- err = ovl_want_write(dentry);
- if (err)
- goto out;
-
- err = -ENODATA;
- if (ovl_is_private_xattr(name))
- goto out_drop_write;
-
- if (!OVL_TYPE_UPPER(type)) {
- err = vfs_getxattr(realpath.dentry, name, NULL, 0);
- if (err < 0)
- goto out_drop_write;
-
- err = ovl_copy_up(dentry);
- if (err)
- goto out_drop_write;
-
- ovl_path_upper(dentry, &realpath);
- }
-
- old_cred = ovl_override_creds(dentry->d_sb);
- err = vfs_removexattr(realpath.dentry, name);
- revert_creds(old_cred);
-out_drop_write:
- ovl_drop_write(dentry);
-out:
- return err;
-}
-
struct posix_acl *ovl_get_acl(struct inode *inode, int type)
{
struct inode *realinode = ovl_inode_real(inode, NULL);
const struct cred *old_cred;
struct posix_acl *acl;
- if (!IS_POSIXACL(realinode))
+ if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
return NULL;
if (!realinode->i_op->get_acl)
return NULL;
old_cred = ovl_override_creds(inode->i_sb);
- acl = realinode->i_op->get_acl(realinode, type);
+ acl = get_acl(realinode, type);
revert_creds(old_cred);
return acl;
@@ -391,9 +368,9 @@ static const struct inode_operations ovl_file_inode_operations = {
.permission = ovl_permission,
.getattr = ovl_getattr,
.setxattr = generic_setxattr,
- .getxattr = ovl_getxattr,
+ .getxattr = generic_getxattr,
.listxattr = ovl_listxattr,
- .removexattr = ovl_removexattr,
+ .removexattr = generic_removexattr,
.get_acl = ovl_get_acl,
.update_time = ovl_update_time,
};
@@ -404,9 +381,9 @@ static const struct inode_operations ovl_symlink_inode_operations = {
.readlink = ovl_readlink,
.getattr = ovl_getattr,
.setxattr = generic_setxattr,
- .getxattr = ovl_getxattr,
+ .getxattr = generic_getxattr,
.listxattr = ovl_listxattr,
- .removexattr = ovl_removexattr,
+ .removexattr = generic_removexattr,
.update_time = ovl_update_time,
};
@@ -415,6 +392,9 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode)
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_flags |= S_NOCMTIME;
+#ifdef CONFIG_FS_POSIX_ACL
+ inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE;
+#endif
mode &= S_IFMT;
switch (mode) {
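The rewritten ovl_listxattr() walks the NUL-separated name list with strnlen() and compacts it in place with memmove(), refusing to trust the underlying filesystem to terminate every entry. A self-contained userspace version of the same filter (the predicate is a stand-in for ovl_is_private_xattr()):

#include <stdbool.h>
#include <string.h>
#include <sys/types.h>

static bool is_private(const char *name)
{
	return strncmp(name, "trusted.overlay.", 16) == 0;
}

/* Drop private names from a NUL-separated list of 'res' bytes,
 * in place; returns the new length, or -1 on a malformed list. */
static ssize_t filter_xattr_list(char *list, ssize_t res)
{
	char *s = list;
	size_t len = res;

	while (len) {
		size_t slen = strnlen(s, len) + 1;

		if (slen > len)		/* unterminated entry */
			return -1;
		len -= slen;
		if (is_private(s)) {
			res -= slen;
			memmove(s, s + slen, len);
		} else {
			s += slen;
		}
	}
	return res;
}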
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index e4f5c9536bfe..5813ccff8cd9 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -24,8 +24,8 @@ enum ovl_path_type {
(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
-#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay"
-#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX ".opaque"
+#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay."
+#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque"
#define OVL_ISUPPER_MASK 1UL
@@ -179,20 +179,21 @@ int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
void ovl_cache_free(struct list_head *list);
int ovl_check_d_type_supported(struct path *realpath);
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ struct dentry *dentry, int level);
/* inode.c */
int ovl_setattr(struct dentry *dentry, struct iattr *attr);
int ovl_permission(struct inode *inode, int mask);
-int ovl_setxattr(struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags);
-ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
- const char *name, void *value, size_t size);
+int ovl_xattr_set(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags);
+int ovl_xattr_get(struct dentry *dentry, const char *name,
+ void *value, size_t size);
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
-int ovl_removexattr(struct dentry *dentry, const char *name);
struct posix_acl *ovl_get_acl(struct inode *inode, int type);
int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
+bool ovl_is_private_xattr(const char *name);
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index cf37fc76fc9f..f241b4ee3d8a 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -248,7 +248,7 @@ static inline int ovl_dir_read(struct path *realpath,
err = rdd->err;
} while (!err && rdd->count);
- if (!err && rdd->first_maybe_whiteout)
+ if (!err && rdd->first_maybe_whiteout && rdd->dentry)
err = ovl_check_whiteouts(realpath->dentry, rdd);
fput(realfile);
@@ -606,3 +606,64 @@ int ovl_check_d_type_supported(struct path *realpath)
return rdd.d_type_supported;
}
+
+static void ovl_workdir_cleanup_recurse(struct path *path, int level)
+{
+ int err;
+ struct inode *dir = path->dentry->d_inode;
+ LIST_HEAD(list);
+ struct ovl_cache_entry *p;
+ struct ovl_readdir_data rdd = {
+ .ctx.actor = ovl_fill_merge,
+ .dentry = NULL,
+ .list = &list,
+ .root = RB_ROOT,
+ .is_lowest = false,
+ };
+
+ err = ovl_dir_read(path, &rdd);
+ if (err)
+ goto out;
+
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ list_for_each_entry(p, &list, l_node) {
+ struct dentry *dentry;
+
+ if (p->name[0] == '.') {
+ if (p->len == 1)
+ continue;
+ if (p->len == 2 && p->name[1] == '.')
+ continue;
+ }
+ dentry = lookup_one_len(p->name, path->dentry, p->len);
+ if (IS_ERR(dentry))
+ continue;
+ if (dentry->d_inode)
+ ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+ dput(dentry);
+ }
+ inode_unlock(dir);
+out:
+ ovl_cache_free(&list);
+}
+
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ struct dentry *dentry, int level)
+{
+ int err;
+
+ if (!d_is_dir(dentry) || level > 1) {
+ ovl_cleanup(dir, dentry);
+ return;
+ }
+
+ err = ovl_do_rmdir(dir, dentry);
+ if (err) {
+ struct path path = { .mnt = mnt, .dentry = dentry };
+
+ inode_unlock(dir);
+ ovl_workdir_cleanup_recurse(&path, level + 1);
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ ovl_cleanup(dir, dentry);
+ }
+}
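ovl_workdir_cleanup() tries a plain rmdir first and only recurses when the directory turns out to be non-empty, with the level counter capping that recursion at one extra depth. A hedged userspace analogue of the depth-first removal it performs, built on nftw(3) (which, unlike the kernel version, recurses without a depth cap):

#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <unistd.h>

static int rm_one(const char *path, const struct stat *sb,
		  int typeflag, struct FTW *ftwbuf)
{
	(void)sb;
	(void)ftwbuf;
	return (typeflag == FTW_DP) ? rmdir(path) : unlink(path);
}

/* FTW_DEPTH visits children before their directory, so every
 * rmdir() sees an already-emptied directory. */
static int remove_tree(const char *root)
{
	return nftw(root, rm_one, 16, FTW_DEPTH | FTW_PHYS);
}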
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 4036132842b5..e2a94a26767b 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -814,6 +814,10 @@ retry:
struct kstat stat = {
.mode = S_IFDIR | 0,
};
+ struct iattr attr = {
+ .ia_valid = ATTR_MODE,
+ .ia_mode = stat.mode,
+ };
if (work->d_inode) {
err = -EEXIST;
@@ -821,7 +825,7 @@ retry:
goto out_dput;
retried = true;
- ovl_cleanup(dir, work);
+ ovl_workdir_cleanup(dir, mnt, work, 0);
dput(work);
goto retry;
}
@@ -829,6 +833,21 @@ retry:
err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
if (err)
goto out_dput;
+
+ err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+ if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ goto out_dput;
+
+ err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+ if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ goto out_dput;
+
+ /* Clear any inherited mode bits */
+ inode_lock(work->d_inode);
+ err = notify_change(work, &attr, NULL);
+ inode_unlock(work->d_inode);
+ if (err)
+ goto out_dput;
}
out_unlock:
inode_unlock(dir);
@@ -967,10 +986,19 @@ static unsigned int ovl_split_lowerdirs(char *str)
return ctr;
}
-static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+static int __maybe_unused
+ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ return ovl_xattr_get(dentry, handler->name, buffer, size);
+}
+
+static int __maybe_unused
+ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
struct dentry *workdir = ovl_workdir(dentry);
struct inode *realinode = ovl_inode_real(inode, NULL);
@@ -998,19 +1026,22 @@ static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
posix_acl_release(acl);
- return ovl_setxattr(dentry, inode, handler->name, value, size, flags);
+ err = ovl_xattr_set(dentry, handler->name, value, size, flags);
+ if (!err)
+ ovl_copyattr(ovl_inode_real(inode, NULL), inode);
+
+ return err;
out_acl_release:
posix_acl_release(acl);
return err;
}
-static int ovl_other_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, struct inode *inode,
- const char *name, const void *value,
- size_t size, int flags)
+static int ovl_own_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
{
- return ovl_setxattr(dentry, inode, name, value, size, flags);
+ return -EPERM;
}
static int ovl_own_xattr_set(const struct xattr_handler *handler,
@@ -1021,42 +1052,59 @@ static int ovl_own_xattr_set(const struct xattr_handler *handler,
return -EPERM;
}
-static const struct xattr_handler ovl_posix_acl_access_xattr_handler = {
+static int ovl_other_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ return ovl_xattr_get(dentry, name, buffer, size);
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ return ovl_xattr_set(dentry, name, value, size, flags);
+}
+
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_access_xattr_handler = {
.name = XATTR_NAME_POSIX_ACL_ACCESS,
.flags = ACL_TYPE_ACCESS,
+ .get = ovl_posix_acl_xattr_get,
.set = ovl_posix_acl_xattr_set,
};
-static const struct xattr_handler ovl_posix_acl_default_xattr_handler = {
+static const struct xattr_handler __maybe_unused
+ovl_posix_acl_default_xattr_handler = {
.name = XATTR_NAME_POSIX_ACL_DEFAULT,
.flags = ACL_TYPE_DEFAULT,
+ .get = ovl_posix_acl_xattr_get,
.set = ovl_posix_acl_xattr_set,
};
static const struct xattr_handler ovl_own_xattr_handler = {
.prefix = OVL_XATTR_PREFIX,
+ .get = ovl_own_xattr_get,
.set = ovl_own_xattr_set,
};
static const struct xattr_handler ovl_other_xattr_handler = {
.prefix = "", /* catch all */
+ .get = ovl_other_xattr_get,
.set = ovl_other_xattr_set,
};
static const struct xattr_handler *ovl_xattr_handlers[] = {
+#ifdef CONFIG_FS_POSIX_ACL
&ovl_posix_acl_access_xattr_handler,
&ovl_posix_acl_default_xattr_handler,
+#endif
&ovl_own_xattr_handler,
&ovl_other_xattr_handler,
NULL
};
-static const struct xattr_handler *ovl_xattr_noacl_handlers[] = {
- &ovl_own_xattr_handler,
- &ovl_other_xattr_handler,
- NULL,
-};
-
static int ovl_fill_super(struct super_block *sb, void *data, int silent)
{
struct path upperpath = { NULL, NULL };
@@ -1132,7 +1180,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
err = -EINVAL;
stacklen = ovl_split_lowerdirs(lowertmp);
if (stacklen > OVL_MAX_STACK) {
- pr_err("overlayfs: too many lower directries, limit is %d\n",
+ pr_err("overlayfs: too many lower directories, limit is %d\n",
OVL_MAX_STACK);
goto out_free_lowertmp;
} else if (!ufs->config.upperdir && stacklen == 1) {
@@ -1269,10 +1317,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
sb->s_op = &ovl_super_operations;
- if (IS_ENABLED(CONFIG_FS_POSIX_ACL))
- sb->s_xattr = ovl_xattr_handlers;
- else
- sb->s_xattr = ovl_xattr_noacl_handlers;
+ sb->s_xattr = ovl_xattr_handlers;
sb->s_root = root_dentry;
sb->s_fs_info = ufs;
sb->s_flags |= MS_POSIXACL;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 54e270262979..ac0df4dde823 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1556,18 +1556,13 @@ static const struct file_operations proc_pid_set_comm_operations = {
static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
{
struct task_struct *task;
- struct mm_struct *mm;
struct file *exe_file;
task = get_proc_task(d_inode(dentry));
if (!task)
return -ENOENT;
- mm = get_task_mm(task);
+ exe_file = get_task_exe_file(task);
put_task_struct(task);
- if (!mm)
- return -ENOENT;
- exe_file = get_mm_exe_file(mm);
- mmput(mm);
if (exe_file) {
*exe_path = exe_file->f_path;
path_get(&exe_file->f_path);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index a939f5ed7f89..5c89a07e3d7f 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
+ char *buf = file->private_data;
ssize_t acc = 0;
size_t size, tsz;
size_t elf_buflen;
@@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (clear_user(buffer, tsz))
return -EFAULT;
} else if (is_vmalloc_or_module_addr((void *)start)) {
- char * elf_buf;
-
- elf_buf = kzalloc(tsz, GFP_KERNEL);
- if (!elf_buf)
- return -ENOMEM;
- vread(elf_buf, (char *)start, tsz);
+ vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
- if (copy_to_user(buffer, elf_buf, tsz)) {
- kfree(elf_buf);
+ if (copy_to_user(buffer, buf, tsz))
return -EFAULT;
- }
- kfree(elf_buf);
} else {
if (kern_addr_valid(start)) {
unsigned long n;
- n = copy_to_user(buffer, (char *)start, tsz);
+ /*
+ * Use a bounce buffer to bypass the
+ * hardened usercopy checks on kernel text.
+ */
+ memcpy(buf, (char *) start, tsz);
+ n = copy_to_user(buffer, buf, tsz);
/*
* We cannot distinguish between fault on source
* and fault on destination. When this happens
@@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp)
{
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+
+ filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!filp->private_data)
+ return -ENOMEM;
+
if (kcore_need_update)
kcore_update_ram();
if (i_size_read(inode) != proc_root_kcore->size) {
@@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
return 0;
}
+
+static int release_kcore(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
static const struct file_operations proc_kcore_operations = {
.read = read_kcore,
.open = open_kcore,
+ .release = release_kcore,
.llseek = default_llseek,
};
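The bounce buffer is now allocated once in open_kcore() and freed in release_kcore(), paired through file->private_data, instead of being allocated on every read. A minimal userspace analogue of that per-handle scratch pairing (illustrative only):

#include <stdlib.h>

struct handle {
	char *scratch;		/* per-open bounce buffer */
};

static struct handle *handle_open(size_t bufsz)
{
	struct handle *h = calloc(1, sizeof(*h));

	if (!h)
		return NULL;
	h->scratch = malloc(bufsz);
	if (!h->scratch) {
		free(h);
		return NULL;
	}
	return h;
}

static void handle_close(struct handle *h)
{
	free(h->scratch);
	free(h);
}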
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 187d84ef9de9..f6fa99eca515 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -581,6 +581,8 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
mss->anonymous_thp += HPAGE_PMD_SIZE;
else if (PageSwapBacked(page))
mss->shmem_thp += HPAGE_PMD_SIZE;
+ else if (is_zone_device_page(page))
+ /* pass */;
else
VM_BUG_ON_PAGE(1, page);
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index 183a212694bf..12af0490322f 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -27,9 +27,17 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/ramfs.h>
+#include <linux/sched.h>
#include "internal.h"
+static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
const struct file_operations ramfs_file_operations = {
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
@@ -38,6 +46,7 @@ const struct file_operations ramfs_file_operations = {
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.llseek = generic_file_llseek,
+ .get_unmapped_area = ramfs_mmu_get_unmapped_area,
};
const struct inode_operations ramfs_file_inode_operations = {
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f35523d4fa3a..b803213d1307 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -114,9 +114,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
* If buf != of->prealloc_buf, we don't know how
* large it is, so cannot safely pass it to ->show
*/
- if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
+ if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (pos) {
+ if (len <= pos)
+ return 0;
+ len -= pos;
+ memmove(buf, buf + pos, len);
+ }
return min(count, len);
}
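Because a sysfs show() callback always regenerates the full attribute contents at offset zero, the fix above emulates a positional read by shifting the buffer down by 'pos' and clamping the length. The arithmetic in isolation:

#include <string.h>

/* Emulate a read at offset 'pos' from a buffer holding 'len' bytes. */
static size_t read_at(char *buf, size_t len, size_t pos)
{
	if (len <= pos)
		return 0;
	len -= pos;
	memmove(buf, buf + pos, len);
	return len;
}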
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 3dd8f1d54498..05b5243d89f6 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -2278,6 +2278,8 @@ xfs_alloc_log_agf(
offsetof(xfs_agf_t, agf_btreeblks),
offsetof(xfs_agf_t, agf_uuid),
offsetof(xfs_agf_t, agf_rmap_blocks),
+ /* needed so that we don't log the whole rest of the structure: */
+ offsetof(xfs_agf_t, agf_spare64),
sizeof(xfs_agf_t)
};
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index b5c213a051cd..08569792fe20 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -1814,6 +1814,10 @@ xfs_btree_lookup(
XFS_BTREE_STATS_INC(cur, lookup);
+ /* No such thing as a zero-level tree. */
+ if (cur->bc_nlevels == 0)
+ return -EFSCORRUPTED;
+
block = NULL;
keyno = 0;
@@ -4554,15 +4558,22 @@ xfs_btree_simple_query_range(
if (error)
goto out;
+ /* Nothing? See if there's anything to the right. */
+ if (!stat) {
+ error = xfs_btree_increment(cur, 0, &stat);
+ if (error)
+ goto out;
+ }
+
while (stat) {
/* Find the record. */
error = xfs_btree_get_rec(cur, &recp, &stat);
if (error || !stat)
break;
- cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
/* Skip if high_key(rec) < low_key. */
if (firstrec) {
+ cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
firstrec = false;
diff = cur->bc_ops->diff_two_keys(cur, low_key,
&rec_key);
@@ -4571,6 +4582,7 @@ xfs_btree_simple_query_range(
}
/* Stop if high_key < low_key(rec). */
+ cur->bc_ops->init_key_from_rec(&rec_key, recp);
diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
if (diff > 0)
break;
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 054a2032fdb3..c221d0ecd52e 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -194,7 +194,7 @@ xfs_defer_trans_abort(
/* Abort intent items. */
list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
- if (dfp->dfp_committed)
+ if (!dfp->dfp_done)
dfp->dfp_type->abort_intent(dfp->dfp_intent);
}
@@ -290,7 +290,6 @@ xfs_defer_finish(
struct xfs_defer_pending *dfp;
struct list_head *li;
struct list_head *n;
- void *done_item = NULL;
void *state;
int error = 0;
void (*cleanup_fn)(struct xfs_trans *, void *, int);
@@ -309,19 +308,11 @@ xfs_defer_finish(
if (error)
goto out;
- /* Mark all pending intents as committed. */
- list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
- if (dfp->dfp_committed)
- break;
- trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
- dfp->dfp_committed = true;
- }
-
/* Log an intent-done item for the first pending item. */
dfp = list_first_entry(&dop->dop_pending,
struct xfs_defer_pending, dfp_list);
trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
- done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+ dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
dfp->dfp_count);
cleanup_fn = dfp->dfp_type->finish_cleanup;
@@ -331,7 +322,7 @@ xfs_defer_finish(
list_del(li);
dfp->dfp_count--;
error = dfp->dfp_type->finish_item(*tp, dop, li,
- done_item, &state);
+ dfp->dfp_done, &state);
if (error) {
/*
* Clean up after ourselves and jump out.
@@ -428,8 +419,8 @@ xfs_defer_add(
dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
KM_SLEEP | KM_NOFS);
dfp->dfp_type = defer_op_types[type];
- dfp->dfp_committed = false;
dfp->dfp_intent = NULL;
+ dfp->dfp_done = NULL;
dfp->dfp_count = 0;
INIT_LIST_HEAD(&dfp->dfp_work);
list_add_tail(&dfp->dfp_list, &dop->dop_intake);
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index cc3981c48296..e96533d178cf 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -30,8 +30,8 @@ struct xfs_defer_op_type;
struct xfs_defer_pending {
const struct xfs_defer_op_type *dfp_type; /* function pointers */
struct list_head dfp_list; /* pending items */
- bool dfp_committed; /* committed trans? */
void *dfp_intent; /* log intent item */
+ void *dfp_done; /* log done item */
struct list_head dfp_work; /* work items */
unsigned int dfp_count; /* # extent items */
};
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index e6a8bea0f7ba..270fb5cf4fa1 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -674,7 +674,8 @@ typedef struct xfs_agf {
#define XFS_AGF_BTREEBLKS 0x00000800
#define XFS_AGF_UUID 0x00001000
#define XFS_AGF_RMAP_BLOCKS 0x00002000
-#define XFS_AGF_NUM_BITS 14
+#define XFS_AGF_SPARE64 0x00004000
+#define XFS_AGF_NUM_BITS 15
#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
#define XFS_AGF_FLAGS \
@@ -691,7 +692,8 @@ typedef struct xfs_agf {
{ XFS_AGF_LONGEST, "LONGEST" }, \
{ XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
{ XFS_AGF_UUID, "UUID" }, \
- { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" }
+ { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" }, \
+ { XFS_AGF_SPARE64, "SPARE64" }
/* disk block (xfs_daddr_t) in the AG */
#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 0e3d4f5ec33c..4aecc5fefe96 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -583,7 +583,8 @@ xfs_sb_verify(
* Only check the in progress field for the primary superblock as
* mkfs.xfs doesn't clear it from secondary superblocks.
*/
- return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+ return xfs_mount_validate_sb(mp, &sb,
+ bp->b_maps[0].bm_bn == XFS_SB_DADDR,
check_version);
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 607cc29bba21..b5b9bffe3520 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1611,7 +1611,7 @@ xfs_wait_buftarg(
*/
while (percpu_counter_sum(&btp->bt_io_count))
delay(100);
- drain_workqueue(btp->bt_mount->m_buf_workqueue);
+ flush_workqueue(btp->bt_mount->m_buf_workqueue);
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 24ef83ef04de..fd6be45b3a1e 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1574,9 +1574,16 @@ xfs_fs_fill_super(
}
}
- if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ if (mp->m_sb.sb_rblocks) {
+ xfs_alert(mp,
+ "EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
+ error = -EINVAL;
+ goto out_filestream_unmount;
+ }
xfs_alert(mp,
"EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
+ }
error = xfs_mountfs(mp);
if (error)
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 7e88bec3f359..d303a665dba9 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -2295,7 +2295,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
__entry->dev = mp ? mp->m_super->s_dev : 0;
__entry->type = dfp->dfp_type->type;
__entry->intent = dfp->dfp_intent;
- __entry->committed = dfp->dfp_committed;
+ __entry->committed = dfp->dfp_done != NULL;
__entry->nr = dfp->dfp_count;
),
TP_printk("dev %d:%d optype %d intent %p committed %d nr %d\n",
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 1bfa602958f2..6df9b0749671 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -72,6 +72,7 @@ struct exception_table_entry
/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
+
/*
* architectures with an MMU should override these two
*/
@@ -230,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) *)__p) : \
- -EFAULT; \
+ ((x) = (__typeof__(*(ptr)))0, -EFAULT); \
})
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = __copy_from_user(x, ptr, size);
- return size ? -EFAULT : size;
+ size_t n = __copy_from_user(x, ptr, size);
+ if (unlikely(n)) {
+ memset(x + (size - n), 0, n);
+ return -EFAULT;
+ }
+ return 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -257,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user(to, from, n);
- else
- return n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline long copy_to_user(void __user *to,
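
The rewritten generic copy_from_user() above now zeroes the tail of the destination whenever the copy comes up short, so a later copy back to userspace cannot leak uninitialized kernel memory. A minimal sketch of the calling pattern this hardens; the demo_* names are hypothetical:

struct demo_args {
	u32 flags;
	u64 value;
};

static long demo_ioctl(void __user *uarg)
{
	struct demo_args args;	/* uninitialized kernel stack */

	/*
	 * If the copy faults partway through, the uncopied tail of
	 * 'args' is now zeroed instead of holding stack garbage that
	 * a later copy_to_user() could leak.
	 */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	return args.flags ? (long)args.value : 0;
}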
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4d8452c2384b..c5eaf2f80a4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1056,7 +1056,7 @@ static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
return NULL;
}
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
= { (void *) table_id, \
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 000000000000..f6505d83069d
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * The FIELD_{GET,PREP} macros take a shifted mask as their first
+ * parameter, from which they extract the base mask and the shift
+ * amount. The mask must be a compile-time constant.
+ *
+ * Example:
+ *
+ * #define REG_FIELD_A GENMASK(6, 0)
+ * #define REG_FIELD_B BIT(7)
+ * #define REG_FIELD_C GENMASK(15, 8)
+ * #define REG_FIELD_D GENMASK(31, 16)
+ *
+ * Get:
+ * a = FIELD_GET(REG_FIELD_A, reg);
+ * b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ * reg = FIELD_PREP(REG_FIELD_A, 1) |
+ * FIELD_PREP(REG_FIELD_B, 0) |
+ * FIELD_PREP(REG_FIELD_C, c) |
+ * FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ * reg &= ~REG_FIELD_C;
+ * reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+ ({ \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
+ _pfx "mask is not constant"); \
+ BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
+ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ _pfx "value too large for the field"); \
+ BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ _pfx "type of reg too small for mask"); \
+ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
+ (1ULL << __bf_shf(_mask))); \
+ })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: value of the entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+
+#endif
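
A minimal usage sketch for the new header; the register layout and the demo_* names are hypothetical, and GENMASK()/BIT() come from <linux/bitops.h>:

#include <linux/bitfield.h>
#include <linux/bitops.h>

#define DEMO_PORT	GENMASK(15, 8)	/* hypothetical register fields */
#define DEMO_DUPLEX	BIT(4)
#define DEMO_SPEED	GENMASK(3, 0)

static u32 demo_pack(u32 port, bool duplex, u32 speed)
{
	/*
	 * Constant out-of-range values trip BUILD_BUG_ON_MSG() at
	 * compile time; non-constant values are simply masked.
	 */
	return FIELD_PREP(DEMO_PORT, port) |
	       FIELD_PREP(DEMO_DUPLEX, duplex) |
	       FIELD_PREP(DEMO_SPEED, speed);
}

static u32 demo_speed(u32 reg)
{
	return FIELD_GET(DEMO_SPEED, reg);
}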
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9a904f63f8c1..5691fdc83819 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
+ bool pkt_access;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
@@ -151,7 +152,8 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type);
-
+ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+ const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..c5cb661712c9
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+
+struct bpf_reg_state {
+ enum bpf_reg_type type;
+ union {
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+ s64 imm;
+
+ /* valid when type == PTR_TO_PACKET* */
+ struct {
+ u32 id;
+ u16 off;
+ u16 range;
+ };
+
+ /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+ * PTR_TO_MAP_VALUE_OR_NULL
+ */
+ struct bpf_map *map_ptr;
+ };
+};
+
+enum bpf_stack_slot_type {
+ STACK_INVALID, /* nothing was stored in this stack slot */
+ STACK_SPILL, /* register spilled into stack */
+ STACK_MISC /* BPF program wrote some data into this slot */
+};
+
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_verifier_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ u8 stack_slot_type[MAX_BPF_STACK];
+ struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+};
+
+/* linked list of verifier states used to prune search */
+struct bpf_verifier_state_list {
+ struct bpf_verifier_state state;
+ struct bpf_verifier_state_list *next;
+};
+
+struct bpf_insn_aux_data {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+};
+
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
+struct bpf_verifier_env;
+struct bpf_ext_analyzer_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+/* single container for all structs
+ * one verifier_env per bpf_check() call
+ */
+struct bpf_verifier_env {
+ struct bpf_prog *prog; /* eBPF program being verified */
+ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
+ int stack_size; /* number of states to be processed */
+ struct bpf_verifier_state cur_state; /* current verifier state */
+ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+ const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+ void *analyzer_priv; /* pointer to external analyzer's private data */
+ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
+ bool seen_direct_write;
+ struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+};
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
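
A sketch of an external analyzer built on the new interface; the demo_* names are hypothetical, and the hook merely counts the instructions the verifier walks:

#include <linux/bpf_verifier.h>

static int demo_insn_hook(struct bpf_verifier_env *env,
			  int insn_idx, int prev_insn_idx)
{
	int *count = env->analyzer_priv;	/* priv passed to bpf_analyzer() */

	(*count)++;
	return 0;	/* a non-zero return aborts the walk */
}

static const struct bpf_ext_analyzer_ops demo_ops = {
	.insn_hook	= demo_insn_hook,
};

static int demo_analyze(struct bpf_prog *prog)
{
	int count = 0;

	return bpf_analyzer(prog, &demo_ops, &count);
}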
diff --git a/include/linux/bug.h b/include/linux/bug.h
index e51b0709e78d..292d6a10b0c2 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
struct pt_regs;
#ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -24,6 +25,8 @@ struct pt_regs;
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
index 82c3d3b7269d..138bbf721e70 100644
--- a/include/linux/cec-funcs.h
+++ b/include/linux/cec-funcs.h
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
/* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg)
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_RECORD_OFF;
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
*msg++ = (digital->channel.channel_number_fmt << 2) |
(digital->channel.major >> 8);
- *msg++ = digital->channel.major && 0xff;
+ *msg++ = digital->channel.major & 0xff;
*msg++ = digital->channel.minor >> 8;
*msg++ = digital->channel.minor & 0xff;
*msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
}
static inline void cec_msg_record_on(struct cec_msg *msg,
+ bool reply,
const struct cec_op_record_src *rec_src)
{
switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
rec_src->ext_phys_addr.phys_addr);
break;
}
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
}
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+ __u8 size, const __u8 *vendor_cmd)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+ memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+ __u32 vendor_id, __u8 size,
+ const __u8 *vendor_cmd)
+{
+ if (size > 11)
+ size = 11;
+ msg->len = 5 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+ memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+ __u32 *vendor_id, __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 5;
+
+ if (*size > 11)
+ *size = 11;
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+ *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+ __u8 size,
+ const __u8 *rc_code)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+ memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **rc_code)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *rc_code = msg->msg + 2;
+}
+
static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
{
msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
- msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
+ msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
break;
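
A sketch of the new 'reply' argument in use; demo_* is hypothetical, and CEC_OP_RECORD_SRC_OWN is assumed to be the own-source record constant from the CEC uapi header:

static void demo_build_record_on(struct cec_msg *msg)
{
	const struct cec_op_record_src rec_src = {
		.type = CEC_OP_RECORD_SRC_OWN,
	};

	/* reply == true asks the framework to wait for RECORD_STATUS */
	cec_msg_record_on(msg, true, &rec_src);
}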
diff --git a/include/linux/cec.h b/include/linux/cec.h
index b3e22893a002..851968e803fa 100644
--- a/include/linux/cec.h
+++ b/include/linux/cec.h
@@ -364,7 +364,7 @@ struct cec_caps {
* @num_log_addrs: how many logical addresses should be claimed. Set by the
* caller.
* @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: set to 0.
+ * @flags: CEC_LOG_ADDRS_FL_* flags. Set by the caller.
* @osd_name: the OSD name of the device. Set by the caller.
* @primary_device_type: the primary device type for each logical address.
* Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
__u8 features[CEC_MAX_LOG_ADDRS][12];
};
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+
/* Events */
/* Event that occurs when the adapter state changes */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 8dbc8929a6a0..573c5a18908f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -158,7 +158,7 @@
#define __compiler_offsetof(a, b) \
__builtin_offsetof(a, b)
-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 436aa4e42221..668569844d37 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*
- * The seemingly unused size_t variable is to validate @p is indeed a pointer
- * type by making sure it can be dereferenced.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type by using a pointer to typeof(*p) as the type.
+ * Taking a pointer to typeof(*p) again is needed in case p is void *.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
- size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \
+ typeof(*(p)) *___typecheck_p __maybe_unused; \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
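
A sketch of the type check at a call site; the dummy ___typecheck_p declaration compiles for any pointer-typed argument (including void *) but breaks the build for non-pointers. The demo_* names are hypothetical:

struct demo_item {
	int val;
};

static struct demo_item *demo_current;	/* published by some writer */

static int demo_read_val(void)
{
	/* Compiles because demo_current is a pointer; an integer
	 * argument would fail the typeof(*(p)) * declaration. */
	struct demo_item *p = lockless_dereference(demo_current);

	return p ? p->val : 0;
}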
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 242bf530edfc..34bd80512a0c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,6 +1,8 @@
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
+#include <linux/types.h>
+
enum cpuhp_state {
CPUHP_OFFLINE,
CPUHP_CREATE_THREADS,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7f5a58225385..0148a3046b48 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -118,6 +118,15 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
+
/*
* EFI capsule flags
*/
@@ -946,7 +955,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
- ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
+ (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
(md) = (void *)(md) + (m)->desc_size)
/**
@@ -1371,11 +1380,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr);
+ struct efi_boot_memmap *map);
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
@@ -1457,4 +1462,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
arch_efi_call_virt_teardown(); \
})
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
#endif /* _LINUX_EFI_H */
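
A sketch of the new calling convention, modeled on how an EFI stub loader might use it; the demo_* names are hypothetical and the callback runs against the final memory-map snapshot taken before boot services are torn down:

static efi_status_t demo_priv_func(efi_system_table_t *sys_table_arg,
				   struct efi_boot_memmap *map,
				   void *priv)
{
	unsigned long *nr_desc = priv;

	/* The map pointers are already filled in at this point. */
	*nr_desc = *map->map_size / *map->desc_size;
	return EFI_SUCCESS;
}

static efi_status_t demo_exit_boot(efi_system_table_t *sys_table, void *handle)
{
	struct efi_boot_memmap map;
	efi_memory_desc_t *memory_map;
	unsigned long map_size, desc_size, key, buff_size, nr_desc;
	u32 desc_ver;
	efi_status_t status;

	map.map = &memory_map;
	map.map_size = &map_size;
	map.desc_size = &desc_size;
	map.desc_ver = &desc_ver;
	map.key_ptr = &key;
	map.buff_size = &buff_size;

	status = efi_get_memory_map(sys_table, &map);
	if (status != EFI_SUCCESS)
		return status;

	/* The helper owns the final get-map/ExitBootServices handshake. */
	return efi_exit_boot_services(sys_table, handle, &map,
				      &nr_desc, demo_priv_func);
}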
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 8cc719a63728..2ac6fa5f4712 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -49,8 +49,6 @@ struct fence_cb;
* @timestamp: Timestamp when the fence was signaled.
* @status: Optional, only valid if < 0, must be set before calling
* fence_signal, indicates that the fence has completed with an error.
- * @child_list: list of children fences
- * @active_list: list of active fences
*
* the flags member must be manipulated and read using the appropriate
* atomic ops (bit_*), so taking the spinlock will not be needed most
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a16439b99fd9..1f09c521adfe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -314,6 +314,70 @@ struct bpf_prog_aux;
bpf_size; \
})
+#define BPF_SIZEOF(type) \
+ ({ \
+ const int __size = bytes_to_bpf_size(sizeof(type)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define BPF_FIELD_SIZEOF(type, field) \
+ ({ \
+ const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ BUILD_BUG_ON(__size < 0); \
+ __size; \
+ })
+
+#define __BPF_MAP_0(m, v, ...) v
+#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
+#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
+#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
+#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
+#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
+
+#define __BPF_REG_0(...) __BPF_PAD(5)
+#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
+#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
+#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
+#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
+#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
+
+#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
+#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
+
+#define __BPF_CAST(t, a) \
+ (__force t) \
+ (__force \
+ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
+ (unsigned long)0, (t)0))) a
+#define __BPF_V void
+#define __BPF_N
+
+#define __BPF_DECL_ARGS(t, a) t a
+#define __BPF_DECL_REGS(t, a) u64 a
+
+#define __BPF_PAD(n) \
+ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
+ u64, __ur_3, u64, __ur_4, u64, __ur_5)
+
+#define BPF_CALL_x(x, name, ...) \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ { \
+ return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ } \
+ static __always_inline \
+ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
+
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
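
A sketch of a helper defined with the new BPF_CALL_x() wrappers; the demo names are hypothetical. The macro emits the five-u64-register prototype that struct bpf_func_proto::func expects while letting the body use natural types:

BPF_CALL_2(bpf_demo_add, u32, a, u32, b)
{
	/* Arguments arrive as u64 registers and are cast down here. */
	return (u64)a + b;
}

static const struct bpf_func_proto bpf_demo_add_proto = {
	.func		= bpf_demo_add,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};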
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3523bf62f328..901e25d495cc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -574,6 +574,7 @@ static inline void mapping_allow_writable(struct address_space *mapping)
struct posix_acl;
#define ACL_NOT_CACHED ((void *)(-1))
+#define ACL_DONT_CACHE ((void *)(-3))
static inline struct posix_acl *
uncached_acl_sentinel(struct task_struct *task)
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
index cfa6cde25f8e..76cff18bb032 100644
--- a/include/linux/fscrypto.h
+++ b/include/linux/fscrypto.h
@@ -274,8 +274,7 @@ extern void fscrypt_restore_control_page(struct page *);
extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
unsigned int);
/* policy.c */
-extern int fscrypt_process_policy(struct inode *,
- const struct fscrypt_policy *);
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
@@ -345,7 +344,7 @@ static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
}
/* policy.c */
-static inline int fscrypt_notsupp_process_policy(struct inode *i,
+static inline int fscrypt_notsupp_process_policy(struct file *f,
const struct fscrypt_policy *p)
{
return -EOPNOTSUPP;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 58205f33af02..7268ed076be8 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -148,6 +148,7 @@ struct fsnotify_group {
#define FS_PRIO_1 1 /* fanotify content based access control */
#define FS_PRIO_2 2 /* fanotify pre-content access */
unsigned int priority;
+ bool shutdown; /* group is being shut down, don't queue more events */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
@@ -179,7 +180,6 @@ struct fsnotify_group {
spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
- atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
@@ -292,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@@ -304,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b01c8c3dd531..5df444b1ac18 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1429,4 +1429,88 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
return false;
}
+/*
+ * An API to support in-place processing of incoming VMBUS packets.
+ */
+#define VMBUS_PKT_TRAILER 8
+
+static inline struct vmpacket_descriptor *
+get_next_pkt_raw(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+ u32 read_loc = ring_info->priv_read_index;
+ void *ring_buffer = hv_get_ring_buffer(ring_info);
+ struct vmpacket_descriptor *cur_desc;
+ u32 packetlen;
+ u32 dsize = ring_info->ring_datasize;
+ u32 delta = read_loc - ring_info->ring_buffer->read_index;
+ u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
+
+ if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
+ return NULL;
+
+ if ((read_loc + sizeof(*cur_desc)) > dsize)
+ return NULL;
+
+ cur_desc = ring_buffer + read_loc;
+ packetlen = cur_desc->len8 << 3;
+
+ /*
+ * If the packet under consideration is wrapping around,
+ * return failure.
+ */
+ if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > (dsize - 1))
+ return NULL;
+
+ return cur_desc;
+}
+
+/*
+ * A helper function to step through packets "in-place".
+ * This API is to be called after each successful call to
+ * get_next_pkt_raw().
+ */
+static inline void put_pkt_raw(struct vmbus_channel *channel,
+ struct vmpacket_descriptor *desc)
+{
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+ u32 read_loc = ring_info->priv_read_index;
+ u32 packetlen = desc->len8 << 3;
+ u32 dsize = ring_info->ring_datasize;
+
+ if ((read_loc + packetlen + VMBUS_PKT_TRAILER) > dsize)
+ BUG();
+ /*
+ * Include the packet trailer.
+ */
+ ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
+}
+
+/*
+ * This call commits the read index and potentially signals the host.
+ * Here is the pattern for using the "in-place" consumption APIs:
+ *
+ * while (get_next_pkt_raw()) {
+ * process the packet "in-place";
+ * put_pkt_raw();
+ * }
+ * if (packets processed in place)
+ * commit_rd_index();
+ */
+static inline void commit_rd_index(struct vmbus_channel *channel)
+{
+ struct hv_ring_buffer_info *ring_info = &channel->inbound;
+ /*
+ * Make sure all reads are done before we update the read index since
+ * the writer may start writing to the read area once the read index
+ * is updated.
+ */
+ virt_rmb();
+ ring_info->ring_buffer->read_index = ring_info->priv_read_index;
+
+ if (hv_need_to_signal_on_read(ring_info))
+ vmbus_set_event(channel);
+}
+
+
#endif /* _HYPERV_H */
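
The comment block above documents the consumption pattern; spelled out as a sketch, with demo_process_pkt() standing in for a hypothetical caller-supplied handler:

static void demo_process_pkt(struct vmpacket_descriptor *desc)
{
	/* hypothetical handler: consume the packet in place */
}

static void demo_drain_channel(struct vmbus_channel *channel)
{
	struct vmpacket_descriptor *desc;
	bool processed = false;

	while ((desc = get_next_pkt_raw(channel)) != NULL) {
		demo_process_pkt(desc);		/* no copy out of the ring */
		put_pkt_raw(channel, desc);	/* advance the private index */
		processed = true;
	}

	if (processed)
		commit_rd_index(channel);	/* publish, maybe signal host */
}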
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
__u32 max_tx_rate;
__u32 rss_query_en;
__u32 trusted;
+ __be16 vlan_proto;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 49d4aef1f789..3319d97d789d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -272,6 +272,23 @@ static inline int vlan_get_encap_level(struct net_device *dev)
}
#endif
+/**
+ * eth_type_vlan - check for valid vlan ether type.
+ * @ethertype: ether type to check
+ *
+ * Returns true if the ether type is a vlan ether type.
+ */
+static inline bool eth_type_vlan(__be16 ethertype)
+{
+ switch (ethertype) {
+ case htons(ETH_P_8021Q):
+ case htons(ETH_P_8021AD):
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline bool vlan_hw_offload_capable(netdev_features_t features,
__be16 proto)
{
@@ -425,8 +442,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
- if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
- veth->h_vlan_proto != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(veth->h_vlan_proto))
return -EINVAL;
*vlan_tci = ntohs(veth->h_vlan_TCI);
@@ -488,7 +504,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
* present at mac_len - VLAN_HLEN (if mac_len > 0), or at
* ETH_HLEN otherwise
*/
- if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (eth_type_vlan(type)) {
if (vlan_depth) {
if (WARN_ON(vlan_depth < VLAN_HLEN))
return 0;
@@ -506,8 +522,7 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
vh = (struct vlan_hdr *)(skb->data + vlan_depth);
type = vh->h_vlan_encapsulated_proto;
vlan_depth += VLAN_HLEN;
- } while (type == htons(ETH_P_8021Q) ||
- type == htons(ETH_P_8021AD));
+ } while (eth_type_vlan(type));
}
if (depth)
@@ -572,8 +587,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
if (!skb_vlan_tag_present(skb) &&
- likely(skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)))
+ likely(!eth_type_vlan(skb->protocol)))
return false;
return true;
@@ -593,15 +607,14 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
if (!skb_vlan_tag_present(skb)) {
struct vlan_ethhdr *veh;
- if (likely(protocol != htons(ETH_P_8021Q) &&
- protocol != htons(ETH_P_8021AD)))
+ if (likely(!eth_type_vlan(protocol)))
return false;
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
- if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ if (!eth_type_vlan(protocol))
return false;
return true;
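
A sketch of the new helper at a call site the hunks above do not already cover; it assumes the Ethernet header sits in the linear area at skb->data, and the demo name is hypothetical:

static __be16 demo_payload_proto(const struct sk_buff *skb)
{
	const struct vlan_ethhdr *veth =
		(const struct vlan_ethhdr *)skb->data;

	/* One test now covers both 802.1Q and 802.1ad tag types. */
	if (!eth_type_vlan(veth->h_vlan_proto))
		return veth->h_vlan_proto;

	return veth->h_vlan_encapsulated_proto;
}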
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 5198f8ed08a4..c97eab67558f 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -62,7 +62,7 @@ void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
struct config_item_type *type)
{
-#ifdef CONFIG_CONFIGFS_FS
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
config_group_init_type_name(&t->group, name, type);
#endif
}
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index feb04ea20f11..65da430e260f 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -37,7 +37,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 pid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh);
+ const struct nlmsghdr *unlh, bool net_admin);
void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
@@ -56,7 +56,7 @@ void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns);
+ struct user_namespace *user_ns, bool net_admin);
extern int inet_diag_register(const struct inet_diag_handler *handler);
extern void inet_diag_unregister(const struct inet_diag_handler *handler);
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 3267df461012..3d70ece10313 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,6 +19,11 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
/*
+ * Flags for iomap mappings:
+ */
+#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
+
+/*
* Magic value for blkno:
*/
#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
@@ -27,7 +32,8 @@ struct iomap {
sector_t blkno; /* 1st sector of mapping, 512b units */
loff_t offset; /* file offset of mapping, bytes */
u64 length; /* length of mapping, bytes */
- int type; /* type of mapping */
+ u16 type; /* type of mapping */
+ u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b52424eaa0ed..0ac26c892fe2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+/*
+ * The irqsave variants are for use in non-interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+ raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags) \
+ raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
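
A sketch of the irqsave variants in a process-context path (suspend/resume style); the register offset and the demo name are hypothetical:

static void demo_gc_restore(struct irq_chip_generic *gc, u32 mask)
{
	unsigned long flags;

	/* Not for irq_chip callbacks -- those use irq_gc_lock(). */
	irq_gc_lock_irqsave(gc, flags);
	irq_reg_writel(gc, mask, 0x0);	/* hypothetical register offset */
	irq_gc_unlock_irqrestore(gc, flags);
}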
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2b6a204bd8d4..aa118bad1407 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -231,6 +231,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
+static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
+}
+
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4429d255c8ab..5e5b2969d931 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -195,6 +195,7 @@ static inline bool vma_migratable(struct vm_area_struct *vma)
}
extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+extern void mpol_put_task_policy(struct task_struct *);
#else
@@ -297,5 +298,8 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
return -1; /* no node preference */
}
+static inline void mpol_put_task_policy(struct task_struct *task)
+{
+}
#endif /* CONFIG_NUMA */
#endif
diff --git a/include/linux/mfd/da8xx-cfgchip.h b/include/linux/mfd/da8xx-cfgchip.h
new file mode 100644
index 000000000000..304985e288d2
--- /dev/null
+++ b/include/linux/mfd/da8xx-cfgchip.h
@@ -0,0 +1,153 @@
+/*
+ * TI DaVinci DA8xx CHIPCFGx registers for syscon consumers.
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_DA8XX_CFGCHIP_H
+#define __LINUX_MFD_DA8XX_CFGCHIP_H
+
+#include <linux/bitops.h>
+
+/* register offset (32-bit registers) */
+#define CFGCHIP(n) ((n) * 4)
+
+/* CFGCHIP0 (PLL0/EDMA3_0) register bits */
+#define CFGCHIP0_PLL_MASTER_LOCK BIT(4)
+#define CFGCHIP0_EDMA30TC1DBS(n) ((n) << 2)
+#define CFGCHIP0_EDMA30TC1DBS_MASK CFGCHIP0_EDMA30TC1DBS(0x3)
+#define CFGCHIP0_EDMA30TC1DBS_16 CFGCHIP0_EDMA30TC1DBS(0x0)
+#define CFGCHIP0_EDMA30TC1DBS_32 CFGCHIP0_EDMA30TC1DBS(0x1)
+#define CFGCHIP0_EDMA30TC1DBS_64 CFGCHIP0_EDMA30TC1DBS(0x2)
+#define CFGCHIP0_EDMA30TC0DBS(n) ((n) << 0)
+#define CFGCHIP0_EDMA30TC0DBS_MASK CFGCHIP0_EDMA30TC0DBS(0x3)
+#define CFGCHIP0_EDMA30TC0DBS_16 CFGCHIP0_EDMA30TC0DBS(0x0)
+#define CFGCHIP0_EDMA30TC0DBS_32 CFGCHIP0_EDMA30TC0DBS(0x1)
+#define CFGCHIP0_EDMA30TC0DBS_64 CFGCHIP0_EDMA30TC0DBS(0x2)
+
+/* CFGCHIP1 (eCAP/HPI/EDMA3_1/eHRPWM TBCLK/McASP0 AMUTEIN) register bits */
+#define CFGCHIP1_CAP2SRC(n) ((n) << 27)
+#define CFGCHIP1_CAP2SRC_MASK CFGCHIP1_CAP2SRC(0x1f)
+#define CFGCHIP1_CAP2SRC_ECAP_PIN CFGCHIP1_CAP2SRC(0x0)
+#define CFGCHIP1_CAP2SRC_MCASP0_TX CFGCHIP1_CAP2SRC(0x1)
+#define CFGCHIP1_CAP2SRC_MCASP0_RX CFGCHIP1_CAP2SRC(0x2)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP2SRC(0x7)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_RX CFGCHIP1_CAP2SRC(0x8)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_TX CFGCHIP1_CAP2SRC(0x9)
+#define CFGCHIP1_CAP2SRC_EMAC_C0_MISC CFGCHIP1_CAP2SRC(0xa)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xb)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_RX CFGCHIP1_CAP2SRC(0xc)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_TX CFGCHIP1_CAP2SRC(0xd)
+#define CFGCHIP1_CAP2SRC_EMAC_C1_MISC CFGCHIP1_CAP2SRC(0xe)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP2SRC(0xf)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_RX CFGCHIP1_CAP2SRC(0x10)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_TX CFGCHIP1_CAP2SRC(0x11)
+#define CFGCHIP1_CAP2SRC_EMAC_C2_MISC CFGCHIP1_CAP2SRC(0x12)
+#define CFGCHIP1_CAP1SRC(n) ((n) << 22)
+#define CFGCHIP1_CAP1SRC_MASK CFGCHIP1_CAP1SRC(0x1f)
+#define CFGCHIP1_CAP1SRC_ECAP_PIN CFGCHIP1_CAP1SRC(0x0)
+#define CFGCHIP1_CAP1SRC_MCASP0_TX CFGCHIP1_CAP1SRC(0x1)
+#define CFGCHIP1_CAP1SRC_MCASP0_RX CFGCHIP1_CAP1SRC(0x2)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP1SRC(0x7)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_RX CFGCHIP1_CAP1SRC(0x8)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_TX CFGCHIP1_CAP1SRC(0x9)
+#define CFGCHIP1_CAP1SRC_EMAC_C0_MISC CFGCHIP1_CAP1SRC(0xa)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xb)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_RX CFGCHIP1_CAP1SRC(0xc)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_TX CFGCHIP1_CAP1SRC(0xd)
+#define CFGCHIP1_CAP1SRC_EMAC_C1_MISC CFGCHIP1_CAP1SRC(0xe)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP1SRC(0xf)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_RX CFGCHIP1_CAP1SRC(0x10)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_TX CFGCHIP1_CAP1SRC(0x11)
+#define CFGCHIP1_CAP1SRC_EMAC_C2_MISC CFGCHIP1_CAP1SRC(0x12)
+#define CFGCHIP1_CAP0SRC(n) ((n) << 17)
+#define CFGCHIP1_CAP0SRC_MASK CFGCHIP1_CAP0SRC(0x1f)
+#define CFGCHIP1_CAP0SRC_ECAP_PIN CFGCHIP1_CAP0SRC(0x0)
+#define CFGCHIP1_CAP0SRC_MCASP0_TX CFGCHIP1_CAP0SRC(0x1)
+#define CFGCHIP1_CAP0SRC_MCASP0_RX CFGCHIP1_CAP0SRC(0x2)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX_THRESHOLD CFGCHIP1_CAP0SRC(0x7)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_RX CFGCHIP1_CAP0SRC(0x8)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_TX CFGCHIP1_CAP0SRC(0x9)
+#define CFGCHIP1_CAP0SRC_EMAC_C0_MISC CFGCHIP1_CAP0SRC(0xa)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xb)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_RX CFGCHIP1_CAP0SRC(0xc)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_TX CFGCHIP1_CAP0SRC(0xd)
+#define CFGCHIP1_CAP0SRC_EMAC_C1_MISC CFGCHIP1_CAP0SRC(0xe)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX_THRESHOLD CFGCHIP1_CAP0SRC(0xf)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_RX CFGCHIP1_CAP0SRC(0x10)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_TX CFGCHIP1_CAP0SRC(0x11)
+#define CFGCHIP1_CAP0SRC_EMAC_C2_MISC CFGCHIP1_CAP0SRC(0x12)
+#define CFGCHIP1_HPIBYTEAD BIT(16)
+#define CFGCHIP1_HPIENA BIT(15)
+#define CFGCHIP0_EDMA31TC0DBS(n) ((n) << 13)
+#define CFGCHIP0_EDMA31TC0DBS_MASK CFGCHIP0_EDMA31TC0DBS(0x3)
+#define CFGCHIP0_EDMA31TC0DBS_16 CFGCHIP0_EDMA31TC0DBS(0x0)
+#define CFGCHIP0_EDMA31TC0DBS_32 CFGCHIP0_EDMA31TC0DBS(0x1)
+#define CFGCHIP0_EDMA31TC0DBS_64 CFGCHIP0_EDMA31TC0DBS(0x2)
+#define CFGCHIP1_TBCLKSYNC BIT(12)
+#define CFGCHIP1_AMUTESEL0(n) ((n) << 0)
+#define CFGCHIP1_AMUTESEL0_MASK CFGCHIP1_AMUTESEL0(0xf)
+#define CFGCHIP1_AMUTESEL0_LOW CFGCHIP1_AMUTESEL0(0x0)
+#define CFGCHIP1_AMUTESEL0_BANK_0 CFGCHIP1_AMUTESEL0(0x1)
+#define CFGCHIP1_AMUTESEL0_BANK_1 CFGCHIP1_AMUTESEL0(0x2)
+#define CFGCHIP1_AMUTESEL0_BANK_2 CFGCHIP1_AMUTESEL0(0x3)
+#define CFGCHIP1_AMUTESEL0_BANK_3 CFGCHIP1_AMUTESEL0(0x4)
+#define CFGCHIP1_AMUTESEL0_BANK_4 CFGCHIP1_AMUTESEL0(0x5)
+#define CFGCHIP1_AMUTESEL0_BANK_5 CFGCHIP1_AMUTESEL0(0x6)
+#define CFGCHIP1_AMUTESEL0_BANK_6 CFGCHIP1_AMUTESEL0(0x7)
+#define CFGCHIP1_AMUTESEL0_BANK_7 CFGCHIP1_AMUTESEL0(0x8)
+
+/* CFGCHIP2 (USB PHY) register bits */
+#define CFGCHIP2_PHYCLKGD BIT(17)
+#define CFGCHIP2_VBUSSENSE BIT(16)
+#define CFGCHIP2_RESET BIT(15)
+#define CFGCHIP2_OTGMODE(n) ((n) << 13)
+#define CFGCHIP2_OTGMODE_MASK CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_OTGMODE_NO_OVERRIDE CFGCHIP2_OTGMODE(0x0)
+#define CFGCHIP2_OTGMODE_FORCE_HOST CFGCHIP2_OTGMODE(0x1)
+#define CFGCHIP2_OTGMODE_FORCE_DEVICE CFGCHIP2_OTGMODE(0x2)
+#define CFGCHIP2_OTGMODE_FORCE_HOST_VBUS_LOW CFGCHIP2_OTGMODE(0x3)
+#define CFGCHIP2_USB1PHYCLKMUX BIT(12)
+#define CFGCHIP2_USB2PHYCLKMUX BIT(11)
+#define CFGCHIP2_PHYPWRDN BIT(10)
+#define CFGCHIP2_OTGPWRDN BIT(9)
+#define CFGCHIP2_DATPOL BIT(8)
+#define CFGCHIP2_USB1SUSPENDM BIT(7)
+#define CFGCHIP2_PHY_PLLON BIT(6)
+#define CFGCHIP2_SESENDEN BIT(5)
+#define CFGCHIP2_VBDTCTEN BIT(4)
+#define CFGCHIP2_REFFREQ(n) ((n) << 0)
+#define CFGCHIP2_REFFREQ_MASK CFGCHIP2_REFFREQ(0xf)
+#define CFGCHIP2_REFFREQ_12MHZ CFGCHIP2_REFFREQ(0x1)
+#define CFGCHIP2_REFFREQ_24MHZ CFGCHIP2_REFFREQ(0x2)
+#define CFGCHIP2_REFFREQ_48MHZ CFGCHIP2_REFFREQ(0x3)
+#define CFGCHIP2_REFFREQ_19_2MHZ CFGCHIP2_REFFREQ(0x4)
+#define CFGCHIP2_REFFREQ_38_4MHZ CFGCHIP2_REFFREQ(0x5)
+#define CFGCHIP2_REFFREQ_13MHZ CFGCHIP2_REFFREQ(0x6)
+#define CFGCHIP2_REFFREQ_26MHZ CFGCHIP2_REFFREQ(0x7)
+#define CFGCHIP2_REFFREQ_20MHZ CFGCHIP2_REFFREQ(0x8)
+#define CFGCHIP2_REFFREQ_40MHZ CFGCHIP2_REFFREQ(0x9)
+
+/* CFGCHIP3 (EMAC/uPP/PLL1/ASYNC3/PRU/DIV4.5/EMIFA) register bits */
+#define CFGCHIP3_RMII_SEL BIT(8)
+#define CFGCHIP3_UPP_TX_CLKSRC BIT(6)
+#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5)
+#define CFGCHIP3_ASYNC3_CLKSRC BIT(4)
+#define CFGCHIP3_PRUEVTSEL BIT(3)
+#define CFGCHIP3_DIV45PENA BIT(2)
+#define CFGCHIP3_EMA_CLKSRC BIT(1)
+
+/* CFGCHIP4 (McASP0 AMUNTEIN) register bits */
+#define CFGCHIP4_AMUTECLR0 BIT(0)
+
+#endif /* __LINUX_MFD_DA8XX_CFGCHIP_H */
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 2567a87872b0..7f55b8b41032 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -138,16 +138,16 @@
/*
* time in us for processing a single channel, calculated as follows:
*
- * num cycles = open delay + (sample delay + conv time) * averaging
+ * max num cycles = open delay + (sample delay + conv time) * averaging
*
- * num cycles: 152 + (1 + 13) * 16 = 376
+ * max num cycles: 262143 + (255 + 13) * 16 = 266431
*
* clock frequency: 26MHz / 8 = 3.25MHz
* clock period: 1 / 3.25MHz = 308ns
*
- * processing time: 376 * 308ns = 116us
+ * max processing time: 266431 * 308ns = 83ms (approx)
*/
-#define IDLE_TIMEOUT 116 /* microsec */
+#define IDLE_TIMEOUT 83 /* milliseconds */
#define TSCADC_CELLS 2
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
+ u8 qos, __be16 proto);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..59b50d3eedb4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -221,6 +221,7 @@ enum {
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
+ MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
};
enum {
@@ -1371,6 +1372,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
enum { /* fl */
MLX4_FL_CV = 1 << 6,
+ MLX4_FL_SV = 1 << 5,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
@@ -267,6 +268,7 @@ enum {
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
+ MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
};
enum { /* param3 */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5cb9fa7aec61..85c4786427e4 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -828,8 +828,6 @@ void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-int mlx5_sriov_init(struct mlx5_core_dev *dev);
-int mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
@@ -932,6 +930,8 @@ enum {
struct mlx5_interface {
void * (*add)(struct mlx5_core_dev *dev);
void (*remove)(struct mlx5_core_dev *dev, void *context);
+ int (*attach)(struct mlx5_core_dev *dev, void *context);
+ void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
void * (*get_dev)(void *context);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 73a720f74a69..6045d4d58065 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -6837,9 +6837,10 @@ struct mlx5_ifc_pude_reg_bits {
};
struct mlx5_ifc_ptys_reg_bits {
- u8 an_disable_cap[0x1];
+ u8 reserved_at_0[0x1];
u8 an_disable_admin[0x1];
- u8 reserved_at_2[0x6];
+ u8 an_disable_cap[0x1];
+ u8 reserved_at_3[0x5];
u8 local_port[0x8];
u8 reserved_at_10[0xd];
u8 proto_mask[0x3];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08ed53eeedd5..ef815b9cd426 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2014,6 +2014,7 @@ extern void mm_drop_all_locks(struct mm_struct *mm);
extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern struct file *get_task_exe_file(struct task_struct *task);
extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 0d126aeb3ec0..d43ef96bf075 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -32,6 +32,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+#define SDIO_DEVICE_ID_BROADCOM_4339 0x4339
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d572b78b65e1..7f2ae99e5daf 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -828,9 +828,21 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
-static inline int populated_zone(struct zone *zone)
+/*
+ * Returns true if a zone has pages managed by the buddy allocator.
+ * All the reclaim decisions have to use this function rather than
+ * populated_zone(). If the whole zone is reserved then we can easily
+ * end up with populated_zone() && !managed_zone().
+ */
+static inline bool managed_zone(struct zone *zone)
+{
+ return zone->managed_pages;
+}
+
+/* Returns true if a zone has memory */
+static inline bool populated_zone(struct zone *zone)
{
- return (!!zone->present_pages);
+ return zone->present_pages;
}
extern int movable_zone;
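
A sketch of the distinction the new helper draws for reclaim-style walks; the demo name is hypothetical:

static unsigned long demo_scan_node(pg_data_t *pgdat)
{
	unsigned long candidates = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		/* A fully reserved zone is populated but not managed. */
		if (!managed_zone(zone))
			continue;
		candidates += zone->managed_pages;
	}
	return candidates;
}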
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 41f49f5ab62a..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -789,6 +789,7 @@ enum {
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_MATCHALL,
+ TC_SETUP_CLSBPF,
};
struct tc_cls_u32_offload;
@@ -800,6 +801,7 @@ struct tc_to_netdev {
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
+ struct tc_cls_bpf_offload *cls_bpf;
};
};
@@ -924,6 +926,14 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
+ * bool (*ndo_has_offload_stats)(int attr_id)
+ * Return true if this device supports offload stats of this attr_id.
+ *
+ * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
+ * void *attr_data)
+ * Get statistics for offload operations by attr_id. Write them into
+ * the attr_data pointer.
+ *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is registered.
@@ -936,7 +946,8 @@ struct netdev_xdp {
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
- * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
+ * u8 qos, __be16 proto);
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1155,6 +1166,10 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+ bool (*ndo_has_offload_stats)(int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+ void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1173,7 +1188,8 @@ struct net_device_ops {
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
- int queue, u16 vlan, u8 qos);
+ int queue, u16 vlan,
+ u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
@@ -3266,6 +3282,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
napi->skb = NULL;
}
+bool netdev_is_rx_handler_busy(struct net_device *dev);
int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
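
A sketch of the new netdev_is_rx_handler_busy() probe, assuming the caller holds RTNL (ASSERT_RTNL() comes from <linux/rtnetlink.h>); the demo_* names are hypothetical:

static rx_handler_result_t demo_rx_handler(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;		/* hypothetical no-op handler */
}

static int demo_claim_dev(struct net_device *dev, void *priv)
{
	ASSERT_RTNL();

	/* Probe first instead of learning about a conflict from -EBUSY. */
	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;

	return netdev_rx_handler_register(dev, demo_rx_handler, priv);
}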
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 44e20dac98a9..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -140,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index d8b37bab2887..7676557ce357 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -794,7 +794,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
- uuid_le hostid;
+ uuid_be hostid;
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 66a1260b33de..7e3d53753612 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -571,56 +571,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
- int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
- while (uaddr <= end) {
- ret = __put_user(0, uaddr);
- if (ret != 0)
- return ret;
+ do {
+ if (unlikely(__put_user(0, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
+ return __put_user(0, end);
- return ret;
+ return 0;
}
static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
- int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
- while (uaddr <= end) {
- ret = __get_user(c, uaddr);
- if (ret != 0)
- return ret;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
+
+ do {
+ if (unlikely(__get_user(c, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
- (void)c;
+ return __get_user(c, end);
}
- return ret;
+ return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
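
The rewritten prefault helpers above now return -EFAULT rather than a partial count and reject address-wrapping ranges up front; a sketch of the usual prefault-then-copy pattern, with the demo name hypothetical:

static long demo_prepare_read(const char __user *ubuf, int len)
{
	/* Prefault so a later copy under pagefault_disable() is
	 * unlikely to fault; failure here is now plain -EFAULT. */
	if (fault_in_multipages_readable(ubuf, len))
		return -EFAULT;

	return 0;
}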
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fbc1fa625c3e..0ab835965669 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -683,15 +683,6 @@ struct pci_driver {
#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
/**
- * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
- * @_table: device table name
- *
- * This macro is deprecated and should not be used in new code.
- */
-#define DEFINE_PCI_DEVICE_TABLE(_table) \
- const struct pci_device_id _table[]
-
-/**
* PCI_DEVICE - macro used to describe a specific pci device
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
+ return "trgmii";
default:
return "unknown";
}
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
*
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or an ERR_PTR() on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
*/
extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 70b30e4d3cc4..19027635df0d 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -143,6 +143,9 @@
#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
#define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS)
+/* Tools Version */
+#define TOOLS_VERSION 10
+
/*****************/
/* CDU CONSTANTS */
/*****************/
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index d8dc5c2243d5..e4546abcea08 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -455,6 +455,10 @@ struct qed_common_ops {
void (*simd_handler_clean)(struct qed_dev *cdev,
int index);
+ int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+
+ int (*dbg_all_data_size) (struct qed_dev *cdev);
+
/**
* @brief can_link_change - can the instance change the link or not
*
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index fd82584acd48..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
@@ -53,6 +53,11 @@ struct rhash_head {
struct rhash_head __rcu *next;
};
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
+ bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
};
/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhashtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
* @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
};
/**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
* @ht: Table to iterate through
* @p: Current pointer
+ * @list: Current hash list pointer
* @walker: Associated rhashtable walker
* @slot: Current slot
* @skip: Number of entries to skip in slot
@@ -173,6 +189,7 @@ struct rhashtable_walker {
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
+ struct rhlist_head *list;
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
@@ -339,13 +356,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl,
- void **data);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj);
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
@@ -507,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list) \
+ for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ * @member: name of the &struct rhlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+ for (pos = list; pos && rht_entry(tpos, pos, member); \
+ pos = rcu_dereference_raw(pos->next))
+
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -516,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -539,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
struct rhash_head *he;
unsigned int hash;
- rcu_read_lock();
-
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
@@ -549,8 +577,7 @@ restart:
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
+ return he;
}
/* Ensure we see any new tables. */
@@ -559,96 +586,165 @@ restart:
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
- rcu_read_unlock();
return NULL;
}
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+ return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ void *obj;
+
+ rcu_read_lock();
+ obj = rhashtable_lookup(ht, key, params);
+ rcu_read_unlock();
+
+ return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+ return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
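
A hedged usage sketch for the new list-table lookup, combining rhltable_lookup() with rhl_for_each_entry_rcu(); the object type is hypothetical and params stands for its rhashtable_params.

struct example_obj {
	int key;
	struct rhlist_head node;
};

/* Must run under rcu_read_lock(), per the rhltable_lookup() contract. */
static void example_print_dups(struct rhltable *hlt, int key,
			       const struct rhashtable_params params)
{
	struct rhlist_head *list, *pos;
	struct example_obj *obj;

	rcu_read_lock();
	list = rhltable_lookup(hlt, &key, params);
	rhl_for_each_entry_rcu(obj, pos, list, node)
		pr_info("duplicate with key %d\n", obj->key);
	rcu_read_unlock();
}
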
+
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
*/
static inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
- struct bucket_table *tbl, *new_tbl;
+ struct rhash_head __rcu **pprev;
+ struct bucket_table *tbl;
struct rhash_head *head;
spinlock_t *lock;
- unsigned int elasticity;
unsigned int hash;
- void *data = NULL;
- int err;
+ int elasticity;
+ void *data;
-restart:
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
- /* All insertions must grab the oldest table containing
- * the hashed bucket that is yet to be rehashed.
- */
- for (;;) {
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
-
- if (tbl->rehash <= hash)
- break;
-
+ if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+slow_path:
spin_unlock_bh(lock);
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ rcu_read_unlock();
+ return rhashtable_insert_slow(ht, key, obj);
}
- new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
- if (!IS_ERR_OR_NULL(tbl))
- goto slow_path;
+ elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
+ rht_for_each(head, tbl, hash) {
+ struct rhlist_head *plist;
+ struct rhlist_head *list;
+
+ elasticity--;
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
+
+ data = rht_obj(ht, head);
- err = PTR_ERR(tbl);
- if (err == -EEXIST)
- err = 0;
+ if (!rhlist)
+ goto out;
- goto out;
- }
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
- if (unlikely(rht_grow_above_100(ht, tbl))) {
-slow_path:
- spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht, tbl);
- rcu_read_unlock();
- if (err)
- return ERR_PTR(err);
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
- goto restart;
+ goto good;
}
- err = 0;
- elasticity = ht->elasticity;
- rht_for_each(head, tbl, hash) {
- if (key &&
- unlikely(!(params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))) {
- data = rht_obj(ht, head);
- goto out;
- }
- if (!--elasticity)
- goto slow_path;
- }
+ if (elasticity <= 0)
+ goto slow_path;
+
+ data = ERR_PTR(-E2BIG);
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ goto slow_path;
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
@@ -656,11 +752,14 @@ slow_path:
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
+good:
+ data = NULL;
+
out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err ? ERR_PTR(err) : data;
+ return data;
}
/**
@@ -685,7 +784,7 @@ static inline int rhashtable_insert_fast(
{
void *ret;
- ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
if (IS_ERR(ret))
return PTR_ERR(ret);
@@ -693,6 +792,58 @@ static inline int rhashtable_insert_fast(
}
/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt: hash list table
+ * @key: the pointer to the key
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert_key(
+ struct rhltable *hlt, const void *key, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+ params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per bucket spinlock to protect against mutual mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark indicated by grow_decision() which can be passed
+ * to rhashtable_init().
+ */
+static inline int rhltable_insert(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+ key += params.key_offset;
+
+ return rhltable_insert_key(hlt, key, list, params);
+}
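
Continuing the hypothetical object above, a sketch of matching parameters and an insert wrapper; head_offset points at the embedded rhlist_head, whose first member is the rhash_head the core hashes on, and the table is assumed to have been set up with rhltable_init().

static const struct rhashtable_params example_params = {
	.key_len	     = sizeof(int),
	.key_offset	     = offsetof(struct example_obj, key),
	.head_offset	     = offsetof(struct example_obj, node),
	.automatic_shrinking = true,
};

static int example_add(struct rhltable *hlt, struct example_obj *obj)
{
	/* Duplicates of obj->key are allowed; they chain off obj->node. */
	return rhltable_insert(hlt, &obj->node, example_params);
}
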
+
+/**
* rhashtable_lookup_insert_fast - lookup and insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
@@ -722,7 +873,8 @@ static inline int rhashtable_lookup_insert_fast(
BUG_ON(ht->p.obj_hashfn);
- ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
if (IS_ERR(ret))
return PTR_ERR(ret);
@@ -759,7 +911,7 @@ static inline int rhashtable_lookup_insert_key(
BUG_ON(!ht->p.obj_hashfn || !key);
- ret = __rhashtable_insert_fast(ht, key, obj, params);
+ ret = __rhashtable_insert_fast(ht, key, obj, params, false);
if (IS_ERR(ret))
return PTR_ERR(ret);
@@ -783,13 +935,14 @@ static inline void *rhashtable_lookup_get_insert_key(
{
BUG_ON(!ht->p.obj_hashfn || !key);
- return __rhashtable_insert_fast(ht, key, obj, params);
+ return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj, const struct rhashtable_params params)
+ struct rhash_head *obj, const struct rhashtable_params params,
+ bool rhlist)
{
struct rhash_head __rcu **pprev;
struct rhash_head *he;
@@ -804,39 +957,66 @@ static inline int __rhashtable_remove_fast(
pprev = &tbl->buckets[hash];
rht_for_each(he, tbl, hash) {
+ struct rhlist_head *list;
+
+ list = container_of(he, struct rhlist_head, rhead);
+
if (he != obj) {
+ struct rhlist_head __rcu **lpprev;
+
pprev = &he->next;
- continue;
+
+ if (!rhlist)
+ continue;
+
+ do {
+ lpprev = &list->next;
+ list = rht_dereference_bucket(list->next,
+ tbl, hash);
+ } while (list && obj != &list->rhead);
+
+ if (!list)
+ continue;
+
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ RCU_INIT_POINTER(*lpprev, list);
+ err = 0;
+ break;
}
- rcu_assign_pointer(*pprev, obj->next);
- err = 0;
+ obj = rht_dereference_bucket(obj->next, tbl, hash);
+ err = 1;
+
+ if (rhlist) {
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ if (list) {
+ RCU_INIT_POINTER(list->rhead.next, obj);
+ obj = &list->rhead;
+ err = 0;
+ }
+ }
+
+ rcu_assign_pointer(*pprev, obj);
break;
}
spin_unlock_bh(lock);
+ if (err > 0) {
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+ err = 0;
+ }
+
return err;
}
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct bucket_table *tbl;
int err;
@@ -850,24 +1030,60 @@ static inline int rhashtable_remove_fast(
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
- while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+ rhlist)) &&
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
- if (err)
- goto out;
-
- atomic_dec(&ht->nelems);
- if (unlikely(ht->p.automatic_shrinking &&
- rht_shrink_below_30(ht, tbl)))
- schedule_work(&ht->run_work);
-
-out:
rcu_read_unlock();
return err;
}
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_expand() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
@@ -958,4 +1174,51 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
return 0;
}
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+ struct rhashtable_iter *iter)
+{
+ return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt: the hash list table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+ void (*free_fn)(void *ptr,
+ void *arg),
+ void *arg)
+{
+ return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+ return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
#endif /* _LINUX_RHASHTABLE_H */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 923266cd294a..48ec7651989b 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -111,7 +111,6 @@ struct uart_8250_port {
* if no_console_suspend
*/
unsigned char probe;
- struct mctrl_gpios *gpios;
#define UART_PROBE_RSA (1 << 0)
/*
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index cfb7219be665..9bf60b556bd2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -676,13 +676,23 @@ struct sk_buff {
*/
kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
+
+/* if you move the cloned bit around you must also adapt these constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CLONED_MASK (1 << 7)
+#else
+#define CLONED_MASK 1
+#endif
+#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+
+ __u8 __cloned_offset[0];
__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1;
- /* one bit hole */
+ xmit_more:1,
+ __unused:1; /* one bit hole */
kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
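
The zero-length __cloned_offset marker lets code that cannot use C bitfield access (e.g. generated instructions) locate the byte holding these flags; a hedged illustration of reading the cloned bit through it:

static inline bool example_skb_cloned_raw(const struct sk_buff *skb)
{
	const u8 *flags = (const u8 *)skb + CLONED_OFFSET();

	return *flags & CLONED_MASK;	/* matches the endian masks above */
}
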
@@ -2402,6 +2412,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
+void skb_rbtree_purge(struct rb_root *root);
+
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -3073,6 +3085,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 7be9b1242354..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
#include <linux/skbuff.h>
+#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 unused;
+ u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:7;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
- struct rtt_meas {
- u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
- } rtt_min[3];
+ struct minmax rtt_min;
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ struct skb_mstamp first_tx_mstamp; /* start of window send phase */
+ struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
@@ -281,10 +287,9 @@ struct tcp_sock {
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- /* OOO segments go in this list. Note that socket lock must be held,
- * as we do not use sk_buff_head lock.
- */
- struct sk_buff_head out_of_order_queue;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index cbd8990e2e77..2b5b10eed74f 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -118,10 +118,11 @@ static inline int arch_within_stack_frames(const void * const stack,
extern void __check_object_size(const void *ptr, unsigned long n,
bool to_user);
-static inline void check_object_size(const void *ptr, unsigned long n,
- bool to_user)
+static __always_inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
{
- __check_object_size(ptr, n, to_user);
+ if (!__builtin_constant_p(n))
+ __check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
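
The effect of the __builtin_constant_p() gate, sketched below: constant-size copies now cost nothing at runtime, while variable sizes still reach the hardened-usercopy walk.

static void example_sizes(const void *ptr, unsigned long n)
{
	check_object_size(ptr, 64, true);	/* constant: compiled away */
	check_object_size(ptr, n, true);	/* variable: runtime check */
}
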
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1b5d1cd796e2..75b4aaf31a9d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
+/**
+ * lib/minmax.c: windowed min/max tracker by Kathleen Nichols.
+ *
+ */
+#ifndef MINMAX_H
+#define MINMAX_H
+
+#include <linux/types.h>
+
+/* A single data point for our parameterized min-max tracker */
+struct minmax_sample {
+ u32 t; /* time measurement was taken */
+ u32 v; /* value measured */
+};
+
+/* State for the parameterized min-max tracker */
+struct minmax {
+ struct minmax_sample s[3];
+};
+
+static inline u32 minmax_get(const struct minmax *m)
+{
+ return m->s[0].v;
+}
+
+static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ m->s[2] = m->s[1] = m->s[0] = val;
+ return m->s[0].v;
+}
+
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
+
+#endif
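
A hedged sketch of the intended use: keep a windowed minimum of RTT samples, as the tcp.h hunk above now does with rtt_min; the names and 10-second window are illustrative only.

struct minmax example_rtt_min;	/* seed with minmax_reset() first */

static void example_rtt_sample(u32 now, u32 rtt_us)
{
	u32 win = 10 * HZ;	/* window length in the same time units */

	minmax_running_min(&example_rtt_min, win, now, rtt_us);
	pr_debug("windowed min RTT: %u us\n", minmax_get(&example_rtt_min));
}
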
diff --git a/include/media/cec.h b/include/media/cec.h
index dc7854b855f3..fdb5d600e4bb 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -57,8 +57,8 @@ struct cec_devnode {
int minor;
bool registered;
bool unregistered;
- struct mutex fhs_lock;
struct list_head fhs;
+ struct mutex lock;
};
struct cec_adapter;
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index b4b6a3664dda..1061a472a3e3 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -21,10 +21,14 @@ struct rxrpc_call;
typedef void (*rxrpc_notify_rx_t)(struct sock *, struct rxrpc_call *,
unsigned long);
-typedef void (*rxrpc_notify_new_call_t)(struct sock *);
+typedef void (*rxrpc_notify_new_call_t)(struct sock *, struct rxrpc_call *,
+ unsigned long);
+typedef void (*rxrpc_discard_new_call_t)(struct rxrpc_call *, unsigned long);
+typedef void (*rxrpc_user_attach_call_t)(struct rxrpc_call *, unsigned long);
void rxrpc_kernel_new_call_notification(struct socket *,
- rxrpc_notify_new_call_t);
+ rxrpc_notify_new_call_t,
+ rxrpc_discard_new_call_t);
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
struct sockaddr_rxrpc *,
struct key *,
@@ -35,12 +39,12 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
void *, size_t, size_t *, bool, u32 *);
-void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32);
+void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+ u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
-struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long,
- rxrpc_notify_rx_t);
-int rxrpc_kernel_reject_call(struct socket *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
+int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
+ rxrpc_user_attach_call_t, unsigned long, gfp_t);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 9b4c418bebd8..fd60eccb59a6 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -52,7 +52,7 @@ struct unix_sock {
struct sock sk;
struct unix_address *addr;
struct path path;
- struct mutex readlock;
+ struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
atomic_long_t inflight;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bfd1590821d6..0a1e21d7bce1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,7 +29,8 @@
#include <net/sock.h>
#include <linux/seq_file.h>
-#define BT_SUBSYS_VERSION "2.21"
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
@@ -371,6 +372,7 @@ void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
int hci_sock_test_flag(struct sock *sk, int nr);
unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
int hci_sock_init(void);
void hci_sock_cleanup(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 003b25283407..99aa5e5e3100 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -63,6 +63,7 @@
#define HCI_SDIO 6
#define HCI_SPI 7
#define HCI_I2C 8
+#define HCI_SMD 9
/* HCI controller types */
#define HCI_PRIMARY 0x00
@@ -207,7 +208,11 @@ enum {
HCI_MGMT_INDEX_EVENTS,
HCI_MGMT_UNCONF_INDEX_EVENTS,
HCI_MGMT_EXT_INDEX_EVENTS,
- HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
HCI_MGMT_OOB_DATA_EVENTS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ee7fc47680a1..f00bf667ec33 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -211,6 +211,7 @@ struct hci_dev {
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
@@ -399,7 +400,9 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
+#endif
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
@@ -1026,8 +1029,8 @@ int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
-void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1404,6 +1407,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
@@ -1449,6 +1455,7 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 587d0131b349..240786b04a46 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -45,6 +45,10 @@ struct hci_mon_hdr {
#define HCI_MON_VENDOR_DIAG 11
#define HCI_MON_SYSTEM_NOTE 12
#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7647964b1efa..72a456bbbcd5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -586,6 +586,24 @@ struct mgmt_rp_get_adv_size_info {
#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __u16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -800,3 +818,9 @@ struct mgmt_ev_advertising_added {
struct mgmt_ev_advertising_removed {
__u8 instance;
} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 9c23f4d33e06..bd26cc6e2d79 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1102,6 +1102,7 @@ struct station_info {
struct cfg80211_tid_stats pertid[IEEE80211_NUM_TIDS + 1];
};
+#if IS_ENABLED(CONFIG_CFG80211)
/**
* cfg80211_get_station - retrieve information about a given station
* @dev: the device where the station is supposed to be connected to
@@ -1114,6 +1115,14 @@ struct station_info {
*/
int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
struct station_info *sinfo);
+#else
+static inline int cfg80211_get_station(struct net_device *dev,
+ const u8 *mac_addr,
+ struct station_info *sinfo)
+{
+ return -ENOENT;
+}
+#endif
/**
* enum monitor_flags - monitor flags
@@ -2423,7 +2432,8 @@ struct cfg80211_qos_map {
* cases, the result of roaming is indicated with a call to
* cfg80211_roamed() or cfg80211_roamed_bss().
* (invoked with the wireless_dev mutex held)
- * @disconnect: Disconnect from the BSS/ESS.
+ * @disconnect: Disconnect from the BSS/ESS. Once done, call
+ * cfg80211_disconnected().
* (invoked with the wireless_dev mutex held)
*
* @join_ibss: Join the specified IBSS (or create if necessary). Once done, call
@@ -3946,6 +3956,34 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
+ * cfg80211_find_ie_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE where the byte array should match.
+ * If match_len is zero, this must also be set to zero.
+ * Otherwise this must be set to 2 or more, because the first
+ * byte is the element id, which is already compared to eid, and
+ * the second byte is the IE length.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match, or a pointer to the first
+ * byte of the requested element, that is the byte containing the
+ * element ID.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
+ const u8 *match, int match_len,
+ int match_offset);
+
+/**
* cfg80211_find_ie - find information element in data
*
* @eid: element ID
@@ -3960,7 +3998,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* Note: There are no checks on the element length other than
* having to fit into the given data.
*/
-const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len);
+static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
+}
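
A hedged example of the new matcher: find the first SSID element whose payload begins with a given byte; match_offset 2 skips the element ID and length bytes, as the kernel-doc above requires.

static const u8 *example_find_ssid_prefix(const u8 *ies, int len, u8 first)
{
	return cfg80211_find_ie_match(WLAN_EID_SSID, ies, len, &first, 1, 2);
}
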
/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 9d97c5214341..b122196d5a1f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -26,6 +26,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_TRAILER,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
+ DSA_TAG_PROTO_QCA,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -142,6 +143,7 @@ struct dsa_port {
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
+ u8 stp_state;
};
struct dsa_switch {
@@ -338,6 +340,7 @@ struct dsa_switch_ops {
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
/*
* VLAN support
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 5db9f5910428..6965c8f68ade 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -112,12 +112,13 @@ static inline struct ip_tunnel_info *skb_tunnel_info_unclone(struct sk_buff *skb
return &dst->u.tun_info;
}
-static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
- __be16 flags,
- __be64 tunnel_id,
- int md_size)
+static inline struct metadata_dst *__ip_tun_set_dst(__be32 saddr,
+ __be32 daddr,
+ __u8 tos, __u8 ttl,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
{
- const struct iphdr *iph = ip_hdr(skb);
struct metadata_dst *tun_dst;
tun_dst = tun_rx_dst(md_size);
@@ -125,17 +126,30 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
return NULL;
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ saddr, daddr, tos, ttl,
0, 0, 0, tunnel_id, flags);
return tun_dst;
}
-static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
__be16 flags,
__be64 tunnel_id,
int md_size)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ const struct iphdr *iph = ip_hdr(skb);
+
+ return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+ flags, tunnel_id, md_size);
+}
+
+static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u8 tos, __u8 ttl,
+ __be32 label,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
@@ -150,14 +164,26 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
info->key.tp_src = 0;
info->key.tp_dst = 0;
- info->key.u.ipv6.src = ip6h->saddr;
- info->key.u.ipv6.dst = ip6h->daddr;
+ info->key.u.ipv6.src = *saddr;
+ info->key.u.ipv6.dst = *daddr;
- info->key.tos = ipv6_get_dsfield(ip6h);
- info->key.ttl = ip6h->hop_limit;
- info->key.label = ip6_flowlabel(ip6h);
+ info->key.tos = tos;
+ info->key.ttl = ttl;
+ info->key.label = label;
return tun_dst;
}
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+ __be16 flags,
+ __be64 tunnel_id,
+ int md_size)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ return __ipv6_tun_set_dst(&ip6h->saddr, &ip6h->daddr,
+ ipv6_get_dsfield(ip6h), ip6h->hop_limit,
+ ip6_flowlabel(ip6h), flags, tunnel_id,
+ md_size);
+}
#endif /* __NET_DST_METADATA_H */
diff --git a/include/net/flow.h b/include/net/flow.h
index d47ef4bb5423..035aa7716967 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -34,8 +34,7 @@ struct flowi_common {
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
#define FLOWI_FLAG_KNOWN_NH 0x02
-#define FLOWI_FLAG_L3MDEV_SRC 0x04
-#define FLOWI_FLAG_SKIP_NH_OIF 0x08
+#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
struct flowi_tunnel flowic_tun_key;
};
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index b0fd9476c538..ba07b9d8ed63 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -190,6 +190,10 @@ struct ieee80211_radiotap_header {
* IEEE80211_RADIOTAP_VHT u16, u8, u8, u8[4], u8, u8, u16
*
* Contains VHT information about this frame.
+ *
+ * IEEE80211_RADIOTAP_TIMESTAMP u64, u16, u8, u8 variable
+ *
+ * Contains timestamp information for this frame.
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -214,6 +218,7 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_MCS = 19,
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
+ IEEE80211_RADIOTAP_TIMESTAMP = 22,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -321,6 +326,22 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
+/* For IEEE80211_RADIOTAP_TIMESTAMP */
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK 0x000F
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US 0x0001
+#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS 0x0003
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK 0x00F0
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU 0x0000
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU 0x0010
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU 0x0020
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ 0x0030
+#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN 0x00F0
+
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT 0x00
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT 0x01
+#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY 0x02
+
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
{
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 49dcad4fe99e..197a30d221e9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -134,8 +134,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[64 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
+ u64 icsk_ca_priv[88 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
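
The enlarged private area presumably makes room for bigger congestion-control state; a hedged sketch of the usual guard a CA module applies against this budget:

struct example_ca {
	u64 state[11];	/* exactly fills the 88-byte budget */
};

static void example_ca_init(struct sock *sk)
{
	BUILD_BUG_ON(sizeof(struct example_ca) > ICSK_CA_PRIV_SIZE);
	memset(inet_csk_ca(sk), 0, sizeof(struct example_ca));
}
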
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..e0cd318d5103 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,6 +64,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
}
void ip6_route_input(struct sk_buff *skb);
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 43a5a0e4524c..20ed9699fcd4 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -23,6 +23,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
struct in6_addr laddr; /* local tunnel end-point address */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 4079fc18ffe4..7d4a72e75f33 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -111,6 +111,7 @@ struct fib_info {
unsigned char fib_scope;
unsigned char fib_type;
__be32 fib_prefsrc;
+ u32 fib_tb_id;
u32 fib_priority;
u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
@@ -319,7 +320,7 @@ void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
-int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
extern u32 fib_multipath_secret __read_mostly;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a5e7035fb93f..59557c07904b 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -222,6 +222,25 @@ static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
}
+static inline __be64 key32_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be64)key;
+#else
+ return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static inline __be32 tunnel_id_to_key32(__be64 tun_id)
+{
+#ifdef __BIG_ENDIAN
+ return (__force __be32)tun_id;
+#else
+ return (__force __be32)((__force u64)tun_id >> 32);
+#endif
+}
+
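
A hedged round-trip check for the new helpers: on both endiannesses the key's 32 bits are preserved, so converting a key up to a tunnel id and back is lossless.

static void example_tunnel_key_roundtrip(void)
{
	__be64 tun_id = key32_to_tunnel_id(htonl(42));

	WARN_ON(tunnel_id_to_key32(tun_id) != htonl(42));	/* holds */
}
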
#ifdef CONFIG_INET
int ip_tunnel_init(struct net_device *dev);
@@ -236,6 +255,8 @@ void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ const u8 proto);
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index e90095091aa0..b220dabeab45 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -11,6 +11,7 @@
#ifndef _NET_L3MDEV_H_
#define _NET_L3MDEV_H_
+#include <net/dst.h>
#include <net/fib_rules.h>
/**
@@ -18,30 +19,24 @@
*
* @l3mdev_fib_table: Get FIB table id to use for lookups
*
- * @l3mdev_get_rtable: Get cached IPv4 rtable (dst_entry) for device
+ * @l3mdev_l3_rcv: Hook in L3 receive path
*
- * @l3mdev_get_saddr: Get source address for a flow
+ * @l3mdev_l3_out: Hook in L3 output path
*
- * @l3mdev_get_rt6_dst: Get cached IPv6 rt6_info (dst_entry) for device
+ * @l3mdev_link_scope_lookup: IPv6 lookup for linklocal and mcast destinations
*/
struct l3mdev_ops {
u32 (*l3mdev_fib_table)(const struct net_device *dev);
struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *dev,
struct sk_buff *skb, u16 proto);
-
- /* IPv4 ops */
- struct rtable * (*l3mdev_get_rtable)(const struct net_device *dev,
- const struct flowi4 *fl4);
- int (*l3mdev_get_saddr)(struct net_device *dev,
- struct flowi4 *fl4);
+ struct sk_buff * (*l3mdev_l3_out)(struct net_device *dev,
+ struct sock *sk, struct sk_buff *skb,
+ u16 proto);
/* IPv6 ops */
- struct dst_entry * (*l3mdev_get_rt6_dst)(const struct net_device *dev,
+ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev,
struct flowi6 *fl6);
- int (*l3mdev_get_saddr6)(struct net_device *dev,
- const struct sock *sk,
- struct flowi6 *fl6);
};
#ifdef CONFIG_NET_L3_MASTER_DEV
@@ -49,6 +44,8 @@ struct l3mdev_ops {
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
struct fib_lookup_arg *arg);
+void l3mdev_update_flow(struct net *net, struct flowi *fl);
+
int l3mdev_master_ifindex_rcu(const struct net_device *dev);
static inline int l3mdev_master_ifindex(struct net_device *dev)
{
@@ -80,7 +77,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
{
/* netdev_master_upper_dev_get_rcu calls
* list_first_or_null_rcu to walk the upper dev list.
@@ -89,7 +86,7 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
* typecast to remove the const
*/
struct net_device *dev = (struct net_device *)_dev;
- const struct net_device *master;
+ struct net_device *master;
if (!dev)
return NULL;
@@ -104,26 +101,6 @@ const struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
-/* get index of an interface to use for FIB lookups. For devices
- * enslaved to an L3 master device FIB lookups are based on the
- * master index
- */
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return l3mdev_master_ifindex_rcu(dev) ? : dev->ifindex;
-}
-
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- int oif;
-
- rcu_read_lock();
- oif = l3mdev_fib_oif_rcu(dev);
- rcu_read_unlock();
-
- return oif;
-}
-
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -137,39 +114,7 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
return tb_id;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
-{
- if (netif_is_l3_master(dev) && dev->l3mdev_ops->l3mdev_get_rtable)
- return dev->l3mdev_ops->l3mdev_get_rtable(dev, fl4);
-
- return NULL;
-}
-
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
- struct net_device *dev;
- bool rc = false;
-
- if (ifindex == 0)
- return false;
-
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev)
- rc = netif_is_l3_master(dev);
-
- rcu_read_unlock();
-
- return rc;
-}
-
-int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4);
-
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6);
-int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6);
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
static inline
struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
@@ -199,6 +144,34 @@ struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
return l3mdev_l3_rcv(skb, AF_INET6);
}
+static inline
+struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
+{
+ struct net_device *dev = skb_dst(skb)->dev;
+
+ if (netif_is_l3_slave(dev)) {
+ struct net_device *master;
+
+ master = netdev_master_upper_dev_get_rcu(dev);
+ if (master && master->l3mdev_ops->l3mdev_l3_out)
+ skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
+ skb, proto);
+ }
+
+ return skb;
+}
+
+static inline
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET);
+}
+
+static inline
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
+{
+ return l3mdev_l3_out(sk, skb, AF_INET6);
+}
#else
static inline int l3mdev_master_ifindex_rcu(const struct net_device *dev)
@@ -216,20 +189,11 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
-const struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
+struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
}
-static inline int l3mdev_fib_oif_rcu(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-static inline int l3mdev_fib_oif(struct net_device *dev)
-{
- return dev ? dev->ifindex : 0;
-}
-
static inline u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
return 0;
@@ -243,43 +207,32 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
return 0;
}
-static inline struct rtable *l3mdev_get_rtable(const struct net_device *dev,
- const struct flowi4 *fl4)
+static inline
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
{
return NULL;
}
-static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
-{
- return false;
-}
-
-static inline int l3mdev_get_saddr(struct net *net, int ifindex,
- struct flowi4 *fl4)
-{
- return 0;
-}
-
static inline
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net, struct flowi6 *fl6)
+struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
{
- return NULL;
+ return skb;
}
-static inline int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
+static inline
+struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
{
- return 0;
+ return skb;
}
static inline
-struct sk_buff *l3mdev_ip_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
static inline
-struct sk_buff *l3mdev_ip6_rcv(struct sk_buff *skb)
+struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
{
return skb;
}
@@ -290,6 +243,10 @@ int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
{
return 1;
}
+static inline
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+}
#endif
#endif /* _NET_L3MDEV_H_ */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cca510a585c3..5296100f3889 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1735,6 +1735,9 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @max_rx_aggregation_subframes: maximum number of frames in a single AMPDU
+ * that this station is allowed to transmit to us.
+ * Can be modified by the driver.
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
@@ -1775,6 +1778,7 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u8 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2014,6 +2018,11 @@ struct ieee80211_txq {
* @IEEE80211_HW_TX_FRAG_LIST: Hardware (or driver) supports sending frag_list
* skbs, needed for zero-copy software A-MSDU.
*
+ * @IEEE80211_HW_REPORTS_LOW_ACK: The driver (or firmware) reports low-ack
+ * events via ieee80211_report_low_ack() based on its own algorithm. For such
+ * drivers, mac80211's packet-loss mechanism is not triggered, and the driver
+ * depends entirely on firmware events for station kickout.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2054,6 +2063,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_USES_RSS,
IEEE80211_HW_TX_AMSDU,
IEEE80211_HW_TX_FRAG_LIST,
+ IEEE80211_HW_REPORTS_LOW_ACK,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -2141,6 +2151,14 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
*
+ * @radiotap_timestamp: Information for the radiotap timestamp field; if the
+ * 'units_pos' member is set to a non-negative value it must be set to
+ * a combination of an IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and an
+ * IEEE80211_RADIOTAP_TIMESTAMP_SPOS_* value, and then the timestamp
+ * field will be added and populated from the &struct ieee80211_rx_status
+ * device_timestamp. If the 'accuracy' member is non-negative, it's put
+ * into the accuracy radiotap field and the accuracy known flag is set.
+ *
* @netdev_features: netdev features to be set in each netdev created
* from this HW. Note that not all features are usable with mac80211,
* other features will be rejected during HW registration.
@@ -2184,6 +2202,10 @@ struct ieee80211_hw {
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
u16 radiotap_vht_details;
+ struct {
+ int units_pos;
+ s16 accuracy;
+ } radiotap_timestamp;
netdev_features_t netdev_features;
u8 uapsd_queues;
u8 uapsd_max_sp_len;
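
As a usage sketch only (not part of this patch): a driver advertising the new field might do the following before registering the hw, using the IEEE80211_RADIOTAP_TIMESTAMP_* constant prefixes the kerneldoc above names; the exact constants live in ieee80211_radiotap.h.

/* Illustrative driver fragment (assumed constant names, per the
 * IEEE80211_RADIOTAP_TIMESTAMP_* prefixes in the kerneldoc): export a
 * microsecond timestamp sampled at signal acquisition, ~22us accurate.
 * A negative units_pos means the field is not supported. */
hw->radiotap_timestamp.units_pos =
	IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
	IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
hw->radiotap_timestamp.accuracy = 22;
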
@@ -3085,11 +3107,8 @@ enum ieee80211_reconfig_type {
*
* @sta_add_debugfs: Drivers can use this callback to add debugfs files
* when a station is added to mac80211's station list. This callback
- * and @sta_remove_debugfs should be within a CONFIG_MAC80211_DEBUGFS
- * conditional. This callback can sleep.
- *
- * @sta_remove_debugfs: Remove the debugfs files which were added using
- * @sta_add_debugfs. This callback can sleep.
+ * should be within a CONFIG_MAC80211_DEBUGFS conditional. This
+ * callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
* associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
@@ -3485,10 +3504,6 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
- void (*sta_remove_debugfs)(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir);
#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 6793614e6502..e6937318546c 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -27,6 +27,20 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
#endif
}
+static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
+ const struct nf_conn *tmpl)
+{
+ if (tmpl && nfct_synproxy(tmpl)) {
+ if (!nfct_seqadj_ext_add(ct))
+ return false;
+
+ if (!nfct_synproxy_ext_add(ct))
+ return false;
+ }
+
+ return true;
+}
+
struct synproxy_stats {
unsigned int syn_received;
unsigned int cookie_invalid;
diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h
index d27588c8dbd9..1139cde0fdc5 100644
--- a/include/net/netfilter/nft_meta.h
+++ b/include/net/netfilter/nft_meta.h
@@ -36,4 +36,8 @@ void nft_meta_set_eval(const struct nft_expr *expr,
void nft_meta_set_destroy(const struct nft_ctx *ctx,
const struct nft_expr *expr);
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+
#endif
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
index 60fa1530006b..02e28c529b29 100644
--- a/include/net/netfilter/nft_reject.h
+++ b/include/net/netfilter/nft_reject.h
@@ -8,6 +8,10 @@ struct nft_reject {
extern const struct nla_policy nft_reject_policy[];
+int nft_reject_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data);
+
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[]);
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 24cd3949a9a4..27bb9633c69d 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -11,7 +11,7 @@
struct ctl_table_header;
struct xfrm_policy_hash {
- struct hlist_head *table;
+ struct hlist_head __rcu *table;
unsigned int hmask;
u8 dbits4;
u8 sbits4;
@@ -38,14 +38,12 @@ struct netns_xfrm {
* mode. Also, it can be used by ah/esp icmp error handler to find
* offending SA.
*/
- struct hlist_head *state_bydst;
- struct hlist_head *state_bysrc;
- struct hlist_head *state_byspi;
+ struct hlist_head __rcu *state_bydst;
+ struct hlist_head __rcu *state_bysrc;
+ struct hlist_head __rcu *state_byspi;
unsigned int state_hmask;
unsigned int state_num;
struct work_struct state_hash_work;
- struct hlist_head state_gc_list;
- struct work_struct state_gc_work;
struct list_head policy_all;
struct hlist_head *policy_byidx;
@@ -73,7 +71,7 @@ struct netns_xfrm {
struct dst_ops xfrm6_dst_ops;
#endif
spinlock_t xfrm_state_lock;
- rwlock_t xfrm_policy_lock;
+ spinlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
/* flow cache part */
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a459be5fe1c2..5ccaa4be7d96 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -486,4 +486,20 @@ struct tc_cls_matchall_offload {
unsigned long cookie;
};
+enum tc_clsbpf_command {
+ TC_CLSBPF_ADD,
+ TC_CLSBPF_REPLACE,
+ TC_CLSBPF_DESTROY,
+ TC_CLSBPF_STATS,
+};
+
+struct tc_cls_bpf_offload {
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ const char *name;
+ bool exts_integrated;
+ u32 gen_flags;
+};
+
#endif
diff --git a/include/net/route.h b/include/net/route.h
index ad777d79af94..0429d47cad25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -29,7 +29,6 @@
#include <net/flow.h>
#include <net/inet_sock.h>
#include <net/ip_fib.h>
-#include <net/l3mdev.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -285,15 +284,6 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
ip_route_connect_init(fl4, dst, src, tos, oif, protocol,
sport, dport, sk);
- if (!src && oif) {
- int rc;
-
- rc = l3mdev_get_saddr(net, oif, fl4);
- if (rc < 0)
- return ERR_PTR(rc);
-
- src = fl4->saddr;
- }
if (!dst || !src) {
rt = __ip_route_output_key(net, fl4);
if (IS_ERR(rt))
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 52a2015667b4..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
u16 data[];
};
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@@ -76,7 +84,7 @@ struct Qdisc {
* For performance sake on SMP, we put highly modified fields at the end
*/
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
- struct sk_buff_head q;
+ struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
+{
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff_head *list)
+ struct qdisc_skb_head *qh)
{
- __skb_queue_tail(list, skb);
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
return __qdisc_enqueue_tail(skb, sch, &sch->q);
}
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
- struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = qh->head;
if (likely(skb != NULL)) {
- qdisc_qstats_backlog_dec(sch, skb);
- qdisc_bstats_update(sch, skb);
+ qh->head = skb->next;
+ qh->qlen--;
+ if (qh->head == NULL)
+ qh->tail = NULL;
+ skb->next = NULL;
}
return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
- return __qdisc_dequeue_head(sch, &sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ }
+
+ return skb;
}
/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
}
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff_head *list,
+ struct qdisc_skb_head *qh,
struct sk_buff **to_free)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
- return skb_peek(&sch->q);
+ const struct qdisc_skb_head *qh = &sch->q;
+
+ return qh->head;
}
/* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
return skb;
}
-static inline void __qdisc_reset_queue(struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
/*
* We do not know the backlog in bytes of this list, it
* is up to the caller to correct it
*/
- if (!skb_queue_empty(list)) {
- rtnl_kfree_skbs(list->next, list->prev);
- __skb_queue_head_init(list);
+ ASSERT_RTNL();
+ if (qh->qlen) {
+ rtnl_kfree_skbs(qh->head, qh->tail);
+
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
}
}
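
The qdisc_skb_head conversion above is a plain singly-linked FIFO with head/tail/qlen bookkeeping. A self-contained userspace sketch of the same logic, with a stand-in node type instead of sk_buff and the spinlock omitted:

#include <assert.h>
#include <stddef.h>

struct node { struct node *next; };

struct fifo {			/* mirrors qdisc_skb_head */
	struct node *head;
	struct node *tail;
	unsigned int qlen;
};

static void fifo_enqueue(struct fifo *q, struct node *n)
{
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;	/* append behind current tail */
	else
		q->head = n;		/* queue was empty */
	q->tail = n;
	q->qlen++;
}

static struct node *fifo_dequeue(struct fifo *q)
{
	struct node *n = q->head;

	if (n) {
		q->head = n->next;
		if (!q->head)
			q->tail = NULL;	/* emptied: tail must follow */
		n->next = NULL;
		q->qlen--;
	}
	return n;
}

int main(void)
{
	struct fifo q = { NULL, NULL, 0 };
	struct node a, b;

	fifo_enqueue(&q, &a);
	fifo_enqueue(&q, &b);
	assert(fifo_dequeue(&q) == &a);		/* FIFO order preserved */
	assert(fifo_dequeue(&q) == &b);
	assert(fifo_dequeue(&q) == NULL && q.qlen == 0);
	return 0;
}
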
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 632e205ca54b..87a7f42e7639 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -83,9 +83,9 @@
#endif
/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
+#define SCTP_PAD4(s) (((s)+3)&~3)
/* Truncate to the previous multiple of 4. */
-#define WORD_TRUNC(s) ((s)&~3)
+#define SCTP_TRUNC4(s) ((s)&~3)
/*
* Function declarations.
@@ -433,7 +433,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
+ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -462,7 +462,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
- pos.v += WORD_ROUND(ntohs(pos.p->length)))
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
#define sctp_walk_errors(err, chunk_hdr)\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
@@ -472,7 +472,7 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
- err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
+ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index efc01743b9d6..ca6c971dd74a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -307,85 +307,27 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
}
/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
-/* RFC 1982 - Serial Number Arithmetic
- *
- * 2. Comparison
- * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2,
- * in all other cases, s1 is not equal to s2.
- *
- * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
- * and
- *
- * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
- *
- * s1 is said to be greater than s2 if, and only if, s1 is not equal to
- * s2, and
- *
- * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
- */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
- * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
- */
-
-enum {
- TSN_SIGN_BIT = (1<<31)
-};
-
-static inline int TSN_lt(__u32 s, __u32 t)
-{
- return ((s) - (t)) & TSN_SIGN_BIT;
-}
-
-static inline int TSN_lte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
-}
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
/* Compare two SSNs */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on Stream Sequence Numbers in this document
- * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where
- * SERIAL_BITS = 16.
- */
-enum {
- SSN_SIGN_BIT = (1<<15)
-};
-
-static inline int SSN_lt(__u16 s, __u16 t)
-{
- return ((s) - (t)) & SSN_SIGN_BIT;
-}
-
-static inline int SSN_lte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
-}
-
-/*
- * ADDIP 3.1.1
- * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
- * Numbers wrap back to 0 after reaching 4294967295.
- */
-enum {
- ADDIP_SERIAL_SIGN_BIT = (1<<31)
-};
-
-static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
-}
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
/* Check VTAG of the packet matches the sender's own tag. */
static inline int
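
The typecheck()-based macros above implement RFC 1982 serial number arithmetic: subtract the two counters and read the result as signed, which stays correct across wraparound as long as the values are within half the number space. A standalone demonstration (typecheck() is kernel-only, so it is dropped here):

#include <stdio.h>
#include <stdint.h>

/* Signed-difference comparison, as in the new TSN_lt()/SSN_lt(). */
#define SER32_LT(a, b) ((int32_t)((a) - (b)) < 0)

int main(void)
{
	uint32_t before_wrap = 0xfffffffeu;
	uint32_t after_wrap  = 0x00000001u;

	/* Plain '<' gets the wrapped case backwards... */
	printf("plain:  %d\n", before_wrap < after_wrap);		/* 0 (wrong) */
	/* ...serial arithmetic still orders them correctly. */
	printf("serial: %d\n", SER32_LT(before_wrap, after_wrap));	/* 1 */
	printf("serial: %d\n", SER32_LT(after_wrap, before_wrap));	/* 0 */
	return 0;
}
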
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ce93c4b10d26..8693dc452a7f 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -537,6 +537,7 @@ struct sctp_datamsg {
struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *,
struct sctp_sndrcvinfo *,
struct iov_iter *);
+void sctp_datamsg_free(struct sctp_datamsg *);
void sctp_datamsg_put(struct sctp_datamsg *);
void sctp_chunk_fail(struct sctp_chunk *, int error);
int sctp_chunk_abandoned(struct sctp_chunk *);
@@ -1076,7 +1077,7 @@ struct sctp_outq {
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
void sctp_outq_teardown(struct sctp_outq *);
void sctp_outq_free(struct sctp_outq*);
-int sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
+void sctp_outq_tail(struct sctp_outq *, struct sctp_chunk *chunk, gfp_t);
int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *);
int sctp_outq_is_empty(const struct sctp_outq *);
void sctp_outq_restart(struct sctp_outq *);
@@ -1084,7 +1085,7 @@ void sctp_outq_restart(struct sctp_outq *);
void sctp_retransmit(struct sctp_outq *, struct sctp_transport *,
sctp_retransmit_reason_t);
void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8);
-int sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
+void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
void sctp_prsctp_prune(struct sctp_association *asoc,
struct sctp_sndrcvinfo *sinfo, int msg_len);
/* Uncork and flush an outqueue. */
diff --git a/include/net/sock.h b/include/net/sock.h
index c797c57f4d9f..ebf75db08e06 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1339,6 +1339,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+
+ /* Avoid a possible overflow.
+ * TCP send queues can make this happen, if sk_mem_reclaim()
+ * is not called and more than 2 GBytes are released at once.
+ *
+ * If we reach 2 MBytes, reclaim 1 MByte right now; there is
+ * no need to hold that much forward allocation anyway.
+ */
+ if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ __sk_mem_reclaim(sk, 1 << 20);
}
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
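
A toy model of the accumulate-then-reclaim pattern introduced above, with the same hypothetical thresholds as the comment (reclaim 1 MByte once 2 MBytes accumulate) so a 32-bit counter cannot overflow:

#include <stdio.h>

static long forward_alloc;	/* stand-in for sk->sk_forward_alloc */
static long reclaimed_total;

static void mem_reclaim(long amount)
{
	forward_alloc -= amount;	/* the kernel returns pages here */
	reclaimed_total += amount;
}

static void mem_uncharge(long size)
{
	forward_alloc += size;
	/* Reclaim early so the counter can never overflow even if
	 * gigabytes are released without an explicit reclaim call. */
	if (forward_alloc >= 1L << 21)
		mem_reclaim(1L << 20);
}

int main(void)
{
	long i;

	for (i = 0; i < 4096; i++)
		mem_uncharge(4096);	/* release 16 MB in 4 KB chunks */
	printf("forward_alloc=%ld reclaimed=%ld\n",
	       forward_alloc, reclaimed_total);	/* stays below 2 MB */
	return 0;
}
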
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 5164bd7a38fb..9fd2bea0a6e0 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
void ife_release_meta_gen(struct tcf_meta_info *mi);
int register_ife_op(struct tcf_meta_ops *mops);
int unregister_ife_op(struct tcf_meta_ops *mops);
diff --git a/include/net/tc_act/tc_skbmod.h b/include/net/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..644a2116b47b
--- /dev/null
+++ b/include/net/tc_act/tc_skbmod.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __NET_TC_SKBMOD_H
+#define __NET_TC_SKBMOD_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_skbmod.h>
+
+struct tcf_skbmod_params {
+ struct rcu_head rcu;
+ u64 flags; /* up to 64 types of operations; extend if needed */
+ u8 eth_dst[ETH_ALEN];
+ u16 eth_type;
+ u8 eth_src[ETH_ALEN];
+};
+
+struct tcf_skbmod {
+ struct tc_action common;
+ struct tcf_skbmod_params __rcu *skbmod_p;
+};
+#define to_skbmod(a) ((struct tcf_skbmod *)a)
+
+#endif /* __NET_TC_SKBMOD_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..253f8da6c2a6
--- /dev/null
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __NET_TC_TUNNEL_KEY_H
+#define __NET_TC_TUNNEL_KEY_H
+
+#include <net/act_api.h>
+
+struct tcf_tunnel_key_params {
+ struct rcu_head rcu;
+ int tcft_action;
+ int action;
+ struct metadata_dst *tcft_enc_metadata;
+};
+
+struct tcf_tunnel_key {
+ struct tc_action common;
+ struct tcf_tunnel_key_params __rcu *params;
+};
+
+#define to_tunnel_key(a) ((struct tcf_tunnel_key *)a)
+
+#endif /* __NET_TC_TUNNEL_KEY_H */
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 6b835889ea30..48cca321ee6c 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,7 @@
#define __NET_TC_VLAN_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
#define VLAN_F_POP 0x1
#define VLAN_F_PUSH 0x2
@@ -24,4 +25,28 @@ struct tcf_vlan {
};
#define to_vlan(a) ((struct tcf_vlan *)a)
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_VLAN)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_proto;
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d6ae36512429..f83b7f220a65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -533,6 +533,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
@@ -640,7 +642,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (skb_queue_empty(&tp->out_of_order_queue) &&
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
@@ -671,7 +673,7 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
- return tp->rtt_min[0].rtt;
+ return minmax_get(&tp->rtt_min);
}
/* Compute the actual receive window we are currently advertising.
@@ -763,8 +765,16 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
- /* There is space for up to 20 bytes */
- __u32 in_flight;/* Bytes in flight when packet sent */
+ /* There is space for up to 24 bytes */
+ __u32 in_flight:30,/* Bytes in flight at transmit */
+ is_app_limited:1, /* cwnd not fully used? */
+ unused:1;
+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
+ __u32 delivered;
+ /* start of send pipeline phase */
+ struct skb_mstamp first_tx_mstamp;
+ /* when we reached the "delivered" count */
+ struct skb_mstamp delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -860,6 +870,27 @@ struct ack_sample {
u32 in_flight;
};
+/* A rate sample measures the number of (original/retransmitted) data
+ * packets delivered ("delivered") over an interval of time ("interval_us").
+ * The tcp_rate.c code fills in the rate sample, and congestion
+ * control modules that define a cong_control function to run at the end
+ * of ACK processing can optionally choose to consult this sample when
+ * setting cwnd and pacing rate.
+ * A sample is invalid if "delivered" or "interval_us" is negative.
+ */
+struct rate_sample {
+ struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ s32 delivered; /* number of packets delivered over interval */
+ long interval_us; /* time for tp->delivered to incr "delivered" */
+ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ int losses; /* number of packets marked lost upon ACK */
+ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
+ u32 prior_in_flight; /* in flight before this ACK */
+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
+ bool is_retrans; /* is sample from retransmission? */
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -884,6 +915,14 @@ struct tcp_congestion_ops {
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+ /* suggest number of segments for each skb to transmit (optional) */
+ u32 (*tso_segs_goal)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+ /* call when packets are delivered to update cwnd and pacing rate,
+ * after all the ca_state processing. (optional)
+ */
+ void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
@@ -946,6 +985,14 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_rate.c */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs);
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs);
+void tcp_rate_check_app_limited(struct sock *sk);
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
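
Conceptually, the rate-sample fields above let a cong_control module turn each ACK into a delivery-rate estimate: packets delivered over elapsed time. A hedged arithmetic sketch using the struct's field meanings (the real tcp_rate.c also handles app-limited and retransmitted samples):

#include <stdio.h>

int main(void)
{
	/* Suppose an ACK reports 10 more packets delivered, 12 ms after
	 * the interval started, with a 1448-byte MSS. */
	long delivered   = 10;		/* rs->delivered */
	long interval_us = 12000;	/* rs->interval_us */
	long mss         = 1448;

	if (delivered > 0 && interval_us > 0) {
		/* bytes per second = pkts * mss / seconds */
		long long rate_bps = (long long)delivered * mss * 1000000
				     / interval_us;
		printf("delivery rate ~= %lld bytes/s\n", rate_bps);
	} else {
		puts("sample invalid (negative delivered/interval)");
	}
	return 0;
}
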
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index b96d0360c095..0255613a54a4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -350,24 +350,6 @@ static inline __be32 vxlan_vni_field(__be32 vni)
#endif
}
-static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be32)tun_id;
-#else
- return (__force __be32)((__force u64)tun_id >> 32);
-#endif
-}
-
-static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
-{
-#if defined(__BIG_ENDIAN)
- return (__force __be64)vni;
-#else
- return (__force __be64)((u64)(__force u32)vni << 32);
-#endif
-}
-
static inline size_t vxlan_rco_start(__be32 vni_field)
{
return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index adfebd6f243c..31947b9c21d6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -187,7 +187,7 @@ struct xfrm_state {
struct xfrm_replay_state_esn *preplay_esn;
/* The functions for replay detection. */
- struct xfrm_replay *repl;
+ const struct xfrm_replay *repl;
/* internal flag that only holds state for delayed aevent at the
* moment
@@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_extract_header(struct sk_buff *skb);
int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+ struct ip6_tnl *t);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto);
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index 3c6128e1fdbe..703a64b4681a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -34,8 +34,6 @@ struct rxrpc_wire_header {
#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */
__be32 callNumber; /* call ID (0 for connection-level packets) */
-#define RXRPC_PROCESS_MAXCALLS (1<<2) /* maximum number of active calls per conn (power of 2) */
-
__be32 seq; /* sequence number of pkt in call stream */
__be32 serial; /* serial number of pkt sent to network */
@@ -93,10 +91,14 @@ struct rxrpc_wire_header {
struct rxrpc_jumbo_header {
uint8_t flags; /* packet flags (as per rxrpc_header) */
uint8_t pad;
- __be16 _rsvd; /* reserved (used by kerberos security as cksum) */
+ union {
+ __be16 _rsvd; /* reserved */
+ __be16 cksum; /* kerberos security checksum */
+ };
};
#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
+#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
/*****************************************************************************/
/*
@@ -121,6 +123,7 @@ struct rxrpc_ackpacket {
#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
+#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
uint8_t nAcks; /* number of ACKs */
#define RXRPC_MAXACKS 255
@@ -131,6 +134,13 @@ struct rxrpc_ackpacket {
} __packed;
+/* Some ACKs refer to specific packets and some are general and can be updated. */
+#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \
+ (1 << RXRPC_ACK_PING_RESPONSE) | \
+ (1 << RXRPC_ACK_DELAY) | \
+ (1 << RXRPC_ACK_IDLE))
+
+
/*
* ACK packets can have a further piece of information tagged on the end
*/
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 13c0b2ba1b6c..73d870918939 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -11,12 +11,12 @@ struct sas_rphy;
struct request;
#if !IS_ENABLED(CONFIG_SCSI_SAS_ATTRS)
-static inline int is_sas_attached(struct scsi_device *sdev)
+static inline int scsi_is_sas_rphy(const struct device *sdev)
{
return 0;
}
#else
-extern int is_sas_attached(struct scsi_device *sdev);
+extern int scsi_is_sas_rphy(const struct device *);
#endif
static inline int sas_protocol_ata(enum sas_protocol proto)
@@ -202,7 +202,6 @@ extern int sas_rphy_add(struct sas_rphy *);
extern void sas_rphy_remove(struct sas_rphy *);
extern void sas_rphy_delete(struct sas_rphy *);
extern void sas_rphy_unlink(struct sas_rphy *);
-extern int scsi_is_sas_rphy(const struct device *);
struct sas_port *sas_port_alloc(struct device *, int);
struct sas_port *sas_port_alloc_num(struct device *);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index cbe574ea674b..ada12d00118c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -16,17 +16,76 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(rxrpc_conn,
+ TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(conn, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("C=%p %s u=%d sp=%pSR",
+ __entry->conn,
+ rxrpc_conn_traces[__entry->op],
+ __entry->usage,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_client,
+ TP_PROTO(struct rxrpc_connection *conn, int channel,
+ enum rxrpc_client_trace op),
+
+ TP_ARGS(conn, channel, op),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(u32, cid )
+ __field(int, channel )
+ __field(int, usage )
+ __field(enum rxrpc_client_trace, op )
+ __field(enum rxrpc_conn_cache_state, cs )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->channel = channel;
+ __entry->usage = atomic_read(&conn->usage);
+ __entry->op = op;
+ __entry->cid = conn->proto.cid;
+ __entry->cs = conn->cache_state;
+ ),
+
+ TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ __entry->conn,
+ __entry->channel,
+ rxrpc_client_traces[__entry->op],
+ rxrpc_conn_cache_states[__entry->cs],
+ __entry->cid,
+ __entry->usage)
+ );
+
TRACE_EVENT(rxrpc_call,
- TP_PROTO(struct rxrpc_call *call, int op, int usage, int nskb,
- const void *where, const void *aux),
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+ int usage, const void *where, const void *aux),
- TP_ARGS(call, op, usage, nskb, where, aux),
+ TP_ARGS(call, op, usage, where, aux),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(int, op )
__field(int, usage )
- __field(int, nskb )
__field(const void *, where )
__field(const void *, aux )
),
@@ -35,35 +94,27 @@ TRACE_EVENT(rxrpc_call,
__entry->call = call;
__entry->op = op;
__entry->usage = usage;
- __entry->nskb = nskb;
__entry->where = where;
__entry->aux = aux;
),
- TP_printk("c=%p %s u=%d s=%d p=%pSR a=%p",
+ TP_printk("c=%p %s u=%d sp=%pSR a=%p",
__entry->call,
- (__entry->op == 0 ? "NWc" :
- __entry->op == 1 ? "NWs" :
- __entry->op == 2 ? "SEE" :
- __entry->op == 3 ? "GET" :
- __entry->op == 4 ? "Gsb" :
- __entry->op == 5 ? "PUT" :
- "Psb"),
+ rxrpc_call_traces[__entry->op],
__entry->usage,
- __entry->nskb,
__entry->where,
__entry->aux)
);
TRACE_EVENT(rxrpc_skb,
- TP_PROTO(struct sk_buff *skb, int op, int usage, int mod_count,
- const void *where),
+ TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
+ int usage, int mod_count, const void *where),
TP_ARGS(skb, op, usage, mod_count, where),
TP_STRUCT__entry(
__field(struct sk_buff *, skb )
- __field(int, op )
+ __field(enum rxrpc_skb_trace, op )
__field(int, usage )
__field(int, mod_count )
__field(const void *, where )
@@ -79,16 +130,491 @@ TRACE_EVENT(rxrpc_skb,
TP_printk("s=%p %s u=%d m=%d p=%pSR",
__entry->skb,
- (__entry->op == 0 ? "NEW" :
- __entry->op == 1 ? "SEE" :
- __entry->op == 2 ? "GET" :
- __entry->op == 3 ? "FRE" :
- "PUR"),
+ rxrpc_skb_traces[__entry->op],
__entry->usage,
__entry->mod_count,
__entry->where)
);
+TRACE_EVENT(rxrpc_rx_packet,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_rx_done,
+ TP_PROTO(int result, int abort_code),
+
+ TP_ARGS(result, abort_code),
+
+ TP_STRUCT__entry(
+ __field(int, result )
+ __field(int, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->result = result;
+ __entry->abort_code = abort_code;
+ ),
+
+ TP_printk("r=%d a=%d", __entry->result, __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_abort,
+ TP_PROTO(const char *why, u32 cid, u32 call_id, rxrpc_seq_t seq,
+ int abort_code, int error),
+
+ TP_ARGS(why, cid, call_id, seq, abort_code, error),
+
+ TP_STRUCT__entry(
+ __array(char, why, 4 )
+ __field(u32, cid )
+ __field(u32, call_id )
+ __field(rxrpc_seq_t, seq )
+ __field(int, abort_code )
+ __field(int, error )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->why, why, 4);
+ __entry->cid = cid;
+ __entry->call_id = call_id;
+ __entry->abort_code = abort_code;
+ __entry->error = error;
+ __entry->seq = seq;
+ ),
+
+ TP_printk("%08x:%08x s=%u a=%d e=%d %s",
+ __entry->cid, __entry->call_id, __entry->seq,
+ __entry->abort_code, __entry->error, __entry->why)
+ );
+
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_transmit_trace, why )
+ __field(rxrpc_seq_t, tx_hard_ack )
+ __field(rxrpc_seq_t, tx_top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->tx_hard_ack = call->tx_hard_ack;
+ __entry->tx_top = call->tx_top;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_transmit_traces[__entry->why],
+ __entry->tx_hard_ack + 1,
+ __entry->tx_top - __entry->tx_hard_ack)
+ );
+
+TRACE_EVENT(rxrpc_rx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+
+ TP_ARGS(call, first, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, first )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_ack_names[__entry->reason],
+ __entry->first,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_tx_data,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, u8 flags, bool lose),
+
+ TP_ARGS(call, seq, serial, flags, lose),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, flags )
+ __field(bool, lose )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->serial = serial;
+ __entry->flags = flags;
+ __entry->lose = lose;
+ ),
+
+ TP_printk("c=%p DATA %08x q=%08x fl=%02x%s",
+ __entry->call,
+ __entry->serial,
+ __entry->seq,
+ __entry->flags,
+ __entry->lose ? " *LOSE*" : "")
+ );
+
+TRACE_EVENT(rxrpc_tx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
+ u8 reason, u8 n_acks),
+
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, ack_first )
+ __field(rxrpc_serial_t, ack_serial )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->ack_first = ack_first;
+ __entry->ack_serial = ack_serial;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ __entry->call,
+ __entry->serial,
+ rxrpc_ack_names[__entry->reason],
+ __entry->ack_first,
+ __entry->ack_serial,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_receive,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
+ rxrpc_serial_t serial, rxrpc_seq_t seq),
+
+ TP_ARGS(call, why, serial, seq),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_receive_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->seq = seq;
+ __entry->hard_ack = call->rx_hard_ack;
+ __entry->top = call->rx_top;
+ ),
+
+ TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ __entry->call,
+ rxrpc_receive_traces[__entry->why],
+ __entry->serial,
+ __entry->seq,
+ __entry->hard_ack,
+ __entry->top)
+ );
+
+TRACE_EVENT(rxrpc_recvmsg,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
+ rxrpc_seq_t seq, unsigned int offset, unsigned int len,
+ int ret),
+
+ TP_ARGS(call, why, seq, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_recvmsg_trace, why )
+ __field(rxrpc_seq_t, seq )
+ __field(unsigned int, offset )
+ __field(unsigned int, len )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ __entry->call,
+ rxrpc_recvmsg_traces[__entry->why],
+ __entry->seq,
+ __entry->offset,
+ __entry->len,
+ __entry->ret)
+ );
+
+TRACE_EVENT(rxrpc_rtt_tx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
+ rxrpc_serial_t send_serial),
+
+ TP_ARGS(call, why, send_serial),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_tx_trace, why )
+ __field(rxrpc_serial_t, send_serial )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ ),
+
+ TP_printk("c=%p %s sr=%08x",
+ __entry->call,
+ rxrpc_rtt_tx_traces[__entry->why],
+ __entry->send_serial)
+ );
+
+TRACE_EVENT(rxrpc_rtt_rx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ s64 rtt, u8 nr, s64 avg),
+
+ TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_rx_trace, why )
+ __field(u8, nr )
+ __field(rxrpc_serial_t, send_serial )
+ __field(rxrpc_serial_t, resp_serial )
+ __field(s64, rtt )
+ __field(u64, avg )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ __entry->resp_serial = resp_serial;
+ __entry->rtt = rtt;
+ __entry->nr = nr;
+ __entry->avg = avg;
+ ),
+
+ TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ __entry->call,
+ rxrpc_rtt_rx_traces[__entry->why],
+ __entry->send_serial,
+ __entry->resp_serial,
+ __entry->rtt,
+ __entry->nr,
+ __entry->avg)
+ );
+
+TRACE_EVENT(rxrpc_timer,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+ unsigned long now),
+
+ TP_ARGS(call, why, now),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_timer_trace, why )
+ __field(unsigned long, now )
+ __field(unsigned long, expire_at )
+ __field(unsigned long, ack_at )
+ __field(unsigned long, resend_at )
+ __field(unsigned long, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->expire_at = call->expire_at;
+ __entry->ack_at = call->ack_at;
+ __entry->resend_at = call->resend_at;
+ __entry->timer = call->timer.expires;
+ ),
+
+ TP_printk("c=%p %s now=%lx x=%ld a=%ld r=%ld t=%ld",
+ __entry->call,
+ rxrpc_timer_traces[__entry->why],
+ __entry->now,
+ __entry->expire_at - __entry->now,
+ __entry->ack_at - __entry->now,
+ __entry->resend_at - __entry->now,
+ __entry->timer - __entry->now)
+ );
+
+TRACE_EVENT(rxrpc_rx_lose,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_propose_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial, bool immediate,
+ bool background, enum rxrpc_propose_ack_outcome outcome),
+
+ TP_ARGS(call, why, ack_reason, serial, immediate, background,
+ outcome),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_propose_ack_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, ack_reason )
+ __field(bool, immediate )
+ __field(bool, background )
+ __field(enum rxrpc_propose_ack_outcome, outcome )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ __entry->immediate = immediate;
+ __entry->background = background;
+ __entry->outcome = outcome;
+ ),
+
+ TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ __entry->call,
+ rxrpc_propose_ack_traces[__entry->why],
+ rxrpc_ack_names[__entry->ack_reason],
+ __entry->serial,
+ __entry->immediate,
+ __entry->background,
+ rxrpc_propose_ack_outcomes[__entry->outcome])
+ );
+
+TRACE_EVENT(rxrpc_retransmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
+ s64 expiry),
+
+ TP_ARGS(call, seq, annotation, expiry),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(u8, annotation )
+ __field(s64, expiry )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->annotation = annotation;
+ __entry->expiry = expiry;
+ ),
+
+ TP_printk("c=%p q=%x a=%02x xp=%lld",
+ __entry->call,
+ __entry->seq,
+ __entry->annotation,
+ __entry->expiry)
+ );
+
+TRACE_EVENT(rxrpc_congest,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+
+ TP_ARGS(call, summary, ack_serial, change),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_congest_change, change )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ __field(rxrpc_seq_t, lowest_nak )
+ __field(rxrpc_serial_t, ack_serial )
+ __field_struct(struct rxrpc_ack_summary, sum )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->change = change;
+ __entry->hard_ack = call->tx_hard_ack;
+ __entry->top = call->tx_top;
+ __entry->lowest_nak = call->acks_lowest_nak;
+ __entry->ack_serial = ack_serial;
+ memcpy(&__entry->sum, summary, sizeof(__entry->sum));
+ ),
+
+ TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ __entry->call,
+ __entry->ack_serial,
+ rxrpc_ack_names[__entry->sum.ack_reason],
+ __entry->hard_ack,
+ rxrpc_congest_modes[__entry->sum.mode],
+ __entry->sum.cwnd,
+ __entry->sum.ssthresh,
+ __entry->sum.nr_acks, __entry->sum.nr_nacks,
+ __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
+ __entry->sum.nr_rot_new_acks,
+ __entry->top - __entry->hard_ack,
+ __entry->sum.cumulative_acks,
+ __entry->sum.dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
+ rxrpc_congest_changes[__entry->change],
+ __entry->sum.retrans_timeo ? " rTxTo" : "")
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f896dfac4ac0..f09c70b97eca 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -398,6 +398,34 @@ enum bpf_func_id {
*/
BPF_FUNC_skb_change_tail,
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len is part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate the current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
__BPF_FUNC_MAX_ID,
};
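
A hedged sketch of the new bpf_skb_pull_data() helper from a tc classifier written in restricted C: linearize the first bytes before direct packet access. Assumes clang -target bpf; the helper declaration style mirrors samples/bpf, and the 64-byte length is arbitrary.

#include <linux/bpf.h>

/* Helper declaration in the samples/bpf style; BPF_FUNC_skb_pull_data
 * is the enum value added by this patch. */
static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) =
	(void *) BPF_FUNC_skb_pull_data;

__attribute__((section("classifier"), used))
int pull_demo(struct __sk_buff *skb)
{
	void *data, *data_end;

	/* Make the first 64 bytes linear so direct access is allowed. */
	if (bpf_skb_pull_data(skb, 64))
		return 0;			/* TC_ACT_OK */

	/* Helpers may move skb data: reload the pointers afterwards. */
	data = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	if ((unsigned char *)data + 64 > (unsigned char *)data_end)
		return 0;

	/* ... direct reads/writes on the first 64 bytes are now safe. */
	return 0;
}

char _license[] __attribute__((section("license"), used)) = "GPL";
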
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 9bf3aecfe05b..b4fba662cd32 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -464,6 +464,7 @@ enum {
enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
+ IPVLAN_MODE_L3S,
IPVLAN_MODE_MAX
};
@@ -618,7 +619,7 @@ enum {
enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC, /* Hardware queue specific attributes */
- IFLA_VF_VLAN,
+ IFLA_VF_VLAN, /* VLAN ID and QoS */
IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
@@ -630,6 +631,7 @@ enum {
IFLA_VF_TRUST, /* Trust VF */
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
+ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
__IFLA_VF_MAX,
};
@@ -646,6 +648,22 @@ struct ifla_vf_vlan {
__u32 qos;
};
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC,
+ IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX,
+};
+
+#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
+#define MAX_VLAN_LIST_LEN 1
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+ __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+};
+
struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
@@ -826,6 +844,7 @@ enum {
IFLA_STATS_LINK_64,
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS,
__IFLA_STATS_MAX,
};
@@ -845,6 +864,14 @@ enum {
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ __IFLA_OFFLOAD_XSTATS_MAX
+};
+#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+
/* XDP section */
enum {
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index fb7337d6b985..92f3c8677523 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -74,6 +74,7 @@ enum {
IFLA_IPTUN_ENCAP_FLAGS,
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
+ IFLA_IPTUN_COLLECT_METADATA,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 5581206a08ae..509cd961068d 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -123,6 +123,8 @@ enum {
INET_DIAG_LOCALS,
INET_DIAG_PEERS,
INET_DIAG_PAD,
+ INET_DIAG_MARK,
+ INET_DIAG_BBRINFO,
__INET_DIAG_MAX,
};
@@ -156,8 +158,20 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+/* INET_DIAG_BBRINFO */
+
+struct tcp_bbr_info {
+ /* u64 bw: max-filtered BW (app throughput) estimate in bytes per second: */
+ __u32 bbr_bw_lo; /* lower 32 bits of bw */
+ __u32 bbr_bw_hi; /* upper 32 bits of bw */
+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
+};
+
union tcp_cc_info {
struct tcpvegas_info vegas;
struct tcp_dctcp_info dctcp;
+ struct tcp_bbr_info bbr;
};
#endif /* _UAPI_INET_DIAG_H_ */
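
Decoding tcp_bbr_info on the consumer side: the bandwidth is split across two 32-bit halves and the gains are fixed point with 8 fractional bits (value/256). A standalone sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

struct tcp_bbr_info {			/* as in inet_diag.h above */
	uint32_t bbr_bw_lo;
	uint32_t bbr_bw_hi;
	uint32_t bbr_min_rtt;
	uint32_t bbr_pacing_gain;
	uint32_t bbr_cwnd_gain;
};

int main(void)
{
	/* Example values: 1.5 GB/s bandwidth, ~2.89x pacing gain. */
	struct tcp_bbr_info bi = {
		.bbr_bw_lo       = 0x59682f00u,	/* low 32 bits of bw */
		.bbr_bw_hi       = 0x0,		/* high 32 bits of bw */
		.bbr_min_rtt     = 4200,	/* usec */
		.bbr_pacing_gain = 739,		/* 739/256 ~= 2.89 */
		.bbr_cwnd_gain   = 512,		/* 512/256  = 2.00 */
	};
	uint64_t bw = ((uint64_t)bi.bbr_bw_hi << 32) | bi.bbr_bw_lo;

	printf("bw=%llu B/s min_rtt=%u us pacing_gain=%.2f cwnd_gain=%.2f\n",
	       (unsigned long long)bw, bi.bbr_min_rtt,
	       bi.bbr_pacing_gain / 256.0, bi.bbr_cwnd_gain / 256.0);
	return 0;
}
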
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 54c3b4f4aceb..59ed3992c760 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -605,13 +605,13 @@ struct ovs_action_push_mpls {
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
* (but it will not be set in the 802.1Q header that is pushed).
*
- * The @vlan_tpid value is typically %ETH_P_8021Q. The only acceptable TPID
- * values are those that the kernel module also parses as 802.1Q headers, to
- * prevent %OVS_ACTION_ATTR_PUSH_VLAN followed by %OVS_ACTION_ATTR_POP_VLAN
- * from having surprising results.
+ * The @vlan_tpid value is typically %ETH_P_8021Q or %ETH_P_8021AD.
+ * The only acceptable TPID values are those that the kernel module also parses
+ * as 802.1Q or 802.1AD headers, to prevent %OVS_ACTION_ATTR_PUSH_VLAN followed
+ * by %OVS_ACTION_ATTR_POP_VLAN from having surprising results.
*/
struct ovs_action_push_vlan {
- __be16 vlan_tpid; /* 802.1Q TPID. */
+ __be16 vlan_tpid; /* 802.1Q or 802.1ad TPID. */
__be16 vlan_tci; /* 802.1Q TCI (VLAN ID and priority). */
};
@@ -721,9 +721,10 @@ enum ovs_nat_attr {
* is copied from the value to the packet header field, rest of the bits are
* left unchanged. The non-masked value bits must be passed in as zeroes.
* Masking is not supported for the %OVS_KEY_ATTR_TUNNEL attribute.
- * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q header onto the
- * packet.
- * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q header off the packet.
+ * @OVS_ACTION_ATTR_PUSH_VLAN: Push a new outermost 802.1Q or 802.1ad header
+ * onto the packet.
+ * @OVS_ACTION_ATTR_POP_VLAN: Pop the outermost 802.1Q or 802.1ad header
+ * from the packet.
 * @OVS_ACTION_ATTR_SAMPLE: Probabilistically executes actions, as specified in
* the nested %OVS_SAMPLE_ATTR_* attributes.
* @OVS_ACTION_ATTR_PUSH_MPLS: Push a new MPLS label stack entry onto the
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 51b5b247fb5a..8fd715f806a2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -396,6 +396,7 @@ enum {
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
+ TCA_BPF_FLAGS_GEN,
__TCA_BPF_MAX,
};
@@ -428,9 +429,24 @@ enum {
TCA_FLOWER_KEY_UDP_DST, /* be16 */
TCA_FLOWER_FLAGS,
- TCA_FLOWER_KEY_VLAN_ID,
- TCA_FLOWER_KEY_VLAN_PRIO,
- TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ TCA_FLOWER_KEY_VLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+ TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 2382eed50278..df7451d35131 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -792,6 +792,8 @@ enum {
TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
__TCA_FQ_MAX
};
@@ -809,7 +811,7 @@ struct tc_fq_qd_stats {
__u32 flows;
__u32 inactive_flows;
__u32 throttled_flows;
- __u32 pad;
+ __u32 unthrottle_latency_ns;
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 4ece02a77b9a..cd18360eca24 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -32,8 +32,9 @@ enum {
#define IFE_META_HASHID 2
#define IFE_META_PRIO 3
#define IFE_META_QMAP 4
+#define IFE_META_TCINDEX 5
/*Can be overridden at runtime by module option*/
-#define __IFE_META_MAX 5
+#define __IFE_META_MAX 6
#define IFE_META_MAX (__IFE_META_MAX - 1)
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbmod.h b/include/uapi/linux/tc_act/tc_skbmod.h
new file mode 100644
index 000000000000..10fc07da6c69
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_skbmod.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, Jamal Hadi Salim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#ifndef __LINUX_TC_SKBMOD_H
+#define __LINUX_TC_SKBMOD_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_SKBMOD 15
+
+#define SKBMOD_F_DMAC 0x1
+#define SKBMOD_F_SMAC 0x2
+#define SKBMOD_F_ETYPE 0x4
+#define SKBMOD_F_SWAPMAC 0x8
+
+struct tc_skbmod {
+ tc_gen;
+ __u64 flags;
+};
+
+enum {
+ TCA_SKBMOD_UNSPEC,
+ TCA_SKBMOD_TM,
+ TCA_SKBMOD_PARMS,
+ TCA_SKBMOD_DMAC,
+ TCA_SKBMOD_SMAC,
+ TCA_SKBMOD_ETYPE,
+ TCA_SKBMOD_PAD,
+ __TCA_SKBMOD_MAX
+};
+#define TCA_SKBMOD_MAX (__TCA_SKBMOD_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
new file mode 100644
index 000000000000..890106ff16e6
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_TUNNEL_KEY_H
+#define __LINUX_TC_TUNNEL_KEY_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_TUNNEL_KEY 17
+
+#define TCA_TUNNEL_KEY_ACT_SET 1
+#define TCA_TUNNEL_KEY_ACT_RELEASE 2
+
+struct tc_tunnel_key {
+ tc_gen;
+ int t_action;
+};
+
+enum {
+ TCA_TUNNEL_KEY_UNSPEC,
+ TCA_TUNNEL_KEY_TM,
+ TCA_TUNNEL_KEY_PARMS,
+ TCA_TUNNEL_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_TUNNEL_KEY_ENC_KEY_ID, /* be64 */
+ TCA_TUNNEL_KEY_PAD,
+ __TCA_TUNNEL_KEY_MAX,
+};
+
+#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index be72b6e3843b..bddb272b843f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
struct tc_vlan {
tc_gen;
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 482898fc433a..73ac0db487f8 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -167,6 +167,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ __u8 tcpi_delivery_rate_app_limited:1;
__u32 tcpi_rto;
__u32 tcpi_ato;
@@ -211,6 +212,8 @@ struct tcp_info {
__u32 tcpi_min_rtt;
__u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
__u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
+
+ __u64 tcpi_delivery_rate;
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 143338978b48..1fc62b239f1b 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,7 +298,7 @@ enum xfrm_attr_type_t {
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
XFRMA_TFCPAD, /* __u32 */
- XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index d6709eb70970..0d302a87f21b 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/kthread.h>
@@ -544,10 +545,11 @@ int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark *mark)
unsigned long ino;
dev_t dev;
- rcu_read_lock();
- exe_file = rcu_dereference(tsk->mm->exe_file);
+ exe_file = get_task_exe_file(tsk);
+ if (!exe_file)
+ return 0;
ino = exe_file->f_inode->i_ino;
dev = exe_file->f_inode->i_sb->s_dev;
- rcu_read_unlock();
+ fput(exe_file);
return audit_mark_compare(mark, ino, dev);
}
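
The old RCU-only dereference could race with exec swapping and dropping the file, leaving f_inode pointing at freed memory; the fix pins the file with a real reference instead. The general pattern the new helper enables (sketch; do_something() is a placeholder, not a kernel function):

    struct file *exe_file = get_task_exe_file(tsk);

    if (exe_file) {
            /* safe without an RCU section: we hold our own reference */
            do_something(exe_file->f_inode);
            fput(exe_file);
    }
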
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 03fd23d4d587..7b7baaed9ed4 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1018,7 +1018,7 @@ void bpf_user_rnd_init_once(void)
prandom_init_once(&bpf_user_rnd_state);
}
-u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_user_rnd_u32)
{
/* Should someone ever have the rather unwise idea to use some
* of the registers passed into this function, then note that
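
The BPF_CALL_0..5 macros (from linux/filter.h) keep the exported symbol on the five-u64 eBPF calling convention while letting the body use typed parameters. Roughly how a two-argument helper expands — a simplified sketch; the real macro builds this from the __BPF_DECL_ARGS/__BPF_CAST helpers:

    /* BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
     * expands to approximately:
     */
    static __always_inline u64 ____bpf_map_delete_elem(struct bpf_map *map,
                                                       void *key);
    u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
    {
            /* cast the raw eBPF registers back to the typed arguments */
            return ____bpf_map_delete_elem((struct bpf_map *)(unsigned long)r1,
                                           (void *)(unsigned long)r2);
    }
    static __always_inline u64 ____bpf_map_delete_elem(struct bpf_map *map,
                                                       void *key)
    {
            /* the braces following the macro invocation become this body */
            return map->ops->map_delete_elem(map, key);
    }
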
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1ea3afba1a4f..39918402e6e9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -16,6 +16,7 @@
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
+#include <linux/filter.h>
/* If kernel subsystem is allowing eBPF programs to call this function,
* inside its own verifier_ops->get_func_proto() callback it should return
@@ -26,48 +27,32 @@
* if program is allowed to access maps, so check rcu_read_lock_held in
* all three functions.
*/
-static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
- /* verifier checked that R1 contains a valid pointer to bpf_map
- * and R2 points to a program stack and map->key_size bytes were
- * initialized
- */
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
- void *key = (void *) (unsigned long) r2;
- void *value;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
- value = map->ops->map_lookup_elem(map, key);
-
- /* lookup() returns either pointer to element value or NULL
- * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type
- */
- return (unsigned long) value;
+ return (unsigned long) map->ops->map_lookup_elem(map, key);
}
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
.func = bpf_map_lookup_elem,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
};
-static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
+ void *, value, u64, flags)
{
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
- void *key = (void *) (unsigned long) r2;
- void *value = (void *) (unsigned long) r3;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
- return map->ops->map_update_elem(map, key, value, r4);
+ return map->ops->map_update_elem(map, key, value, flags);
}
const struct bpf_func_proto bpf_map_update_elem_proto = {
.func = bpf_map_update_elem,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
@@ -75,19 +60,16 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
.arg4_type = ARG_ANYTHING,
};
-static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
- void *key = (void *) (unsigned long) r2;
-
WARN_ON_ONCE(!rcu_read_lock_held());
-
return map->ops->map_delete_elem(map, key);
}
const struct bpf_func_proto bpf_map_delete_elem_proto = {
.func = bpf_map_delete_elem,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_MAP_KEY,
@@ -99,7 +81,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_smp_processor_id)
{
return smp_processor_id();
}
@@ -110,7 +92,7 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_ktime_get_ns)
{
/* NMI safe access to clock monotonic */
return ktime_get_mono_fast_ns();
@@ -122,11 +104,11 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_pid_tgid)
{
struct task_struct *task = current;
- if (!task)
+ if (unlikely(!task))
return -EINVAL;
return (u64) task->tgid << 32 | task->pid;
@@ -138,18 +120,18 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_uid_gid)
{
struct task_struct *task = current;
kuid_t uid;
kgid_t gid;
- if (!task)
+ if (unlikely(!task))
return -EINVAL;
current_uid_gid(&uid, &gid);
return (u64) from_kgid(&init_user_ns, gid) << 32 |
- from_kuid(&init_user_ns, uid);
+ from_kuid(&init_user_ns, uid);
}
const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
@@ -158,10 +140,9 @@ const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
struct task_struct *task = current;
- char *buf = (char *) (long) r1;
if (unlikely(!task))
goto err_clear;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index bf4495fcd25d..732ae16d12b7 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -116,10 +116,9 @@ free_smap:
return ERR_PTR(err);
}
-u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
+ u64, flags)
{
- struct pt_regs *regs = (struct pt_regs *) (long) r1;
- struct bpf_map *map = (struct bpf_map *) (long) r2;
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct perf_callchain_entry *trace;
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 48c2705db22c..8c3f794c7028 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
@@ -126,76 +127,16 @@
* are set to NOT_INIT to indicate that they are no longer readable.
*/
-struct reg_state {
- enum bpf_reg_type type;
- union {
- /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
- s64 imm;
-
- /* valid when type == PTR_TO_PACKET* */
- struct {
- u32 id;
- u16 off;
- u16 range;
- };
-
- /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
- * PTR_TO_MAP_VALUE_OR_NULL
- */
- struct bpf_map *map_ptr;
- };
-};
-
-enum bpf_stack_slot_type {
- STACK_INVALID, /* nothing was stored in this stack slot */
- STACK_SPILL, /* register spilled into stack */
- STACK_MISC /* BPF program wrote some data into this slot */
-};
-
-#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
-
-/* state of the program:
- * type of all registers and stack info
- */
-struct verifier_state {
- struct reg_state regs[MAX_BPF_REG];
- u8 stack_slot_type[MAX_BPF_STACK];
- struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
-};
-
-/* linked list of verifier states used to prune search */
-struct verifier_state_list {
- struct verifier_state state;
- struct verifier_state_list *next;
-};
-
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
-struct verifier_stack_elem {
+struct bpf_verifier_stack_elem {
/* verifier state is 'st'
* before processing instruction 'insn_idx'
* and after processing instruction 'prev_insn_idx'
*/
- struct verifier_state st;
+ struct bpf_verifier_state st;
int insn_idx;
int prev_insn_idx;
- struct verifier_stack_elem *next;
-};
-
-#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
-
-/* single container for all structs
- * one verifier_env per bpf_check() call
- */
-struct verifier_env {
- struct bpf_prog *prog; /* eBPF program being verified */
- struct verifier_stack_elem *head; /* stack of verifier states to be processed */
- int stack_size; /* number of states to be processed */
- struct verifier_state cur_state; /* current verifier state */
- struct verifier_state_list **explored_states; /* search pruning optimization */
- struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
- u32 used_map_cnt; /* number of used maps */
- u32 id_gen; /* used to generate unique reg IDs */
- bool allow_ptr_leaks;
+ struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS 65536
@@ -204,6 +145,7 @@ struct verifier_env {
struct bpf_call_arg_meta {
struct bpf_map *map_ptr;
bool raw_mode;
+ bool pkt_access;
int regno;
int access_size;
};
@@ -247,9 +189,9 @@ static const char * const reg_type_str[] = {
[PTR_TO_PACKET_END] = "pkt_end",
};
-static void print_verifier_state(struct verifier_state *state)
+static void print_verifier_state(struct bpf_verifier_state *state)
{
- struct reg_state *reg;
+ struct bpf_reg_state *reg;
enum bpf_reg_type t;
int i;
@@ -425,9 +367,9 @@ static void print_bpf_insn(struct bpf_insn *insn)
}
}
-static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
+static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx)
{
- struct verifier_stack_elem *elem;
+ struct bpf_verifier_stack_elem *elem;
int insn_idx;
if (env->head == NULL)
@@ -444,12 +386,12 @@ static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
return insn_idx;
}
-static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
- int prev_insn_idx)
+static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx)
{
- struct verifier_stack_elem *elem;
+ struct bpf_verifier_stack_elem *elem;
- elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
+ elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
if (!elem)
goto err;
@@ -475,7 +417,7 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};
-static void init_reg_state(struct reg_state *regs)
+static void init_reg_state(struct bpf_reg_state *regs)
{
int i;
@@ -491,7 +433,7 @@ static void init_reg_state(struct reg_state *regs)
regs[BPF_REG_1].type = PTR_TO_CTX;
}
-static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
+static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
{
BUG_ON(regno >= MAX_BPF_REG);
regs[regno].type = UNKNOWN_VALUE;
@@ -504,7 +446,7 @@ enum reg_arg_type {
DST_OP_NO_MARK /* same as above, check only, don't mark */
};
-static int check_reg_arg(struct reg_state *regs, u32 regno,
+static int check_reg_arg(struct bpf_reg_state *regs, u32 regno,
enum reg_arg_type t)
{
if (regno >= MAX_BPF_REG) {
@@ -564,8 +506,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
-static int check_stack_write(struct verifier_state *state, int off, int size,
- int value_regno)
+static int check_stack_write(struct bpf_verifier_state *state, int off,
+ int size, int value_regno)
{
int i;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -590,7 +532,7 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
} else {
/* regular write of data into stack */
state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
- (struct reg_state) {};
+ (struct bpf_reg_state) {};
for (i = 0; i < size; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
@@ -598,7 +540,7 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
return 0;
}
-static int check_stack_read(struct verifier_state *state, int off, int size,
+static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
int value_regno)
{
u8 *slot_type;
@@ -639,7 +581,7 @@ static int check_stack_read(struct verifier_state *state, int off, int size,
}
/* check read/write into map element returned by bpf_map_lookup_elem() */
-static int check_map_access(struct verifier_env *env, u32 regno, int off,
+static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
@@ -654,24 +596,31 @@ static int check_map_access(struct verifier_env *env, u32 regno, int off,
#define MAX_PACKET_OFF 0xffff
-static bool may_write_pkt_data(enum bpf_prog_type type)
+static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
+ const struct bpf_call_arg_meta *meta)
{
- switch (type) {
+ switch (env->prog->type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
case BPF_PROG_TYPE_XDP:
+ if (meta)
+ return meta->pkt_access;
+
+ env->seen_direct_write = true;
return true;
default:
return false;
}
}
-static int check_packet_access(struct verifier_env *env, u32 regno, int off,
+static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
int size)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *reg = &regs[regno];
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *reg = &regs[regno];
off += reg->off;
- if (off < 0 || off + size > reg->range) {
+ if (off < 0 || size <= 0 || off + size > reg->range) {
verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
off, size, regno, reg->id, reg->off, reg->range);
return -EACCES;
@@ -680,9 +629,13 @@ static int check_packet_access(struct verifier_env *env, u32 regno, int off,
}
/* check access to 'struct bpf_context' fields */
-static int check_ctx_access(struct verifier_env *env, int off, int size,
+static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
enum bpf_access_type t, enum bpf_reg_type *reg_type)
{
+ /* for the analyzer, ctx accesses are already validated and converted */
+ if (env->analyzer_ops)
+ return 0;
+
if (env->prog->aux->ops->is_valid_access &&
env->prog->aux->ops->is_valid_access(off, size, t, reg_type)) {
/* remember the offset of last byte accessed in ctx */
@@ -695,7 +648,7 @@ static int check_ctx_access(struct verifier_env *env, int off, int size,
return -EACCES;
}
-static bool is_pointer_value(struct verifier_env *env, int regno)
+static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
if (env->allow_ptr_leaks)
return false;
@@ -709,12 +662,13 @@ static bool is_pointer_value(struct verifier_env *env, int regno)
}
}
-static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
- int off, int size)
+static int check_ptr_alignment(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, int off, int size)
{
if (reg->type != PTR_TO_PACKET) {
if (off % size != 0) {
- verbose("misaligned access off %d size %d\n", off, size);
+ verbose("misaligned access off %d size %d\n",
+ off, size);
return -EACCES;
} else {
return 0;
@@ -755,12 +709,12 @@ static int check_ptr_alignment(struct verifier_env *env, struct reg_state *reg,
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
-static int check_mem_access(struct verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *reg = &state->regs[regno];
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_reg_state *reg = &state->regs[regno];
int size, err = 0;
if (reg->type == PTR_TO_STACK)
@@ -817,7 +771,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
err = check_stack_read(state, off, size, value_regno);
}
} else if (state->regs[regno].type == PTR_TO_PACKET) {
- if (t == BPF_WRITE && !may_write_pkt_data(env->prog->type)) {
+ if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL)) {
verbose("cannot write into packet\n");
return -EACCES;
}
@@ -846,9 +800,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
return err;
}
-static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *regs = env->cur_state.regs;
int err;
if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
@@ -882,12 +836,12 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
* bytes from that pointer, make sure that it's within stack boundary
* and all elements of stack are initialized
*/
-static int check_stack_boundary(struct verifier_env *env, int regno,
+static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
int access_size, bool zero_size_allowed,
struct bpf_call_arg_meta *meta)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *regs = state->regs;
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_reg_state *regs = state->regs;
int off, i;
if (regs[regno].type != PTR_TO_STACK) {
@@ -926,11 +880,11 @@ static int check_stack_boundary(struct verifier_env *env, int regno,
return 0;
}
-static int check_func_arg(struct verifier_env *env, u32 regno,
+static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
enum bpf_arg_type arg_type,
struct bpf_call_arg_meta *meta)
{
- struct reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
+ struct bpf_reg_state *regs = env->cur_state.regs, *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
int err = 0;
@@ -950,8 +904,8 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
return 0;
}
- if (type == PTR_TO_PACKET && !may_write_pkt_data(env->prog->type)) {
- verbose("helper access to the packet is not allowed for clsact\n");
+ if (type == PTR_TO_PACKET && !may_access_direct_pkt_data(env, meta)) {
+ verbose("helper access to the packet is not allowed\n");
return -EACCES;
}
@@ -1135,10 +1089,10 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
return count > 1 ? -EINVAL : 0;
}
-static void clear_all_pkt_pointers(struct verifier_env *env)
+static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *regs = state->regs, *reg;
+ struct bpf_verifier_state *state = &env->cur_state;
+ struct bpf_reg_state *regs = state->regs, *reg;
int i;
for (i = 0; i < MAX_BPF_REG; i++)
@@ -1158,12 +1112,12 @@ static void clear_all_pkt_pointers(struct verifier_env *env)
}
}
-static int check_call(struct verifier_env *env, int func_id)
+static int check_call(struct bpf_verifier_env *env, int func_id)
{
- struct verifier_state *state = &env->cur_state;
+ struct bpf_verifier_state *state = &env->cur_state;
const struct bpf_func_proto *fn = NULL;
- struct reg_state *regs = state->regs;
- struct reg_state *reg;
+ struct bpf_reg_state *regs = state->regs;
+ struct bpf_reg_state *reg;
struct bpf_call_arg_meta meta;
bool changes_data;
int i, err;
@@ -1191,6 +1145,7 @@ static int check_call(struct verifier_env *env, int func_id)
changes_data = bpf_helper_changes_skb_data(fn->func);
memset(&meta, 0, sizeof(meta));
+ meta.pkt_access = fn->pkt_access;
/* We only support one arg being in raw mode at the moment, which
* is sufficient for the helper functions we have right now.
@@ -1265,12 +1220,13 @@ static int check_call(struct verifier_env *env, int func_id)
return 0;
}
-static int check_packet_ptr_add(struct verifier_env *env, struct bpf_insn *insn)
+static int check_packet_ptr_add(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *dst_reg = &regs[insn->dst_reg];
- struct reg_state *src_reg = &regs[insn->src_reg];
- struct reg_state tmp_reg;
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state tmp_reg;
s32 imm;
if (BPF_SRC(insn->code) == BPF_K) {
@@ -1338,10 +1294,10 @@ add_imm:
return 0;
}
-static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
+static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *dst_reg = &regs[insn->dst_reg];
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
u8 opcode = BPF_OP(insn->code);
s64 imm_log2;
@@ -1351,7 +1307,7 @@ static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
*/
if (BPF_SRC(insn->code) == BPF_X) {
- struct reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 &&
dst_reg->imm && opcode == BPF_ADD) {
@@ -1440,11 +1396,12 @@ static int evaluate_reg_alu(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
+static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
- struct reg_state *dst_reg = &regs[insn->dst_reg];
- struct reg_state *src_reg = &regs[insn->src_reg];
+ struct bpf_reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
+ struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
/* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
@@ -1461,9 +1418,9 @@ static int evaluate_reg_imm_alu(struct verifier_env *env, struct bpf_insn *insn)
}
/* check validity of 32-bit and 64-bit arithmetic operations */
-static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
+static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs, *dst_reg;
+ struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -1637,21 +1594,42 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static void find_good_pkt_pointers(struct verifier_env *env,
- struct reg_state *dst_reg)
+static void find_good_pkt_pointers(struct bpf_verifier_state *state,
+ struct bpf_reg_state *dst_reg)
{
- struct verifier_state *state = &env->cur_state;
- struct reg_state *regs = state->regs, *reg;
+ struct bpf_reg_state *regs = state->regs, *reg;
int i;
- /* r2 = r3;
- * r2 += 8
- * if (r2 > pkt_end) goto somewhere
- * r2 == dst_reg, pkt_end == src_reg,
- * r2=pkt(id=n,off=8,r=0)
- * r3=pkt(id=n,off=0,r=0)
- * find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
- * so that range of bytes [r3, r3 + 8) is safe to access
+
+ * LLVM can generate two kinds of checks:
+ *
+ * Type 1:
+ *
+ * r2 = r3;
+ * r2 += 8;
+ * if (r2 > pkt_end) goto <handle exception>
+ * <access okay>
+ *
+ * Where:
+ * r2 == dst_reg, pkt_end == src_reg
+ * r2=pkt(id=n,off=8,r=0)
+ * r3=pkt(id=n,off=0,r=0)
+ *
+ * Type 2:
+ *
+ * r2 = r3;
+ * r2 += 8;
+ * if (pkt_end >= r2) goto <access okay>
+ * <handle exception>
+ *
+ * Where:
+ * pkt_end == dst_reg, r2 == src_reg
+ * r2=pkt(id=n,off=8,r=0)
+ * r3=pkt(id=n,off=0,r=0)
+ *
+ * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
+ * so that range of bytes [r3, r3 + 8) is safe to access.
*/
+
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
regs[i].range = dst_reg->off;
@@ -1665,11 +1643,11 @@ static void find_good_pkt_pointers(struct verifier_env *env,
}
}
-static int check_cond_jmp_op(struct verifier_env *env,
+static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
- struct reg_state *regs = env->cur_state.regs, *dst_reg;
- struct verifier_state *other_branch;
+ struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state;
+ struct bpf_reg_state *regs = this_branch->regs, *dst_reg;
u8 opcode = BPF_OP(insn->code);
int err;
@@ -1731,7 +1709,7 @@ static int check_cond_jmp_op(struct verifier_env *env,
if (!other_branch)
return -EFAULT;
- /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
+ /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
if (BPF_SRC(insn->code) == BPF_K &&
insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
@@ -1750,13 +1728,17 @@ static int check_cond_jmp_op(struct verifier_env *env,
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
- find_good_pkt_pointers(env, dst_reg);
+ find_good_pkt_pointers(this_branch, dst_reg);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
+ dst_reg->type == PTR_TO_PACKET_END &&
+ regs[insn->src_reg].type == PTR_TO_PACKET) {
+ find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
}
if (log_level)
- print_verifier_state(&env->cur_state);
+ print_verifier_state(this_branch);
return 0;
}
@@ -1769,9 +1751,9 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
}
/* verify BPF_LD_IMM64 instruction */
-static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
+static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *regs = env->cur_state.regs;
int err;
if (BPF_SIZE(insn->code) != BPF_DW) {
@@ -1787,9 +1769,19 @@ static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
- if (insn->src_reg == 0)
- /* generic move 64-bit immediate into a register */
+ if (insn->src_reg == 0) {
+ /* generic move 64-bit immediate into a register,
+ * only analyzer needs to collect the ld_imm value.
+ */
+ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
+
+ if (!env->analyzer_ops)
+ return 0;
+
+ regs[insn->dst_reg].type = CONST_IMM;
+ regs[insn->dst_reg].imm = imm;
return 0;
+ }
/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);
@@ -1826,11 +1818,11 @@ static bool may_access_skb(enum bpf_prog_type type)
* Output:
* R0 - 8/16/32-bit skb data converted to cpu endianness
*/
-static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
+static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
- struct reg_state *regs = env->cur_state.regs;
+ struct bpf_reg_state *regs = env->cur_state.regs;
u8 mode = BPF_MODE(insn->code);
- struct reg_state *reg;
+ struct bpf_reg_state *reg;
int i, err;
if (!may_access_skb(env->prog->type)) {
@@ -1916,7 +1908,7 @@ enum {
BRANCH = 2,
};
-#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)
+#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)
static int *insn_stack; /* stack of insns to process */
static int cur_stack; /* current stack index */
@@ -1927,7 +1919,7 @@ static int *insn_state;
* w - next instruction
* e - edge
*/
-static int push_insn(int t, int w, int e, struct verifier_env *env)
+static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
return 0;
@@ -1968,7 +1960,7 @@ static int push_insn(int t, int w, int e, struct verifier_env *env)
/* non-recursive depth-first-search to detect loops in BPF program
* loop == back-edge in directed graph
*/
-static int check_cfg(struct verifier_env *env)
+static int check_cfg(struct bpf_verifier_env *env)
{
struct bpf_insn *insns = env->prog->insnsi;
int insn_cnt = env->prog->len;
@@ -2077,7 +2069,8 @@ err_free:
/* the following conditions reduce the number of explored insns
* from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet
*/
-static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
+static bool compare_ptrs_to_packet(struct bpf_reg_state *old,
+ struct bpf_reg_state *cur)
{
if (old->id != cur->id)
return false;
@@ -2152,9 +2145,10 @@ static bool compare_ptrs_to_packet(struct reg_state *old, struct reg_state *cur)
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
-static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
+static bool states_equal(struct bpf_verifier_state *old,
+ struct bpf_verifier_state *cur)
{
- struct reg_state *rold, *rcur;
+ struct bpf_reg_state *rold, *rcur;
int i;
for (i = 0; i < MAX_BPF_REG; i++) {
@@ -2194,9 +2188,9 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
* the same, check that stored pointers types
* are the same as well.
* Ex: explored safe path could have stored
- * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
+ * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8}
* but current path has stored:
- * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
+ * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16}
* such verifier states are not equivalent.
* return false to continue verification of this path
*/
@@ -2207,10 +2201,10 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
return true;
}
-static int is_state_visited(struct verifier_env *env, int insn_idx)
+static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
- struct verifier_state_list *new_sl;
- struct verifier_state_list *sl;
+ struct bpf_verifier_state_list *new_sl;
+ struct bpf_verifier_state_list *sl;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -2234,7 +2228,7 @@ static int is_state_visited(struct verifier_env *env, int insn_idx)
* it will be rejected. Since there are no loops, we won't be
* seeing this 'insn_idx' instruction again on the way to bpf_exit
*/
- new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
+ new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER);
if (!new_sl)
return -ENOMEM;
@@ -2245,11 +2239,20 @@ static int is_state_visited(struct verifier_env *env, int insn_idx)
return 0;
}
-static int do_check(struct verifier_env *env)
+static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx)
{
- struct verifier_state *state = &env->cur_state;
+ if (!env->analyzer_ops || !env->analyzer_ops->insn_hook)
+ return 0;
+
+ return env->analyzer_ops->insn_hook(env, insn_idx, prev_insn_idx);
+}
+
+static int do_check(struct bpf_verifier_env *env)
+{
+ struct bpf_verifier_state *state = &env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
- struct reg_state *regs = state->regs;
+ struct bpf_reg_state *regs = state->regs;
int insn_cnt = env->prog->len;
int insn_idx, prev_insn_idx = 0;
int insn_processed = 0;
@@ -2303,13 +2306,17 @@ static int do_check(struct verifier_env *env)
print_bpf_insn(insn);
}
+ err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
+ if (err)
+ return err;
+
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
} else if (class == BPF_LDX) {
- enum bpf_reg_type src_reg_type;
+ enum bpf_reg_type *prev_src_type, src_reg_type;
/* check for reserved fields is already done */
@@ -2339,16 +2346,18 @@ static int do_check(struct verifier_env *env)
continue;
}
- if (insn->imm == 0) {
+ prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
+
+ if (*prev_src_type == NOT_INIT) {
/* saw a valid insn
* dst_reg = *(u32 *)(src_reg + off)
- * use reserved 'imm' field to mark this insn
+ * save type to validate intersecting paths
*/
- insn->imm = src_reg_type;
+ *prev_src_type = src_reg_type;
- } else if (src_reg_type != insn->imm &&
+ } else if (src_reg_type != *prev_src_type &&
(src_reg_type == PTR_TO_CTX ||
- insn->imm == PTR_TO_CTX)) {
+ *prev_src_type == PTR_TO_CTX)) {
/* Abuser program is trying to use the same insn
* dst_reg = *(u32*) (src_reg + off)
* with different pointer types:
@@ -2361,7 +2370,7 @@ static int do_check(struct verifier_env *env)
}
} else if (class == BPF_STX) {
- enum bpf_reg_type dst_reg_type;
+ enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
err = check_xadd(env, insn);
@@ -2389,11 +2398,13 @@ static int do_check(struct verifier_env *env)
if (err)
return err;
- if (insn->imm == 0) {
- insn->imm = dst_reg_type;
- } else if (dst_reg_type != insn->imm &&
+ prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
+
+ if (*prev_dst_type == NOT_INIT) {
+ *prev_dst_type = dst_reg_type;
+ } else if (dst_reg_type != *prev_dst_type &&
(dst_reg_type == PTR_TO_CTX ||
- insn->imm == PTR_TO_CTX)) {
+ *prev_dst_type == PTR_TO_CTX)) {
verbose("same insn cannot be used with different pointers\n");
return -EINVAL;
}
@@ -2528,7 +2539,7 @@ static int check_map_prog_compatibility(struct bpf_map *map,
/* look for pseudo eBPF instructions that access map FDs and
* replace them with actual map pointers
*/
-static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
@@ -2625,7 +2636,7 @@ next_insn:
}
/* drop refcnt of maps used by the rejected program */
-static void release_maps(struct verifier_env *env)
+static void release_maps(struct bpf_verifier_env *env)
{
int i;
@@ -2634,7 +2645,7 @@ static void release_maps(struct verifier_env *env)
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
-static void convert_pseudo_ld_imm64(struct verifier_env *env)
+static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
@@ -2648,21 +2659,37 @@ static void convert_pseudo_ld_imm64(struct verifier_env *env)
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
-static int convert_ctx_accesses(struct verifier_env *env)
+static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
- struct bpf_insn *insn = env->prog->insnsi;
- int insn_cnt = env->prog->len;
- struct bpf_insn insn_buf[16];
+ const struct bpf_verifier_ops *ops = env->prog->aux->ops;
+ const int insn_cnt = env->prog->len;
+ struct bpf_insn insn_buf[16], *insn;
struct bpf_prog *new_prog;
enum bpf_access_type type;
- int i;
+ int i, cnt, delta = 0;
- if (!env->prog->aux->ops->convert_ctx_access)
+ if (ops->gen_prologue) {
+ cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
+ env->prog);
+ if (cnt >= ARRAY_SIZE(insn_buf)) {
+ verbose("bpf verifier is misconfigured\n");
+ return -EINVAL;
+ } else if (cnt) {
+ new_prog = bpf_patch_insn_single(env->prog, 0,
+ insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+ env->prog = new_prog;
+ delta += cnt - 1;
+ }
+ }
+
+ if (!ops->convert_ctx_access)
return 0;
- for (i = 0; i < insn_cnt; i++, insn++) {
- u32 insn_delta, cnt;
+ insn = env->prog->insnsi + delta;
+ for (i = 0; i < insn_cnt; i++, insn++) {
if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
@@ -2672,40 +2699,34 @@ static int convert_ctx_accesses(struct verifier_env *env)
else
continue;
- if (insn->imm != PTR_TO_CTX) {
- /* clear internal mark */
- insn->imm = 0;
+ if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
continue;
- }
- cnt = env->prog->aux->ops->
- convert_ctx_access(type, insn->dst_reg, insn->src_reg,
- insn->off, insn_buf, env->prog);
+ cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
+ insn->off, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
}
- new_prog = bpf_patch_insn_single(env->prog, i, insn_buf, cnt);
+ new_prog = bpf_patch_insn_single(env->prog, i + delta, insn_buf,
+ cnt);
if (!new_prog)
return -ENOMEM;
- insn_delta = cnt - 1;
+ delta += cnt - 1;
/* keep walking new program and skip insns we just inserted */
env->prog = new_prog;
- insn = new_prog->insnsi + i + insn_delta;
-
- insn_cnt += insn_delta;
- i += insn_delta;
+ insn = new_prog->insnsi + i + delta;
}
return 0;
}
-static void free_states(struct verifier_env *env)
+static void free_states(struct bpf_verifier_env *env)
{
- struct verifier_state_list *sl, *sln;
+ struct bpf_verifier_state_list *sl, *sln;
int i;
if (!env->explored_states)
@@ -2728,19 +2749,24 @@ static void free_states(struct verifier_env *env)
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
char __user *log_ubuf = NULL;
- struct verifier_env *env;
+ struct bpf_verifier_env *env;
int ret = -EINVAL;
if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
return -E2BIG;
- /* 'struct verifier_env' can be global, but since it's not small,
+ /* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
- env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
+ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
if (!env)
return -ENOMEM;
+ env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
+ (*prog)->len);
+ ret = -ENOMEM;
+ if (!env->insn_aux_data)
+ goto err_free_env;
env->prog = *prog;
/* grab the mutex to protect few globals used by verifier */
@@ -2759,12 +2785,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
/* log_* values have to be sane */
if (log_size < 128 || log_size > UINT_MAX >> 8 ||
log_level == 0 || log_ubuf == NULL)
- goto free_env;
+ goto err_unlock;
ret = -ENOMEM;
log_buf = vmalloc(log_size);
if (!log_buf)
- goto free_env;
+ goto err_unlock;
} else {
log_level = 0;
}
@@ -2774,7 +2800,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
goto skip_full_check;
env->explored_states = kcalloc(env->prog->len,
- sizeof(struct verifier_state_list *),
+ sizeof(struct bpf_verifier_state_list *),
GFP_USER);
ret = -ENOMEM;
if (!env->explored_states)
@@ -2833,14 +2859,67 @@ skip_full_check:
free_log_buf:
if (log_level)
vfree(log_buf);
-free_env:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
* them now. Otherwise free_bpf_prog_info() will release them.
*/
release_maps(env);
*prog = env->prog;
+err_unlock:
+ mutex_unlock(&bpf_verifier_lock);
+ vfree(env->insn_aux_data);
+err_free_env:
kfree(env);
+ return ret;
+}
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv)
+{
+ struct bpf_verifier_env *env;
+ int ret;
+
+ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) *
+ prog->len);
+ ret = -ENOMEM;
+ if (!env->insn_aux_data)
+ goto err_free_env;
+ env->prog = prog;
+ env->analyzer_ops = ops;
+ env->analyzer_priv = priv;
+
+ /* grab the mutex to protect few globals used by verifier */
+ mutex_lock(&bpf_verifier_lock);
+
+ log_level = 0;
+
+ env->explored_states = kcalloc(env->prog->len,
+ sizeof(struct bpf_verifier_state_list *),
+ GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!env->explored_states)
+ goto skip_full_check;
+
+ ret = check_cfg(env);
+ if (ret < 0)
+ goto skip_full_check;
+
+ env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+
+ ret = do_check(env);
+
+skip_full_check:
+ while (pop_stack(env, NULL) >= 0);
+ free_states(env);
+
mutex_unlock(&bpf_verifier_lock);
+ vfree(env->insn_aux_data);
+err_free_env:
+ kfree(env);
return ret;
}
+EXPORT_SYMBOL_GPL(bpf_analyzer);
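
bpf_analyzer() lets code outside the core (the intended consumer was driver bytecode offload, e.g. nfp) re-run the verifier walk over an already-loaded program and observe every instruction through insn_hook. A hedged sketch of a caller; the hook body is illustrative only:

    #include <linux/bpf.h>
    #include <linux/bpf_verifier.h>

    static int my_insn_hook(struct bpf_verifier_env *env,
                            int insn_idx, int prev_insn_idx)
    {
            /* inspect env->cur_state.regs[] here; a non-zero return
             * aborts the walk and is propagated to the caller
             */
            return 0;
    }

    static const struct bpf_ext_analyzer_ops my_ops = {
            .insn_hook = my_insn_hook,
    };

    static int analyze(struct bpf_prog *prog, void *priv)
    {
            return bpf_analyzer(prog, &my_ops, priv);
    }
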
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d1c51b7f5221..5e8dab5bf9ad 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -6270,6 +6270,12 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
if (cgroup_sk_alloc_disabled)
return;
+ /* Socket clone path */
+ if (skcd->val) {
+ cgroup_get(sock_cgroup_ptr(skcd));
+ return;
+ }
+
rcu_read_lock();
while (true) {
diff --git a/kernel/configs/tiny.config b/kernel/configs/tiny.config
index c2de56ab0fce..7fa0c4ae6394 100644
--- a/kernel/configs/tiny.config
+++ b/kernel/configs/tiny.config
@@ -1,4 +1,12 @@
+# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_KERNEL_GZIP is not set
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
CONFIG_KERNEL_XZ=y
+# CONFIG_KERNEL_LZO is not set
+# CONFIG_KERNEL_LZ4 is not set
CONFIG_OPTIMIZE_INLINING=y
+# CONFIG_SLAB is not set
+# CONFIG_SLUB is not set
CONFIG_SLOB=y
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c7fd2778ed50..c27e53326bef 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2069,6 +2069,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
mutex_unlock(&cpuset_mutex);
}
+/*
+ * Make sure the new task conforms to the current state of its parent,
+ * which could have been changed by cpuset just after it inherits the
+ * state from the parent and before it sits on the cgroup's task list.
+ */
+void cpuset_fork(struct task_struct *task)
+{
+ if (task_css_is_root(task, cpuset_cgrp_id))
+ return;
+
+ set_cpus_allowed_ptr(task, &current->cpus_allowed);
+ task->mems_allowed = current->mems_allowed;
+}
+
struct cgroup_subsys cpuset_cgrp_subsys = {
.css_alloc = cpuset_css_alloc,
.css_online = cpuset_css_online,
@@ -2079,6 +2093,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.attach = cpuset_attach,
.post_attach = cpuset_post_attach,
.bind = cpuset_bind,
+ .fork = cpuset_fork,
.legacy_cftypes = files,
.early_init = true,
};
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a7b8c1c75fa7..9fc3be0c7d01 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2496,11 +2496,11 @@ static int __perf_event_stop(void *info)
return 0;
}
-static int perf_event_restart(struct perf_event *event)
+static int perf_event_stop(struct perf_event *event, int restart)
{
struct stop_event_data sd = {
.event = event,
- .restart = 1,
+ .restart = restart,
};
int ret = 0;
@@ -3549,10 +3549,18 @@ static int perf_event_read(struct perf_event *event, bool group)
.group = group,
.ret = 0,
};
- ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
- /* The event must have been read from an online CPU: */
- WARN_ON_ONCE(ret);
- ret = ret ? : data.ret;
+ /*
+ * Purposely ignore the smp_call_function_single() return
+ * value.
+ *
+ * If event->oncpu isn't a valid CPU it means the event got
+ * scheduled out and that will have updated the event count.
+ *
+ * Therefore, either way, we'll have an up-to-date event count
+ * after this.
+ */
+ (void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+ ret = data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
@@ -4837,6 +4845,19 @@ static void ring_buffer_attach(struct perf_event *event,
spin_unlock_irqrestore(&rb->event_lock, flags);
}
+ /*
+ * Avoid racing with perf_mmap_close(AUX): stop the event
+ * before swizzling the event::rb pointer; if it's getting
+ * unmapped, its aux_mmap_count will be 0 and it won't
+ * restart. See the comment in __perf_pmu_output_stop().
+ *
+ * Data will inevitably be lost when set_output is done in
+ * mid-air, but then again, whoever does it like this is
+ * not in for the data anyway.
+ */
+ if (has_aux(event))
+ perf_event_stop(event, 0);
+
rcu_assign_pointer(event->rb, rb);
if (old_rb) {
@@ -6112,7 +6133,7 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (restart)
- perf_event_restart(event);
+ perf_event_stop(event, 1);
}
void perf_event_exec(void)
@@ -6156,7 +6177,13 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
/*
* In case of inheritance, it will be the parent that links to the
- * ring-buffer, but it will be the child that's actually using it:
+ * ring-buffer, but it will be the child that's actually using it.
+ *
+ * We are using event::rb to determine if the event should be stopped,
+ * however this may race with ring_buffer_attach() (through set_output),
+ * which will make us skip the event that actually needs to be stopped.
+ * So ring_buffer_attach() has to stop an aux event before re-assigning
+ * its rb pointer.
*/
if (rcu_dereference(parent->rb) == rb)
ro->err = __perf_event_stop(&sd);
@@ -6670,7 +6697,7 @@ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (restart)
- perf_event_restart(event);
+ perf_event_stop(event, 1);
}
/*
@@ -7933,7 +7960,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
mmput(mm);
restart:
- perf_event_restart(event);
+ perf_event_stop(event, 1);
}
/*
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index ae9b90dc9a5a..257fa460b846 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -330,15 +330,22 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
if (!rb)
return NULL;
- if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
+ if (!rb_has_aux(rb))
goto err;
/*
- * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
- * the aux buffer is in perf_mmap_close(), about to get freed.
+ * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
+ * about to get freed, so we leave immediately.
+ *
+ * Checking rb::aux_mmap_count and rb::refcount has to be done in
+ * the same order, see perf_mmap_close. Otherwise we end up freeing
+ * aux pages in this path, which is a bug, because in_atomic().
*/
if (!atomic_read(&rb->aux_mmap_count))
- goto err_put;
+ goto err;
+
+ if (!atomic_inc_not_zero(&rb->aux_refcount))
+ goto err;
/*
* Nesting is not supported for AUX area, make sure nested
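
The reordering enforces a rule: test the teardown indicator (aux_mmap_count) before taking the aux_refcount reference, in the same order perf_mmap_close() uses, so this path can never end up freeing aux pages from atomic context. The generic shape of that pattern, with placeholder names:

    /* Sketch: obj, mmap_count and refcnt are placeholders, not the
     * actual ring-buffer fields.  Test liveness first, then take a
     * reference only if the count has not already hit zero.
     */
    if (!atomic_read(&obj->mmap_count))      /* teardown in progress */
            goto err;
    if (!atomic_inc_not_zero(&obj->refcnt))  /* lost the race to the put */
            goto err;
    /* ... use obj, then drop the reference on the normal put path ... */
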
diff --git a/kernel/exit.c b/kernel/exit.c
index 2f974ae042a6..091a78be3b09 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -848,12 +848,7 @@ void do_exit(long code)
TASKS_RCU(preempt_enable());
exit_notify(tsk, group_dead);
proc_exit_connector(tsk);
-#ifdef CONFIG_NUMA
- task_lock(tsk);
- mpol_put(tsk->mempolicy);
- tsk->mempolicy = NULL;
- task_unlock(tsk);
-#endif
+ mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);
diff --git a/kernel/fork.c b/kernel/fork.c
index 52e725d4a866..beb31725f7e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -799,6 +799,29 @@ struct file *get_mm_exe_file(struct mm_struct *mm)
EXPORT_SYMBOL(get_mm_exe_file);
/**
+ * get_task_exe_file - acquire a reference to the task's executable file
+ *
+ * Returns %NULL if task's mm (if any) has no associated executable file or
+ * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
+ * User must release file via fput().
+ */
+struct file *get_task_exe_file(struct task_struct *task)
+{
+ struct file *exe_file = NULL;
+ struct mm_struct *mm;
+
+ task_lock(task);
+ mm = task->mm;
+ if (mm) {
+ if (!(task->flags & PF_KTHREAD))
+ exe_file = get_mm_exe_file(mm);
+ }
+ task_unlock(task);
+ return exe_file;
+}
+EXPORT_SYMBOL(get_task_exe_file);
+
+/**
* get_task_mm - acquire a reference to the task's mm
*
* Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
@@ -913,14 +936,12 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
deactivate_mm(tsk, mm);
/*
- * If we're exiting normally, clear a user-space tid field if
- * requested. We leave this alone when dying by signal, to leave
- * the value intact in a core dump, and to save the unnecessary
- * trouble, say, a killed vfork parent shouldn't touch this mm.
- * Userland only wants this done for a sys_exit.
+ * Signal userspace if we're not exiting with a core dump
+ * because we want to leave the value intact for debugging
+ * purposes.
*/
if (tsk->clear_child_tid) {
- if (!(tsk->flags & PF_SIGNALED) &&
+ if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
atomic_read(&mm->mm_users) > 1) {
/*
* We don't check the error code - if userspace has
@@ -1404,7 +1425,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
- threadgroup_change_begin(current);
cgroup_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
@@ -1556,6 +1576,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
+ threadgroup_change_begin(current);
/*
* Ensure that the cgroup subsystem policies allow the new process to be
 * forked. It should be noted that the new process's css_set can be changed
@@ -1656,6 +1677,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
bad_fork_cancel_cgroup:
cgroup_cancel_fork(p);
bad_fork_free_pid:
+ threadgroup_change_end(current);
if (pid != &init_struct_pid)
free_pid(pid);
bad_fork_cleanup_thread:
@@ -1688,7 +1710,6 @@ bad_fork_cleanup_policy:
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
- threadgroup_change_end(current);
delayacct_tsk_free(p);
bad_fork_cleanup_count:
atomic_dec(&p->cred->user->processes);
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 503bc2d348e5..037c321c5618 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -887,7 +887,10 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
return 0;
out:
vfree(pi->sechdrs);
+ pi->sechdrs = NULL;
+
vfree(pi->purgatory_buf);
+ pi->purgatory_buf = NULL;
return ret;
}
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 251d16b4cb41..b501e390bb34 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -247,6 +247,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(resource_size(res), SECTION_SIZE);
arch_remove_memory(align_start, align_size);
+ untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
pgmap_radix_release(res);
dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
"%s: failed to free all reserved pages\n", __func__);
@@ -282,6 +283,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
struct percpu_ref *ref, struct vmem_altmap *altmap)
{
resource_size_t key, align_start, align_size, align_end;
+ pgprot_t pgprot = PAGE_KERNEL;
struct dev_pagemap *pgmap;
struct page_map *page_map;
int error, nid, is_ram;
@@ -351,6 +353,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (nid < 0)
nid = numa_mem_id();
+ error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
+ align_size);
+ if (error)
+ goto err_pfn_remap;
+
error = arch_add_memory(nid, align_start, align_size, true);
if (error)
goto err_add_memory;
@@ -371,6 +378,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
return __va(res->start);
err_add_memory:
+ untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
+ err_pfn_remap:
err_radix:
pgmap_radix_release(res);
devres_free(page_map);
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..168ff442ebde 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -482,7 +482,16 @@ void pm_qos_update_request(struct pm_qos_request *req,
return;
}
- cancel_delayed_work_sync(&req->work);
+ /*
+ * This function may be called very early during boot, for example,
+ * from of_clk_init(), where irq needs to stay disabled.
+ * cancel_delayed_work_sync() assumes that irq is enabled on
+ * invocation and re-enables it on return. Avoid calling it until
+ * workqueue is initialized.
+ */
+ if (keventd_up())
+ cancel_delayed_work_sync(&req->work);
+
__pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
diff --git a/kernel/printk/nmi.c b/kernel/printk/nmi.c
index b69eb8a2876f..16bab471c7e2 100644
--- a/kernel/printk/nmi.c
+++ b/kernel/printk/nmi.c
@@ -99,27 +99,33 @@ again:
return add;
}
-/*
- * printk one line from the temporary buffer from @start index until
- * and including the @end index.
- */
-static void print_nmi_seq_line(struct nmi_seq_buf *s, int start, int end)
+static void printk_nmi_flush_line(const char *text, int len)
{
- const char *buf = s->buffer + start;
-
/*
* The buffers are flushed in NMI only on panic. The messages must
* go only into the ring buffer at this stage. Consoles will get
* explicitly called later when a crashdump is not generated.
*/
if (in_nmi())
- printk_deferred("%.*s", (end - start) + 1, buf);
+ printk_deferred("%.*s", len, text);
else
- printk("%.*s", (end - start) + 1, buf);
+ printk("%.*s", len, text);
}
/*
+ * printk one line from the temporary buffer from @start index until
+ * and including the @end index.
+ */
+static void printk_nmi_flush_seq_line(struct nmi_seq_buf *s,
+ int start, int end)
+{
+ const char *buf = s->buffer + start;
+
+ printk_nmi_flush_line(buf, (end - start) + 1);
+}
+
+/*
* Flush data from the associated per_CPU buffer. The function
* can be called either via IRQ work or independently.
*/
@@ -150,9 +156,11 @@ more:
* the buffer an unexpected way. If we printed something then
* @len must only increase.
*/
- if (i && i >= len)
- pr_err("printk_nmi_flush: internal error: i=%d >= len=%zu\n",
- i, len);
+ if (i && i >= len) {
+ const char *msg = "printk_nmi_flush: internal error\n";
+
+ printk_nmi_flush_line(msg, strlen(msg));
+ }
if (!len)
goto out; /* Someone else has already flushed the buffer. */
@@ -166,14 +174,14 @@ more:
/* Print line by line. */
for (; i < size; i++) {
if (s->buffer[i] == '\n') {
- print_nmi_seq_line(s, last_i, i);
+ printk_nmi_flush_seq_line(s, last_i, i);
last_i = i + 1;
}
}
/* Check if there was a partial line. */
if (last_i < size) {
- print_nmi_seq_line(s, last_i, size - 1);
- pr_cont("\n");
+ printk_nmi_flush_seq_line(s, last_i, size - 1);
+ printk_nmi_flush_line("\n", strlen("\n"));
}
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2a906f20fba7..44817c640e99 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2016,6 +2016,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
+ /*
+ * Ensure we load p->on_rq _after_ p->state, otherwise it would
+ * be possible to, falsely, observe p->on_rq == 0 and get stuck
+ * in smp_cond_load_acquire() below.
+ *
+ * sched_ttwu_pending() try_to_wake_up()
+ * [S] p->on_rq = 1; [L] p->state
+ * UNLOCK rq->lock -----.
+ * \
+ * +--- RMB
+ * schedule() /
+ * LOCK rq->lock -----'
+ * UNLOCK rq->lock
+ *
+ * [task p]
+ * [S] p->state = UNINTERRUPTIBLE [L] p->on_rq
+ *
+ * Pairs with the UNLOCK+LOCK on rq->lock from the
+ * last wakeup of our task and the schedule that got our task
+ * current.
+ */
+ smp_rmb();
if (p->on_rq && ttwu_remote(p, wake_flags))
goto stat;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index ef6c6c3f9d8a..0db7c8a2afe2 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -605,12 +605,16 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
ptrace_event(PTRACE_EVENT_SECCOMP, data);
/*
* The delivery of a fatal signal during event
- * notification may silently skip tracer notification.
- * Terminating the task now avoids executing a system
- * call that may not be intended.
+ * notification may silently skip tracer notification,
+ * which could leave us with a potentially unmodified
+ * syscall that the tracer would have liked to have
+ * changed. Since the process is about to die, we just
+ * force the syscall to be skipped and let the signal
+ * kill the process and correctly handle any tracer exit
+ * notifications.
*/
if (fatal_signal_pending(current))
- do_exit(SIGSYS);
+ goto skip;
/* Check if the tracer forced the syscall to be skipped. */
this_syscall = syscall_get_nr(current, task_pt_regs(current));
if (this_syscall < 0)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 204fdc86863d..2ec7c00228f3 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -908,10 +908,11 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
ktime_t now, expires;
int cpu = smp_processor_id();
+ now = tick_nohz_start_idle(ts);
+
if (can_stop_idle_tick(cpu, ts)) {
int was_stopped = ts->tick_stopped;
- now = tick_nohz_start_idle(ts);
ts->idle_calls++;
expires = tick_nohz_stop_sched_tick(ts, now, cpu);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index d3869b03d9fe..5dcb99281259 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -61,11 +61,9 @@ unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
-static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
- void *dst = (void *) (long) r1;
- int ret, size = (int) r2;
- void *unsafe_ptr = (void *) (long) r3;
+ int ret;
ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
@@ -83,12 +81,9 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
.arg3_type = ARG_ANYTHING,
};
-static u64 bpf_probe_write_user(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+ u32, size)
{
- void *unsafe_ptr = (void *) (long) r1;
- void *src = (void *) (long) r2;
- int size = (int) r3;
-
/*
* Ensure we're in user context which is safe for the helper to
* run. This helper has no business in a kthread.
@@ -130,9 +125,9 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
* limited trace_printk()
* only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
*/
-static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
+BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
+ u64, arg2, u64, arg3)
{
- char *fmt = (char *) (long) r1;
bool str_seen = false;
int mod[3] = {};
int fmt_cnt = 0;
@@ -178,16 +173,16 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
switch (fmt_cnt) {
case 1:
- unsafe_addr = r3;
- r3 = (long) buf;
+ unsafe_addr = arg1;
+ arg1 = (long) buf;
break;
case 2:
- unsafe_addr = r4;
- r4 = (long) buf;
+ unsafe_addr = arg2;
+ arg2 = (long) buf;
break;
case 3:
- unsafe_addr = r5;
- r5 = (long) buf;
+ unsafe_addr = arg3;
+ arg3 = (long) buf;
break;
}
buf[0] = 0;
@@ -209,9 +204,9 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
}
return __trace_printk(1/* fake ip will not be printed */, fmt,
- mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
- mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
- mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
+ mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
+ mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
+ mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
@@ -233,9 +228,8 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
return &bpf_trace_printk_proto;
}
-static u64 bpf_perf_event_read(u64 r1, u64 flags, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
struct bpf_array *array = container_of(map, struct bpf_array, map);
unsigned int cpu = smp_processor_id();
u64 index = flags & BPF_F_INDEX_MASK;
@@ -312,11 +306,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
return 0;
}
-static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
+BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
+ u64, flags, void *, data, u64, size)
{
- struct pt_regs *regs = (struct pt_regs *)(long) r1;
- struct bpf_map *map = (struct bpf_map *)(long) r2;
- void *data = (void *)(long) r4;
struct perf_raw_record raw = {
.frag = {
.size = size,
@@ -367,7 +359,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
return __bpf_perf_event_output(regs, map, flags, &raw);
}
-static u64 bpf_get_current_task(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_0(bpf_get_current_task)
{
return (long) current;
}
@@ -378,16 +370,13 @@ static const struct bpf_func_proto bpf_get_current_task_proto = {
.ret_type = RET_INTEGER,
};
-static u64 bpf_current_task_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
- struct bpf_map *map = (struct bpf_map *)(long)r1;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct cgroup *cgrp;
- u32 idx = (u32)r2;
if (unlikely(in_interrupt()))
return -EINVAL;
-
if (unlikely(idx >= array->map.max_entries))
return -E2BIG;
@@ -481,16 +470,17 @@ static struct bpf_prog_type_list kprobe_tl = {
.type = BPF_PROG_TYPE_KPROBE,
};
-static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
+ u64, flags, void *, data, u64, size)
{
+ struct pt_regs *regs = *(struct pt_regs **)tp_buff;
+
/*
* r1 points to perf tracepoint buffer where first 8 bytes are hidden
* from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
- * from there and call the same bpf_perf_event_output() helper
+ * from there and call the same bpf_perf_event_output() helper inline.
*/
- u64 ctx = *(long *)(uintptr_t)r1;
-
- return bpf_perf_event_output(ctx, r2, index, r4, size);
+ return ____bpf_perf_event_output(regs, map, flags, data, size);
}
static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
@@ -504,11 +494,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
.arg5_type = ARG_CONST_STACK_SIZE,
};
-static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
+ u64, flags)
{
- u64 ctx = *(long *)(uintptr_t)r1;
+ struct pt_regs *regs = *(struct pt_regs **)tp_buff;
- return bpf_get_stackid(ctx, r2, r3, r4, r5);
+ /*
+ * Same comment as in bpf_perf_event_output_tp(), only that this time
+ * the other helper's function body cannot be inlined due to being
+ * external, thus we need to call the raw helper function.
+ */
+ return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+ flags, 0, 0);
}
static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
@@ -583,18 +580,18 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
switch (ctx_off) {
case offsetof(struct bpf_perf_event_data, sample_period):
BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct bpf_perf_event_data_kern, data)),
- dst_reg, src_reg,
+
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+ data), dst_reg, src_reg,
offsetof(struct bpf_perf_event_data_kern, data));
*insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
offsetof(struct perf_sample_data, period));
break;
default:
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct bpf_perf_event_data_kern, regs)),
- dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+ regs), dst_reg, src_reg,
offsetof(struct bpf_perf_event_data_kern, regs));
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(long)),
- dst_reg, dst_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
break;
}
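For readers unfamiliar with the BPF_CALL_x macros used throughout this file: they emit the five-u64 calling convention once and hand the helper body typed parameters through an inner ____name() function, which is why bpf_perf_event_output_tp() above can call ____bpf_perf_event_output() directly. A hedged sketch of the two-argument case, assuming a 64-bit arch just as the removed hand-written casts did; the real macro in include/linux/filter.h is more general and is what these hunks actually use:

    #define SKETCH_BPF_CALL_2(name, t1, a1, t2, a2)                     \
            static u64 ____##name(t1 a1, t2 a2);                        \
            static u64 name(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)     \
            {                                                           \
                    return ____##name((t1)(long)r1, (t2)(long)r2);      \
            }                                                           \
            static u64 ____##name(t1 a1, t2 a2)

    /* usage then mirrors the hunks above: */
    SKETCH_BPF_CALL_2(sketch_perf_event_read, struct bpf_map *, map,
                      u64, flags)
    {
            /* typed body goes here; no manual (void *)(long) casts */
            return 0;
    }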
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2307d7c89dac..2e2cca509231 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1686,24 +1686,6 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
-config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
- bool
-
-config DEBUG_STRICT_USER_COPY_CHECKS
- bool "Strict user copy size checks"
- depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
- help
- Enabling this option turns a certain set of sanity checks for user
- copy operations into compile time failures.
-
- The copy_from_user() etc checks are there to help test if there
- are sufficient security checks on the length argument of
- the copy operation, by having gcc prove that the argument is
- within bounds.
-
- If unsure, say N.
-
source kernel/trace/Kconfig
menu "Runtime Testing"
diff --git a/lib/Makefile b/lib/Makefile
index cfa68eb269e4..df747e5eeb7a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,9 +22,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
+ earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
-obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-$(CONFIG_HAS_DMA) += dma-noop.o
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 9e8c7386b3a0..7e3138cfc8c9 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -291,33 +291,13 @@ done:
}
/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
- if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
- char __user *buf = i->iov->iov_base + i->iov_offset;
- bytes = min(bytes, i->iov->iov_len - i->iov_offset);
- return fault_in_pages_readable(buf, bytes);
- }
- return 0;
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
* Fault in one or more iovecs of the given iov_iter, to a maximum length of
* bytes. For each iovec, fault in each page that constitutes the iovec.
*
* Return 0 on success, or non-zero if the memory could not be accessed (i.e.
* because it is an invalid address).
*/
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
size_t skip = i->iov_offset;
const struct iovec *iov;
@@ -334,7 +314,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
}
return 0;
}
-EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
void iov_iter_init(struct iov_iter *i, int direction,
const struct iovec *iov, unsigned long nr_segs,
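With the single-segment variant removed, the multi-segment behaviour now lives under the old iov_iter_fault_in_readable() name, so existing callers transparently start prefaulting every page of every iovec. The usual caller pattern, sketched from the generic write path idiom rather than quoted from this patch:

    /* prefault user pages before taking page locks, so the copy
     * performed under lock cannot sleep on a fault */
    if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
            status = -EFAULT;
            break;
    }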
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 06c28728bb53..32d0ad058380 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -378,22 +378,8 @@ static void rht_deferred_worker(struct work_struct *work)
schedule_work(&ht->run_work);
}
-static bool rhashtable_check_elasticity(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash)
-{
- unsigned int elasticity = ht->elasticity;
- struct rhash_head *head;
-
- rht_for_each(head, tbl, hash)
- if (!--elasticity)
- return true;
-
- return false;
-}
-
-int rhashtable_insert_rehash(struct rhashtable *ht,
- struct bucket_table *tbl)
+static int rhashtable_insert_rehash(struct rhashtable *ht,
+ struct bucket_table *tbl)
{
struct bucket_table *old_tbl;
struct bucket_table *new_tbl;
@@ -439,57 +425,165 @@ fail:
return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *tbl,
- void **data)
+static void *rhashtable_lookup_one(struct rhashtable *ht,
+ struct bucket_table *tbl, unsigned int hash,
+ const void *key, struct rhash_head *obj)
{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct rhash_head __rcu **pprev;
struct rhash_head *head;
- unsigned int hash;
- int err;
+ int elasticity;
- tbl = rhashtable_last_table(ht, tbl);
- hash = head_hashfn(ht, tbl, obj);
- spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
-
- err = -EEXIST;
- if (key) {
- *data = rhashtable_lookup_fast(ht, key, ht->p);
- if (*data)
- goto exit;
+ elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
+ rht_for_each(head, tbl, hash) {
+ struct rhlist_head *list;
+ struct rhlist_head *plist;
+
+ elasticity--;
+ if (!key ||
+ (ht->p.obj_cmpfn ?
+ ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
+
+ if (!ht->rhlist)
+ return rht_obj(ht, head);
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
+
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
+
+ return NULL;
}
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto exit;
+ if (elasticity <= 0)
+ return ERR_PTR(-EAGAIN);
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash,
+ struct rhash_head *obj,
+ void *data)
+{
+ struct bucket_table *new_tbl;
+ struct rhash_head *head;
+
+ if (!IS_ERR_OR_NULL(data))
+ return ERR_PTR(-EEXIST);
- err = -EAGAIN;
- if (rhashtable_check_elasticity(ht, tbl, hash) ||
- rht_grow_above_100(ht, tbl))
- goto exit;
+ if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
+ return ERR_CAST(data);
- err = 0;
+ new_tbl = rcu_dereference(tbl->future_tbl);
+ if (new_tbl)
+ return new_tbl;
+
+ if (PTR_ERR(data) != -ENOENT)
+ return ERR_CAST(data);
+
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ return ERR_PTR(-E2BIG);
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ return ERR_PTR(-EAGAIN);
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (ht->rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
-exit:
- spin_unlock(rht_bucket_lock(tbl, hash));
+ return NULL;
+}
- if (err == 0)
- return NULL;
- else if (err == -EAGAIN)
- return tbl;
- else
- return ERR_PTR(err);
+static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj)
+{
+ struct bucket_table *new_tbl;
+ struct bucket_table *tbl;
+ unsigned int hash;
+ spinlock_t *lock;
+ void *data;
+
+ tbl = rcu_dereference(ht->tbl);
+
+ /* All insertions must grab the oldest table containing
+ * the hashed bucket that is yet to be rehashed.
+ */
+ for (;;) {
+ hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
+
+ if (tbl->rehash <= hash)
+ break;
+
+ spin_unlock_bh(lock);
+ tbl = rcu_dereference(tbl->future_tbl);
+ }
+
+ data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
+ new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
+ if (PTR_ERR(new_tbl) != -EEXIST)
+ data = ERR_CAST(new_tbl);
+
+ while (!IS_ERR_OR_NULL(new_tbl)) {
+ tbl = new_tbl;
+ hash = rht_head_hashfn(ht, tbl, obj, ht->p);
+ spin_lock_nested(rht_bucket_lock(tbl, hash),
+ SINGLE_DEPTH_NESTING);
+
+ data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
+ new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
+ if (PTR_ERR(new_tbl) != -EEXIST)
+ data = ERR_CAST(new_tbl);
+
+ spin_unlock(rht_bucket_lock(tbl, hash));
+ }
+
+ spin_unlock_bh(lock);
+
+ if (PTR_ERR(data) == -EAGAIN)
+ data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
+ -EAGAIN);
+
+ return data;
+}
+
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj)
+{
+ void *data;
+
+ do {
+ rcu_read_lock();
+ data = rhashtable_try_insert(ht, key, obj);
+ rcu_read_unlock();
+ } while (PTR_ERR(data) == -EAGAIN);
+
+ return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
@@ -593,11 +687,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start);
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
struct bucket_table *tbl = iter->walker.tbl;
+ struct rhlist_head *list = iter->list;
struct rhashtable *ht = iter->ht;
struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
if (p) {
- p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
+ if (!rhlist || !(list = rcu_dereference(list->next))) {
+ p = rcu_dereference(p->next);
+ list = container_of(p, struct rhlist_head, rhead);
+ }
goto next;
}
@@ -605,6 +704,18 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
int skip = iter->skip;
rht_for_each_rcu(p, tbl, iter->slot) {
+ if (rhlist) {
+ list = container_of(p, struct rhlist_head,
+ rhead);
+ do {
+ if (!skip)
+ goto next;
+ skip--;
+ list = rcu_dereference(list->next);
+ } while (list);
+
+ continue;
+ }
if (!skip)
break;
skip--;
@@ -614,7 +725,8 @@ next:
if (!rht_is_a_nulls(p)) {
iter->skip++;
iter->p = p;
- return rht_obj(ht, p);
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
}
iter->skip = 0;
@@ -803,6 +915,48 @@ int rhashtable_init(struct rhashtable *ht,
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
+ * rhltable_init - initialize a new hash list table
+ * @hlt: hash list table to be initialized
+ * @params: configuration parameters
+ *
+ * Initializes a new hash list table.
+ *
+ * See documentation for rhashtable_init.
+ */
+int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+{
+ int err;
+
+ /* No rhlist NULLs marking for now. */
+ if (params->nulls_base)
+ return -EINVAL;
+
+ err = rhashtable_init(&hlt->ht, params);
+ hlt->ht.rhlist = true;
+ return err;
+}
+EXPORT_SYMBOL_GPL(rhltable_init);
+
+static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg)
+{
+ struct rhlist_head *list;
+
+ if (!ht->rhlist) {
+ free_fn(rht_obj(ht, obj), arg);
+ return;
+ }
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ do {
+ obj = &list->rhead;
+ list = rht_dereference(list->next, ht);
+ free_fn(rht_obj(ht, obj), arg);
+ } while (list);
+}
+
+/**
* rhashtable_free_and_destroy - free elements and destroy hash table
* @ht: the hash table to destroy
* @free_fn: callback to release resources of element
@@ -839,7 +993,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
pos = next,
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL)
- free_fn(rht_obj(ht, pos), arg);
+ rhashtable_free_one(ht, pos, free_fn, arg);
}
}
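The new rhltable layer reuses the rhashtable machinery but chains duplicate-key entries through struct rhlist_head, as rhashtable_lookup_one() above shows. A hedged usage sketch; the item struct and its field names are made up for illustration, while rhltable_init() and the params layout come from this patch:

    struct item {
            u32 key;
            struct rhlist_head node;        /* rhlist_head, not rhash_head */
    };

    static const struct rhashtable_params item_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct item, key),
            .head_offset = offsetof(struct item, node),
    };

    static struct rhltable items;

    static int items_init(void)
    {
            /* fails with -EINVAL if params->nulls_base is set */
            return rhltable_init(&items, &item_params);
    }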
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 93f45011a59d..94346b4d8984 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5485,6 +5485,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
+ skb->vlan_proto = htons(ETH_P_IP);
skb->dev = &dev;
skb->dev->ifindex = SKB_DEV_IFINDEX;
skb->dev->type = SKB_DEV_TYPE;
diff --git a/lib/test_hash.c b/lib/test_hash.c
index 66c5fc8351e8..cac20c5fb304 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -143,7 +143,7 @@ static int __init
test_hash_init(void)
{
char buf[SIZE+1];
- u32 string_or = 0, hash_or[2][33] = { 0 };
+ u32 string_or = 0, hash_or[2][33] = { { 0, } };
unsigned tests = 0;
unsigned long long h64 = 0;
int i, j;
@@ -219,21 +219,27 @@ test_hash_init(void)
}
/* Issue notices about skipped tests. */
-#ifndef HAVE_ARCH__HASH_32
- pr_info("__hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH__HASH_32 != 1
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1
pr_info("__hash_32() is arch-specific; not compared to generic.");
#endif
-#ifndef HAVE_ARCH_HASH_32
- pr_info("hash_32() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_32 != 1
+#else
+ pr_info("__hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_32
+#if HAVE_ARCH_HASH_32 != 1
pr_info("hash_32() is arch-specific; not compared to generic.");
#endif
-#ifndef HAVE_ARCH_HASH_64
- pr_info("hash_64() has no arch implementation to test.");
-#elif HAVE_ARCH_HASH_64 != 1
+#else
+ pr_info("hash_32() has no arch implementation to test.");
+#endif
+#ifdef HAVE_ARCH_HASH_64
+#if HAVE_ARCH_HASH_64 != 1
pr_info("hash_64() is arch-specific; not compared to generic.");
#endif
+#else
+ pr_info("hash_64() has no arch implementation to test.");
+#endif
pr_notice("%u tests passed.", tests);
diff --git a/lib/usercopy.c b/lib/usercopy.c
deleted file mode 100644
index 4f5b1ddbcd25..000000000000
--- a/lib/usercopy.c
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <linux/export.h>
-#include <linux/bug.h>
-#include <linux/uaccess.h>
-
-void copy_from_user_overflow(void)
-{
- WARN(1, "Buffer overflow detected!\n");
-}
-EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/lib/win_minmax.c b/lib/win_minmax.c
new file mode 100644
index 000000000000..c8420d404926
--- /dev/null
+++ b/lib/win_minmax.c
@@ -0,0 +1,98 @@
+/*
+ * lib/win_minmax.c: windowed min/max tracker
+ *
+ * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
+ * value of a data stream over some fixed time interval. (E.g.,
+ * the minimum RTT over the past five minutes.) It uses constant
+ * space and constant time per update, yet almost always delivers
+ * the same minimum as an implementation that has to keep all the
+ * data in the window.
+ *
+ * The algorithm keeps track of the best, 2nd best & 3rd best min
+ * values, maintaining an invariant that the measurement time of
+ * the n'th best is >= that of the (n-1)'th best. It also makes sure
+ * that the three values are widely separated in the time window,
+ * since that bounds the worst-case error when the data is
+ * monotonically increasing over the window.
+ *
+ * Upon getting a new min, we can forget everything earlier because
+ * it has no value - the new min is <= everything else in the window
+ * by definition and it is the most recent. So we restart fresh on
+ * every new min and overwrite the 2nd & 3rd choices. The same
+ * property holds for the 2nd & 3rd best.
+ */
+#include <linux/module.h>
+#include <linux/win_minmax.h>
+
+/* As time advances, update the 1st, 2nd, and 3rd choices. */
+static u32 minmax_subwin_update(struct minmax *m, u32 win,
+ const struct minmax_sample *val)
+{
+ u32 dt = val->t - m->s[0].t;
+
+ if (unlikely(dt > win)) {
+ /*
+ * Passed entire window without a new val so make 2nd
+ * choice the new val & 3rd choice the new 2nd choice.
+ * We may have to iterate this since our 2nd choice
+ * may also be outside the window (we checked on entry
+ * that the third choice was in the window).
+ */
+ m->s[0] = m->s[1];
+ m->s[1] = m->s[2];
+ m->s[2] = *val;
+ if (unlikely(val->t - m->s[0].t > win)) {
+ m->s[0] = m->s[1];
+ m->s[1] = m->s[2];
+ m->s[2] = *val;
+ }
+ } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) {
+ /*
+ * We've passed a quarter of the window without a new val
+ * so take a 2nd choice from the 2nd quarter of the window.
+ */
+ m->s[2] = m->s[1] = *val;
+ } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) {
+ /*
+ * We've passed half the window without finding a new val
+ * so take a 3rd choice from the last half of the window.
+ */
+ m->s[2] = *val;
+ }
+ return m->s[0].v;
+}
+
+/* Check if new measurement updates the 1st, 2nd or 3rd choice max. */
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ if (unlikely(val.v >= m->s[0].v) || /* found new max? */
+ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */
+ return minmax_reset(m, t, meas); /* forget earlier samples */
+
+ if (unlikely(val.v >= m->s[1].v))
+ m->s[2] = m->s[1] = val;
+ else if (unlikely(val.v >= m->s[2].v))
+ m->s[2] = val;
+
+ return minmax_subwin_update(m, win, &val);
+}
+EXPORT_SYMBOL(minmax_running_max);
+
+/* Check if new measurement updates the 1st, 2nd or 3rd choice min. */
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ if (unlikely(val.v <= m->s[0].v) || /* found new min? */
+ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */
+ return minmax_reset(m, t, meas); /* forget earlier samples */
+
+ if (unlikely(val.v <= m->s[1].v))
+ m->s[2] = m->s[1] = val;
+ else if (unlikely(val.v <= m->s[2].v))
+ m->s[2] = val;
+
+ return minmax_subwin_update(m, win, &val);
+}
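A hedged usage sketch for the new tracker, following the windowed-min-RTT example from the header comment. The window length and time units are illustrative; minmax_get() and minmax_reset() come from the matching linux/win_minmax.h header:

    static struct minmax rtt_min;   /* zeroed at init, or via minmax_reset() */

    static void rtt_sample(u32 now_us, u32 rtt_us)
    {
            /* track the minimum RTT seen over the last 10 seconds;
             * 'win' must be in the same units as 'now_us' */
            minmax_running_min(&rtt_min, 10 * USEC_PER_SEC, now_us, rtt_us);
    }

    static u32 min_rtt(void)
    {
            return minmax_get(&rtt_min);    /* best sample, i.e. s[0].v */
    }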
diff --git a/mm/debug.c b/mm/debug.c
index 8865bfb41b0b..74c7cae4f683 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,9 +42,11 @@ const struct trace_print_flags vmaflag_names[] = {
void __dump_page(struct page *page, const char *reason)
{
+ int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+
pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
- page, page_ref_count(page), page_mapcount(page),
- page->mapping, page->index);
+ page, page_ref_count(page), mapcount,
+ page->mapping, page_to_pgoff(page));
if (PageCompound(page))
pr_cont(" compound_mapcount: %d", compound_mapcount(page));
pr_cont("\n");
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2db2112aa31e..a6abd76baa72 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1078,7 +1078,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
goto out;
page = pmd_page(*pmd);
- VM_BUG_ON_PAGE(!PageHead(page), page);
+ VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
if (flags & FOLL_TOUCH)
touch_pmd(vma, addr, pmd);
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
@@ -1116,7 +1116,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
}
skip_mlock:
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
- VM_BUG_ON_PAGE(!PageCompound(page), page);
+ VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
if (flags & FOLL_GET)
get_page(page);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 79c52d0061af..728d7790dc2d 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -838,7 +838,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
* value (scan code).
*/
-static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
+static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
+ struct vm_area_struct **vmap)
{
struct vm_area_struct *vma;
unsigned long hstart, hend;
@@ -846,7 +847,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address)
if (unlikely(khugepaged_test_exit(mm)))
return SCAN_ANY_PROCESS;
- vma = find_vma(mm, address);
+ *vmap = vma = find_vma(mm, address);
if (!vma)
return SCAN_VMA_NULL;
@@ -881,6 +882,11 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
.pmd = pmd,
};
+ /* We only decide to swap in if there are enough young PTEs */
+ if (referenced < HPAGE_PMD_NR/2) {
+ trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
+ return false;
+ }
fe.pte = pte_offset_map(pmd, address);
for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
fe.pte++, fe.address += PAGE_SIZE) {
@@ -888,17 +894,12 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
if (!is_swap_pte(pteval))
continue;
swapped_in++;
- /* we only decide to swapin, if there is enough young ptes */
- if (referenced < HPAGE_PMD_NR/2) {
- trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
- return false;
- }
ret = do_swap_page(&fe, pteval);
/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
if (ret & VM_FAULT_RETRY) {
down_read(&mm->mmap_sem);
- if (hugepage_vma_revalidate(mm, address)) {
+ if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
/* vma is no longer available, don't continue to swapin */
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
return false;
@@ -923,7 +924,6 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
static void collapse_huge_page(struct mm_struct *mm,
unsigned long address,
struct page **hpage,
- struct vm_area_struct *vma,
int node, int referenced)
{
pmd_t *pmd, _pmd;
@@ -933,6 +933,7 @@ static void collapse_huge_page(struct mm_struct *mm,
spinlock_t *pmd_ptl, *pte_ptl;
int isolated = 0, result = 0;
struct mem_cgroup *memcg;
+ struct vm_area_struct *vma;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
gfp_t gfp;
@@ -961,7 +962,7 @@ static void collapse_huge_page(struct mm_struct *mm,
}
down_read(&mm->mmap_sem);
- result = hugepage_vma_revalidate(mm, address);
+ result = hugepage_vma_revalidate(mm, address, &vma);
if (result) {
mem_cgroup_cancel_charge(new_page, memcg, true);
up_read(&mm->mmap_sem);
@@ -994,7 +995,7 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
- result = hugepage_vma_revalidate(mm, address);
+ result = hugepage_vma_revalidate(mm, address, &vma);
if (result)
goto out;
/* check if the pmd is still valid */
@@ -1202,7 +1203,7 @@ out_unmap:
if (ret) {
node = khugepaged_find_target_node();
/* collapse_huge_page will return with the mmap_sem released */
- collapse_huge_page(mm, address, hpage, vma, node, referenced);
+ collapse_huge_page(mm, address, hpage, node, referenced);
}
out:
trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a6a51a7c416..4be518d4e68a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1740,17 +1740,22 @@ static DEFINE_MUTEX(percpu_charge_mutex);
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
struct memcg_stock_pcp *stock;
+ unsigned long flags;
bool ret = false;
if (nr_pages > CHARGE_BATCH)
return ret;
- stock = &get_cpu_var(memcg_stock);
+ local_irq_save(flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
stock->nr_pages -= nr_pages;
ret = true;
}
- put_cpu_var(memcg_stock);
+
+ local_irq_restore(flags);
+
return ret;
}
@@ -1771,15 +1776,18 @@ static void drain_stock(struct memcg_stock_pcp *stock)
stock->cached = NULL;
}
-/*
- * This must be called under preempt disabled or must be called by
- * a thread which is pinned to local cpu.
- */
static void drain_local_stock(struct work_struct *dummy)
{
- struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
drain_stock(stock);
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+ local_irq_restore(flags);
}
/*
@@ -1788,14 +1796,19 @@ static void drain_local_stock(struct work_struct *dummy)
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
drain_stock(stock);
stock->cached = memcg;
}
stock->nr_pages += nr_pages;
- put_cpu_var(memcg_stock);
+
+ local_irq_restore(flags);
}
/*
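The three hunks above all trade get_cpu_var()/put_cpu_var(), which only disables preemption, for local_irq_save() plus this_cpu_ptr(). The motivation is inferred rather than stated in the hunks: once the stock can be drained or refilled from interrupt context, preemption protection alone leaves a window for corruption. The pattern change, side by side:

    /* before: other tasks are kept out, interrupts are not */
    stock = &get_cpu_var(memcg_stock);
    stock->nr_pages += nr_pages;
    put_cpu_var(memcg_stock);

    /* after: both tasks and local interrupts are kept out */
    local_irq_save(flags);
    stock = this_cpu_ptr(&memcg_stock);
    stock->nr_pages += nr_pages;
    local_irq_restore(flags);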
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 41266dc29f33..b58906b6215c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1567,7 +1567,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
return alloc_huge_page_node(page_hstate(compound_head(page)),
next_node_in(nid, nmask));
- node_clear(nid, nmask);
+ if (nid != next_node_in(nid, nmask))
+ node_clear(nid, nmask);
+
if (PageHighMem(page)
|| (zone_idx(page_zone(page)) == ZONE_MOVABLE))
gfp_mask |= __GFP_HIGHMEM;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d8c4e38fb5f4..2da72a5b6ecc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2336,6 +2336,23 @@ out:
return ret;
}
+/*
+ * Drop the (possibly final) reference to task->mempolicy. It needs to be
+ * dropped after task->mempolicy is set to NULL so that any allocation done as
+ * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
+ * policy.
+ */
+void mpol_put_task_policy(struct task_struct *task)
+{
+ struct mempolicy *pol;
+
+ task_lock(task);
+ pol = task->mempolicy;
+ task->mempolicy = NULL;
+ task_unlock(task);
+ mpol_put(pol);
+}
+
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
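The ordering constraint in the comment above implies a specific shape for the exit-path caller. A hedged sketch of that call site; the actual do_exit() hunk lands elsewhere in this series and may differ in detail:

     #ifdef CONFIG_NUMA
    -	task_lock(tsk);
    -	mpol_put(tsk->mempolicy);	/* freed while still reachable */
    -	tsk->mempolicy = NULL;
    -	task_unlock(tsk);
    +	mpol_put_task_policy(tsk);	/* NULL first, then drop the ref */
     #endif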
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3fbe73a6fe4b..a2214c64ed3c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3137,54 +3137,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
}
-static inline bool
-should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
- enum compact_result compact_result,
- enum compact_priority *compact_priority,
- int compaction_retries)
-{
- int max_retries = MAX_COMPACT_RETRIES;
-
- if (!order)
- return false;
-
- /*
- * compaction considers all the zone as desperately out of memory
- * so it doesn't really make much sense to retry except when the
- * failure could be caused by insufficient priority
- */
- if (compaction_failed(compact_result)) {
- if (*compact_priority > MIN_COMPACT_PRIORITY) {
- (*compact_priority)--;
- return true;
- }
- return false;
- }
-
- /*
- * make sure the compaction wasn't deferred or didn't bail out early
- * due to locks contention before we declare that we should give up.
- * But do not retry if the given zonelist is not suitable for
- * compaction.
- */
- if (compaction_withdrawn(compact_result))
- return compaction_zonelist_suitable(ac, order, alloc_flags);
-
- /*
- * !costly requests are much more important than __GFP_REPEAT
- * costly ones because they are de facto nofail and invoke OOM
- * killer to move on while costly can fail and users are ready
- * to cope with that. 1/4 retries is rather arbitrary but we
- * would need much more detailed feedback from compaction to
- * make a better decision.
- */
- if (order > PAGE_ALLOC_COSTLY_ORDER)
- max_retries /= 4;
- if (compaction_retries <= max_retries)
- return true;
-
- return false;
-}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
@@ -3195,6 +3147,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
return NULL;
}
+#endif /* CONFIG_COMPACTION */
+
static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
enum compact_result compact_result,
@@ -3221,7 +3175,6 @@ should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_fla
}
return false;
}
-#endif /* CONFIG_COMPACTION */
/* Perform direct synchronous page reclaim */
static int
@@ -4407,7 +4360,7 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
do {
zone_type--;
zone = pgdat->node_zones + zone_type;
- if (populated_zone(zone)) {
+ if (managed_zone(zone)) {
zoneref_set_zone(zone,
&zonelist->_zonerefs[nr_zones++]);
check_highest_zone(zone_type);
@@ -4645,7 +4598,7 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
for (j = 0; j < nr_nodes; j++) {
node = node_order[j];
z = &NODE_DATA(node)->node_zones[zone_type];
- if (populated_zone(z)) {
+ if (managed_zone(z)) {
zoneref_set_zone(z,
&zonelist->_zonerefs[pos++]);
check_highest_zone(zone_type);
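populated_zone() and managed_zone() differ in one field, but that distinction carries this patch and the matching mm/vmscan.c conversions below: a zone can have present pages that are entirely consumed by the memmap or by bootmem allocations, leaving the buddy allocator nothing to manage. Paraphrasing the two predicates (see include/linux/mmzone.h; the exact form there may differ):

    static inline bool populated_zone(struct zone *zone)
    {
            return !!zone->present_pages;   /* physical pages exist */
    }

    static inline bool managed_zone(struct zone *zone)
    {
            return !!zone->managed_pages;   /* buddy-managed pages exist */
    }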
diff --git a/mm/page_io.c b/mm/page_io.c
index 16bd82fad38c..eafe5ddc2b54 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -264,6 +264,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
int ret;
struct swap_info_struct *sis = page_swap_info(page);
+ BUG_ON(!PageSwapCache(page));
if (sis->flags & SWP_FILE) {
struct kiocb kiocb;
struct file *swap_file = sis->swap_file;
@@ -337,6 +338,7 @@ int swap_readpage(struct page *page)
int ret = 0;
struct swap_info_struct *sis = page_swap_info(page);
+ BUG_ON(!PageSwapCache(page));
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageUptodate(page), page);
if (frontswap_load(page) == 0) {
@@ -386,6 +388,7 @@ int swap_set_page_dirty(struct page *page)
if (sis->flags & SWP_FILE) {
struct address_space *mapping = sis->swap_file->f_mapping;
+ BUG_ON(!PageSwapCache(page));
return mapping->a_ops->set_page_dirty(page);
} else {
return __set_page_dirty_no_writeback(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 78cfa292a29a..2657accc6e2b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2724,7 +2724,6 @@ int swapcache_prepare(swp_entry_t entry)
struct swap_info_struct *page_swap_info(struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
- BUG_ON(!PageSwapCache(page));
return swap_info[swp_type(swap)];
}
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a3cc3052f830..3c8da0af9695 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
return NULL;
}
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
- bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+ struct page *page, bool to_user)
{
- struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
const void *end = ptr + n - 1;
+ struct page *endpage;
bool is_reserved, is_cma;
/*
- * Some architectures (arm64) return true for virt_addr_valid() on
- * vmalloced addresses. Work around this by checking for vmalloc
- * first.
- */
- if (is_vmalloc_addr(ptr))
- return NULL;
-
- if (!virt_addr_valid(ptr))
- return NULL;
-
- page = virt_to_head_page(ptr);
-
- /* Check slab allocator for flags and size. */
- if (PageSlab(page))
- return __check_heap_object(ptr, n, page);
-
- /*
* Sometimes the kernel data regions are not marked Reserved (see
* check below). And sometimes [_sdata,_edata) does not cover
* rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
((unsigned long)end & (unsigned long)PAGE_MASK)))
return NULL;
- /* Allow if start and end are inside the same compound page. */
+ /* Allow if fully inside the same compound (__GFP_COMP) page. */
endpage = virt_to_head_page(end);
if (likely(endpage == page))
return NULL;
@@ -199,20 +184,47 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
is_reserved = PageReserved(page);
is_cma = is_migrate_cma_page(page);
if (!is_reserved && !is_cma)
- goto reject;
+ return "<spans multiple pages>";
for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
page = virt_to_head_page(ptr);
if (is_reserved && !PageReserved(page))
- goto reject;
+ return "<spans Reserved and non-Reserved pages>";
if (is_cma && !is_migrate_cma_page(page))
- goto reject;
+ return "<spans CMA and non-CMA pages>";
}
+#endif
return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ struct page *page;
+
+ /*
+ * Some architectures (arm64) return true for virt_addr_valid() on
+ * vmalloced addresses. Work around this by checking for vmalloc
+ * first.
+ *
+ * We also need to check for module addresses explicitly since we
+ * may copy static data from modules to userspace.
+ */
+ if (is_vmalloc_or_module_addr(ptr))
+ return NULL;
+
+ if (!virt_addr_valid(ptr))
+ return NULL;
+
+ page = virt_to_head_page(ptr);
+
+ /* Check slab allocator for flags and size. */
+ if (PageSlab(page))
+ return __check_heap_object(ptr, n, page);
-reject:
- return "<spans multiple pages>";
+ /* Verify object does not incorrectly span multiple pages. */
+ return check_page_span(ptr, n, page, to_user);
}
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 374d95d04178..b1e12a1ea9cf 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1665,7 +1665,7 @@ static bool inactive_reclaimable_pages(struct lruvec *lruvec,
for (zid = sc->reclaim_idx; zid >= 0; zid--) {
zone = &pgdat->node_zones[zid];
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
if (zone_page_state_snapshot(zone, NR_ZONE_LRU_BASE +
@@ -2036,7 +2036,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
struct zone *zone = &pgdat->node_zones[zid];
unsigned long inactive_zone, active_zone;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
inactive_zone = zone_page_state(zone,
@@ -2171,7 +2171,7 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
for (z = 0; z < MAX_NR_ZONES; z++) {
struct zone *zone = &pgdat->node_zones[z];
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
total_high_wmark += high_wmark_pages(zone);
@@ -2510,7 +2510,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
/* If compaction would go ahead or the allocation would succeed, stop */
for (z = 0; z <= sc->reclaim_idx; z++) {
struct zone *zone = &pgdat->node_zones[z];
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
@@ -2840,7 +2840,7 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
for (i = 0; i <= ZONE_NORMAL; i++) {
zone = &pgdat->node_zones[i];
- if (!populated_zone(zone) ||
+ if (!managed_zone(zone) ||
pgdat_reclaimable_pages(pgdat) == 0)
continue;
@@ -3141,7 +3141,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
for (i = 0; i <= classzone_idx; i++) {
struct zone *zone = pgdat->node_zones + i;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
if (!zone_balanced(zone, order, classzone_idx))
@@ -3169,7 +3169,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
sc->nr_to_reclaim = 0;
for (z = 0; z <= sc->reclaim_idx; z++) {
zone = pgdat->node_zones + z;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
@@ -3242,7 +3242,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
if (buffer_heads_over_limit) {
for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
zone = pgdat->node_zones + i;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
sc.reclaim_idx = i;
@@ -3262,7 +3262,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
*/
for (i = classzone_idx; i >= 0; i--) {
zone = pgdat->node_zones + i;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
if (zone_balanced(zone, sc.order, classzone_idx))
@@ -3508,7 +3508,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
pg_data_t *pgdat;
int z;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
return;
if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
@@ -3522,7 +3522,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
/* Only wake kswapd if all zones are unbalanced */
for (z = 0; z <= classzone_idx; z++) {
zone = pgdat->node_zones + z;
- if (!populated_zone(zone))
+ if (!managed_zone(zone))
continue;
if (zone_balanced(zone, order, classzone_idx))
diff --git a/net/6lowpan/ndisc.c b/net/6lowpan/ndisc.c
index 86450b7e2899..941df2fa4448 100644
--- a/net/6lowpan/ndisc.c
+++ b/net/6lowpan/ndisc.c
@@ -101,8 +101,6 @@ static void lowpan_ndisc_802154_update(struct neighbour *n, u32 flags,
ieee802154_be16_to_le16(&neigh->short_addr, lladdr_short);
if (!lowpan_802154_is_valid_src_short_addr(neigh->short_addr))
neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
- } else {
- neigh->short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC);
}
write_unlock_bh(&n->lock);
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index f066781be3c8..10d2bdce686e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1278,7 +1278,7 @@ out:
return err;
}
-#if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
+#if IS_ENABLED(CONFIG_IPDDP)
static __inline__ int is_ip_over_ddp(struct sk_buff *skb)
{
return skb->data[12] == 22;
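The conversions in this and the following files rely on IS_ENABLED() from include/linux/kconfig.h, which folds the builtin/module pair into one test:

    /*
     *   #if IS_ENABLED(CONFIG_IPDDP)
     *
     * is equivalent to the older, wordier
     *
     *   #if defined(CONFIG_IPDDP) || defined(CONFIG_IPDDP_MODULE)
     *
     * and, unlike defined(), also works in ordinary C expressions.
     */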
diff --git a/net/atm/lec.c b/net/atm/lec.c
index e574a7e9db6f..5d2693826afb 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -31,7 +31,7 @@
#include <linux/atmlec.h>
/* Proxy LEC knows about bridging */
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
#include "../bridge/br_private.h"
static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
@@ -121,7 +121,7 @@ static unsigned char bus_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* Device structures */
static struct net_device *dev_lec[MAX_LEC_ITF];
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
{
char *buff;
@@ -155,7 +155,7 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
sk->sk_data_ready(sk);
}
}
-#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
+#endif /* IS_ENABLED(CONFIG_BRIDGE) */
/*
* Open/initialize the netdevice. This is called (in the current kernel)
@@ -222,7 +222,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
(long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
(long)skb_end_pointer(skb));
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
lec_handle_bridge(skb, dev);
#endif
@@ -426,7 +426,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
(unsigned short)(0xffff & mesg->content.normal.flag);
break;
case l_should_bridge:
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+#if IS_ENABLED(CONFIG_BRIDGE)
{
pr_debug("%s: bridge zeppelin asks about %pM\n",
dev->name, mesg->content.proxy.mac_addr);
@@ -452,7 +452,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
sk->sk_data_ready(sk);
}
}
-#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
+#endif /* IS_ENABLED(CONFIG_BRIDGE) */
break;
default:
pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 7d170010beb9..ee08540ce503 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -335,7 +335,7 @@ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
goto out;
skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
- elp_buff = skb_push(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+ elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
elp_packet = (struct batadv_elp_packet *)elp_buff;
memset(elp_packet, 0, BATADV_ELP_HLEN);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 610f2c45edcd..7e8dc648b95a 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -461,6 +461,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
}
/**
+ * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
+ * @orig_node: originator node whose last bonding candidate should be retrieved
+ *
+ * Return: last bonding candidate of router or NULL if not found
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+static struct batadv_orig_ifinfo *
+batadv_last_bonding_get(struct batadv_orig_node *orig_node)
+{
+ struct batadv_orig_ifinfo *last_bonding_candidate;
+
+ spin_lock_bh(&orig_node->neigh_list_lock);
+ last_bonding_candidate = orig_node->last_bonding_candidate;
+
+ if (last_bonding_candidate)
+ kref_get(&last_bonding_candidate->refcount);
+ spin_unlock_bh(&orig_node->neigh_list_lock);
+
+ return last_bonding_candidate;
+}
+
+/**
* batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
* @orig_node: originator node whose bonding candidates should be replaced
* @new_candidate: new bonding candidate or NULL
@@ -530,7 +553,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
* router - obviously there are no other candidates.
*/
rcu_read_lock();
- last_candidate = orig_node->last_bonding_candidate;
+ last_candidate = batadv_last_bonding_get(orig_node);
if (last_candidate)
last_cand_router = rcu_dereference(last_candidate->router);
@@ -622,6 +645,9 @@ next:
batadv_orig_ifinfo_put(next_candidate);
}
+ if (last_candidate)
+ batadv_orig_ifinfo_put(last_candidate);
+
return router;
}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 0b5f729d08d2..1aff2da9bc74 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -26,11 +26,13 @@
#include <linux/module.h>
#include <linux/debugfs.h>
+#include <linux/stringify.h>
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
+#include "leds.h"
#include "selftest.h"
/* Bluetooth sockets */
@@ -712,13 +714,16 @@ static struct net_proto_family bt_sock_family_ops = {
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
+#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
+ __stringify(BT_SUBSYS_REVISION)
+
static int __init bt_init(void)
{
int err;
sock_skb_cb_check_size(sizeof(struct bt_skb_cb));
- BT_INFO("Core ver %s", BT_SUBSYS_VERSION);
+ BT_INFO("Core ver %s", VERSION);
err = bt_selftest();
if (err < 0)
@@ -726,6 +731,8 @@ static int __init bt_init(void)
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+ bt_leds_init();
+
err = bt_sysfs_init();
if (err < 0)
return err;
@@ -785,6 +792,8 @@ static void __exit bt_exit(void)
bt_sysfs_cleanup();
+ bt_leds_cleanup();
+
debugfs_remove_recursive(bt_debugfs);
}
@@ -792,7 +801,7 @@ subsys_initcall(bt_init);
module_exit(bt_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
-MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
-MODULE_VERSION(BT_SUBSYS_VERSION);
+MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
+MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
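BT_SUBSYS_VERSION and BT_SUBSYS_REVISION are now numeric, so building the version string needs the two-level stringification idiom. For reference, the macros from include/linux/stringify.h, plus an illustrative expansion (the 2/22 values are assumed, not taken from this hunk):

    #define __stringify_1(x...)     #x
    #define __stringify(x...)       __stringify_1(x)

    /* assuming BT_SUBSYS_VERSION 2 and BT_SUBSYS_REVISION 22:
     *   VERSION -> __stringify(2) "." __stringify(22) -> "2" "." "22" -> "2.22"
     * the extra level forces argument expansion before '#' stringizes it */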
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index ddf8432fe8fb..3ac89e9ace71 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1562,6 +1562,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT))
__mgmt_power_off(hdev);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index b0e23dfc5c34..c8135680c43e 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -971,14 +971,14 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
- u8 ad_len = 0;
size_t name_len;
+ int max_len;
+ max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
name_len = strlen(hdev->dev_name);
- if (name_len > 0) {
- size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+ if (name_len > 0 && max_len > 0) {
if (name_len > max_len) {
name_len = max_len;
@@ -997,22 +997,42 @@ static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
return ad_len;
}
+static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+ return append_local_name(hdev, ptr, 0);
+}
+
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
u8 *ptr)
{
struct adv_info *adv_instance;
+ u32 instance_flags;
+ u8 scan_rsp_len = 0;
adv_instance = hci_find_adv_instance(hdev, instance);
if (!adv_instance)
return 0;
- /* TODO: Set the appropriate entries based on advertising instance flags
- * here once flags other than 0 are supported.
- */
+ instance_flags = adv_instance->flags;
+
+ if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
+ ptr[0] = 3;
+ ptr[1] = EIR_APPEARANCE;
+ put_unaligned_le16(hdev->appearance, ptr + 2);
+ scan_rsp_len += 4;
+ ptr += 4;
+ }
+
memcpy(ptr, adv_instance->scan_rsp_data,
adv_instance->scan_rsp_len);
- return adv_instance->scan_rsp_len;
+ scan_rsp_len += adv_instance->scan_rsp_len;
+ ptr += adv_instance->scan_rsp_len;
+
+ if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);
+
+ return scan_rsp_len;
}
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
@@ -1194,7 +1214,7 @@ static void adv_timeout_expire(struct work_struct *work)
hci_req_init(&req, hdev);
- hci_req_clear_adv_instance(hdev, &req, instance, false);
+ hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
if (list_empty(&hdev->adv_instances))
__hci_req_disable_advertising(&req);
@@ -1284,8 +1304,9 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
* setting.
* - force == false: Only instances that have a timeout will be removed.
*/
-void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
- u8 instance, bool force)
+void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
+ struct hci_request *req, u8 instance,
+ bool force)
{
struct adv_info *adv_instance, *n, *next_instance = NULL;
int err;
@@ -1311,7 +1332,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
rem_inst = adv_instance->instance;
err = hci_remove_adv_instance(hdev, rem_inst);
if (!err)
- mgmt_advertising_removed(NULL, hdev, rem_inst);
+ mgmt_advertising_removed(sk, hdev, rem_inst);
}
} else {
adv_instance = hci_find_adv_instance(hdev, instance);
@@ -1325,7 +1346,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
err = hci_remove_adv_instance(hdev, instance);
if (!err)
- mgmt_advertising_removed(NULL, hdev, instance);
+ mgmt_advertising_removed(sk, hdev, instance);
}
}
@@ -1716,7 +1737,7 @@ void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
* function. To be safe hard-code one of the
* values that's suitable for SCO.
*/
- rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
+ rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
sizeof(rej), &rej);
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index b2d044bdc732..ac1e11006f38 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -73,8 +73,9 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance);
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
bool force);
-void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
- u8 instance, bool force);
+void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
+ struct hci_request *req, u8 instance,
+ bool force);
void __hci_req_update_class(struct hci_request *req);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 96f04b7b9556..48f9471e7c85 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -26,6 +26,7 @@
#include <linux/export.h>
#include <linux/utsname.h>
+#include <linux/sched.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -38,6 +39,8 @@
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);
+static DEFINE_IDA(sock_cookie_ida);
+
static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
@@ -52,6 +55,8 @@ struct hci_pinfo {
__u32 cmsg_mask;
unsigned short channel;
unsigned long flags;
+ __u32 cookie;
+ char comm[TASK_COMM_LEN];
};
void hci_sock_set_flag(struct sock *sk, int nr)
@@ -74,6 +79,38 @@ unsigned short hci_sock_get_channel(struct sock *sk)
return hci_pi(sk)->channel;
}
+u32 hci_sock_get_cookie(struct sock *sk)
+{
+ return hci_pi(sk)->cookie;
+}
+
+static bool hci_sock_gen_cookie(struct sock *sk)
+{
+ int id = hci_pi(sk)->cookie;
+
+ if (!id) {
+ id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
+ if (id < 0)
+ id = 0xffffffff;
+
+ hci_pi(sk)->cookie = id;
+ get_task_comm(hci_pi(sk)->comm, current);
+ return true;
+ }
+
+ return false;
+}
+
+static void hci_sock_free_cookie(struct sock *sk)
+{
+ int id = hci_pi(sk)->cookie;
+
+ if (id) {
+ hci_pi(sk)->cookie = 0xffffffff;
+ ida_simple_remove(&sock_cookie_ida, id);
+ }
+}
+
static inline int hci_test_bit(int nr, const void *addr)
{
return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
@@ -305,6 +342,60 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
kfree_skb(skb_copy);
}
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk)
+{
+ struct sock *sk;
+ __le16 index;
+
+ if (hdev)
+ index = cpu_to_le16(hdev->id);
+ else
+ index = cpu_to_le16(MGMT_INDEX_NONE);
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
+ continue;
+
+ /* Ignore socket without the flag set */
+ if (!hci_sock_test_flag(sk, flag))
+ continue;
+
+ /* Skip the original socket */
+ if (sk == skip_sk)
+ continue;
+
+ skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+ put_unaligned_le16(event, skb_put(skb, 2));
+
+ if (data)
+ memcpy(skb_put(skb, data_len), data, data_len);
+
+ skb->tstamp = tstamp;
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
+ hdr->index = index;
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
struct hci_mon_hdr *hdr;
@@ -384,6 +475,129 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
return skb;
}
+static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+ u16 format;
+ u8 ver[3];
+ u32 flags;
+
+ /* No message needed when cookie is not present */
+ if (!hci_pi(sk)->cookie)
+ return NULL;
+
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ format = 0x0000;
+ ver[0] = BT_SUBSYS_VERSION;
+ put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
+ break;
+ case HCI_CHANNEL_USER:
+ format = 0x0001;
+ ver[0] = BT_SUBSYS_VERSION;
+ put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
+ break;
+ case HCI_CHANNEL_CONTROL:
+ format = 0x0002;
+ mgmt_fill_version_info(ver);
+ break;
+ default:
+ /* No message for unsupported format */
+ return NULL;
+ }
+
+ skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+ put_unaligned_le16(format, skb_put(skb, 2));
+ memcpy(skb_put(skb, sizeof(ver)), ver, sizeof(ver));
+ put_unaligned_le32(flags, skb_put(skb, 4));
+ *skb_put(skb, 1) = TASK_COMM_LEN;
+ memcpy(skb_put(skb, TASK_COMM_LEN), hci_pi(sk)->comm, TASK_COMM_LEN);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
+ if (hci_pi(sk)->hdev)
+ hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
+ else
+ hdr->index = cpu_to_le16(HCI_DEV_NONE);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
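
For reference, the CTRL_OPEN payload built above can be parsed with a short userspace sketch like the following; the three-le16 hci_mon_hdr layout and TASK_COMM_LEN == 16 are assumptions taken from the kernel headers, and a little-endian host is assumed for the memcpy decoding:

#include <stdint.h>
#include <string.h>

struct mon_ctrl_open {
	uint32_t cookie;	/* socket cookie (le32) */
	uint16_t format;	/* 0x0000 raw, 0x0001 user, 0x0002 control */
	uint8_t  ver[3];	/* version/revision bytes */
	uint32_t flags;		/* bit 0: trusted */
	char     comm[17];	/* TASK_COMM_LEN (16) + NUL */
};

/* Parse the 14 + TASK_COMM_LEN payload bytes that follow hci_mon_hdr. */
static int parse_ctrl_open(const uint8_t *p, size_t len,
			   struct mon_ctrl_open *out)
{
	if (len < 14 + 16)
		return -1;
	memcpy(&out->cookie, p, 4);
	memcpy(&out->format, p + 4, 2);
	memcpy(out->ver, p + 6, 3);
	memcpy(&out->flags, p + 9, 4);
	/* p[13] is the comm length byte, always TASK_COMM_LEN here */
	memcpy(out->comm, p + 14, 16);
	out->comm[16] = '\0';
	return 0;
}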
+
+static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ /* No message needed when cookie is not present */
+ if (!hci_pi(sk)->cookie)
+ return NULL;
+
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
+ case HCI_CHANNEL_CONTROL:
+ break;
+ default:
+ /* No message for unsupported format */
+ return NULL;
+ }
+
+ skb = bt_skb_alloc(4, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
+ if (hci_pi(sk)->hdev)
+ hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
+ else
+ hdr->index = cpu_to_le16(HCI_DEV_NONE);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
+static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
+ u16 opcode, u16 len,
+ const void *buf)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
+ put_unaligned_le16(opcode, skb_put(skb, 2));
+
+ if (buf)
+ memcpy(skb_put(skb, len), buf, len);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
+ hdr->index = cpu_to_le16(index);
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
@@ -458,6 +672,26 @@ static void send_monitor_replay(struct sock *sk)
read_unlock(&hci_dev_list_lock);
}
+static void send_monitor_control_replay(struct sock *mon_sk)
+{
+ struct sock *sk;
+
+ read_lock(&hci_sk_list.lock);
+
+ sk_for_each(sk, &hci_sk_list.head) {
+ struct sk_buff *skb;
+
+ skb = create_monitor_ctrl_open(sk);
+ if (!skb)
+ continue;
+
+ if (sock_queue_rcv_skb(mon_sk, skb))
+ kfree_skb(skb);
+ }
+
+ read_unlock(&hci_sk_list.lock);
+}
+
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
@@ -585,6 +819,7 @@ static int hci_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct hci_dev *hdev;
+ struct sk_buff *skb;
BT_DBG("sock %p sk %p", sock, sk);
@@ -593,8 +828,24 @@ static int hci_sock_release(struct socket *sock)
hdev = hci_pi(sk)->hdev;
- if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
+ switch (hci_pi(sk)->channel) {
+ case HCI_CHANNEL_MONITOR:
atomic_dec(&monitor_promisc);
+ break;
+ case HCI_CHANNEL_RAW:
+ case HCI_CHANNEL_USER:
+ case HCI_CHANNEL_CONTROL:
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
+ hci_sock_free_cookie(sk);
+ break;
+ }
bt_sock_unlink(&hci_sk_list, sk);
@@ -721,6 +972,27 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
goto done;
}
+ /* When calling an ioctl on an unbound raw socket, ensure
+ * that the monitor gets informed. Ensure that the resulting event
+ * is only sent once by checking if the cookie exists or not. The
+ * socket cookie will only ever be generated once for the lifetime
+ * of a given socket.
+ */
+ if (hci_sock_gen_cookie(sk)) {
+ struct sk_buff *skb;
+
+ if (capable(CAP_NET_ADMIN))
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
release_sock(sk);
switch (cmd) {
@@ -784,6 +1056,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
struct hci_dev *hdev = NULL;
+ struct sk_buff *skb;
int len, err = 0;
BT_DBG("sock %p sk %p", sock, sk);
@@ -822,7 +1095,35 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
atomic_inc(&hdev->promisc);
}
+ hci_pi(sk)->channel = haddr.hci_channel;
+
+ if (!hci_sock_gen_cookie(sk)) {
+ /* In the case when a cookie has already been assigned,
+ * an ioctl has already been issued against an unbound
+ * socket and with that triggered an open notification.
+ * Send a close notification first to allow the state
+ * transition to bound.
+ */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
+ if (capable(CAP_NET_ADMIN))
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+
hci_pi(sk)->hdev = hdev;
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
break;
case HCI_CHANNEL_USER:
@@ -884,9 +1185,38 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
}
}
- atomic_inc(&hdev->promisc);
+ hci_pi(sk)->channel = haddr.hci_channel;
+
+ if (!hci_sock_gen_cookie(sk)) {
+ /* In the case when a cookie has already been assigned,
+ * this socket will transition from a raw socket into
+ * a user channel socket. For a clean transition, send
+ * the close notification first.
+ */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
+ /* The user channel is restricted to CAP_NET_ADMIN
+ * capabilities and with that implicitly trusted.
+ */
+ hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
hci_pi(sk)->hdev = hdev;
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
+ atomic_inc(&hdev->promisc);
break;
case HCI_CHANNEL_MONITOR:
@@ -900,6 +1230,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
goto done;
}
+ hci_pi(sk)->channel = haddr.hci_channel;
+
/* The monitor interface is restricted to CAP_NET_RAW
* capabilities and with that implicitly trusted.
*/
@@ -908,9 +1240,10 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
send_monitor_note(sk, "Linux version %s (%s)",
init_utsname()->release,
init_utsname()->machine);
- send_monitor_note(sk, "Bluetooth subsystem version %s",
- BT_SUBSYS_VERSION);
+ send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
+ BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
send_monitor_replay(sk);
+ send_monitor_control_replay(sk);
atomic_inc(&monitor_promisc);
break;
@@ -925,6 +1258,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
err = -EPERM;
goto done;
}
+
+ hci_pi(sk)->channel = haddr.hci_channel;
break;
default:
@@ -946,6 +1281,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
if (capable(CAP_NET_ADMIN))
hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
+ hci_pi(sk)->channel = haddr.hci_channel;
+
/* At the moment the index and unconfigured index events
* are enabled unconditionally. Setting them on each
* socket when binding keeps this functionality. They
@@ -956,16 +1293,40 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
* received by untrusted users. Example for such events
* are changes to settings, class of device, name etc.
*/
- if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
+ if (!hci_sock_gen_cookie(sk)) {
+ /* In the case when a cookie has already been
+ * assigned, this socket will transition from
+ * a raw socket into a control socket. To
+ * allow for a clean transition, send the
+ * close notification first.
+ */
+ skb = create_monitor_ctrl_close(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_open(sk);
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+
hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
- hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
+ hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
}
break;
}
-
- hci_pi(sk)->channel = haddr.hci_channel;
sk->sk_state = BT_BOUND;
done:
@@ -1133,6 +1494,19 @@ static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
goto done;
}
+ if (chan->channel == HCI_CHANNEL_CONTROL) {
+ struct sk_buff *skb;
+
+ /* Send event to monitor */
+ skb = create_monitor_ctrl_command(sk, index, opcode, len,
+ buf + sizeof(*hdr));
+ if (skb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(skb);
+ }
+ }
+
if (opcode >= chan->handler_count ||
chan->handlers[opcode].func == NULL) {
BT_DBG("Unknown op %u", opcode);
@@ -1440,6 +1814,9 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
BT_DBG("sk %p, opt %d", sk, optname);
+ if (level != SOL_HCI)
+ return -ENOPROTOOPT;
+
lock_sock(sk);
if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
@@ -1523,6 +1900,9 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
BT_DBG("sk %p, opt %d", sk, optname);
+ if (level != SOL_HCI)
+ return -ENOPROTOOPT;
+
if (get_user(len, optlen))
return -EFAULT;
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
index 8319c8440c89..cb670b5594eb 100644
--- a/net/bluetooth/leds.c
+++ b/net/bluetooth/leds.c
@@ -11,6 +11,8 @@
#include "leds.h"
+DEFINE_LED_TRIGGER(bt_power_led_trigger);
+
struct hci_basic_led_trigger {
struct led_trigger led_trigger;
struct hci_dev *hdev;
@@ -24,6 +26,21 @@ void hci_leds_update_powered(struct hci_dev *hdev, bool enabled)
if (hdev->power_led)
led_trigger_event(hdev->power_led,
enabled ? LED_FULL : LED_OFF);
+
+ if (!enabled) {
+ struct hci_dev *d;
+
+ read_lock(&hci_dev_list_lock);
+
+ list_for_each_entry(d, &hci_dev_list, list) {
+ if (test_bit(HCI_UP, &d->flags))
+ enabled = true;
+ }
+
+ read_unlock(&hci_dev_list_lock);
+ }
+
+ led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF);
}
static void power_activate(struct led_classdev *led_cdev)
@@ -72,3 +89,13 @@ void hci_leds_init(struct hci_dev *hdev)
/* initialize power_led */
hdev->power_led = led_allocate_basic(hdev, power_activate, "power");
}
+
+void bt_leds_init(void)
+{
+ led_trigger_register_simple("bluetooth-power", &bt_power_led_trigger);
+}
+
+void bt_leds_cleanup(void)
+{
+ led_trigger_unregister_simple(bt_power_led_trigger);
+}
diff --git a/net/bluetooth/leds.h b/net/bluetooth/leds.h
index a9c4d6ea01cf..08725a2fbd9b 100644
--- a/net/bluetooth/leds.h
+++ b/net/bluetooth/leds.h
@@ -7,10 +7,20 @@
*/
#if IS_ENABLED(CONFIG_BT_LEDS)
+
void hci_leds_update_powered(struct hci_dev *hdev, bool enabled);
void hci_leds_init(struct hci_dev *hdev);
+
+void bt_leds_init(void);
+void bt_leds_cleanup(void);
+
#else
+
static inline void hci_leds_update_powered(struct hci_dev *hdev,
bool enabled) {}
static inline void hci_leds_init(struct hci_dev *hdev) {}
+
+static inline void bt_leds_init(void) {}
+static inline void bt_leds_cleanup(void) {}
+
#endif
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7639290b6de3..7b2bac492fb1 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -38,7 +38,7 @@
#include "mgmt_util.h"
#define MGMT_VERSION 1
-#define MGMT_REVISION 13
+#define MGMT_REVISION 14
static const u16 mgmt_commands[] = {
MGMT_OP_READ_INDEX_LIST,
@@ -104,6 +104,8 @@ static const u16 mgmt_commands[] = {
MGMT_OP_REMOVE_ADVERTISING,
MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_OP_START_LIMITED_DISCOVERY,
+ MGMT_OP_READ_EXT_INFO,
+ MGMT_OP_SET_APPEARANCE,
};
static const u16 mgmt_events[] = {
@@ -141,6 +143,7 @@ static const u16 mgmt_events[] = {
MGMT_EV_LOCAL_OOB_DATA_UPDATED,
MGMT_EV_ADVERTISING_ADDED,
MGMT_EV_ADVERTISING_REMOVED,
+ MGMT_EV_EXT_INFO_CHANGED,
};
static const u16 mgmt_untrusted_commands[] = {
@@ -149,6 +152,7 @@ static const u16 mgmt_untrusted_commands[] = {
MGMT_OP_READ_UNCONF_INDEX_LIST,
MGMT_OP_READ_CONFIG_INFO,
MGMT_OP_READ_EXT_INDEX_LIST,
+ MGMT_OP_READ_EXT_INFO,
};
static const u16 mgmt_untrusted_events[] = {
@@ -162,6 +166,7 @@ static const u16 mgmt_untrusted_events[] = {
MGMT_EV_NEW_CONFIG_OPTIONS,
MGMT_EV_EXT_INDEX_ADDED,
MGMT_EV_EXT_INDEX_REMOVED,
+ MGMT_EV_EXT_INFO_CHANGED,
};
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
@@ -256,13 +261,6 @@ static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
flag, skip_sk);
}
-static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
- u16 len, struct sock *skip_sk)
-{
- return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
- HCI_MGMT_GENERIC_EVENTS, skip_sk);
-}
-
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
struct sock *skip_sk)
{
@@ -278,6 +276,14 @@ static u8 le_addr_type(u8 mgmt_addr_type)
return ADDR_LE_DEV_RANDOM;
}
+void mgmt_fill_version_info(void *ver)
+{
+ struct mgmt_rp_read_version *rp = ver;
+
+ rp->version = MGMT_VERSION;
+ rp->revision = cpu_to_le16(MGMT_REVISION);
+}
+
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -285,8 +291,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("sock %p", sk);
- rp.version = MGMT_VERSION;
- rp.revision = cpu_to_le16(MGMT_REVISION);
+ mgmt_fill_version_info(&rp);
return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
&rp, sizeof(rp));
@@ -572,8 +577,8 @@ static int new_options(struct hci_dev *hdev, struct sock *skip)
{
__le32 options = get_missing_options(hdev);
- return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
- sizeof(options), skip);
+ return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
+ sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
@@ -862,6 +867,107 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
sizeof(rp));
}
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+ u8 data_len)
+{
+ eir[eir_len++] = sizeof(type) + data_len;
+ eir[eir_len++] = type;
+ memcpy(&eir[eir_len], data, data_len);
+ eir_len += data_len;
+
+ return eir_len;
+}
+
+static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
+{
+ eir[eir_len++] = sizeof(type) + sizeof(data);
+ eir[eir_len++] = type;
+ put_unaligned_le16(data, &eir[eir_len]);
+ eir_len += sizeof(data);
+
+ return eir_len;
+}
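
These helpers emit the standard EIR/AD TLV layout: one length byte covering type plus payload, the type byte, then the little-endian payload. A standalone sketch of the resulting byte stream (the EIR_APPEARANCE type value 0x19 is assumed here for illustration):

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of eir_append_le16() above. */
static uint16_t demo_append_le16(uint8_t *eir, uint16_t eir_len,
				 uint8_t type, uint16_t data)
{
	eir[eir_len++] = 1 + sizeof(data);	/* length: type + payload */
	eir[eir_len++] = type;
	eir[eir_len++] = data & 0xff;		/* little endian */
	eir[eir_len++] = data >> 8;
	return eir_len;
}

int main(void)
{
	uint8_t eir[8];
	uint16_t i, len;

	len = demo_append_le16(eir, 0, 0x19 /* EIR_APPEARANCE, assumed */,
			       0x03c1);
	for (i = 0; i < len; i++)
		printf("%02x ", eir[i]);
	printf("\n");	/* prints: 03 19 c1 03 */
	return 0;
}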
+
+static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
+{
+ u16 eir_len = 0;
+ size_t name_len;
+
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
+ hdev->dev_class, 3);
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
+ hdev->appearance);
+
+ name_len = strlen(hdev->dev_name);
+ eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, name_len);
+
+ name_len = strlen(hdev->short_name);
+ eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
+ hdev->short_name, name_len);
+
+ return eir_len;
+}
+
+static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 data_len)
+{
+ char buf[512];
+ struct mgmt_rp_read_ext_info *rp = (void *)buf;
+ u16 eir_len;
+
+ BT_DBG("sock %p %s", sk, hdev->name);
+
+ memset(buf, 0, sizeof(buf));
+
+ hci_dev_lock(hdev);
+
+ bacpy(&rp->bdaddr, &hdev->bdaddr);
+
+ rp->version = hdev->hci_ver;
+ rp->manufacturer = cpu_to_le16(hdev->manufacturer);
+
+ rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
+ rp->current_settings = cpu_to_le32(get_current_settings(hdev));
+
+ eir_len = append_eir_data_to_buf(hdev, rp->eir);
+ rp->eir_len = cpu_to_le16(eir_len);
+
+ hci_dev_unlock(hdev);
+
+ /* If this command is called at least once, then the events
+ * for class of device and local name changes are disabled
+ * and only the new extended controller information event
+ * is used.
+ */
+ hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
+ hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
+ hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
+
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
+ sizeof(*rp) + eir_len);
+}
+
+static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
+{
+ char buf[512];
+ struct mgmt_ev_ext_info_changed *ev = (void *)buf;
+ u16 eir_len;
+
+ memset(buf, 0, sizeof(buf));
+
+ eir_len = append_eir_data_to_buf(hdev, ev->eir);
+ ev->eir_len = cpu_to_le16(eir_len);
+
+ return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
+ sizeof(*ev) + eir_len,
+ HCI_MGMT_EXT_INFO_EVENTS, skip);
+}
+
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
__le32 settings = cpu_to_le32(get_current_settings(hdev));
@@ -922,7 +1028,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
- hci_req_clear_adv_instance(hdev, NULL, 0x00, false);
+ hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
__hci_req_disable_advertising(&req);
@@ -1000,8 +1106,8 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
__le32 ev = cpu_to_le32(get_current_settings(hdev));
- return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
- sizeof(ev), skip);
+ return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
+ sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
int mgmt_new_settings(struct hci_dev *hdev)
@@ -1690,7 +1796,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
enabled = lmp_host_le_capable(hdev);
if (!val)
- hci_req_clear_adv_instance(hdev, NULL, 0x00, true);
+ hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
if (!hdev_is_powered(hdev) || val == enabled) {
bool changed = false;
@@ -2513,8 +2619,8 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
BT_DBG("");
if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
- return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
- MGMT_STATUS_INVALID_PARAMS, NULL, 0);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -2932,6 +3038,35 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
+static void adv_expire(struct hci_dev *hdev, u32 flags)
+{
+ struct adv_info *adv_instance;
+ struct hci_request req;
+ int err;
+
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
+ if (!adv_instance)
+ return;
+
+ /* stop if the current instance doesn't need to be changed */
+ if (!(adv_instance->flags & flags))
+ return;
+
+ cancel_adv_timeout(hdev);
+
+ adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
+ if (!adv_instance)
+ return;
+
+ hci_req_init(&req, hdev);
+ err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
+ true);
+ if (err)
+ return;
+
+ hci_req_run(&req, NULL);
+}
+
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct mgmt_cp_set_local_name *cp;
@@ -2947,13 +3082,17 @@ static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
cp = cmd->param;
- if (status)
+ if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
mgmt_status(status));
- else
+ } else {
mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
cp, sizeof(*cp));
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
+ }
+
mgmt_pending_remove(cmd);
unlock:
@@ -2993,8 +3132,9 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
if (err < 0)
goto failed;
- err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
- data, len, sk);
+ err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
+ len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
+ ext_info_changed(hdev, sk);
goto failed;
}
@@ -3029,6 +3169,40 @@ failed:
return err;
}
+static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_set_appearance *cp = data;
+ u16 appearance;
+ int err;
+
+ BT_DBG("");
+
+ if (!lmp_le_capable(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ appearance = le16_to_cpu(cp->appearance);
+
+ hci_dev_lock(hdev);
+
+ if (hdev->appearance != appearance) {
+ hdev->appearance = appearance;
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
+
+ ext_info_changed(hdev, sk);
+ }
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
+ 0);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@@ -4869,7 +5043,7 @@ static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
int err;
memset(&rp, 0, sizeof(rp));
- memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
+ memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
if (status)
goto complete;
@@ -5501,17 +5675,6 @@ unlock:
return err;
}
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
- u8 data_len)
-{
- eir[eir_len++] = sizeof(type) + data_len;
- eir[eir_len++] = type;
- memcpy(&eir[eir_len], data, data_len);
- eir_len += data_len;
-
- return eir_len;
-}
-
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
u16 opcode, struct sk_buff *skb)
{
@@ -5815,6 +5978,8 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
flags |= MGMT_ADV_FLAG_DISCOV;
flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
+ flags |= MGMT_ADV_FLAG_APPEARANCE;
+ flags |= MGMT_ADV_FLAG_LOCAL_NAME;
if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
flags |= MGMT_ADV_FLAG_TX_POWER;
@@ -5871,28 +6036,59 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
- u8 len, bool is_adv_data)
+static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
{
u8 max_len = HCI_MAX_AD_LENGTH;
- int i, cur_len;
- bool flags_managed = false;
- bool tx_power_managed = false;
if (is_adv_data) {
if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
MGMT_ADV_FLAG_LIMITED_DISCOV |
- MGMT_ADV_FLAG_MANAGED_FLAGS)) {
- flags_managed = true;
+ MGMT_ADV_FLAG_MANAGED_FLAGS))
max_len -= 3;
- }
- if (adv_flags & MGMT_ADV_FLAG_TX_POWER) {
- tx_power_managed = true;
+ if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
max_len -= 3;
- }
+ } else {
+ /* reserve room for at least 1 byte of the name */
+ if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
+ max_len -= 3;
+
+ if (adv_flags & MGMT_ADV_FLAG_APPEARANCE)
+ max_len -= 4;
}
+ return max_len;
+}
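
The reservations above are easier to follow with the flag tests spelled out as booleans; a minimal standalone restatement (HCI_MAX_AD_LENGTH is 31 for legacy advertising PDUs):

/* Standalone restatement of tlv_data_max_len() with boolean inputs. */
static unsigned int demo_tlv_max_len(int is_adv_data, int flags_managed,
				     int tx_power, int local_name,
				     int appearance)
{
	unsigned int max_len = 31;	/* HCI_MAX_AD_LENGTH */

	if (is_adv_data) {
		if (flags_managed)
			max_len -= 3;	/* len + type + flags byte */
		if (tx_power)
			max_len -= 3;	/* len + type + tx power byte */
	} else {
		if (local_name)
			max_len -= 3;	/* len + type + >= 1 name byte */
		if (appearance)
			max_len -= 4;	/* len + type + le16 value */
	}
	return max_len;
}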
+
+static bool flags_managed(u32 adv_flags)
+{
+ return adv_flags & (MGMT_ADV_FLAG_DISCOV |
+ MGMT_ADV_FLAG_LIMITED_DISCOV |
+ MGMT_ADV_FLAG_MANAGED_FLAGS);
+}
+
+static bool tx_power_managed(u32 adv_flags)
+{
+ return adv_flags & MGMT_ADV_FLAG_TX_POWER;
+}
+
+static bool name_managed(u32 adv_flags)
+{
+ return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
+}
+
+static bool appearance_managed(u32 adv_flags)
+{
+ return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
+}
+
+static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+{
+ int i, cur_len;
+ u8 max_len;
+
+ max_len = tlv_data_max_len(adv_flags, is_adv_data);
+
if (len > max_len)
return false;
@@ -5900,10 +6096,21 @@ static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
cur_len = data[i];
- if (flags_managed && data[i + 1] == EIR_FLAGS)
+ if (data[i + 1] == EIR_FLAGS &&
+ (!is_adv_data || flags_managed(adv_flags)))
+ return false;
+
+ if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
+ return false;
+
+ if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
return false;
- if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
+ if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
+ return false;
+
+ if (data[i + 1] == EIR_APPEARANCE &&
+ appearance_managed(adv_flags))
return false;
/* If the current field length would exceed the total data
@@ -6027,8 +6234,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
- !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
+ if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
cp->scan_rsp_len, false)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -6175,7 +6382,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
hci_req_init(&req, hdev);
- hci_req_clear_adv_instance(hdev, &req, cp->instance, true);
+ hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
if (list_empty(&hdev->adv_instances))
__hci_req_disable_advertising(&req);
@@ -6211,23 +6418,6 @@ unlock:
return err;
}
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
-{
- u8 max_len = HCI_MAX_AD_LENGTH;
-
- if (is_adv_data) {
- if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
- MGMT_ADV_FLAG_LIMITED_DISCOV |
- MGMT_ADV_FLAG_MANAGED_FLAGS))
- max_len -= 3;
-
- if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
- max_len -= 3;
- }
-
- return max_len;
-}
-
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
@@ -6356,6 +6546,9 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
+ HCI_MGMT_UNTRUSTED },
+ { set_appearance, MGMT_SET_APPEARANCE_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
@@ -6494,9 +6687,12 @@ void __mgmt_power_off(struct hci_dev *hdev)
mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
- if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
- mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
- zero_cod, sizeof(zero_cod), NULL);
+ if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
+ mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
+ zero_cod, sizeof(zero_cod),
+ HCI_MGMT_DEV_CLASS_EVENTS, NULL);
+ ext_info_changed(hdev, NULL);
+ }
new_settings(hdev, match.sk);
@@ -7092,9 +7288,11 @@ void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
- if (!status)
- mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
- dev_class, 3, NULL);
+ if (!status) {
+ mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
+ 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
+ ext_info_changed(hdev, NULL);
+ }
if (match.sk)
sock_put(match.sk);
@@ -7123,8 +7321,9 @@ void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
return;
}
- mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
- cmd ? cmd->sk : NULL);
+ mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+ HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
+ ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index 8c30c7eb8bef..c933bd08c1fe 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -21,12 +21,41 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <asm/unaligned.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>
#include "mgmt_util.h"
+static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
+ u16 opcode, u16 len, void *buf)
+{
+ struct hci_mon_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ put_unaligned_le32(cookie, skb_put(skb, 4));
+ put_unaligned_le16(opcode, skb_put(skb, 2));
+
+ if (buf)
+ memcpy(skb_put(skb, len), buf, len);
+
+ __net_timestamp(skb);
+
+ hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
+ hdr->index = index;
+ hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
+
+ return skb;
+}
+
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
void *data, u16 data_len, int flag, struct sock *skip_sk)
{
@@ -52,14 +81,18 @@ int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
__net_timestamp(skb);
hci_send_to_channel(channel, skb, flag, skip_sk);
- kfree_skb(skb);
+ if (channel == HCI_CHANNEL_CONTROL)
+ hci_send_monitor_ctrl_event(hdev, event, data, data_len,
+ skb_get_ktime(skb), flag, skip_sk);
+
+ kfree_skb(skb);
return 0;
}
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
- struct sk_buff *skb;
+ struct sk_buff *skb, *mskb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_status *ev;
int err;
@@ -80,17 +113,30 @@ int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
ev->status = status;
ev->opcode = cpu_to_le16(cmd);
+ mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
+ MGMT_EV_CMD_STATUS, sizeof(*ev), ev);
+ if (mskb)
+ skb->tstamp = mskb->tstamp;
+ else
+ __net_timestamp(skb);
+
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
kfree_skb(skb);
+ if (mskb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(mskb);
+ }
+
return err;
}
int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
void *rp, size_t rp_len)
{
- struct sk_buff *skb;
+ struct sk_buff *skb, *mskb;
struct mgmt_hdr *hdr;
struct mgmt_ev_cmd_complete *ev;
int err;
@@ -114,10 +160,24 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
if (rp)
memcpy(ev->data, rp, rp_len);
+ mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
+ MGMT_EV_CMD_COMPLETE,
+ sizeof(*ev) + rp_len, ev);
+ if (mskb)
+ skb->tstamp = mskb->tstamp;
+ else
+ __net_timestamp(skb);
+
err = sock_queue_rcv_skb(sk, skb);
if (err < 0)
kfree_skb(skb);
+ if (mskb) {
+ hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb,
+ HCI_SOCK_TRUSTED, NULL);
+ kfree_skb(mskb);
+ }
+
return err;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 4c1a16a96ae5..43faf2aea2ab 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -3387,7 +3387,10 @@ int smp_register(struct hci_dev *hdev)
if (!lmp_sc_capable(hdev)) {
debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs,
hdev, &force_bredr_smp_fops);
- return 0;
+
+ /* Flag can already be set here (due to power toggle) */
+ if (!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP))
+ return 0;
}
if (WARN_ON(hdev->smp_bredr_data)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8a4368461fb0..855b72fbe1da 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,13 +80,10 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;
- if (dev->flags & IFF_NOARP)
+ if ((dev->flags & IFF_NOARP) ||
+ !pskb_may_pull(skb, arp_hdr_len(dev)))
return;
- if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev->stats.tx_dropped++;
- return;
- }
parp = arp_hdr(skb);
if (parp->ar_pro != htons(ETH_P_IP) ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a5423a1eec05..c5fea9393946 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1138,7 +1138,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
} else {
err = br_ip6_multicast_add_group(br, port,
&grec->grec_mca, vid);
- if (!err)
+ if (err)
break;
}
}
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 341caa0ca63a..d8ad73b38de2 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -134,17 +134,36 @@ void br_stp_disable_port(struct net_bridge_port *p)
br_become_root_bridge(br);
}
-static void br_stp_start(struct net_bridge *br)
+static int br_stp_call_user(struct net_bridge *br, char *arg)
{
- int r;
- char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
+ char *argv[] = { BR_STP_PROG, br->dev->name, arg, NULL };
char *envp[] = { NULL };
+ int rc;
+
+ /* call userspace STP and report program errors */
+ rc = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
+ if (rc > 0) {
+ if (rc & 0xff)
+ br_debug(br, BR_STP_PROG " received signal %d\n",
+ rc & 0x7f);
+ else
+ br_debug(br, BR_STP_PROG " exited with code %d\n",
+ (rc >> 8) & 0xff);
+ }
+
+ return rc;
+}
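
call_usermodehelper() with UMH_WAIT_PROC returns a wait(2)-style status word, which is what the masks above unpack; a standalone sketch of the same decode:

#include <stdio.h>

/* Decode a wait(2)-style status the way br_stp_call_user() does. */
static void decode_umh_status(int rc)
{
	if (rc & 0xff)
		printf("received signal %d\n", rc & 0x7f);
	else
		printf("exited with code %d\n", (rc >> 8) & 0xff);
}

int main(void)
{
	decode_umh_status(0x0100);	/* exit code 1 */
	decode_umh_status(0x0009);	/* terminated by signal 9 */
	return 0;
}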
+
+static void br_stp_start(struct net_bridge *br)
+{
struct net_bridge_port *p;
+ int err = -ENOENT;
if (net_eq(dev_net(br->dev), &init_net))
- r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
- else
- r = -ENOENT;
+ err = br_stp_call_user(br, "start");
+
+ if (err && err != -ENOENT)
+ br_err(br, "failed to start userspace STP (%d)\n", err);
spin_lock_bh(&br->lock);
@@ -153,9 +172,10 @@ static void br_stp_start(struct net_bridge *br)
else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
__br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
- if (r == 0) {
+ if (!err) {
br->stp_enabled = BR_USER_STP;
br_debug(br, "userspace STP started\n");
+
/* Stop hello and hold timers */
del_timer(&br->hello_timer);
list_for_each_entry(p, &br->port_list, list)
@@ -173,14 +193,13 @@ static void br_stp_start(struct net_bridge *br)
static void br_stp_stop(struct net_bridge *br)
{
- int r;
- char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
- char *envp[] = { NULL };
struct net_bridge_port *p;
+ int err;
if (br->stp_enabled == BR_USER_STP) {
- r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
- br_info(br, "userspace STP stopped, return code %d\n", r);
+ err = br_stp_call_user(br, "stop");
+ if (err)
+ br_err(br, "failed to stop userspace STP (%d)\n", err);
/* To start timers on any ports left in blocking */
mod_timer(&br->hello_timer, jiffies + br->hello_time);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index dd7133216c9c..f5c11bbe27db 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -368,6 +368,8 @@ ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) {
+ if (!IS_ERR(match))
+ module_put(match->me);
request_module("ebt_%s", m->u.name);
match = xt_find_match(NFPROTO_BRIDGE, m->u.name, 0);
}
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 4b901d9f2e7c..ad47a921b701 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -86,6 +86,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
+ .validate = nft_meta_set_validate,
};
static const struct nft_expr_ops *
diff --git a/net/core/dev.c b/net/core/dev.c
index 064919425b7d..c0c291f721d6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3904,8 +3904,7 @@ static void net_tx_action(struct softirq_action *h)
}
}
-#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
- (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
@@ -3965,6 +3964,22 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
}
/**
+ * netdev_is_rx_handler_busy - check if receive handler is registered
+ * @dev: device to check
+ *
+ * Check if a receive handler is already registered for a given device.
+ * Return true if there is one.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+bool netdev_is_rx_handler_busy(struct net_device *dev)
+{
+ ASSERT_RTNL();
+ return dev && rtnl_dereference(dev->rx_handler);
+}
+EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
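
A hedged usage sketch for the new helper; my_handler and my_priv are placeholders, and the caller is assumed to hold RTNL as the kerneldoc requires:

	if (netdev_is_rx_handler_busy(dev))
		return -EBUSY;	/* another subsystem already owns rx */

	err = netdev_rx_handler_register(dev, my_handler, my_priv);
	if (err)
		return err;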
+
+/**
* netdev_rx_handler_register - register receive handler
* @dev: device to register a handler for
* @rx_handler: receive handler to register
diff --git a/net/core/filter.c b/net/core/filter.c
index a83766be1ad2..00351cdf7d0c 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -94,14 +94,13 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
}
EXPORT_SYMBOL(sk_filter_trim_cap);
-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
- return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
+ return skb_get_poff(skb);
}
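
The BPF_CALL_x() wrappers replace the hand-written u64 register casts; roughly, the macro splits each helper into a typed inner function plus a five-register shim along these lines (a simplified sketch, not the exact include/linux/filter.h expansion):

static u64 ____skb_get_pay_offset(struct sk_buff *skb)
{
	return skb_get_poff(skb);
}

static u64 __skb_get_pay_offset(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return ____skb_get_pay_offset((struct sk_buff *)(unsigned long)r1);
}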
-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -120,9 +119,8 @@ static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
return 0;
}
-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx;
struct nlattr *nla;
if (skb_is_nonlinear(skb))
@@ -145,7 +143,7 @@ static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
return 0;
}
-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
+BPF_CALL_0(__get_raw_cpu_id)
{
return raw_smp_processor_id();
}
@@ -233,9 +231,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
case SKF_AD_OFF + SKF_AD_HATYPE:
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0);
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
BPF_REG_TMP, BPF_REG_CTX,
offsetof(struct sk_buff, dev));
/* if (tmp != 0) goto pc + 1 */
@@ -1365,6 +1362,11 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
return err;
}
+static int bpf_try_make_head_writable(struct sk_buff *skb)
+{
+ return bpf_try_make_writable(skb, skb_headlen(skb));
+}
+
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
@@ -1377,12 +1379,9 @@ static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
-static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
+BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
+ const void *, from, u32, len, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- unsigned int offset = (unsigned int) r2;
- void *from = (void *) (long) r3;
- unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
@@ -1417,12 +1416,9 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
+ void *, to, u32, len)
{
- const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1;
- unsigned int offset = (unsigned int) r2;
- void *to = (void *)(unsigned long) r3;
- unsigned int len = (unsigned int) r4;
void *ptr;
if (unlikely(offset > 0xffff))
@@ -1450,10 +1446,31 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
.arg4_type = ARG_CONST_STACK_SIZE,
};
-static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
+{
+ /* The idea is the following: should the needed direct read/write
+ * test fail at runtime, we can pull in more data and redo the
+ * test, since implicitly we invalidate the previous checks here.
+ *
+ * Or, since we know how much we need to make readable/writable,
+ * this can be done once at the beginning of the program for the
+ * direct access case. By this we overcome the limitation of only
+ * the current headroom being accessible.
+ */
+ return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
+}
+
+static const struct bpf_func_proto bpf_skb_pull_data_proto = {
+ .func = bpf_skb_pull_data,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
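
From the program side, the intended pattern looks roughly like this (a hedged restricted-C sketch for a tc classifier; helper and context declarations are assumed to come from the usual bpf headers):

	/* Pull the full linear header once, up front. */
	if (bpf_skb_pull_data(skb, 0))	/* 0 means skb_headlen(skb) */
		return TC_ACT_SHOT;

	data = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	if (data + sizeof(struct ethhdr) > data_end)
		return TC_ACT_OK;	/* verifier-mandated bounds check */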
+
+BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
+ u64, from, u64, to, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- unsigned int offset = (unsigned int) r2;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
@@ -1495,12 +1512,11 @@ static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
+BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
+ u64, from, u64, to, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
- unsigned int offset = (unsigned int) r2;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
@@ -1548,12 +1564,11 @@ static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
.arg5_type = ARG_ANYTHING,
};
-static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
+BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
+ __be32 *, to, u32, to_size, __wsum, seed)
{
struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
- u64 diff_size = from_size + to_size;
- __be32 *from = (__be32 *) (long) r1;
- __be32 *to = (__be32 *) (long) r3;
+ u32 diff_size = from_size + to_size;
int i, j = 0;
/* This is quite flexible, some examples:
@@ -1579,6 +1594,7 @@ static u64 bpf_csum_diff(u64 r1, u64 from_size, u64 r3, u64 to_size, u64 seed)
static const struct bpf_func_proto bpf_csum_diff_proto = {
.func = bpf_csum_diff,
.gpl_only = false,
+ .pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_STACK,
.arg2_type = ARG_CONST_STACK_SIZE_OR_ZERO,
@@ -1587,6 +1603,26 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
.arg5_type = ARG_ANYTHING,
};
+BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
+{
+ /* The interface is to be used in combination with bpf_csum_diff()
+ * for direct packet writes. csum rotation for alignment as well
+ * as emulating csum_sub() can be done from the eBPF program.
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ return (skb->csum = csum_add(skb->csum, csum));
+
+ return -ENOTSUPP;
+}
+
+static const struct bpf_func_proto bpf_csum_update_proto = {
+ .func = bpf_csum_update,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
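
The intended pairing from the program side, as a hedged sketch (old_val/new_val stand in for a 4-byte field captured around a direct packet write):

	__be32 old_val = 0, new_val = 0;	/* field before/after a write */
	__wsum diff;

	diff = bpf_csum_diff(&old_val, 4, &new_val, 4, 0);
	if (bpf_csum_update(skb, diff) < 0) {
		/* skb was not CHECKSUM_COMPLETE; nothing to fix up */
	}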
+
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
return dev_forward_skb(dev, skb);
@@ -1611,10 +1647,11 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
return ret;
}
-static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
struct net_device *dev;
+ struct sk_buff *clone;
+ int ret;
if (unlikely(flags & ~(BPF_F_INGRESS)))
return -EINVAL;
@@ -1623,14 +1660,25 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
if (unlikely(!dev))
return -EINVAL;
- skb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!skb))
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!clone))
return -ENOMEM;
- bpf_push_mac_rcsum(skb);
+ /* For direct write, we need to keep the invariant that the skbs
+ * we're dealing with are uncloned. Should uncloning fail here,
+ * free the just generated clone so the original skb becomes
+ * uncloned again.
+ */
+ ret = bpf_try_make_head_writable(skb);
+ if (unlikely(ret)) {
+ kfree_skb(clone);
+ return -ENOMEM;
+ }
+
+ bpf_push_mac_rcsum(clone);
return flags & BPF_F_INGRESS ?
- __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
+ __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone);
}
static const struct bpf_func_proto bpf_clone_redirect_proto = {
@@ -1649,7 +1697,7 @@ struct redirect_info {
static DEFINE_PER_CPU(struct redirect_info, redirect_info);
-static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);
@@ -1688,9 +1736,9 @@ static const struct bpf_func_proto bpf_redirect_proto = {
.arg2_type = ARG_ANYTHING,
};
-static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
- return task_get_classid((struct sk_buff *) (unsigned long) r1);
+ return task_get_classid(skb);
}
static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
@@ -1700,9 +1748,9 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
- return dst_tclassid((struct sk_buff *) (unsigned long) r1);
+ return dst_tclassid(skb);
}
static const struct bpf_func_proto bpf_get_route_realm_proto = {
@@ -1712,14 +1760,14 @@ static const struct bpf_func_proto bpf_get_route_realm_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
/* If skb_clear_hash() was called due to mangling, we can
* trigger SW recalculation here. Later access to hash
* can then use the inline skb->hash via context directly
* instead of calling this helper again.
*/
- return skb_get_hash((struct sk_buff *) (unsigned long) r1);
+ return skb_get_hash(skb);
}
static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
@@ -1729,10 +1777,25 @@ static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
+{
+ /* After all direct packet writes, this can be used once to
+ * trigger a lazy recalc on the next skb_get_hash() invocation.
+ */
+ skb_clear_hash(skb);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
+ .func = bpf_set_hash_invalid,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
+ u16, vlan_tci)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- __be16 vlan_proto = (__force __be16) r2;
int ret;
if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
@@ -1757,9 +1820,8 @@ const struct bpf_func_proto bpf_skb_vlan_push_proto = {
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
-static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
int ret;
bpf_push_mac_rcsum(skb);
@@ -1934,10 +1996,9 @@ static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
return -ENOTSUPP;
}
-static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
+ u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- __be16 proto = (__force __be16) r2;
int ret;
if (unlikely(flags))
@@ -1974,11 +2035,8 @@ static const struct bpf_func_proto bpf_skb_change_proto_proto = {
.arg3_type = ARG_ANYTHING,
};
-static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u32 pkt_type = r2;
-
/* We only allow a restricted subset to be changed for now. */
if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
!skb_pkt_type_ok(pkt_type)))
@@ -2010,8 +2068,7 @@ static u32 __bpf_skb_min_len(const struct sk_buff *skb)
static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
- return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
- 65536;
+ return skb->dev->mtu + skb->dev->hard_header_len;
}
static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
@@ -2030,12 +2087,11 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
return __skb_trim_rcsum(skb, new_len);
}
-static u64 bpf_skb_change_tail(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+ u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *)(long) r1;
u32 max_len = __bpf_skb_max_len(skb);
u32 min_len = __bpf_skb_min_len(skb);
- u32 new_len = (u32) r2;
int ret;
if (unlikely(flags || new_len > max_len || new_len < min_len))
@@ -2084,19 +2140,14 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
bool bpf_helper_changes_skb_data(void *func)
{
- if (func == bpf_skb_vlan_push)
- return true;
- if (func == bpf_skb_vlan_pop)
- return true;
- if (func == bpf_skb_store_bytes)
- return true;
- if (func == bpf_skb_change_proto)
- return true;
- if (func == bpf_skb_change_tail)
- return true;
- if (func == bpf_l3_csum_replace)
- return true;
- if (func == bpf_l4_csum_replace)
+ if (func == bpf_skb_vlan_push ||
+ func == bpf_skb_vlan_pop ||
+ func == bpf_skb_store_bytes ||
+ func == bpf_skb_change_proto ||
+ func == bpf_skb_change_tail ||
+ func == bpf_skb_pull_data ||
+ func == bpf_l3_csum_replace ||
+ func == bpf_l4_csum_replace)
return true;
return false;
@@ -2115,13 +2166,10 @@ static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
return 0;
}
-static u64 bpf_skb_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
- u64 meta_size)
+BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
+ u64, flags, void *, meta, u64, meta_size)
{
- struct sk_buff *skb = (struct sk_buff *)(long) r1;
- struct bpf_map *map = (struct bpf_map *)(long) r2;
u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
- void *meta = (void *)(long) r4;
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
@@ -2148,10 +2196,9 @@ static unsigned short bpf_tunnel_key_af(u64 flags)
return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}
-static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
+ u32, size, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
u8 compat[sizeof(struct bpf_tunnel_key)];
void *to_orig = to;
@@ -2216,10 +2263,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING,
};
-static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u8 *to = (u8 *) (long) r2;
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
int err;
@@ -2254,10 +2299,9 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
static struct metadata_dst __percpu *md_dst;
-static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
+ const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
struct metadata_dst *md = this_cpu_ptr(md_dst);
u8 compat[sizeof(struct bpf_tunnel_key)];
struct ip_tunnel_info *info;
@@ -2275,7 +2319,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
*/
memcpy(compat, from, size);
memset(compat + size, 0, sizeof(compat) - size);
- from = (struct bpf_tunnel_key *)compat;
+ from = (const struct bpf_tunnel_key *) compat;
break;
default:
return -EINVAL;
@@ -2325,10 +2369,9 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.arg4_type = ARG_ANYTHING,
};
-static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
+ const u8 *, from, u32, size)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1;
- u8 *from = (u8 *) (long) r2;
struct ip_tunnel_info *info = skb_tunnel_info(skb);
const struct metadata_dst *md = this_cpu_ptr(md_dst);
@@ -2374,23 +2417,20 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
}
}
-static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
+ u32, idx)
{
- struct sk_buff *skb = (struct sk_buff *)(long)r1;
- struct bpf_map *map = (struct bpf_map *)(long)r2;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct cgroup *cgrp;
struct sock *sk;
- u32 i = (u32)r3;
- sk = skb->sk;
+ sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk))
return -ENOENT;
-
- if (unlikely(i >= array->map.max_entries))
+ if (unlikely(idx >= array->map.max_entries))
return -E2BIG;
- cgrp = READ_ONCE(array->ptrs[i]);
+ cgrp = READ_ONCE(array->ptrs[idx]);
if (unlikely(!cgrp))
return -EAGAIN;
@@ -2413,13 +2453,10 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
return 0;
}
-static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
- u64 meta_size)
+BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
+ u64, flags, void *, meta, u64, meta_size)
{
- struct xdp_buff *xdp = (struct xdp_buff *)(long) r1;
- struct bpf_map *map = (struct bpf_map *)(long) r2;
u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
- void *meta = (void *)(long) r4;
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
@@ -2475,8 +2512,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_skb_store_bytes_proto;
case BPF_FUNC_skb_load_bytes:
return &bpf_skb_load_bytes_proto;
+ case BPF_FUNC_skb_pull_data:
+ return &bpf_skb_pull_data_proto;
case BPF_FUNC_csum_diff:
return &bpf_csum_diff_proto;
+ case BPF_FUNC_csum_update:
+ return &bpf_csum_update_proto;
case BPF_FUNC_l3_csum_replace:
return &bpf_l3_csum_replace_proto;
case BPF_FUNC_l4_csum_replace:
@@ -2509,6 +2550,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
return &bpf_get_route_realm_proto;
case BPF_FUNC_get_hash_recalc:
return &bpf_get_hash_recalc_proto;
+ case BPF_FUNC_set_hash_invalid:
+ return &bpf_set_hash_invalid_proto;
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
case BPF_FUNC_get_smp_processor_id:
@@ -2526,6 +2569,8 @@ xdp_func_proto(enum bpf_func_id func_id)
switch (func_id) {
case BPF_FUNC_perf_event_output:
return &bpf_xdp_event_output_proto;
+ case BPF_FUNC_get_smp_processor_id:
+ return &bpf_get_smp_processor_id_proto;
default:
return sk_filter_func_proto(func_id);
}
@@ -2568,6 +2613,45 @@ static bool sk_filter_is_valid_access(int off, int size,
return __is_valid_access(off, size, type);
}
+static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
+ const struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (!direct_write)
+ return 0;
+
+ /* if (!skb->cloned)
+ * goto start;
+ *
+ * (Fast path; otherwise we conservatively assume the skb
+ * might be a clone and do the rest in the helper.)
+ */
+ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
+ *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
+
+ /* ret = bpf_skb_pull_data(skb, 0); */
+ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+ *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
+ *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_skb_pull_data);
+ /* if (!ret)
+ * goto restore;
+ * return TC_ACT_SHOT;
+ */
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
+ *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT);
+ *insn++ = BPF_EXIT_INSN();
+
+ /* restore: */
+ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
+ /* start: */
+ *insn++ = prog->insnsi[0];
+
+ return insn - insn_buf;
+}
+
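A note on the raw instruction emission above: eBPF branch offsets are counted in instructions, so the BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7) skips the seven instructions that follow (pull call, abort, register restore) and lands on the relocated first program instruction. A rough C equivalent of the generated prologue, for orientation only — it leans on kernel symbols from this patch plus a hypothetical run_original_program() stand-in, so it is not meant to compile on its own:

    static int tc_prologue_equivalent(struct sk_buff *skb)
    {
            /* LDX + AND + JEQ: test the cloned bit at CLONED_OFFSET() */
            if (skb->cloned) {
                    /* MOV + XOR + CALL: ret = bpf_skb_pull_data(skb, 0) */
                    if (bpf_skb_pull_data(skb, 0) != 0)
                            return TC_ACT_SHOT;     /* MOV32 + EXIT */
                    /* restore: MOV64 puts skb back into r1 */
            }
            /* start: prog->insnsi[0] executes here */
            return run_original_program(skb);       /* hypothetical */
    }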
static bool tc_cls_act_is_valid_access(int off, int size,
enum bpf_access_type type,
enum bpf_reg_type *reg_type)
@@ -2605,7 +2689,7 @@ static bool __is_valid_xdp_access(int off, int size,
return false;
if (off % size != 0)
return false;
- if (size != 4)
+ if (size != sizeof(__u32))
return false;
return true;
@@ -2636,10 +2720,10 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
- struct bpf_insn *insn_buf,
- struct bpf_prog *prog)
+static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
@@ -2686,7 +2770,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
case offsetof(struct __sk_buff, ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
dst_reg, src_reg,
offsetof(struct sk_buff, dev));
*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
@@ -2727,7 +2811,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
dst_reg, src_reg, insn);
case offsetof(struct __sk_buff, cb[0]) ...
- offsetof(struct __sk_buff, cb[4]):
+ offsetof(struct __sk_buff, cb[4]):
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
prog->cb_access = 1;
@@ -2751,7 +2835,7 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
break;
case offsetof(struct __sk_buff, data):
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, data)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
dst_reg, src_reg,
offsetof(struct sk_buff, data));
break;
@@ -2760,8 +2844,8 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
ctx_off -= offsetof(struct __sk_buff, data_end);
ctx_off += offsetof(struct sk_buff, cb);
ctx_off += offsetof(struct bpf_skb_data_end, data_end);
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(sizeof(void *)),
- dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg,
+ ctx_off);
break;
case offsetof(struct __sk_buff, tc_index):
@@ -2787,6 +2871,31 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
return insn - insn_buf;
}
+static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+ int src_reg, int ctx_off,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ switch (ctx_off) {
+ case offsetof(struct __sk_buff, ifindex):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
+ dst_reg, src_reg,
+ offsetof(struct sk_buff, dev));
+ *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ offsetof(struct net_device, ifindex));
+ break;
+ default:
+ return sk_filter_convert_ctx_access(type, dst_reg, src_reg,
+ ctx_off, insn_buf, prog);
+ }
+
+ return insn - insn_buf;
+}
+
static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn_buf,
@@ -2796,12 +2905,12 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
switch (ctx_off) {
case offsetof(struct xdp_md, data):
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
dst_reg, src_reg,
offsetof(struct xdp_buff, data));
break;
case offsetof(struct xdp_md, data_end):
- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data_end)),
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
dst_reg, src_reg,
offsetof(struct xdp_buff, data_end));
break;
@@ -2813,13 +2922,14 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
static const struct bpf_verifier_ops sk_filter_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
- .convert_ctx_access = bpf_net_convert_ctx_access,
+ .convert_ctx_access = sk_filter_convert_ctx_access,
};
static const struct bpf_verifier_ops tc_cls_act_ops = {
.get_func_proto = tc_cls_act_func_proto,
.is_valid_access = tc_cls_act_is_valid_access,
- .convert_ctx_access = bpf_net_convert_ctx_access,
+ .convert_ctx_access = tc_cls_act_convert_ctx_access,
+ .gen_prologue = tc_cls_act_prologue,
};
static const struct bpf_verifier_ops xdp_ops = {
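Most of the churn in the filter.c portion of this diff is mechanical: helpers that used to take five raw u64 arguments and open-code the pointer casts are rewritten with the BPF_CALL_n macros. A simplified two-argument sketch of the idea — the real macro family lives in include/linux/filter.h and is more general, so treat this expansion as illustrative:

    typedef unsigned long long u64;     /* stand-ins for kernel types */
    typedef unsigned int u32;

    #define SKETCH_BPF_CALL_2(name, t1, a1, t2, a2)                     \
            static u64 ____##name(t1 a1, t2 a2);                        \
            static u64 name(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)     \
            {                                                           \
                    /* the wrapper keeps the uniform u64 calling       \
                     * convention and hides the casts each helper      \
                     * used to open-code */                             \
                    return ____##name((t1)(unsigned long)r1,            \
                                      (t2)(unsigned long)r2);           \
            }                                                           \
            static u64 ____##name(t1 a1, t2 a2)

    /* usage mirrors the BPF_CALL_3 sites above */
    SKETCH_BPF_CALL_2(demo_helper, u32 *, ptr, u32, val)
    {
            *ptr = val;     /* typed body, no manual casts needed */
            return 0;
    }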
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a2879c0f6c4c..1a7b80f73376 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -750,11 +750,13 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
void __skb_get_hash(struct sk_buff *skb)
{
struct flow_keys keys;
+ u32 hash;
__flow_hash_secret_init();
- __skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
- flow_keys_have_l4(&keys));
+ hash = ___skb_get_hash(skb, &keys, hashrnd);
+
+ __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
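The __skb_get_hash() change looks cosmetic but is not: previously, keys was populated by ___skb_get_hash() inside the argument list of __skb_set_sw_hash() while another argument read keys, and C leaves the evaluation order of function arguments unspecified. A standalone toy showing the same trap (names are mine, not from the patch):

    #include <stdio.h>

    static int state;

    static int fill(void) { state = 42; return 1; }
    static int peek(void) { return state; }   /* may see 0 or 42 */

    int main(void)
    {
            /* Whether fill() or peek() runs first is unspecified in C,
             * which is exactly what the two-statement rewrite avoids. */
            printf("%d %d\n", fill(), peek());
            return 0;
    }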
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1dfca1c3f8f5..3ac8946bf244 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -843,7 +843,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev,
size += nla_total_size(num_vfs * sizeof(struct nlattr));
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
- nla_total_size(sizeof(struct ifla_vf_vlan)) +
+ nla_total_size(MAX_VLAN_LIST_LEN *
+ sizeof(struct nlattr)) +
+ nla_total_size(MAX_VLAN_LIST_LEN *
+ sizeof(struct ifla_vf_vlan_info)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
nla_total_size(sizeof(struct ifla_vf_rate)) +
nla_total_size(sizeof(struct ifla_vf_link_state)) +
@@ -1111,14 +1114,15 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
struct nlattr *vfinfo)
{
struct ifla_vf_rss_query_en vf_rss_query_en;
+ struct nlattr *vf, *vfstats, *vfvlanlist;
struct ifla_vf_link_state vf_linkstate;
+ struct ifla_vf_vlan_info vf_vlan_info;
struct ifla_vf_spoofchk vf_spoofchk;
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_stats vf_stats;
struct ifla_vf_trust vf_trust;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_rate vf_rate;
- struct nlattr *vf, *vfstats;
struct ifla_vf_mac vf_mac;
struct ifla_vf_info ivi;
@@ -1135,11 +1139,14 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
* IFLA_VF_LINK_STATE_AUTO which equals zero
*/
ivi.linkstate = 0;
+ /* The default VLAN protocol is 802.1Q */
+ ivi.vlan_proto = htons(ETH_P_8021Q);
if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
return 0;
vf_mac.vf =
vf_vlan.vf =
+ vf_vlan_info.vf =
vf_rate.vf =
vf_tx_rate.vf =
vf_spoofchk.vf =
@@ -1150,6 +1157,9 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
+ vf_vlan_info.vlan = ivi.vlan;
+ vf_vlan_info.qos = ivi.qos;
+ vf_vlan_info.vlan_proto = ivi.vlan_proto;
vf_tx_rate.rate = ivi.max_tx_rate;
vf_rate.min_tx_rate = ivi.min_tx_rate;
vf_rate.max_tx_rate = ivi.max_tx_rate;
@@ -1158,10 +1168,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
vf_rss_query_en.setting = ivi.rss_query_en;
vf_trust.setting = ivi.trusted;
vf = nla_nest_start(skb, IFLA_VF_INFO);
- if (!vf) {
- nla_nest_cancel(skb, vfinfo);
- return -EMSGSIZE;
- }
+ if (!vf)
+ goto nla_put_vfinfo_failure;
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
@@ -1177,17 +1185,23 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
&vf_rss_query_en) ||
nla_put(skb, IFLA_VF_TRUST,
sizeof(vf_trust), &vf_trust))
- return -EMSGSIZE;
+ goto nla_put_vf_failure;
+ vfvlanlist = nla_nest_start(skb, IFLA_VF_VLAN_LIST);
+ if (!vfvlanlist)
+ goto nla_put_vf_failure;
+ if (nla_put(skb, IFLA_VF_VLAN_INFO, sizeof(vf_vlan_info),
+ &vf_vlan_info)) {
+ nla_nest_cancel(skb, vfvlanlist);
+ goto nla_put_vf_failure;
+ }
+ nla_nest_end(skb, vfvlanlist);
memset(&vf_stats, 0, sizeof(vf_stats));
if (dev->netdev_ops->ndo_get_vf_stats)
dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
&vf_stats);
vfstats = nla_nest_start(skb, IFLA_VF_STATS);
- if (!vfstats) {
- nla_nest_cancel(skb, vf);
- nla_nest_cancel(skb, vfinfo);
- return -EMSGSIZE;
- }
+ if (!vfstats)
+ goto nla_put_vf_failure;
if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS,
vf_stats.rx_packets, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS,
@@ -1199,11 +1213,19 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST,
vf_stats.broadcast, IFLA_VF_STATS_PAD) ||
nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST,
- vf_stats.multicast, IFLA_VF_STATS_PAD))
- return -EMSGSIZE;
+ vf_stats.multicast, IFLA_VF_STATS_PAD)) {
+ nla_nest_cancel(skb, vfstats);
+ goto nla_put_vf_failure;
+ }
nla_nest_end(skb, vfstats);
nla_nest_end(skb, vf);
return 0;
+
+nla_put_vf_failure:
+ nla_nest_cancel(skb, vf);
+nla_put_vfinfo_failure:
+ nla_nest_cancel(skb, vfinfo);
+ return -EMSGSIZE;
}
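The rewrite of rtnl_fill_vfinfo() above replaces scattered nla_nest_cancel()/return pairs with two fall-through labels, so every failure path unwinds the open nests in reverse order. A minimal sketch of the same pattern; begin_nest(), cancel_nest(), end_nest() and put_attrs() are hypothetical stand-ins for the nla_nest_* and nla_put calls:

    int fill(struct msg *m)
    {
            struct nest *outer, *inner;

            outer = begin_nest(m);
            if (!outer)
                    return -EMSGSIZE;       /* nothing to unwind yet */
            inner = begin_nest(m);
            if (!inner)
                    goto cancel_outer;
            if (put_attrs(m) < 0)
                    goto cancel_inner;
            end_nest(m, inner);
            end_nest(m, outer);
            return 0;

    cancel_inner:
            cancel_nest(m, inner);          /* fall through: inner first... */
    cancel_outer:
            cancel_nest(m, outer);          /* ...then outer */
            return -EMSGSIZE;
    }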
static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
@@ -1448,6 +1470,7 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) },
[IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_VLAN_LIST] = { .type = NLA_NESTED },
[IFLA_VF_TX_RATE] = { .len = sizeof(struct ifla_vf_tx_rate) },
[IFLA_VF_SPOOFCHK] = { .len = sizeof(struct ifla_vf_spoofchk) },
[IFLA_VF_RATE] = { .len = sizeof(struct ifla_vf_rate) },
@@ -1704,7 +1727,34 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
- ivv->qos);
+ ivv->qos,
+ htons(ETH_P_8021Q));
+ if (err < 0)
+ return err;
+ }
+
+ if (tb[IFLA_VF_VLAN_LIST]) {
+ struct ifla_vf_vlan_info *ivvl[MAX_VLAN_LIST_LEN];
+ struct nlattr *attr;
+ int rem, len = 0;
+
+ err = -EOPNOTSUPP;
+ if (!ops->ndo_set_vf_vlan)
+ return err;
+
+ nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) {
+ if (nla_type(attr) != IFLA_VF_VLAN_INFO ||
+ nla_len(attr) < NLA_HDRLEN) {
+ return -EINVAL;
+ }
+ if (len >= MAX_VLAN_LIST_LEN)
+ return -EOPNOTSUPP;
+ ivvl[len] = nla_data(attr);
+
+ len++;
+ }
+ err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
+ ivvl[0]->qos, ivvl[0]->vlan_proto);
if (err < 0)
return err;
}
@@ -3577,6 +3627,91 @@ static bool stats_attr_valid(unsigned int mask, int attrid, int idxattr)
(!idxattr || idxattr == attrid);
}
+#define IFLA_OFFLOAD_XSTATS_FIRST (IFLA_OFFLOAD_XSTATS_UNSPEC + 1)
+static int rtnl_get_offload_stats_attr_size(int attr_id)
+{
+ switch (attr_id) {
+ case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+ return sizeof(struct rtnl_link_stats64);
+ }
+
+ return 0;
+}
+
+static int rtnl_get_offload_stats(struct sk_buff *skb, struct net_device *dev,
+ int *prividx)
+{
+ struct nlattr *attr = NULL;
+ int attr_id, size;
+ void *attr_data;
+ int err;
+
+ if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
+ dev->netdev_ops->ndo_get_offload_stats))
+ return -ENODATA;
+
+ for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
+ attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
+ if (attr_id < *prividx)
+ continue;
+
+ size = rtnl_get_offload_stats_attr_size(attr_id);
+ if (!size)
+ continue;
+
+ if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+ continue;
+
+ attr = nla_reserve_64bit(skb, attr_id, size,
+ IFLA_OFFLOAD_XSTATS_UNSPEC);
+ if (!attr)
+ goto nla_put_failure;
+
+ attr_data = nla_data(attr);
+ memset(attr_data, 0, size);
+ err = dev->netdev_ops->ndo_get_offload_stats(attr_id, dev,
+ attr_data);
+ if (err)
+ goto get_offload_stats_failure;
+ }
+
+ if (!attr)
+ return -ENODATA;
+
+ *prividx = 0;
+ return 0;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+get_offload_stats_failure:
+ *prividx = attr_id;
+ return err;
+}
+
+static int rtnl_get_offload_stats_size(const struct net_device *dev)
+{
+ int nla_size = 0;
+ int attr_id;
+ int size;
+
+ if (!(dev->netdev_ops && dev->netdev_ops->ndo_has_offload_stats &&
+ dev->netdev_ops->ndo_get_offload_stats))
+ return 0;
+
+ for (attr_id = IFLA_OFFLOAD_XSTATS_FIRST;
+ attr_id <= IFLA_OFFLOAD_XSTATS_MAX; attr_id++) {
+ if (!dev->netdev_ops->ndo_has_offload_stats(attr_id))
+ continue;
+ size = rtnl_get_offload_stats_attr_size(attr_id);
+ nla_size += nla_total_size_64bit(size);
+ }
+
+ if (nla_size != 0)
+ nla_size += nla_total_size(0);
+
+ return nla_size;
+}
+
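rtnl_get_offload_stats() above is written to be resumable: if the skb fills up mid-loop, the failing attr_id is parked in *prividx so the next dump pass skips everything already emitted, and the index is reset only once the loop completes. The shape of that pattern, with a hypothetical emit():

    enum { N_ITEMS = 8 };

    static int emit(int id);                /* hypothetical; 0 on overflow */

    int fill_items(int *resume_from)
    {
            int id;

            for (id = *resume_from; id < N_ITEMS; id++) {
                    if (!emit(id)) {
                            *resume_from = id;  /* park progress */
                            return -1;          /* caller retries later */
                    }
            }
            *resume_from = 0;                   /* done: reset for next dump */
            return 0;
    }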
static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, unsigned int filter_mask,
@@ -3586,6 +3721,7 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
struct nlmsghdr *nlh;
struct nlattr *attr;
int s_prividx = *prividx;
+ int err;
ASSERT_RTNL();
@@ -3614,8 +3750,6 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
if (ops && ops->fill_linkxstats) {
- int err;
-
*idxattr = IFLA_STATS_LINK_XSTATS;
attr = nla_nest_start(skb,
IFLA_STATS_LINK_XSTATS);
@@ -3639,8 +3773,6 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
if (master)
ops = master->rtnl_link_ops;
if (ops && ops->fill_linkxstats) {
- int err;
-
*idxattr = IFLA_STATS_LINK_XSTATS_SLAVE;
attr = nla_nest_start(skb,
IFLA_STATS_LINK_XSTATS_SLAVE);
@@ -3655,6 +3787,24 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
}
}
+ if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS,
+ *idxattr)) {
+ *idxattr = IFLA_STATS_LINK_OFFLOAD_XSTATS;
+ attr = nla_nest_start(skb, IFLA_STATS_LINK_OFFLOAD_XSTATS);
+ if (!attr)
+ goto nla_put_failure;
+
+ err = rtnl_get_offload_stats(skb, dev, prividx);
+ if (err == -ENODATA)
+ nla_nest_cancel(skb, attr);
+ else
+ nla_nest_end(skb, attr);
+
+ if (err && err != -ENODATA)
+ goto nla_put_failure;
+ *idxattr = 0;
+ }
+
nlmsg_end(skb, nlh);
return 0;
@@ -3669,10 +3819,6 @@ nla_put_failure:
return -EMSGSIZE;
}
-static const struct nla_policy ifla_stats_policy[IFLA_STATS_MAX + 1] = {
- [IFLA_STATS_LINK_64] = { .len = sizeof(struct rtnl_link_stats64) },
-};
-
static size_t if_nlmsg_stats_size(const struct net_device *dev,
u32 filter_mask)
{
@@ -3712,6 +3858,9 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
}
}
+ if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
+ size += rtnl_get_offload_stats_size(dev);
+
return size;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3864b4b68fa1..d36c7548952f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2445,6 +2445,25 @@ void skb_queue_purge(struct sk_buff_head *list)
EXPORT_SYMBOL(skb_queue_purge);
/**
+ * skb_rbtree_purge - empty a skb rbtree
+ * @root: root of the rbtree to empty
+ *
+ * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
+ * the rbtree and one reference is dropped. This function does not take
+ * any lock. Synchronization should be handled by the caller (e.g., TCP
+ * out-of-order queue is protected by the socket lock).
+ */
+void skb_rbtree_purge(struct rb_root *root)
+{
+ struct sk_buff *skb, *next;
+
+ rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+ kfree_skb(skb);
+
+ *root = RB_ROOT;
+}
+
+/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
@@ -3078,11 +3097,31 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
sg = !!(features & NETIF_F_SG);
csum = !!can_checksum_protocol(features, proto);
- /* GSO partial only requires that we trim off any excess that
- * doesn't fit into an MSS sized block, so take care of that
- * now.
- */
- if (sg && csum && (features & NETIF_F_GSO_PARTIAL)) {
+ if (sg && csum && (mss != GSO_BY_FRAGS)) {
+ if (!(features & NETIF_F_GSO_PARTIAL)) {
+ struct sk_buff *iter;
+
+ if (!list_skb ||
+ !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+ goto normal;
+
+ /* Split the buffer at the frag_list pointer.
+ * This is based on the assumption that all
+ * buffers in the chain, excluding the last,
+ * contain the same amount of data.
+ */
+ skb_walk_frags(head_skb, iter) {
+ if (skb_headlen(iter))
+ goto normal;
+
+ len -= iter->len;
+ }
+ }
+
+ /* GSO partial only requires that we trim off any excess that
+ * doesn't fit into an MSS sized block, so take care of that
+ * now.
+ */
partial_segs = len / mss;
if (partial_segs > 1)
mss *= partial_segs;
@@ -3090,6 +3129,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
partial_segs = 0;
}
+normal:
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
@@ -3281,21 +3321,29 @@ perform_csum_check:
*/
segs->prev = tail;
- /* Update GSO info on first skb in partial sequence. */
if (partial_segs) {
+ struct sk_buff *iter;
int type = skb_shinfo(head_skb)->gso_type;
+ unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
/* Update type to add partial and then remove dodgy if set */
- type |= SKB_GSO_PARTIAL;
+ type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
type &= ~SKB_GSO_DODGY;
/* Update GSO info and prepare to start updating headers on
* our way back down the stack of protocols.
*/
- skb_shinfo(segs)->gso_size = skb_shinfo(head_skb)->gso_size;
- skb_shinfo(segs)->gso_segs = partial_segs;
- skb_shinfo(segs)->gso_type = type;
- SKB_GSO_CB(segs)->data_offset = skb_headroom(segs) + doffset;
+ for (iter = segs; iter; iter = iter->next) {
+ skb_shinfo(iter)->gso_size = gso_size;
+ skb_shinfo(iter)->gso_segs = partial_segs;
+ skb_shinfo(iter)->gso_type = type;
+ SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
+ }
+
+ if (tail->len - doffset <= gso_size)
+ skb_shinfo(tail)->gso_size = 0;
+ else if (tail != segs)
+ skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
}
/* Following permits correct backpressure, for protocols
@@ -4474,8 +4522,10 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len)
}
EXPORT_SYMBOL(skb_ensure_writable);
-/* remove VLAN header from packet and update csum accordingly. */
-static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
+/* remove VLAN header from packet and update csum accordingly.
+ * expects a non skb_vlan_tag_present skb with a vlan tag payload
+ */
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
struct vlan_hdr *vhdr;
unsigned int offset = skb->data - skb_mac_header(skb);
@@ -4506,6 +4556,7 @@ pull:
return err;
}
+EXPORT_SYMBOL(__skb_vlan_pop);
int skb_vlan_pop(struct sk_buff *skb)
{
@@ -4516,9 +4567,7 @@ int skb_vlan_pop(struct sk_buff *skb)
if (likely(skb_vlan_tag_present(skb))) {
skb->vlan_tci = 0;
} else {
- if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)) ||
- skb->len < VLAN_ETH_HLEN))
+ if (unlikely(!eth_type_vlan(skb->protocol)))
return 0;
err = __skb_vlan_pop(skb, &vlan_tci);
@@ -4526,9 +4575,7 @@ int skb_vlan_pop(struct sk_buff *skb)
return err;
}
/* move next vlan tag to hw accel tag */
- if (likely((skb->protocol != htons(ETH_P_8021Q) &&
- skb->protocol != htons(ETH_P_8021AD)) ||
- skb->len < VLAN_ETH_HLEN))
+ if (likely(!eth_type_vlan(skb->protocol)))
return 0;
vlan_proto = skb->protocol;
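Both hunks above collapse the open-coded ETH_P_8021Q/ETH_P_8021AD protocol tests into eth_type_vlan(). The helper itself is not shown in this diff; presumably it is a small inline in include/linux/if_vlan.h along these lines (hedged reconstruction, not quoted from the patch):

    static inline bool eth_type_vlan(__be16 ethertype)
    {
            switch (ethertype) {
            case htons(ETH_P_8021Q):
            case htons(ETH_P_8021AD):
                    return true;
            default:
                    return false;
            }
    }

Note that the old skb->len < VLAN_ETH_HLEN test disappears from these call sites; __skb_vlan_pop, now exported, is documented above as expecting an skb that actually carries a VLAN tag payload.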
diff --git a/net/core/sock.c b/net/core/sock.c
index 51a730485649..038e660ef844 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1340,7 +1340,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
if (!try_module_get(prot->owner))
goto out_free_sec;
sk_tx_queue_clear(sk);
- cgroup_sk_alloc(&sk->sk_cgrp_data);
}
return sk;
@@ -1400,6 +1399,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sock_net_set(sk, net);
atomic_set(&sk->sk_wmem_alloc, 1);
+ cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);
}
@@ -1544,6 +1544,9 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_priority = 0;
newsk->sk_incoming_cpu = raw_smp_processor_id();
atomic64_set(&newsk->sk_cookie, 0);
+
+ cgroup_sk_alloc(&newsk->sk_cgrp_data);
+
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index ff7736f7ff42..96e47c539bee 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -38,4 +38,7 @@ config NET_DSA_TAG_EDSA
config NET_DSA_TAG_TRAILER
bool
+config NET_DSA_TAG_QCA
+ bool
+
endif
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 8af4ded70f1c..a3380ed0e0be 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -7,3 +7,4 @@ dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
+dsa_core-$(CONFIG_NET_DSA_TAG_QCA) += tag_qca.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index d8d267e9a872..a6902c1e2f28 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -54,6 +54,9 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_BRCM
[DSA_TAG_PROTO_BRCM] = &brcm_netdev_ops,
#endif
+#ifdef CONFIG_NET_DSA_TAG_QCA
+ [DSA_TAG_PROTO_QCA] = &qca_netdev_ops,
+#endif
[DSA_TAG_PROTO_NONE] = &none_ops,
};
@@ -375,9 +378,11 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
if (ret < 0)
goto out;
- ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
- if (ret < 0)
- goto out;
+ if (ops->set_addr) {
+ ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+ }
if (!ds->slave_mii_bus && ops->phy_read) {
ds->slave_mii_bus = devm_mdiobus_alloc(parent);
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 8278385dcd21..f8a7d9aab437 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -304,13 +304,11 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
if (err < 0)
return err;
- err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
- if (err < 0)
- return err;
-
- err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
- if (err < 0)
- return err;
+ if (ds->ops->set_addr) {
+ err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
+ if (err < 0)
+ return err;
+ }
if (!ds->slave_mii_bus && ds->ops->phy_read) {
ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 00077a9c97f4..6cfd7388834e 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -81,5 +81,7 @@ extern const struct dsa_device_ops trailer_netdev_ops;
/* tag_brcm.c */
extern const struct dsa_device_ops brcm_netdev_ops;
+/* tag_qca.c */
+extern const struct dsa_device_ops qca_netdev_ops;
#endif
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 9ecbe787f102..6b1282c006b1 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -69,6 +69,30 @@ static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
return !!p->bridge_dev;
}
+static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *dp = &ds->ports[port];
+
+ if (ds->ops->port_stp_state_set)
+ ds->ops->port_stp_state_set(ds, port, state);
+
+ if (ds->ops->port_fast_age) {
+ /* Fast-age FDB entries or flush the appropriate forwarding
+ * database for the given port when moving it from the Learning or
+ * Forwarding state to the Disabled, Blocking, or Listening state.
+ */
+
+ if ((dp->stp_state == BR_STATE_LEARNING ||
+ dp->stp_state == BR_STATE_FORWARDING) &&
+ (state == BR_STATE_DISABLED ||
+ state == BR_STATE_BLOCKING ||
+ state == BR_STATE_LISTENING))
+ ds->ops->port_fast_age(ds, port);
+ }
+
+ dp->stp_state = state;
+}
+
static int dsa_slave_open(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
@@ -104,8 +128,7 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_promisc;
}
- if (ds->ops->port_stp_state_set)
- ds->ops->port_stp_state_set(ds, p->port, stp_state);
+ dsa_port_set_stp_state(ds, p->port, stp_state);
if (p->phy)
phy_start(p->phy);
@@ -147,8 +170,7 @@ static int dsa_slave_close(struct net_device *dev)
if (ds->ops->port_disable)
ds->ops->port_disable(ds, p->port, p->phy);
- if (ds->ops->port_stp_state_set)
- ds->ops->port_stp_state_set(ds, p->port, BR_STATE_DISABLED);
+ dsa_port_set_stp_state(ds, p->port, BR_STATE_DISABLED);
return 0;
}
@@ -354,7 +376,7 @@ static int dsa_slave_stp_state_set(struct net_device *dev,
if (switchdev_trans_ph_prepare(trans))
return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
- ds->ops->port_stp_state_set(ds, p->port, attr->u.stp_state);
+ dsa_port_set_stp_state(ds, p->port, attr->u.stp_state);
return 0;
}
@@ -556,8 +578,7 @@ static void dsa_slave_bridge_port_leave(struct net_device *dev)
/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
* so allow it to be in BR_STATE_FORWARDING to be kept functional
*/
- if (ds->ops->port_stp_state_set)
- ds->ops->port_stp_state_set(ds, p->port, BR_STATE_FORWARDING);
+ dsa_port_set_stp_state(ds, p->port, BR_STATE_FORWARDING);
}
static int dsa_slave_port_attr_get(struct net_device *dev,
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
new file mode 100644
index 000000000000..0c90cacee7aa
--- /dev/null
+++ b/net/dsa/tag_qca.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/etherdevice.h>
+#include "dsa_priv.h"
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION_MASK GENMASK(15, 14)
+#define QCA_HDR_RECV_VERSION_S 14
+#define QCA_HDR_RECV_PRIORITY_MASK GENMASK(13, 11)
+#define QCA_HDR_RECV_PRIORITY_S 11
+#define QCA_HDR_RECV_TYPE_MASK GENMASK(10, 6)
+#define QCA_HDR_RECV_TYPE_S 6
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
+
+#define QCA_HDR_XMIT_VERSION_MASK GENMASK(15, 14)
+#define QCA_HDR_XMIT_VERSION_S 14
+#define QCA_HDR_XMIT_PRIORITY_MASK GENMASK(13, 11)
+#define QCA_HDR_XMIT_PRIORITY_S 11
+#define QCA_HDR_XMIT_CONTROL_MASK GENMASK(10, 8)
+#define QCA_HDR_XMIT_CONTROL_S 8
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT_MASK GENMASK(6, 0)
+
+static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ u16 *phdr, hdr;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (skb_cow_head(skb, 0) < 0)
+ goto out_free;
+
+ skb_push(skb, QCA_HDR_LEN);
+
+ memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
+ phdr = (u16 *)(skb->data + 2 * ETH_ALEN);
+
+ /* Set the version field, and set destination port information */
+ hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
+ QCA_HDR_XMIT_FROM_CPU |
+ BIT(p->port);
+
+ *phdr = htons(hdr);
+
+ return skb;
+
+out_free:
+ kfree_skb(skb);
+ return NULL;
+}
+
+static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct dsa_switch_tree *dst = dev->dsa_ptr;
+ struct dsa_switch *ds;
+ u8 ver;
+ int port;
+ __be16 *phdr, hdr;
+
+ if (unlikely(!dst))
+ goto out_drop;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
+ goto out_drop;
+
+ /* The QCA header is added by the switch between the source address
+ * and the Ethertype. At this point, skb->data points to the Ethertype,
+ * so the header should be right before it.
+ */
+ phdr = (__be16 *)(skb->data - 2);
+ hdr = ntohs(*phdr);
+
+ /* Make sure the version is correct */
+ ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
+ if (unlikely(ver != QCA_HDR_VERSION))
+ goto out_drop;
+
+ /* Remove QCA tag and recalculate checksum */
+ skb_pull_rcsum(skb, QCA_HDR_LEN);
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - QCA_HDR_LEN,
+ ETH_HLEN - QCA_HDR_LEN);
+
+ /* This protocol doesn't support cascading multiple switches, so it's
+ * safe to assume the switch is the first in the tree.
+ */
+ ds = dst->ds[0];
+ if (!ds)
+ goto out_drop;
+
+ /* Get source port information */
+ port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);
+ if (!ds->ports[port].netdev)
+ goto out_drop;
+
+ /* Update skb & forward the frame accordingly */
+ skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = ds->ports[port].netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ skb->dev->stats.rx_packets++;
+ skb->dev->stats.rx_bytes += skb->len;
+
+ netif_receive_skb(skb);
+
+ return 0;
+
+out_drop:
+ kfree_skb(skb);
+out:
+ return 0;
+}
+
+const struct dsa_device_ops qca_netdev_ops = {
+ .xmit = qca_tag_xmit,
+ .rcv = qca_tag_rcv,
+};
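For reference, extracting the receive-side fields is plain mask-and-shift on the two-byte header once it is in host order; qca_tag_rcv() above does exactly this with QCA_HDR_RECV_VERSION_MASK/_S and QCA_HDR_RECV_SOURCE_PORT_MASK after the ntohs(). A standalone illustration with the GENMASK values spelled out as hex so it compiles outside the kernel:

    #include <stdint.h>

    static unsigned int qca_hdr_version(uint16_t hdr)
    {
            return (hdr & 0xc000) >> 14;    /* GENMASK(15, 14) >> 14 */
    }

    static unsigned int qca_hdr_source_port(uint16_t hdr)
    {
            return hdr & 0x0007;            /* GENMASK(2, 0) */
    }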
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 50d6a9b49f6c..300b06888fdf 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -640,6 +640,21 @@ config TCP_CONG_CDG
D.A. Hayes and G. Armitage. "Revisiting TCP congestion control using
delay gradients." In Networking 2011. Preprint: http://goo.gl/No3vdg
+config TCP_CONG_BBR
+ tristate "BBR TCP"
+ default n
+ ---help---
+
+ BBR (Bottleneck Bandwidth and RTT) TCP congestion control aims to
+ maximize network utilization and minimize queues. It builds an explicit
+ model of the bottleneck delivery rate and path round-trip
+ propagation delay. It tolerates packet loss and delay unrelated to
+ congestion. It can operate over LAN, WAN, cellular, wifi, or cable
+ modem links. It can coexist with flows that use loss-based congestion
+ control, and can operate with shallow buffers, deep buffers,
+ bufferbloat, policers, or AQM schemes that do not provide a delay
+ signal. It requires the fq ("Fair Queue") pacing packet scheduler.
+
choice
prompt "Default TCP congestion control"
default DEFAULT_CUBIC
@@ -674,6 +689,9 @@ choice
config DEFAULT_CDG
bool "CDG" if TCP_CONG_CDG=y
+ config DEFAULT_BBR
+ bool "BBR" if TCP_CONG_BBR=y
+
config DEFAULT_RENO
bool "Reno"
endchoice
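Once TCP_CONG_BBR is built (or made the default above), a congestion control algorithm can also be selected per socket from userspace with the long-standing TCP_CONGESTION socket option; this is standard socket API, not something introduced by this patch:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Ask the kernel to use BBR on one TCP socket; typically fails with
     * ENOENT if the module isn't available. Per the help text above,
     * BBR wants the fq pacing qdisc on the egress path. */
    static int use_bbr(int fd)
    {
            static const char name[] = "bbr";

            return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                              name, strlen(name));
    }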
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 24629b6f57cc..bc6a6c8b9bcd 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -8,7 +8,7 @@ obj-y := route.o inetpeer.o protocol.o \
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
- tcp_recovery.o \
+ tcp_rate.o tcp_recovery.o \
tcp_offload.o datagram.o raw.o udp.o udplite.o \
udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
@@ -41,6 +41,7 @@ obj-$(CONFIG_INET_DIAG) += inet_diag.o
obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o
obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
+obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o
obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o
obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e94b47be0019..1effc986739e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1192,7 +1192,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- bool udpfrag = false, fixedid = false, encap;
+ bool udpfrag = false, fixedid = false, gso_partial, encap;
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
unsigned int offset = 0;
@@ -1245,6 +1245,8 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (IS_ERR_OR_NULL(segs))
goto out;
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
skb = segs;
do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
@@ -1259,9 +1261,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
iph->id = htons(id);
id += skb_shinfo(skb)->gso_segs;
}
- tot_len = skb_shinfo(skb)->gso_size +
- SKB_GSO_CB(skb)->data_offset +
- skb->head - (unsigned char *)iph;
+
+ if (gso_partial)
+ tot_len = skb_shinfo(skb)->gso_size +
+ SKB_GSO_CB(skb)->data_offset +
+ skb->head - (unsigned char *)iph;
+ else
+ tot_len = skb->len - nhoff;
} else {
if (!fixedid)
iph->id = htons(id++);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 415e117967c7..062a67ca9a21 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -2232,7 +2232,7 @@ static struct devinet_sysctl_table {
};
static int __devinet_sysctl_register(struct net *net, char *dev_name,
- struct ipv4_devconf *p)
+ int ifindex, struct ipv4_devconf *p)
{
int i;
struct devinet_sysctl_table *t;
@@ -2255,6 +2255,8 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
goto free;
p->sysctl = t;
+
+ inet_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
return 0;
free:
@@ -2286,7 +2288,7 @@ static int devinet_sysctl_register(struct in_device *idev)
if (err)
return err;
err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
- &idev->cnf);
+ idev->dev->ifindex, &idev->cnf);
if (err)
neigh_sysctl_unregister(idev->arp_parms);
return err;
@@ -2347,11 +2349,12 @@ static __net_init int devinet_init_net(struct net *net)
}
#ifdef CONFIG_SYSCTL
- err = __devinet_sysctl_register(net, "all", all);
+ err = __devinet_sysctl_register(net, "all", NETCONFA_IFINDEX_ALL, all);
if (err < 0)
goto err_reg_all;
- err = __devinet_sysctl_register(net, "default", dflt);
+ err = __devinet_sysctl_register(net, "default",
+ NETCONFA_IFINDEX_DEFAULT, dflt);
if (err < 0)
goto err_reg_dflt;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 317c31939732..4e56a4c20a3c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -503,6 +503,7 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
if (!dev)
return -ENODEV;
cfg->fc_oif = dev->ifindex;
+ cfg->fc_table = l3mdev_fib_table(dev);
if (colon) {
struct in_ifaddr *ifa;
struct in_device *in_dev = __in_dev_get_rtnl(dev);
@@ -1021,7 +1022,7 @@ no_promotions:
* First of all, we scan fib_info list searching
* for stray nexthop entries, then ignite fib_flush.
*/
- if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
+ if (fib_sync_down_addr(dev, ifa->ifa_local))
fib_flush(dev_net(dev));
}
}
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 6e9ea69e5f75..770bebed6b28 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -56,6 +56,9 @@ int __fib_lookup(struct net *net, struct flowi4 *flp,
};
int err;
+ /* update flow if oif or iif point to device enslaved to l3mdev */
+ l3mdev_update_flow(net, flowi4_to_flowi(flp));
+
err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
#ifdef CONFIG_IP_ROUTE_CLASSID
if (arg.rule)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 8066ccc48a17..388d3e21629b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1057,6 +1057,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_priority = cfg->fc_priority;
fi->fib_prefsrc = cfg->fc_prefsrc;
fi->fib_type = cfg->fc_type;
+ fi->fib_tb_id = cfg->fc_table;
fi->fib_nhs = nhs;
change_nexthops(fi) {
@@ -1337,18 +1338,21 @@ nla_put_failure:
* referring to it.
* - device went down -> we must shutdown all nexthops going via it.
*/
-int fib_sync_down_addr(struct net *net, __be32 local)
+int fib_sync_down_addr(struct net_device *dev, __be32 local)
{
int ret = 0;
unsigned int hash = fib_laddr_hashfn(local);
struct hlist_head *head = &fib_info_laddrhash[hash];
+ struct net *net = dev_net(dev);
+ int tb_id = l3mdev_fib_table(dev);
struct fib_info *fi;
if (!fib_info_laddrhash || local == 0)
return 0;
hlist_for_each_entry(fi, head, fib_lhash) {
- if (!net_eq(fi->fib_net, net))
+ if (!net_eq(fi->fib_net, net) ||
+ fi->fib_tb_id != tb_id)
continue;
if (fi->fib_prefsrc == local) {
fi->fib_flags |= RTNH_F_DEAD;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index e2ffc2a5c7db..241f27bbd7ad 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1081,7 +1081,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
struct trie *t = (struct trie *)tb->tb_data;
struct fib_alias *fa, *new_fa;
struct key_vector *l, *tp;
- unsigned int nlflags = 0;
+ u16 nlflags = NLM_F_EXCL;
struct fib_info *fi;
u8 plen = cfg->fc_dst_len;
u8 slen = KEYLENGTH - plen;
@@ -1126,6 +1126,8 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
if (cfg->fc_nlflags & NLM_F_EXCL)
goto out;
+ nlflags &= ~NLM_F_EXCL;
+
/* We have 2 goals:
* 1. Find exact match for type, scope, fib_info to avoid
* duplicate routes
@@ -1151,6 +1153,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
struct fib_info *fi_drop;
u8 state;
+ nlflags |= NLM_F_REPLACE;
fa = fa_first;
if (fa_match) {
if (fa == fa_match)
@@ -1191,7 +1194,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
if (state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
- tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
+ tb->tb_id, &cfg->fc_nlinfo, nlflags);
goto succeeded;
}
@@ -1203,7 +1206,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
goto out;
if (cfg->fc_nlflags & NLM_F_APPEND)
- nlflags = NLM_F_APPEND;
+ nlflags |= NLM_F_APPEND;
else
fa = fa_first;
}
@@ -1211,6 +1214,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
if (!(cfg->fc_nlflags & NLM_F_CREATE))
goto out;
+ nlflags |= NLM_F_CREATE;
err = -ENOBUFS;
new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
if (!new_fa)
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index ecd1e09dbbf1..96e0efecefa6 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -24,7 +24,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
__be16 protocol = skb->protocol;
u16 mac_len = skb->mac_len;
int gre_offset, outer_hlen;
- bool need_csum, ufo;
+ bool need_csum, ufo, gso_partial;
if (!skb->encapsulation)
goto out;
@@ -69,6 +69,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
goto out;
}
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
outer_hlen = skb_tnl_header_len(skb);
gre_offset = outer_hlen - tnl_hlen;
skb = segs;
@@ -96,7 +98,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
greh = (struct gre_base_hdr *)skb_transport_header(skb);
pcsum = (__sum16 *)(greh + 1);
- if (skb_is_gso(skb)) {
+ if (gso_partial) {
unsigned int partial_adj;
/* Adjust checksum to account for the fact that
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index abfbe492ebfe..e4d16fc5bbb3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -99,6 +99,7 @@ static size_t inet_sk_attr_size(void)
+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -109,7 +110,8 @@ static size_t inet_sk_attr_size(void)
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
- struct user_namespace *user_ns)
+ struct user_namespace *user_ns,
+ bool net_admin)
{
const struct inet_sock *inet = inet_sk(sk);
@@ -136,6 +138,9 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
}
#endif
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
+ goto errout;
+
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
@@ -149,7 +154,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
struct sk_buff *skb, const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh,
+ bool net_admin)
{
const struct tcp_congestion_ops *ca_ops;
const struct inet_diag_handler *handler;
@@ -175,7 +181,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_timer = 0;
r->idiag_retrans = 0;
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -274,10 +280,11 @@ static int inet_csk_diag_fill(struct sock *sk,
const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh,
+ bool net_admin)
{
- return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
- user_ns, portid, seq, nlmsg_flags, unlh);
+ return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
+ portid, seq, nlmsg_flags, unlh, net_admin);
}
static int inet_twsk_diag_fill(struct sock *sk,
@@ -319,8 +326,9 @@ static int inet_twsk_diag_fill(struct sock *sk,
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh, bool net_admin)
{
+ struct request_sock *reqsk = inet_reqsk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
@@ -334,7 +342,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
inet_diag_msg_common_fill(r, sk);
r->idiag_state = TCP_SYN_RECV;
r->idiag_timer = 1;
- r->idiag_retrans = inet_reqsk(sk)->num_retrans;
+ r->idiag_retrans = reqsk->num_retrans;
BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
offsetof(struct sock, sk_cookie));
@@ -346,6 +354,10 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
r->idiag_uid = 0;
r->idiag_inode = 0;
+ if (net_admin && nla_put_u32(skb, INET_DIAG_MARK,
+ inet_rsk(reqsk)->ir_mark))
+ return -EMSGSIZE;
+
nlmsg_end(skb, nlh);
return 0;
}
@@ -354,7 +366,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
const struct inet_diag_req_v2 *r,
struct user_namespace *user_ns,
u32 portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh, bool net_admin)
{
if (sk->sk_state == TCP_TIME_WAIT)
return inet_twsk_diag_fill(sk, skb, portid, seq,
@@ -362,10 +374,10 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
if (sk->sk_state == TCP_NEW_SYN_RECV)
return inet_req_diag_fill(sk, skb, portid, seq,
- nlmsg_flags, unlh);
+ nlmsg_flags, unlh, net_admin);
return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
- nlmsg_flags, unlh);
+ nlmsg_flags, unlh, net_admin);
}
struct sock *inet_diag_find_one_icsk(struct net *net,
@@ -435,7 +447,8 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
err = sk_diag_fill(sk, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh);
+ nlh->nlmsg_seq, 0, nlh,
+ netlink_net_capable(in_skb, CAP_NET_ADMIN));
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
nlmsg_free(rep);
@@ -796,7 +809,8 @@ static int inet_csk_diag_dump(struct sock *sk,
struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r,
- const struct nlattr *bc)
+ const struct nlattr *bc,
+ bool net_admin)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
@@ -804,7 +818,8 @@ static int inet_csk_diag_dump(struct sock *sk,
return inet_csk_diag_fill(sk, skb, r,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh,
+ net_admin);
}
static void twsk_build_assert(void)
@@ -840,6 +855,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
int i, num, s_i, s_num;
u32 idiag_states = r->idiag_states;
+ bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
if (idiag_states & TCPF_SYN_RECV)
idiag_states |= TCPF_NEW_SYN_RECV;
@@ -880,7 +896,8 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
cb->args[3] > 0)
goto next_listen;
- if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+ if (inet_csk_diag_dump(sk, skb, cb, r,
+ bc, net_admin) < 0) {
spin_unlock_bh(&ilb->lock);
goto done;
}
@@ -948,7 +965,7 @@ skip_listen_ht:
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh);
+ cb->nlh, net_admin);
if (res < 0) {
spin_unlock_bh(lock);
goto done;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 113cc43df789..576f705d8180 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -246,25 +246,6 @@ static void gre_err(struct sk_buff *skb, u32 info)
ipgre_err(skb, info, &tpi);
}
-static __be64 key_to_tunnel_id(__be32 key)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be64)((__force u32)key);
-#else
- return (__force __be64)((__force u64)key << 32);
-#endif
-}
-
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 tunnel_id_to_key(__be64 x)
-{
-#ifdef __BIG_ENDIAN
- return (__force __be32)x;
-#else
- return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
@@ -290,7 +271,7 @@ static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
__be64 tun_id;
flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
- tun_id = key_to_tunnel_id(tpi->key);
+ tun_id = key32_to_tunnel_id(tpi->key);
tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
if (!tun_dst)
return PACKET_REJECT;
@@ -446,7 +427,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
gre_build_header(skb, tunnel_hlen, flags, proto,
- tunnel_id_to_key(tun_info->key.tun_id), 0);
+ tunnel_id_to_key32(tun_info->key.tun_id), 0);
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
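The two removed statics reappear as shared helpers, key32_to_tunnel_id() and tunnel_id_to_key32(), presumably in a common tunnel header so other users can reuse them. Their shape follows directly from the code deleted above: a 32-bit GRE key occupies the half of the 64-bit tunnel id that keeps the on-wire byte layout intact, which means a shift on little-endian hosts and a plain narrowing/widening on big-endian ones:

    static inline __be64 key32_to_tunnel_id(__be32 key)
    {
    #ifdef __BIG_ENDIAN
            return (__force __be64)key;
    #else
            return (__force __be64)((__force u64)key << 32);
    #endif
    }

    /* Returns the least-significant 32 bits of a __be64. */
    static inline __be32 tunnel_id_to_key32(__be64 tun_id)
    {
    #ifdef __BIG_ENDIAN
            return (__force __be32)tun_id;
    #else
            return (__force __be32)((__force u64)tun_id >> 32);
    #endif
    }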
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 4b351af3e67b..d6feabb03516 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -312,6 +312,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
+ struct net_device *dev = skb->dev;
/* if ingress device is enslaved to an L3 master device pass the
* skb to its handler for processing
@@ -341,7 +342,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
*/
if (!skb_valid_dst(skb)) {
int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev);
+ iph->tos, dev);
if (unlikely(err)) {
if (err == -EXDEV)
__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
@@ -370,7 +371,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
} else if (skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) {
- struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
/* RFC 1122 3.3.6:
*
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 65569274efb8..05d105832bdb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -99,6 +99,14 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
iph->tot_len = htons(skb->len);
ip_send_check(iph);
+
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip_out(sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
@@ -490,7 +498,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
-#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
+#if IS_ENABLED(CONFIG_IP_VS)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
@@ -1574,8 +1582,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
}
oif = arg->bound_dev_if;
- if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
- oif = skb->skb_iif;
+ oif = oif ? : skb->skb_iif;
flowi4_init_output(&fl4, oif,
IP4_REPLY_MARK(net, skb->mark),
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 71a52f4d4cff..af4919792b6a 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -284,9 +284,12 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
ipc->ttl = val;
break;
case IP_TOS:
- if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+ if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
+ val = *(int *)CMSG_DATA(cmsg);
+ else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
+ val = *(u8 *)CMSG_DATA(cmsg);
+ else
return -EINVAL;
- val = *(int *)CMSG_DATA(cmsg);
if (val < 0 || val > 255)
return -EINVAL;
ipc->tos = val;
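The relaxed check above means IP_TOS ancillary data may now be a single byte as well as a native int. A runnable userspace sketch of the one-byte variant (the helper name is mine):

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    static ssize_t send_with_tos(int fd, const void *buf, size_t len,
                                 const struct sockaddr_in *dst,
                                 unsigned char tos)
    {
            union {                 /* guarantees cmsghdr alignment */
                    char buf[CMSG_SPACE(sizeof(unsigned char))];
                    struct cmsghdr align;
            } u;
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            struct msghdr msg = {
                    .msg_name = (void *)dst, .msg_namelen = sizeof(*dst),
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
            };
            struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

            cm->cmsg_level = IPPROTO_IP;
            cm->cmsg_type = IP_TOS;
            cm->cmsg_len = CMSG_LEN(sizeof(unsigned char));
            *CMSG_DATA(cm) = tos;   /* one-byte TOS payload */
            return sendmsg(fd, &msg, 0);
    }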
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 95649ebd2874..5719d6ba0824 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -55,6 +55,7 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/udp.h>
+#include <net/dst_metadata.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -546,6 +547,81 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
return 0;
}
+void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ u32 headroom = sizeof(struct iphdr);
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+ const struct iphdr *inner_iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ __be16 df = 0;
+ u8 tos, ttl;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET))
+ goto tx_error;
+ key = &tun_info->key;
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ tos = key->tos;
+ if (tos == 1) {
+ if (skb->protocol == htons(ETH_P_IP))
+ tos = inner_iph->tos;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
+ }
+ init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
+ RT_TOS(tos), tunnel->parms.link);
+ if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
+ goto tx_error;
+ rt = ip_route_output_key(tunnel->net, &fl4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
+ if (rt->dst.dev == dev) {
+ ip_rt_put(rt);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
+ tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
+ ttl = key->ttl;
+ if (ttl == 0) {
+ if (skb->protocol == htons(ETH_P_IP))
+ ttl = inner_iph->ttl;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
+ else
+ ttl = ip4_dst_hoplimit(&rt->dst);
+ }
+ if (key->tun_flags & TUNNEL_DONT_FRAGMENT)
+ df = htons(IP_DF);
+ else if (skb->protocol == htons(ETH_P_IP))
+ df = inner_iph->frag_off & htons(IP_DF);
+ headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
+ if (headroom > dev->needed_headroom)
+ dev->needed_headroom = headroom;
+
+ if (skb_cow_head(skb, dev->needed_headroom)) {
+ ip_rt_put(rt);
+ goto tx_dropped;
+ }
+ iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, key->tos,
+ key->ttl, df, !net_eq(tunnel->net, dev_net(dev)));
+ return;
+tx_error:
+ dev->stats.tx_errors++;
+ goto kfree;
+tx_dropped:
+ dev->stats.tx_dropped++;
+kfree:
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(ip_md_tunnel_xmit);
+
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 0f227db0e9ac..777bc1883870 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -69,7 +69,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
skb_scrub_packet(skb, xnet);
- skb_clear_hash(skb);
+ skb_clear_hash_if_not_l4(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index cc701fa70b12..5d7944f394d9 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -88,6 +88,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
struct net_device *dev;
struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
+ struct xfrm_mode *inner_mode;
struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
u32 orig_mark = skb->mark;
int ret;
@@ -105,7 +106,19 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
}
x = xfrm_input_state(skb);
- family = x->inner_mode->afinfo->family;
+
+ inner_mode = x->inner_mode;
+
+ if (x->sel.family == AF_UNSPEC) {
+ inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+ if (inner_mode == NULL) {
+ XFRM_INC_STATS(dev_net(skb->dev),
+ LINUX_MIB_XFRMINSTATEMODEERROR);
+ return -EINVAL;
+ }
+ }
+
+ family = inner_mode->afinfo->family;
skb->mark = be32_to_cpu(tunnel->parms.i_key);
ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 4ae3f8e6c6cc..c9392589c415 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -115,6 +115,7 @@
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/dst_metadata.h>
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
@@ -193,6 +194,7 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
{
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
+ struct metadata_dst *tun_dst = NULL;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
@@ -216,7 +218,12 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
tpi = &ipip_tpi;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
- return ip_tunnel_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
+ if (tunnel->collect_md) {
+ tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
+ if (!tun_dst)
+ return 0;
+ }
+ return ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
}
return -1;
@@ -270,7 +277,10 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
skb_set_inner_ipproto(skb, ipproto);
- ip_tunnel_xmit(skb, dev, tiph, ipproto);
+ if (tunnel->collect_md)
+ ip_md_tunnel_xmit(skb, dev, ipproto);
+ else
+ ip_tunnel_xmit(skb, dev, tiph, ipproto);
return NETDEV_TX_OK;
tx_error:
@@ -380,13 +390,14 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
}
static void ipip_netlink_parms(struct nlattr *data[],
- struct ip_tunnel_parm *parms)
+ struct ip_tunnel_parm *parms, bool *collect_md)
{
memset(parms, 0, sizeof(*parms));
parms->iph.version = 4;
parms->iph.protocol = IPPROTO_IPIP;
parms->iph.ihl = 5;
+ *collect_md = false;
if (!data)
return;
@@ -414,6 +425,9 @@ static void ipip_netlink_parms(struct nlattr *data[],
if (!data[IFLA_IPTUN_PMTUDISC] || nla_get_u8(data[IFLA_IPTUN_PMTUDISC]))
parms->iph.frag_off = htons(IP_DF);
+
+ if (data[IFLA_IPTUN_COLLECT_METADATA])
+ *collect_md = true;
}
/* This function returns true when ENCAP attributes are present in the nl msg */
@@ -453,18 +467,18 @@ static bool ipip_netlink_encap_parms(struct nlattr *data[],
static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
+ struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
if (ipip_netlink_encap_parms(data, &ipencap)) {
- struct ip_tunnel *t = netdev_priv(dev);
int err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0)
return err;
}
- ipip_netlink_parms(data, &p);
+ ipip_netlink_parms(data, &p, &t->collect_md);
return ip_tunnel_newlink(dev, tb, &p);
}
@@ -473,6 +487,7 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct ip_tunnel_parm p;
struct ip_tunnel_encap ipencap;
+ bool collect_md;
if (ipip_netlink_encap_parms(data, &ipencap)) {
struct ip_tunnel *t = netdev_priv(dev);
@@ -482,7 +497,9 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
- ipip_netlink_parms(data, &p);
+ ipip_netlink_parms(data, &p, &collect_md);
+ if (collect_md)
+ return -EINVAL;
if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
(!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
@@ -516,6 +533,8 @@ static size_t ipip_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_IPTUN_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -544,6 +563,9 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
tunnel->encap.flags))
goto nla_put_failure;
+ if (tunnel->collect_md)
+ if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -562,6 +584,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops ipip_link_ops __read_mostly = {
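Editor's note: the ipip changes above wire up IFLA_IPTUN_COLLECT_METADATA: rx attaches an ip_tun_rx_dst() metadata dst, tx switches to ip_md_tunnel_xmit(), and changelink refuses to toggle the flag on a live device. Since the attribute is NLA_FLAG, its presence alone carries the information, which is why ipip_get_size() accounts for it with nla_total_size(0). A small sketch of that size bookkeeping, with MY_-prefixed mirrors of the netlink macros (assumptions, not the kernel's definitions):

    #include <stdio.h>

    /* MY_-prefixed mirrors of NLA_ALIGN/NLA_HDRLEN/nla_total_size()
     * (assumed to match <linux/netlink.h>): a flag attribute has no
     * payload, so it costs only the aligned 4-byte attribute header. */
    #define MY_NLA_ALIGNTO    4
    #define MY_NLA_ALIGN(len) (((len) + MY_NLA_ALIGNTO - 1) & ~(MY_NLA_ALIGNTO - 1))
    #define MY_NLA_HDRLEN     MY_NLA_ALIGN(4) /* struct nlattr is 4 bytes */

    static int my_nla_total_size(int payload)
    {
            return MY_NLA_ALIGN(MY_NLA_HDRLEN + payload);
    }

    int main(void)
    {
            /* IFLA_IPTUN_COLLECT_METADATA is NLA_FLAG: zero payload */
            printf("flag attr: %d bytes\n", my_nla_total_size(0)); /* 4 */
            printf("u16 attr:  %d bytes\n", my_nla_total_size(2)); /* 8 */
            return 0;
    }

With a new enough iproute2, a metadata-based device should be creatable with something like `ip link add name ipip0 type ipip external` (the `external` keyword is assumed here to map to this flag).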
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 26253328d227..a87bcd2d4a94 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2076,6 +2076,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct rta_mfc_stats mfcs;
struct nlattr *mp_attr;
struct rtnexthop *nhp;
+ unsigned long lastuse;
int ct;
/* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2105,12 +2106,14 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
nla_nest_end(skb, mp_attr);
+ lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+ lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
- nla_put_u64_64bit(skb, RTA_EXPIRES,
- jiffies_to_clock_t(c->mfc_un.res.lastuse),
+ nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
RTA_PAD))
return -EMSGSIZE;
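Editor's note: the RTA_EXPIRES fix snapshots the mfc lastuse value once with READ_ONCE() and clamps the age at zero, so a timestamp that races slightly ahead of the local jiffies read cannot underflow into an enormous unsigned value. A standalone sketch of the clamp; my_time_after_eq() mirrors the kernel's wrap-safe time_after_eq():

    #include <stdio.h>

    /* Mirror of the kernel's wrap-safe time_after_eq() for jiffies */
    #define my_time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

    static unsigned long age_in_jiffies(unsigned long now, unsigned long lastuse)
    {
            /* If lastuse raced ahead of our snapshot of jiffies, report
             * an age of 0 rather than a huge underflowed difference. */
            return my_time_after_eq(now, lastuse) ? now - lastuse : 0;
    }

    int main(void)
    {
            printf("%lu\n", age_in_jiffies(1000, 990));  /* 10 */
            printf("%lu\n", age_in_jiffies(1000, 1005)); /* 0, not ~2^64 */
            return 0;
    }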
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 2375b0a8be46..30493beb611a 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
__be32 saddr, daddr;
u_int8_t tos;
const struct iphdr *iph;
+ int err;
/* root is playing with raw sockets. */
if (skb->len < sizeof(struct iphdr) ||
@@ -46,15 +47,17 @@ static unsigned int nf_route_table_hook(void *priv,
tos = iph->tos;
ret = nft_do_chain(&pkt, priv);
- if (ret != NF_DROP && ret != NF_QUEUE) {
+ if (ret != NF_DROP && ret != NF_STOLEN) {
iph = ip_hdr(skb);
if (iph->saddr != saddr ||
iph->daddr != daddr ||
skb->mark != mark ||
- iph->tos != tos)
- if (ip_route_me_harder(state->net, skb, RTN_UNSPEC))
- ret = NF_DROP;
+ iph->tos != tos) {
+ err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
}
return ret;
}
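Editor's note: two things change in the route chain hook above: NF_STOLEN replaces NF_QUEUE in the re-route condition (a stolen skb must not be touched again), and a failing ip_route_me_harder() now propagates its errno through NF_DROP_ERR() instead of a bare NF_DROP. A sketch of that verdict encoding, using MY_-prefixed mirrors of the netfilter macros (assumed to match include/linux/netfilter.h):

    #include <stdio.h>

    #define MY_NF_DROP         0
    #define MY_EHOSTUNREACH    113
    /* Mirror of NF_DROP_ERR(): stash -errno in the upper 16 bits of the
     * verdict so the caller can report the error instead of dropping
     * silently. */
    #define MY_NF_DROP_ERR(x)  ((unsigned int)(-(x)) << 16 | MY_NF_DROP)

    int main(void)
    {
            unsigned int verdict = MY_NF_DROP_ERR(-MY_EHOSTUNREACH);

            printf("verdict %u, errno %d\n",
                   verdict & 0xffff,        /* NF_DROP */
                   -(int)(verdict >> 16));  /* -EHOSTUNREACH */
            return 0;
    }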
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index c24f41c816b3..2c2553b9026c 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -46,6 +46,7 @@ static const struct nft_expr_ops nft_reject_ipv4_ops = {
.eval = nft_reject_ipv4_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
+ .validate = nft_reject_validate,
};
static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 438f50c1a676..90a85c955872 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -606,12 +606,6 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
(inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0);
- if (!saddr && ipc.oif) {
- err = l3mdev_get_saddr(net, ipc.oif, &fl4);
- if (err < 0)
- goto done;
- }
-
if (!inet->hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3e992783c1d0..654a9af20136 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -476,12 +476,18 @@ u32 ip_idents_reserve(u32 hash, int segs)
atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
u32 old = ACCESS_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
- u32 delta = 0;
+ u32 new, delta = 0;
if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = prandom_u32_max(now - old);
- return atomic_add_return(segs + delta, p_id) - segs;
+ /* Do not use atomic_add_return() as it makes UBSAN unhappy */
+ do {
+ old = (u32)atomic_read(p_id);
+ new = old + delta + segs;
+ } while (atomic_cmpxchg(p_id, old, new) != old);
+
+ return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
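Editor's note: the cmpxchg loop replacing atomic_add_return() sidesteps UBSAN signed-overflow warnings: atomic_t is signed, but done by hand in u32 the wraparound is well-defined. The same pattern expressed in portable C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reserve `segs` IP IDs, plus a random `delta`, from a shared
     * counter. The add is done in uint32_t so wraparound is defined
     * behavior, mirroring the loop in ip_idents_reserve(). */
    static uint32_t idents_reserve(_Atomic uint32_t *p_id, uint32_t delta,
                                   uint32_t segs)
    {
            uint32_t old, new;

            old = atomic_load(p_id);
            do {
                    new = old + delta + segs; /* old refreshed on CAS failure */
            } while (!atomic_compare_exchange_weak(p_id, &old, new));

            return new - segs; /* first ID of the reserved block */
    }

    int main(void)
    {
            _Atomic uint32_t id = UINT32_MAX - 1; /* about to wrap */

            printf("first id: %u\n", idents_reserve(&id, 0, 3));
            printf("counter:  %u\n", atomic_load(&id));
            return 0;
    }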
@@ -1831,7 +1837,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
* Now we are ready to route packet.
*/
fl4.flowi4_oif = 0;
- fl4.flowi4_iif = l3mdev_fib_oif_rcu(dev);
+ fl4.flowi4_iif = dev->ifindex;
fl4.flowi4_mark = skb->mark;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
@@ -2018,7 +2024,9 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
return ERR_PTR(-EINVAL);
if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
- if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+ if (ipv4_is_loopback(fl4->saddr) &&
+ !(dev_out->flags & IFF_LOOPBACK) &&
+ !netif_is_l3_master(dev_out))
return ERR_PTR(-EINVAL);
if (ipv4_is_lbcast(fl4->daddr))
@@ -2148,7 +2156,6 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
- int master_idx;
int orig_oif;
int err = -ENETUNREACH;
@@ -2158,9 +2165,6 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
orig_oif = fl4->flowi4_oif;
- master_idx = l3mdev_master_ifindex_by_index(net, fl4->flowi4_oif);
- if (master_idx)
- fl4->flowi4_oif = master_idx;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
@@ -2244,10 +2248,6 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
fl4->saddr = inet_select_addr(dev_out, 0,
RT_SCOPE_HOST);
}
-
- rth = l3mdev_get_rtable(dev_out, fl4);
- if (rth)
- goto out;
}
if (!fl4->daddr) {
@@ -2265,8 +2265,7 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
if (err) {
res.fi = NULL;
res.table = NULL;
- if (fl4->flowi4_oif &&
- !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
+ if (fl4->flowi4_oif) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
@@ -2302,7 +2301,9 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
else
fl4->saddr = fl4->daddr;
}
- dev_out = net->loopback_dev;
+
+ /* L3 master device is the loopback for that domain */
+ dev_out = l3mdev_master_dev_rcu(dev_out) ? : net->loopback_dev;
fl4->flowi4_oif = dev_out->ifindex;
flags |= RTCF_LOCAL;
goto make_route;
@@ -2577,9 +2578,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
fl4.flowi4_mark = mark;
- if (netif_index_is_l3_master(net, fl4.flowi4_oif))
- fl4.flowi4_flags = FLOWI_FLAG_L3MDEV_SRC | FLOWI_FLAG_SKIP_NH_OIF;
-
if (iif) {
struct net_device *dev;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 77311a92275c..f253e5019d22 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -380,14 +380,14 @@ void tcp_init_sock(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- __skb_queue_head_init(&tp->out_of_order_queue);
+ tp->out_of_order_queue = RB_ROOT;
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
INIT_LIST_HEAD(&tp->tsq_node);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- tp->rtt_min[0].rtt = ~0U;
+ minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -396,6 +396,9 @@ void tcp_init_sock(struct sock *sk)
*/
tp->snd_cwnd = TCP_INIT_CWND;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ tp->app_limited = ~0U;
+
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
@@ -1014,23 +1017,40 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset,
flags);
lock_sock(sk);
+
+ tcp_rate_check_app_limited(sk); /* is sending application-limited? */
+
res = do_tcp_sendpages(sk, page, offset, size, flags);
release_sock(sk);
return res;
}
EXPORT_SYMBOL(tcp_sendpage);
-static inline int select_size(const struct sock *sk, bool sg)
+/* Do not bother using a page frag for very small frames.
+ * But use this heuristic only for the first skb in write queue.
+ *
+ * Having no payload in skb->head allows better SACK shifting
+ * in tcp_shift_skb_data(), reducing sack/rack overhead, because
+ * write queue has less skbs.
+ * Each skb can hold up to MAX_SKB_FRAGS * 32Kbytes, or ~0.5 MB.
+ * This also speeds up tso_fragment(), since it wont fallback
+ * This also speeds up tso_fragment(), since it won't fall back
+ * to tcp_fragment().
+ */
+static int linear_payload_sz(bool first_skb)
+{
+ if (first_skb)
+ return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+ return 0;
+}
+
+static int select_size(const struct sock *sk, bool sg, bool first_skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
if (sk_can_gso(sk)) {
- /* Small frames wont use a full page:
- * Payload will immediately follow tcp header.
- */
- tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
+ tmp = linear_payload_sz(first_skb);
} else {
int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
@@ -1101,6 +1121,8 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ tcp_rate_check_app_limited(sk); /* is sending application-limited? */
+
/* Wait for a connection to finish. One exception is TCP Fast Open
* (passive side) where data is allowed to be sent before a connection
* is fully established.
@@ -1161,6 +1183,8 @@ restart:
}
if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
+ bool first_skb;
+
new_segment:
/* Allocate new segment. If the interface is SG,
* allocate skb fitting to single page.
@@ -1172,10 +1196,11 @@ new_segment:
process_backlog = false;
goto restart;
}
+ first_skb = skb_queue_empty(&sk->sk_write_queue);
skb = sk_stream_alloc_skb(sk,
- select_size(sk, sg),
+ select_size(sk, sg, first_skb),
sk->sk_allocation,
- skb_queue_empty(&sk->sk_write_queue));
+ first_skb);
if (!skb)
goto wait_for_memory;
@@ -2243,7 +2268,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
inet->inet_dport = 0;
@@ -2687,7 +2712,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_time_stamp, intv;
unsigned int start;
int notsent_bytes;
u64 rate64;
@@ -2777,6 +2802,15 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_min_rtt = tcp_min_rtt(tp);
info->tcpi_data_segs_in = tp->data_segs_in;
info->tcpi_data_segs_out = tp->data_segs_out;
+
+ info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
+ rate = READ_ONCE(tp->rate_delivered);
+ intv = READ_ONCE(tp->rate_interval_us);
+ if (rate && intv) {
+ rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
+ do_div(rate64, intv);
+ put_unaligned(rate64, &info->tcpi_delivery_rate);
+ }
}
EXPORT_SYMBOL_GPL(tcp_get_info);
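Editor's note: tcpi_delivery_rate is computed from the most recent rate sample as delivered packets, scaled by the MSS, over the sample interval, yielding bytes per second. A quick standalone check of that arithmetic (delivery_rate() is an illustrative helper, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000ULL

    /* Reproduce the tcpi_delivery_rate math: packets delivered over an
     * interval, scaled by the MSS, expressed in bytes/second. */
    static uint64_t delivery_rate(uint32_t delivered_pkts, uint32_t mss,
                                  uint32_t interval_us)
    {
            uint64_t rate64 = (uint64_t)delivered_pkts * mss * USEC_PER_SEC;

            return interval_us ? rate64 / interval_us : 0;
    }

    int main(void)
    {
            /* 100 packets of 1448 bytes in 10 ms -> ~14.48 MB/s */
            printf("%llu bytes/sec\n",
                   (unsigned long long)delivery_rate(100, 1448, 10000));
            return 0;
    }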
@@ -3244,11 +3278,12 @@ static void __init tcp_init_mem(void)
void __init tcp_init(void)
{
- unsigned long limit;
int max_rshare, max_wshare, cnt;
+ unsigned long limit;
unsigned int i;
- sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
+ BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
+ FIELD_SIZEOF(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
new file mode 100644
index 000000000000..0ea66c2c9344
--- /dev/null
+++ b/net/ipv4/tcp_bbr.c
@@ -0,0 +1,896 @@
+/* Bottleneck Bandwidth and RTT (BBR) congestion control
+ *
+ * BBR congestion control computes the sending rate based on the delivery
+ * rate (throughput) estimated from ACKs. In a nutshell:
+ *
+ * On each ACK, update our model of the network path:
+ * bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips)
+ * min_rtt = windowed_min(rtt, 10 seconds)
+ * pacing_rate = pacing_gain * bottleneck_bandwidth
+ * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4)
+ *
+ * The core algorithm does not react directly to packet losses or delays,
+ * although BBR may adjust the size of next send per ACK when loss is
+ * observed, or adjust the sending rate if it estimates there is a
+ * traffic policer, in order to keep the drop rate reasonable.
+ *
+ * BBR is described in detail in:
+ * "BBR: Congestion-Based Congestion Control",
+ * Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh,
+ * Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016.
+ *
+ * There is a public e-mail list for discussing BBR development and testing:
+ * https://groups.google.com/forum/#!forum/bbr-dev
+ *
+ * NOTE: BBR *must* be used with the fq qdisc ("man tc-fq") with pacing enabled,
+ * since pacing is integral to the BBR design and implementation.
+ * BBR without pacing would not function properly, and may incur unnecessarily
+ * high packet loss rates.
+ */
+#include <linux/module.h>
+#include <net/tcp.h>
+#include <linux/inet_diag.h>
+#include <linux/inet.h>
+#include <linux/random.h>
+#include <linux/win_minmax.h>
+
+/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth
+ * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps.
+ * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32.
+ * Since the minimum window is >=4 packets, the lower bound isn't
+ * an issue. The upper bound isn't an issue with existing technologies.
+ */
+#define BW_SCALE 24
+#define BW_UNIT (1 << BW_SCALE)
+
+#define BBR_SCALE 8 /* scaling factor for fractions in BBR (e.g. gains) */
+#define BBR_UNIT (1 << BBR_SCALE)
+
+/* BBR has the following modes for deciding how fast to send: */
+enum bbr_mode {
+ BBR_STARTUP, /* ramp up sending rate rapidly to fill pipe */
+ BBR_DRAIN, /* drain any queue created during startup */
+ BBR_PROBE_BW, /* discover, share bw: pace around estimated bw */
+ BBR_PROBE_RTT, /* cut cwnd to min to probe min_rtt */
+};
+
+/* BBR congestion control block */
+struct bbr {
+ u32 min_rtt_us; /* min RTT in min_rtt_win_sec window */
+ u32 min_rtt_stamp; /* timestamp of min_rtt_us */
+ u32 probe_rtt_done_stamp; /* end time for BBR_PROBE_RTT mode */
+ struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
+ u32 rtt_cnt; /* count of packet-timed rounds elapsed */
+ u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
+ struct skb_mstamp cycle_mstamp; /* time of this cycle phase start */
+ u32 mode:3, /* current bbr_mode in state machine */
+ prev_ca_state:3, /* CA state on previous ACK */
+ packet_conservation:1, /* use packet conservation? */
+ restore_cwnd:1, /* decided to revert cwnd to old value */
+ round_start:1, /* start of packet-timed tx->ack round? */
+ tso_segs_goal:7, /* segments we want in each skb we send */
+ idle_restart:1, /* restarting after idle? */
+ probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */
+ unused:5,
+ lt_is_sampling:1, /* taking long-term ("LT") samples now? */
+ lt_rtt_cnt:7, /* round trips in long-term interval */
+ lt_use_bw:1; /* use lt_bw as our bw estimate? */
+ u32 lt_bw; /* LT est delivery rate in pkts/uS << 24 */
+ u32 lt_last_delivered; /* LT intvl start: tp->delivered */
+ u32 lt_last_stamp; /* LT intvl start: tp->delivered_mstamp */
+ u32 lt_last_lost; /* LT intvl start: tp->lost */
+ u32 pacing_gain:10, /* current gain for setting pacing rate */
+ cwnd_gain:10, /* current gain for setting cwnd */
+ full_bw_cnt:3, /* number of rounds without large bw gains */
+ cycle_idx:3, /* current index in pacing_gain cycle array */
+ unused_b:6;
+ u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
+ u32 full_bw; /* recent bw, to estimate if pipe is full */
+};
+
+#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */
+
+/* Window length of bw filter (in rounds): */
+static const int bbr_bw_rtts = CYCLE_LEN + 2;
+/* Window length of min_rtt filter (in sec): */
+static const u32 bbr_min_rtt_win_sec = 10;
+/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */
+static const u32 bbr_probe_rtt_mode_ms = 200;
+/* Skip TSO below the following bandwidth (bits/sec): */
+static const int bbr_min_tso_rate = 1200000;
+
+/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
+ * that will allow a smoothly increasing pacing rate that will double each RTT
+ * and send the same number of packets per RTT that an un-paced, slow-starting
+ * Reno or CUBIC flow would:
+ */
+static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1;
+/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain
+ * the queue created in BBR_STARTUP in a single round:
+ */
+static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885;
+/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */
+static const int bbr_cwnd_gain = BBR_UNIT * 2;
+/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */
+static const int bbr_pacing_gain[] = {
+ BBR_UNIT * 5 / 4, /* probe for more available bw */
+ BBR_UNIT * 3 / 4, /* drain queue and/or yield bw to other flows */
+ BBR_UNIT, BBR_UNIT, BBR_UNIT, /* cruise at 1.0*bw to utilize pipe, */
+ BBR_UNIT, BBR_UNIT, BBR_UNIT /* without creating excess queue... */
+};
+/* Randomize the starting gain cycling phase over N phases: */
+static const u32 bbr_cycle_rand = 7;
+
+/* Try to keep at least this many packets in flight, if things go smoothly. For
+ * smooth functioning, a sliding window protocol ACKing every other packet
+ * needs at least 4 packets in flight:
+ */
+static const u32 bbr_cwnd_min_target = 4;
+
+/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */
+/* If bw has increased significantly (1.25x), there may be more bw available: */
+static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4;
+/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */
+static const u32 bbr_full_bw_cnt = 3;
+
+/* "long-term" ("LT") bandwidth estimator parameters... */
+/* The minimum number of rounds in an LT bw sampling interval: */
+static const u32 bbr_lt_intvl_min_rtts = 4;
+/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */
+static const u32 bbr_lt_loss_thresh = 50;
+/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */
+static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8;
+/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */
+static const u32 bbr_lt_bw_diff = 4000 / 8;
+/* If we estimate we're policed, use lt_bw for this many round trips: */
+static const u32 bbr_lt_bw_max_rtts = 48;
+
+/* Do we estimate that STARTUP filled the pipe? */
+static bool bbr_full_bw_reached(const struct sock *sk)
+{
+ const struct bbr *bbr = inet_csk_ca(sk);
+
+ return bbr->full_bw_cnt >= bbr_full_bw_cnt;
+}
+
+/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
+static u32 bbr_max_bw(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return minmax_get(&bbr->bw);
+}
+
+/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */
+static u32 bbr_bw(const struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
+}
+
+/* Return rate in bytes per second, optionally with a gain.
+ * The order here is chosen carefully to avoid overflow of u64. This should
+ * work for input rates of up to 2.9Tbit/sec and gain of 2.89x.
+ */
+static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
+{
+ rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache);
+ rate *= gain;
+ rate >>= BBR_SCALE;
+ rate *= USEC_PER_SEC;
+ return rate >> BW_SCALE;
+}
+
+/* Pace using current bw estimate and a gain factor. In order to help drive the
+ * network toward lower queues while maintaining high utilization and low
+ * latency, the average pacing rate aims to be slightly (~1%) lower than the
+ * estimated bandwidth. This is an important aspect of the design. In this
+ * implementation this slightly lower pacing rate is achieved implicitly by not
+ * including link-layer headers in the packet size used for the pacing rate.
+ */
+static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 rate = bw;
+
+ rate = bbr_rate_bytes_per_sec(sk, rate, gain);
+ rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+ if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
+ sk->sk_pacing_rate = rate;
+}
+
+/* Return count of segments we want in the skbs we send, or 0 for default. */
+static u32 bbr_tso_segs_goal(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ return bbr->tso_segs_goal;
+}
+
+static void bbr_set_tso_segs_goal(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 min_segs;
+
+ min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
+ bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs),
+ 0x7FU);
+}
+
+/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */
+static void bbr_save_cwnd(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
+ bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */
+ else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
+ bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
+}
+
+static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (event == CA_EVENT_TX_START && tp->app_limited) {
+ bbr->idle_restart = 1;
+ /* Avoid pointless buffer overflows: pace at est. bw if we don't
+ * need more speed (we're restarting from idle and app-limited).
+ */
+ if (bbr->mode == BBR_PROBE_BW)
+ bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+ }
+}
+
+/* Find target cwnd. Right-size the cwnd based on min RTT and the
+ * estimated bottleneck bandwidth:
+ *
+ * cwnd = bw * min_rtt * gain = BDP * gain
+ *
+ * The key factor, gain, controls the amount of queue. While a small gain
+ * builds a smaller queue, it becomes more vulnerable to noise in RTT
+ * measurements (e.g., delayed ACKs or other ACK compression effects). This
+ * noise may cause BBR to under-estimate the rate.
+ *
+ * To achieve full performance in high-speed paths, we budget enough cwnd to
+ * fit full-sized skbs in-flight on both end hosts to fully utilize the path:
+ * - one skb in sending host Qdisc,
+ * - one skb in sending host TSO/GSO engine
+ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine
+ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because
+ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets,
+ * which allows 2 outstanding 2-packet sequences, to try to keep pipe
+ * full even with ACK-every-other-packet delayed ACKs.
+ */
+static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 cwnd;
+ u64 w;
+
+ /* If we've never had a valid RTT sample, cap cwnd at the initial
+ * default. This should only happen when the connection is not using TCP
+ * timestamps and has retransmitted all of the SYN/SYNACK/data packets
+ * ACKed so far. In this case, an RTO can cut cwnd to 1, in which
+ * case we need to slow-start up toward something safe: TCP_INIT_CWND.
+ */
+ if (unlikely(bbr->min_rtt_us == ~0U)) /* no valid RTT samples yet? */
+ return TCP_INIT_CWND; /* be safe: cap at default initial cwnd */
+
+ w = (u64)bw * bbr->min_rtt_us;
+
+ /* Apply a gain to the given value, then remove the BW_SCALE shift. */
+ cwnd = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;
+
+ /* Allow enough full-sized skbs in flight to utilize end systems. */
+ cwnd += 3 * bbr->tso_segs_goal;
+
+ /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
+ cwnd = (cwnd + 1) & ~1U;
+
+ return cwnd;
+}
+
+/* An optimization in BBR to reduce losses: On the first round of recovery, we
+ * follow the packet conservation principle: send P packets per P packets acked.
+ * After that, we slow-start and send at most 2*P packets per P packets acked.
+ * After recovery finishes, or upon undo, we restore the cwnd we had when
+ * recovery started (capped by the target cwnd based on estimated BDP).
+ *
+ * TODO(ycheng/ncardwell): implement a rate-based approach.
+ */
+static bool bbr_set_cwnd_to_recover_or_restore(
+ struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
+ u32 cwnd = tp->snd_cwnd;
+
+ /* An ACK for P pkts should release at most 2*P packets. We do this
+ * in two steps. First, here we deduct the number of lost packets.
+ * Then, in bbr_set_cwnd() we slow start up toward the target cwnd.
+ */
+ if (rs->losses > 0)
+ cwnd = max_t(s32, cwnd - rs->losses, 1);
+
+ if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) {
+ /* Starting 1st round of Recovery, so do packet conservation. */
+ bbr->packet_conservation = 1;
+ bbr->next_rtt_delivered = tp->delivered; /* start round now */
+ /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */
+ cwnd = tcp_packets_in_flight(tp) + acked;
+ } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
+ /* Exiting loss recovery; restore cwnd saved before recovery. */
+ bbr->restore_cwnd = 1;
+ bbr->packet_conservation = 0;
+ }
+ bbr->prev_ca_state = state;
+
+ if (bbr->restore_cwnd) {
+ /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
+ cwnd = max(cwnd, bbr->prior_cwnd);
+ bbr->restore_cwnd = 0;
+ }
+
+ if (bbr->packet_conservation) {
+ *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
+ return true; /* yes, using packet conservation */
+ }
+ *new_cwnd = cwnd;
+ return false;
+}
+
+/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss
+ * has drawn us down below target), or snap down to target if we're above it.
+ */
+static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
+ u32 acked, u32 bw, int gain)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 cwnd = 0, target_cwnd = 0;
+
+ if (!acked)
+ return;
+
+ if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
+ goto done;
+
+ /* If we're below target cwnd, slow start cwnd toward target cwnd. */
+ target_cwnd = bbr_target_cwnd(sk, bw, gain);
+ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */
+ cwnd = min(cwnd + acked, target_cwnd);
+ else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND)
+ cwnd = cwnd + acked;
+ cwnd = max(cwnd, bbr_cwnd_min_target);
+
+done:
+ tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); /* apply global cap */
+ if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */
+ tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
+}
+
+/* End cycle phase if it's time and/or we hit the phase's in-flight target. */
+static bool bbr_is_next_cycle_phase(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ bool is_full_length =
+ skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+ bbr->min_rtt_us;
+ u32 inflight, bw;
+
+ /* The pacing_gain of 1.0 paces at the estimated bw to try to fully
+ * use the pipe without increasing the queue.
+ */
+ if (bbr->pacing_gain == BBR_UNIT)
+ return is_full_length; /* just use wall clock time */
+
+ inflight = rs->prior_in_flight; /* what was in-flight before ACK? */
+ bw = bbr_max_bw(sk);
+
+ /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at
+ * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is
+ * small (e.g. on a LAN). We do not persist if packets are lost, since
+ * a path with small buffers may not hold that much.
+ */
+ if (bbr->pacing_gain > BBR_UNIT)
+ return is_full_length &&
+ (rs->losses || /* perhaps pacing_gain*BDP won't fit */
+ inflight >= bbr_target_cwnd(sk, bw, bbr->pacing_gain));
+
+ /* A pacing_gain < 1.0 tries to drain extra queue we added if bw
+ * probing didn't find more bw. If inflight falls to match BDP then we
+ * estimate queue is drained; persisting would underutilize the pipe.
+ */
+ return is_full_length ||
+ inflight <= bbr_target_cwnd(sk, bw, BBR_UNIT);
+}
+
+static void bbr_advance_cycle_phase(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
+ bbr->cycle_mstamp = tp->delivered_mstamp;
+ bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
+}
+
+/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
+static void bbr_update_cycle_phase(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
+ bbr_is_next_cycle_phase(sk, rs))
+ bbr_advance_cycle_phase(sk);
+}
+
+static void bbr_reset_startup_mode(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->mode = BBR_STARTUP;
+ bbr->pacing_gain = bbr_high_gain;
+ bbr->cwnd_gain = bbr_high_gain;
+}
+
+static void bbr_reset_probe_bw_mode(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->mode = BBR_PROBE_BW;
+ bbr->pacing_gain = BBR_UNIT;
+ bbr->cwnd_gain = bbr_cwnd_gain;
+ bbr->cycle_idx = CYCLE_LEN - 1 - prandom_u32_max(bbr_cycle_rand);
+ bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */
+}
+
+static void bbr_reset_mode(struct sock *sk)
+{
+ if (!bbr_full_bw_reached(sk))
+ bbr_reset_startup_mode(sk);
+ else
+ bbr_reset_probe_bw_mode(sk);
+}
+
+/* Start a new long-term sampling interval. */
+static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+ bbr->lt_last_delivered = tp->delivered;
+ bbr->lt_last_lost = tp->lost;
+ bbr->lt_rtt_cnt = 0;
+}
+
+/* Completely reset long-term bandwidth sampling. */
+static void bbr_reset_lt_bw_sampling(struct sock *sk)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ bbr->lt_bw = 0;
+ bbr->lt_use_bw = 0;
+ bbr->lt_is_sampling = false;
+ bbr_reset_lt_bw_sampling_interval(sk);
+}
+
+/* Long-term bw sampling interval is done. Estimate whether we're policed. */
+static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 diff;
+
+ if (bbr->lt_bw) { /* do we have bw from a previous interval? */
+ /* Is new bw close to the lt_bw from the previous interval? */
+ diff = abs(bw - bbr->lt_bw);
+ if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) ||
+ (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
+ bbr_lt_bw_diff)) {
+ /* All criteria are met; estimate we're policed. */
+ bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */
+ bbr->lt_use_bw = 1;
+ bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */
+ bbr->lt_rtt_cnt = 0;
+ return;
+ }
+ }
+ bbr->lt_bw = bw;
+ bbr_reset_lt_bw_sampling_interval(sk);
+}
+
+/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of
+ * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and
+ * explicitly models their policed rate, to reduce unnecessary losses. We
+ * estimate that we're policed if we see 2 consecutive sampling intervals with
+ * consistent throughput and high packet loss. If we think we're being policed,
+ * set lt_bw to the "long-term" average delivery rate from those 2 intervals.
+ */
+static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 lost, delivered;
+ u64 bw;
+ s32 t;
+
+ if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
+ if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
+ ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) {
+ bbr_reset_lt_bw_sampling(sk); /* stop using lt_bw */
+ bbr_reset_probe_bw_mode(sk); /* restart gain cycling */
+ }
+ return;
+ }
+
+ /* Wait for the first loss before sampling, to let the policer exhaust
+ * its tokens and estimate the steady-state rate allowed by the policer.
+ * Starting samples earlier includes bursts that over-estimate the bw.
+ */
+ if (!bbr->lt_is_sampling) {
+ if (!rs->losses)
+ return;
+ bbr_reset_lt_bw_sampling_interval(sk);
+ bbr->lt_is_sampling = true;
+ }
+
+ /* To avoid underestimates, reset sampling if we run out of data. */
+ if (rs->is_app_limited) {
+ bbr_reset_lt_bw_sampling(sk);
+ return;
+ }
+
+ if (bbr->round_start)
+ bbr->lt_rtt_cnt++; /* count round trips in this interval */
+ if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts)
+ return; /* sampling interval needs to be longer */
+ if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) {
+ bbr_reset_lt_bw_sampling(sk); /* interval is too long */
+ return;
+ }
+
+ /* End sampling interval when a packet is lost, so we estimate the
+ * policer tokens were exhausted. Stopping the sampling before the
+ * tokens are exhausted under-estimates the policed rate.
+ */
+ if (!rs->losses)
+ return;
+
+ /* Calculate packets lost and delivered in sampling interval. */
+ lost = tp->lost - bbr->lt_last_lost;
+ delivered = tp->delivered - bbr->lt_last_delivered;
+ /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */
+ if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered)
+ return;
+
+ /* Find average delivery rate in this sampling interval. */
+ t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
+ if (t < 1)
+ return; /* interval is less than one jiffy, so wait */
+ t = jiffies_to_usecs(t);
+ /* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
+ if (t < 1) {
+ bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
+ return;
+ }
+ bw = (u64)delivered * BW_UNIT;
+ do_div(bw, t);
+ bbr_lt_bw_interval_done(sk, bw);
+}
+
+/* Estimate the bandwidth based on how fast packets are delivered */
+static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw;
+
+ bbr->round_start = 0;
+ if (rs->delivered < 0 || rs->interval_us <= 0)
+ return; /* Not a valid observation */
+
+ /* See if we've reached the next RTT */
+ if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) {
+ bbr->next_rtt_delivered = tp->delivered;
+ bbr->rtt_cnt++;
+ bbr->round_start = 1;
+ bbr->packet_conservation = 0;
+ }
+
+ bbr_lt_bw_sampling(sk, rs);
+
+ /* Divide delivered by the interval to find a (lower bound) bottleneck
+ * bandwidth sample. Delivered is in packets and interval_us in uS and
+ * ratio will be <<1 for most connections. So delivered is first scaled.
+ */
+ bw = (u64)rs->delivered * BW_UNIT;
+ do_div(bw, rs->interval_us);
+
+ /* If this sample is application-limited, it is likely to have a very
+ * low delivered count that represents application behavior rather than
+ * the available network rate. Such a sample could drag down estimated
+ * bw, causing needless slow-down. Thus, to continue to send at the
+ * last measured network rate, we filter out app-limited samples unless
+ * they describe the path bw at least as well as our bw model.
+ *
+ * So the goal during app-limited phase is to proceed with the best
+ * network rate no matter how long. We automatically leave this
+ * phase when app writes faster than the network can deliver :)
+ */
+ if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
+ /* Incorporate new sample into our max bw filter. */
+ minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw);
+ }
+}
+
+/* Estimate when the pipe is full, using the change in delivery rate: BBR
+ * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by
+ * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited
+ * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the
+ * higher rwin, 3: we get higher delivery rate samples. Or transient
+ * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar
+ * design goal, but uses delay and inter-ACK spacing instead of bandwidth.
+ */
+static void bbr_check_full_bw_reached(struct sock *sk,
+ const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 bw_thresh;
+
+ if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
+ return;
+
+ bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE;
+ if (bbr_max_bw(sk) >= bw_thresh) {
+ bbr->full_bw = bbr_max_bw(sk);
+ bbr->full_bw_cnt = 0;
+ return;
+ }
+ ++bbr->full_bw_cnt;
+}
+
+/* If pipe is probably full, drain the queue and then enter steady-state. */
+static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
+ bbr->mode = BBR_DRAIN; /* drain queue we created */
+ bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */
+ bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */
+ } /* fall through to check if in-flight is already small: */
+ if (bbr->mode == BBR_DRAIN &&
+ tcp_packets_in_flight(tcp_sk(sk)) <=
+ bbr_target_cwnd(sk, bbr_max_bw(sk), BBR_UNIT))
+ bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */
+}
+
+/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
+ * periodically drain the bottleneck queue, to converge to measure the true
+ * min_rtt (unloaded propagation delay). This allows the flows to keep queues
+ * small (reducing queuing delay and packet loss) and achieve fairness among
+ * BBR flows.
+ *
+ * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires,
+ * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets.
+ * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed
+ * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and
+ * re-enter the previous mode. BBR uses 200ms to approximately bound the
+ * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s).
+ *
+ * Note that flows need only pay 2% if they are busy sending over the last 10
+ * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have
+ * natural silences or low-rate periods within 10 seconds where the rate is low
+ * enough for long enough to drain its queue in the bottleneck. We pick up
+ * these min RTT measurements opportunistically with our min_rtt filter. :-)
+ */
+static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ bool filter_expired;
+
+ /* Track min RTT seen in the min_rtt_win_sec filter window: */
+ filter_expired = after(tcp_time_stamp,
+ bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
+ if (rs->rtt_us >= 0 &&
+ (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
+ bbr->min_rtt_us = rs->rtt_us;
+ bbr->min_rtt_stamp = tcp_time_stamp;
+ }
+
+ if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
+ !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) {
+ bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */
+ bbr->pacing_gain = BBR_UNIT;
+ bbr->cwnd_gain = BBR_UNIT;
+ bbr_save_cwnd(sk); /* note cwnd so we can restore it */
+ bbr->probe_rtt_done_stamp = 0;
+ }
+
+ if (bbr->mode == BBR_PROBE_RTT) {
+ /* Ignore low rate samples during this mode. */
+ tp->app_limited =
+ (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
+ /* Maintain min packets in flight for max(200 ms, 1 round). */
+ if (!bbr->probe_rtt_done_stamp &&
+ tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
+ bbr->probe_rtt_done_stamp = tcp_time_stamp +
+ msecs_to_jiffies(bbr_probe_rtt_mode_ms);
+ bbr->probe_rtt_round_done = 0;
+ bbr->next_rtt_delivered = tp->delivered;
+ } else if (bbr->probe_rtt_done_stamp) {
+ if (bbr->round_start)
+ bbr->probe_rtt_round_done = 1;
+ if (bbr->probe_rtt_round_done &&
+ after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
+ bbr->min_rtt_stamp = tcp_time_stamp;
+ bbr->restore_cwnd = 1; /* snap to prior_cwnd */
+ bbr_reset_mode(sk);
+ }
+ }
+ }
+ bbr->idle_restart = 0;
+}
+
+static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
+{
+ bbr_update_bw(sk, rs);
+ bbr_update_cycle_phase(sk, rs);
+ bbr_check_full_bw_reached(sk, rs);
+ bbr_check_drain(sk, rs);
+ bbr_update_min_rtt(sk, rs);
+}
+
+static void bbr_main(struct sock *sk, const struct rate_sample *rs)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+ u32 bw;
+
+ bbr_update_model(sk, rs);
+
+ bw = bbr_bw(sk);
+ bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
+ bbr_set_tso_segs_goal(sk);
+ bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
+}
+
+static void bbr_init(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw;
+
+ bbr->prior_cwnd = 0;
+ bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
+ bbr->rtt_cnt = 0;
+ bbr->next_rtt_delivered = 0;
+ bbr->prev_ca_state = TCP_CA_Open;
+ bbr->packet_conservation = 0;
+
+ bbr->probe_rtt_done_stamp = 0;
+ bbr->probe_rtt_round_done = 0;
+ bbr->min_rtt_us = tcp_min_rtt(tp);
+ bbr->min_rtt_stamp = tcp_time_stamp;
+
+ minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */
+
+ /* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
+ bw = (u64)tp->snd_cwnd * BW_UNIT;
+ do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
+ sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
+ bbr_set_pacing_rate(sk, bw, bbr_high_gain);
+
+ bbr->restore_cwnd = 0;
+ bbr->round_start = 0;
+ bbr->idle_restart = 0;
+ bbr->full_bw = 0;
+ bbr->full_bw_cnt = 0;
+ bbr->cycle_mstamp.v64 = 0;
+ bbr->cycle_idx = 0;
+ bbr_reset_lt_bw_sampling(sk);
+ bbr_reset_startup_mode(sk);
+}
+
+static u32 bbr_sndbuf_expand(struct sock *sk)
+{
+ /* Provision 3 * cwnd since BBR may slow-start even during recovery. */
+ return 3;
+}
+
+/* In theory BBR does not need to undo the cwnd since it does not
+ * always reduce cwnd on losses (see bbr_main()). Keep it for now.
+ */
+static u32 bbr_undo_cwnd(struct sock *sk)
+{
+ return tcp_sk(sk)->snd_cwnd;
+}
+
+/* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
+static u32 bbr_ssthresh(struct sock *sk)
+{
+ bbr_save_cwnd(sk);
+ return TCP_INFINITE_SSTHRESH; /* BBR does not use ssthresh */
+}
+
+static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
+ union tcp_cc_info *info)
+{
+ if (ext & (1 << (INET_DIAG_BBRINFO - 1)) ||
+ ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct bbr *bbr = inet_csk_ca(sk);
+ u64 bw = bbr_bw(sk);
+
+ bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE;
+ memset(&info->bbr, 0, sizeof(info->bbr));
+ info->bbr.bbr_bw_lo = (u32)bw;
+ info->bbr.bbr_bw_hi = (u32)(bw >> 32);
+ info->bbr.bbr_min_rtt = bbr->min_rtt_us;
+ info->bbr.bbr_pacing_gain = bbr->pacing_gain;
+ info->bbr.bbr_cwnd_gain = bbr->cwnd_gain;
+ *attr = INET_DIAG_BBRINFO;
+ return sizeof(info->bbr);
+ }
+ return 0;
+}
+
+static void bbr_set_state(struct sock *sk, u8 new_state)
+{
+ struct bbr *bbr = inet_csk_ca(sk);
+
+ if (new_state == TCP_CA_Loss) {
+ struct rate_sample rs = { .losses = 1 };
+
+ bbr->prev_ca_state = TCP_CA_Loss;
+ bbr->full_bw = 0;
+ bbr->round_start = 1; /* treat RTO like end of a round */
+ bbr_lt_bw_sampling(sk, &rs);
+ }
+}
+
+static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
+ .flags = TCP_CONG_NON_RESTRICTED,
+ .name = "bbr",
+ .owner = THIS_MODULE,
+ .init = bbr_init,
+ .cong_control = bbr_main,
+ .sndbuf_expand = bbr_sndbuf_expand,
+ .undo_cwnd = bbr_undo_cwnd,
+ .cwnd_event = bbr_cwnd_event,
+ .ssthresh = bbr_ssthresh,
+ .tso_segs_goal = bbr_tso_segs_goal,
+ .get_info = bbr_get_info,
+ .set_state = bbr_set_state,
+};
+
+static int __init bbr_register(void)
+{
+ BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
+ return tcp_register_congestion_control(&tcp_bbr_cong_ops);
+}
+
+static void __exit bbr_unregister(void)
+{
+ tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
+}
+
+module_init(bbr_register);
+module_exit(bbr_unregister);
+
+MODULE_AUTHOR("Van Jacobson <vanj@google.com>");
+MODULE_AUTHOR("Neal Cardwell <ncardwell@google.com>");
+MODULE_AUTHOR("Yuchung Cheng <ycheng@google.com>");
+MODULE_AUTHOR("Soheil Hassas Yeganeh <soheil@google.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)");
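Editor's note: BBR keeps bandwidth in fixed point, (packets per usec) << BW_SCALE, and gains in units of BBR_UNIT = 2^8; bbr_rate_bytes_per_sec() converts a sample back to bytes per second with a multiply/shift order chosen to avoid u64 overflow. A standalone recomputation of that conversion (rate_bytes_per_sec() mirrors the function above under those assumptions):

    #include <stdint.h>
    #include <stdio.h>

    #define BW_SCALE     24
    #define BBR_SCALE    8
    #define BBR_UNIT     (1 << BBR_SCALE)
    #define USEC_PER_SEC 1000000ULL

    /* Mirror of bbr_rate_bytes_per_sec(): rate is in (pkts/usec) << 24,
     * gain in BBR_UNIT fixed point, result in bytes per second. The
     * multiply-before-shift order avoids u64 overflow up to ~2.9 Tbit/s. */
    static uint64_t rate_bytes_per_sec(uint64_t rate, uint32_t mtu, int gain)
    {
            rate *= mtu;
            rate *= gain;
            rate >>= BBR_SCALE;
            rate *= USEC_PER_SEC;
            return rate >> BW_SCALE;
    }

    int main(void)
    {
            /* 1 Gbit/s path, 1500-byte MTU: ~0.0833 pkts/usec */
            uint64_t bw = (uint64_t)(0.0833333 * (1 << BW_SCALE));
            int high_gain = BBR_UNIT * 2885 / 1000 + 1; /* 2/ln(2) ~= 2.885 */

            printf("pacing at 1.0x: %llu bytes/s\n",   /* ~125,000,000 */
                   (unsigned long long)rate_bytes_per_sec(bw, 1500, BBR_UNIT));
            printf("pacing at gain: %llu bytes/s\n",
                   (unsigned long long)rate_bytes_per_sec(bw, 1500, high_gain));
            return 0;
    }

As the header comment insists, trying the module in practice pairs `sysctl -w net.ipv4.tcp_congestion_control=bbr` with an fq root qdisc (e.g. `tc qdisc replace dev eth0 root fq`), since BBR relies on pacing.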
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 03725b294286..35b280361cb2 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(use_shadow, "use shadow window heuristic");
module_param(use_tolerance, bool, 0644);
MODULE_PARM_DESC(use_tolerance, "use loss tolerance heuristic");
-struct minmax {
+struct cdg_minmax {
union {
struct {
s32 min;
@@ -74,10 +74,10 @@ enum cdg_state {
};
struct cdg {
- struct minmax rtt;
- struct minmax rtt_prev;
- struct minmax *gradients;
- struct minmax gsum;
+ struct cdg_minmax rtt;
+ struct cdg_minmax rtt_prev;
+ struct cdg_minmax *gradients;
+ struct cdg_minmax gsum;
bool gfilled;
u8 tail;
u8 state;
@@ -353,7 +353,7 @@ static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
{
struct cdg *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct minmax *gradients;
+ struct cdg_minmax *gradients;
switch (ev) {
case CA_EVENT_CWND_RESTART:
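Editor's note: the rename to cdg_minmax exists only to avoid colliding with the new global struct minmax that <linux/win_minmax.h> introduces for this series (used above for tp->rtt_min and bbr->bw). The toy filter below shows only the windowing semantics the new helpers provide; the real kernel filter keeps three time-ordered samples so the estimate degrades gracefully, which this single-slot sketch does not attempt:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy single-slot stand-in for minmax_running_max(): keep the max
     * measurement seen within the last `win` time units. */
    struct toy_minmax { uint32_t t, v; };

    static uint32_t toy_running_max(struct toy_minmax *m, uint32_t win,
                                    uint32_t t, uint32_t meas)
    {
            if (meas >= m->v || (t - m->t) > win) {
                    m->t = t;
                    m->v = meas;   /* new max, or the old one aged out */
            }
            return m->v;
    }

    int main(void)
    {
            struct toy_minmax bw = { 0, 0 };

            printf("%u\n", toy_running_max(&bw, 10, 1, 100)); /* 100 */
            printf("%u\n", toy_running_max(&bw, 10, 5, 80));  /* still 100 */
            printf("%u\n", toy_running_max(&bw, 10, 20, 80)); /* expired: 80 */
            return 0;
    }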
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 882caa4e72bc..1294af4e0127 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -69,7 +69,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
int ret = 0;
/* all algorithms must implement ssthresh and cong_avoid ops */
- if (!ca->ssthresh || !ca->cong_avoid) {
+ if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
pr_err("%s does not implement required ops\n", ca->name);
return -EINVAL;
}
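Editor's note: with this relaxed check, a congestion module may provide cong_control, which takes over cwnd and pacing decisions once per ACK with a full rate sample, instead of the classic cong_avoid hook; tcp_bbr.c above is the first user. A minimal skeleton of the ops wiring, a sketch rather than a useful algorithm (the "demo" name and fixed cwnd are illustrative only):

    #include <linux/module.h>
    #include <net/tcp.h>

    /* Skeleton only: pin cwnd at 10 packets to show the hook shape.
     * A real cong_control implementation (see tcp_bbr.c above) must
     * set both tp->snd_cwnd and sk->sk_pacing_rate from rs. */
    static void demo_cong_control(struct sock *sk, const struct rate_sample *rs)
    {
            tcp_sk(sk)->snd_cwnd = 10;
    }

    static u32 demo_ssthresh(struct sock *sk)
    {
            return TCP_INFINITE_SSTHRESH; /* don't react to loss here */
    }

    static struct tcp_congestion_ops demo_ops __read_mostly = {
            .name         = "demo",
            .owner        = THIS_MODULE,
            .ssthresh     = demo_ssthresh,     /* still mandatory */
            .cong_control = demo_cong_control, /* replaces cong_avoid */
    };

    static int __init demo_register(void)
    {
            return tcp_register_congestion_control(&demo_ops);
    }

    static void __exit demo_unregister(void)
    {
            tcp_unregister_congestion_control(&demo_ops);
    }

    module_init(demo_register);
    module_exit(demo_unregister);
    MODULE_LICENSE("GPL");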
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 54d9f9b0120f..4e777a3243f9 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -150,6 +150,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
tp->segs_in = 0;
tcp_segs_in(tp, skb);
__skb_pull(skb, tcp_hdrlen(skb));
+ sk_forced_mem_schedule(sk, skb->truesize);
skb_set_owner_r(skb, sk);
TCP_SKB_CB(skb)->seq++;
@@ -226,6 +227,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
tcp_fastopen_add_skb(child, skb);
tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
+ tp->rcv_wup = tp->rcv_nxt;
/* tcp_conn_request() is sending the SYNACK,
* and queues the child into listener accept queue.
*/
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f3a9f3c2c8d8..8c6ad2d319d6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -289,6 +289,7 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
static void tcp_sndbuf_expand(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
int sndmem, per_mss;
u32 nr_segs;
@@ -309,7 +310,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
* Cubic needs 1.7 factor, rounded to 2 to include
* extra cushion (application might react slowly to POLLOUT)
*/
- sndmem = 2 * nr_segs * per_mss;
+ sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
+ sndmem *= nr_segs * per_mss;
if (sk->sk_sndbuf < sndmem)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
@@ -899,12 +901,29 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
+/* Sum the number of packets on the wire we have marked as lost.
+ * There are two cases we care about here:
+ * a) Packet hasn't been marked lost (nor retransmitted),
+ * and this is the first loss.
+ * b) Packet has been marked both lost and retransmitted,
+ * and this means we think it was lost again.
+ */
+static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
+{
+ __u8 sacked = TCP_SKB_CB(skb)->sacked;
+
+ if (!(sacked & TCPCB_LOST) ||
+ ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
+ tp->lost += tcp_skb_pcount(skb);
+}
+
static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tcp_verify_retransmit_hint(tp, skb);
tp->lost_out += tcp_skb_pcount(skb);
+ tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
}
}
@@ -913,6 +932,7 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);
+ tcp_sum_lost(tp, skb);
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -1094,6 +1114,7 @@ struct tcp_sacktag_state {
*/
struct skb_mstamp first_sackt;
struct skb_mstamp last_sackt;
+ struct rate_sample *rate;
int flag;
};
@@ -1261,6 +1282,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
&skb->skb_mstamp);
+ tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1311,6 +1333,9 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_advance_highest_sack(sk, skb);
tcp_skb_collapse_tstamp(prev, skb);
+ if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
+ TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
@@ -1540,6 +1565,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
dup_sack,
tcp_skb_pcount(skb),
&skb->skb_mstamp);
+ tcp_rate_skb_delivered(sk, skb, state->rate);
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1622,8 +1648,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
num_sacks, prior_snd_una);
- if (found_dup_sack)
+ if (found_dup_sack) {
state->flag |= FLAG_DSACKING_ACK;
+ tp->delivered++; /* A spurious retransmission is delivered */
+ }
/* Eliminate too old ACKs, but take into
* account more or less fresh ones, they can
@@ -1890,6 +1918,7 @@ void tcp_enter_loss(struct sock *sk)
struct sk_buff *skb;
bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
+ bool mark_lost;
/* Reduce ssthresh if it has not yet been made inside this window. */
if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
@@ -1923,8 +1952,12 @@ void tcp_enter_loss(struct sock *sk)
if (skb == tcp_send_head(sk))
break;
+ mark_lost = (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
+ is_reneg);
+ if (mark_lost)
+ tcp_sum_lost(tp, skb);
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
- if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) {
+ if (mark_lost) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
@@ -2503,6 +2536,9 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ if (inet_csk(sk)->icsk_ca_ops->cong_control)
+ return;
+
/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
(tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
@@ -2879,67 +2915,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
*rexmit = REXMIT_LOST;
}
-/* Kathleen Nichols' algorithm for tracking the minimum value of
- * a data stream over some fixed time interval. (E.g., the minimum
- * RTT over the past five minutes.) It uses constant space and constant
- * time per update yet almost always delivers the same minimum as an
- * implementation that has to keep all the data in the window.
- *
- * The algorithm keeps track of the best, 2nd best & 3rd best min
- * values, maintaining an invariant that the measurement time of the
- * n'th best >= n-1'th best. It also makes sure that the three values
- * are widely separated in the time window since that bounds the worse
- * case error when that data is monotonically increasing over the window.
- *
- * Upon getting a new min, we can forget everything earlier because it
- * has no value - the new min is <= everything else in the window by
- * definition and it's the most recent. So we restart fresh on every new min
- * and overwrites 2nd & 3rd choices. The same property holds for 2nd & 3rd
- * best.
- */
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us)
{
- const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
- struct rtt_meas *m = tcp_sk(sk)->rtt_min;
- struct rtt_meas rttm = {
- .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
- .ts = now,
- };
- u32 elapsed;
-
- /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
- if (unlikely(rttm.rtt <= m[0].rtt))
- m[0] = m[1] = m[2] = rttm;
- else if (rttm.rtt <= m[1].rtt)
- m[1] = m[2] = rttm;
- else if (rttm.rtt <= m[2].rtt)
- m[2] = rttm;
-
- elapsed = now - m[0].ts;
- if (unlikely(elapsed > wlen)) {
- /* Passed entire window without a new min so make 2nd choice
- * the new min & 3rd choice the new 2nd. So forth and so on.
- */
- m[0] = m[1];
- m[1] = m[2];
- m[2] = rttm;
- if (now - m[0].ts > wlen) {
- m[0] = m[1];
- m[1] = rttm;
- if (now - m[0].ts > wlen)
- m[0] = rttm;
- }
- } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
- /* Passed a quarter of the window without a new min so
- * take 2nd choice from the 2nd quarter of the window.
- */
- m[2] = m[1] = rttm;
- } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
- /* Passed half the window without a new min so take the 3rd
- * choice from the last half of the window.
- */
- m[2] = rttm;
- }
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 wlen = sysctl_tcp_min_rtt_wlen * HZ;
+
+ minmax_running_min(&tp->rtt_min, wlen, tcp_time_stamp,
+ rtt_us ? : jiffies_to_usecs(1));
}
static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
@@ -3102,10 +3084,11 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
u32 prior_snd_una, int *acked,
- struct tcp_sacktag_state *sack)
+ struct tcp_sacktag_state *sack,
+ struct skb_mstamp *now)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct skb_mstamp first_ackt, last_ackt, now;
+ struct skb_mstamp first_ackt, last_ackt;
struct tcp_sock *tp = tcp_sk(sk);
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
@@ -3137,7 +3120,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
acked_pcount = tcp_tso_acked(sk, skb);
if (!acked_pcount)
break;
-
fully_acked = false;
} else {
/* Speedup tcp_unlink_write_queue() and next loop */
@@ -3173,6 +3155,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->packets_out -= acked_pcount;
pkts_acked += acked_pcount;
+ tcp_rate_skb_delivered(sk, skb, sack->rate);
/* Initial outgoing SYN's get put onto the write_queue
* just like anything else we transmit. It is not
@@ -3205,16 +3188,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- skb_mstamp_get(&now);
if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
- seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
- ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+ seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
+ ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
}
if (sack->first_sackt.v64) {
- sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
- ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
+ sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
+ ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
}
-
+ sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
ca_rtt_us);
@@ -3242,7 +3224,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
- sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
+ sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3333,8 +3315,15 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
* information. All transmission or retransmission are delayed afterwards.
*/
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
- int flag)
+ int flag, const struct rate_sample *rs)
{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ca_ops->cong_control) {
+ icsk->icsk_ca_ops->cong_control(sk, rs);
+ return;
+ }
+
if (tcp_in_cwnd_reduction(sk)) {
/* Reduce cwnd if state mandates */
tcp_cwnd_reduction(sk, acked_sacked, flag);
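With the new branch above, a congestion-control module that provides cong_control takes over cwnd and pacing entirely and sees every rate sample, bypassing the PRR and cong_avoid paths. A hypothetical minimal module shape (the cong_control hook and rate_sample fields come from this series; the policy shown is a placeholder, not a real algorithm such as BBR):

/* Hypothetical skeleton of a CC module using the new hook; the policy
 * below is a placeholder, not a real algorithm.
 */
static void demo_cong_control(struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (rs->delivered < 0 || rs->interval_us <= 0)
		return;			/* invalid sample: no timing info */

	/* Placeholder policy: open cwnd by the freshly (S)ACKed amount. */
	tp->snd_cwnd = min(tp->snd_cwnd + rs->acked_sacked,
			   tp->snd_cwnd_clamp);
}

static struct tcp_congestion_ops demo_cc __read_mostly = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_control	= demo_cong_control,
};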
@@ -3579,17 +3568,21 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_sacktag_state sack_state;
+ struct rate_sample rs = { .prior_delivered = 0 };
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
u32 prior_fackets;
int prior_packets = tp->packets_out;
- u32 prior_delivered = tp->delivered;
+ u32 delivered = tp->delivered;
+ u32 lost = tp->lost;
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
+ struct skb_mstamp now;
sack_state.first_sackt.v64 = 0;
+ sack_state.rate = &rs;
/* We very likely will need to access write queue head. */
prefetchw(sk->sk_write_queue.next);
@@ -3612,6 +3605,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, tp->snd_nxt))
goto invalid_ack;
+ skb_mstamp_get(&now);
+
if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
@@ -3622,6 +3617,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
}
prior_fackets = tp->fackets_out;
+ rs.prior_in_flight = tcp_packets_in_flight(tp);
/* ts_recent update must be made after we are sure that the packet
* is in window.
@@ -3677,7 +3673,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
- &sack_state);
+ &sack_state, &now);
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
@@ -3694,7 +3690,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (icsk->icsk_pending == ICSK_TIME_RETRANS)
tcp_schedule_loss_probe(sk);
- tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
+ delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
+ lost = tp->lost - lost; /* freshly marked lost */
+ tcp_rate_gen(sk, delivered, lost, &now, &rs);
+ tcp_cong_control(sk, ack, delivered, flag, &rs);
tcp_xmit_recovery(sk, rexmit);
return 1;
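The two subtractions above turn the running tp->delivered / tp->lost counters into per-ACK deltas; a worked example with assumed counter values:

/* Assumed values for illustration:
 * before this ACK:  tp->delivered = 100, tp->lost = 20
 * after processing: tp->delivered = 103, tp->lost = 21
 * => delivered = 103 - 100 = 3  (freshly ACKed or SACKed)
 *    lost      =  21 -  20 = 1  (freshly marked lost)
 * and tcp_rate_gen() receives exactly this 3/1 sample.
 */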
@@ -4108,7 +4107,7 @@ void tcp_fin(struct sock *sk)
/* It _is_ possible, that we have something out-of-order _after_ FIN.
* Probably, we should reset in this case. For now drop them.
*/
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
@@ -4268,7 +4267,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
int this_sack;
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
- if (skb_queue_empty(&tp->out_of_order_queue)) {
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
return;
}
@@ -4344,10 +4343,13 @@ static void tcp_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
+ bool fin, fragstolen, eaten;
struct sk_buff *skb, *tail;
- bool fragstolen, eaten;
+ struct rb_node *p;
- while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
+ p = rb_first(&tp->out_of_order_queue);
+ while (p) {
+ skb = rb_entry(p, struct sk_buff, rbnode);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
@@ -4357,9 +4359,10 @@ static void tcp_ofo_queue(struct sock *sk)
dsack_high = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, &tp->out_of_order_queue);
- __skb_unlink(skb, &tp->out_of_order_queue);
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+ if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
tcp_drop(sk, skb);
continue;
@@ -4371,12 +4374,19 @@ static void tcp_ofo_queue(struct sock *sk)
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+ fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- tcp_fin(sk);
- if (eaten)
+ else
kfree_skb_partial(skb, fragstolen);
+
+ if (unlikely(fin)) {
+ tcp_fin(sk);
+ /* tcp_fin() purges tp->out_of_order_queue,
+ * so we must end this loop right now.
+ */
+ break;
+ }
}
}
@@ -4403,8 +4413,10 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct rb_node **p, *q, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
+ bool fragstolen;
tcp_ecn_check_ce(tp, skb);
@@ -4419,88 +4431,92 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
inet_csk_schedule_ack(sk);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+ seq = TCP_SKB_CB(skb)->seq;
+ end_seq = TCP_SKB_CB(skb)->end_seq;
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+ tp->rcv_nxt, seq, end_seq);
- skb1 = skb_peek_tail(&tp->out_of_order_queue);
- if (!skb1) {
+ p = &tp->out_of_order_queue.rb_node;
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
- tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
- tp->selective_acks[0].end_seq =
- TCP_SKB_CB(skb)->end_seq;
+ tp->selective_acks[0].start_seq = seq;
+ tp->selective_acks[0].end_seq = end_seq;
}
- __skb_queue_head(&tp->out_of_order_queue, skb);
+ rb_link_node(&skb->rbnode, NULL, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+ tp->ooo_last_skb = skb;
goto end;
}
- seq = TCP_SKB_CB(skb)->seq;
- end_seq = TCP_SKB_CB(skb)->end_seq;
-
- if (seq == TCP_SKB_CB(skb1)->end_seq) {
- bool fragstolen;
-
- if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
- } else {
- tcp_grow_window(sk, skb);
- kfree_skb_partial(skb, fragstolen);
- skb = NULL;
- }
-
- if (!tp->rx_opt.num_sacks ||
- tp->selective_acks[0].end_seq != seq)
- goto add_sack;
-
- /* Common case: data arrive in order after hole. */
- tp->selective_acks[0].end_seq = end_seq;
- goto end;
- }
-
- /* Find place to insert this segment. */
- while (1) {
- if (!after(TCP_SKB_CB(skb1)->seq, seq))
- break;
- if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
- skb1 = NULL;
- break;
- }
- skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
- }
-
- /* Do skb overlap to previous one? */
- if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
- if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
- /* All the bits are present. Drop. */
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- tcp_drop(sk, skb);
- skb = NULL;
- tcp_dsack_set(sk, seq, end_seq);
- goto add_sack;
+	/* In the typical case, we are adding an skb to the end of the queue.
+	 * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
+ */
+ if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
+coalesce_done:
+ tcp_grow_window(sk, skb);
+ kfree_skb_partial(skb, fragstolen);
+ skb = NULL;
+ goto add_sack;
+ }
+ /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
+ if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
+ parent = &tp->ooo_last_skb->rbnode;
+ p = &parent->rb_right;
+ goto insert;
+ }
+
+ /* Find place to insert this segment. Handle overlaps on the way. */
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+ p = &parent->rb_left;
+ continue;
}
- if (after(seq, TCP_SKB_CB(skb1)->seq)) {
- /* Partial overlap. */
- tcp_dsack_set(sk, seq,
- TCP_SKB_CB(skb1)->end_seq);
- } else {
- if (skb_queue_is_first(&tp->out_of_order_queue,
- skb1))
- skb1 = NULL;
- else
- skb1 = skb_queue_prev(
- &tp->out_of_order_queue,
- skb1);
+ if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+ /* All the bits are present. Drop. */
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ __kfree_skb(skb);
+ skb = NULL;
+ tcp_dsack_set(sk, seq, end_seq);
+ goto add_sack;
+ }
+ if (after(seq, TCP_SKB_CB(skb1)->seq)) {
+ /* Partial overlap. */
+ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
+ } else {
+ /* skb's seq == skb1's seq and skb covers skb1.
+ * Replace skb1 with skb.
+ */
+ rb_replace_node(&skb1->rbnode, &skb->rbnode,
+ &tp->out_of_order_queue);
+ tcp_dsack_extend(sk,
+ TCP_SKB_CB(skb1)->seq,
+ TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ __kfree_skb(skb1);
+ goto merge_right;
+ }
+ } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
+ goto coalesce_done;
}
+ p = &parent->rb_right;
}
- if (!skb1)
- __skb_queue_head(&tp->out_of_order_queue, skb);
- else
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
+insert:
+ /* Insert segment into RB tree. */
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
- /* And clean segments covered by new one as whole. */
- while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
- skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+merge_right:
+ /* Remove other segments covered by skb. */
+ while ((q = rb_next(&skb->rbnode)) != NULL) {
+ skb1 = rb_entry(q, struct sk_buff, rbnode);
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
@@ -4509,12 +4525,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
end_seq);
break;
}
- __skb_unlink(skb1, &tp->out_of_order_queue);
+ rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
tcp_drop(sk, skb1);
}
+	/* If there is no skb after us, we are the last_skb! */
+ if (!q)
+ tp->ooo_last_skb = skb;
add_sack:
if (tcp_is_sack(tp))
@@ -4651,13 +4670,13 @@ queue_and_out:
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
+ if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
/* RFC2581. 4.2. SHOULD send immediate ACK, when
* gap in queue is filled.
*/
- if (skb_queue_empty(&tp->out_of_order_queue))
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
inet_csk(sk)->icsk_ack.pingpong = 0;
}
@@ -4711,48 +4730,76 @@ drop:
tcp_data_queue_ofo(sk, skb);
}
+static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ if (list)
+ return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+
+ return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+}
+
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *list)
+ struct sk_buff_head *list,
+ struct rb_root *root)
{
- struct sk_buff *next = NULL;
+ struct sk_buff *next = tcp_skb_next(skb, list);
- if (!skb_queue_is_last(list, skb))
- next = skb_queue_next(list, skb);
+ if (list)
+ __skb_unlink(skb, list);
+ else
+ rb_erase(&skb->rbnode, root);
- __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
return next;
}
+/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
+static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct sk_buff *skb1;
+
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, root);
+}
+
/* Collapse contiguous sequence of skbs head..tail with
* sequence numbers start..end.
*
- * If tail is NULL, this means until the end of the list.
+ * If tail is NULL, this means until the end of the queue.
*
* Segments with FIN/SYN are not collapsed (only because this
* simplifies code)
*/
static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
- struct sk_buff *head, struct sk_buff *tail,
- u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
+ struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
{
- struct sk_buff *skb, *n;
+ struct sk_buff *skb = head, *n;
+ struct sk_buff_head tmp;
bool end_of_skbs;
/* First, check that queue is collapsible and find
- * the point where collapsing can be useful. */
- skb = head;
+ * the point where collapsing can be useful.
+ */
restart:
- end_of_skbs = true;
- skb_queue_walk_from_safe(list, skb, n) {
- if (skb == tail)
- break;
+ for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
+ n = tcp_skb_next(skb, list);
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb)
break;
goto restart;
@@ -4770,13 +4817,10 @@ restart:
break;
}
- if (!skb_queue_is_last(list, skb)) {
- struct sk_buff *next = skb_queue_next(list, skb);
- if (next != tail &&
- TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
- end_of_skbs = false;
- break;
- }
+ if (n && n != tail &&
+ TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
+ end_of_skbs = false;
+ break;
}
/* Decided to skip this, advance start seq. */
@@ -4786,17 +4830,22 @@ restart:
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
+ __skb_queue_head_init(&tmp);
+
while (before(start, end)) {
int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
struct sk_buff *nskb;
nskb = alloc_skb(copy, GFP_ATOMIC);
if (!nskb)
- return;
+ break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
- __skb_queue_before(list, skb, nskb);
+ if (list)
+ __skb_queue_before(list, skb, nskb);
+ else
+ __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
skb_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
@@ -4814,14 +4863,17 @@ restart:
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
- return;
+ goto end;
}
}
}
+end:
+ skb_queue_walk_safe(&tmp, skb, n)
+ tcp_rbtree_insert(root, skb);
}
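A note on the tmp-list dance above (a reading of the code, not stated in the patch):

/* Outline of the defer-then-insert pattern used by tcp_collapse():
 *   1. walk the range head..tail, copying data into freshly allocated skbs;
 *   2. park each new skb on a local list (tmp) instead of the rbtree,
 *      presumably because the walk is still erasing collapsed nodes
 *      from that same tree;
 *   3. after the walk, insert everything from tmp via tcp_rbtree_insert().
 */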
/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
@@ -4830,43 +4882,43 @@ restart:
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
- struct sk_buff *head;
+ struct sk_buff *skb, *head;
+ struct rb_node *p;
u32 start, end;
- if (!skb)
+ p = rb_first(&tp->out_of_order_queue);
+ skb = rb_entry_safe(p, struct sk_buff, rbnode);
+new_range:
+ if (!skb) {
+ p = rb_last(&tp->out_of_order_queue);
+		/* Note: it is possible that p is NULL here. We do not
+		 * use rb_entry_safe(), as ooo_last_skb only needs to be
+		 * valid when the rbtree is not empty.
+ */
+ tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
return;
-
+ }
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
- head = skb;
-
- for (;;) {
- struct sk_buff *next = NULL;
- if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
- next = skb_queue_next(&tp->out_of_order_queue, skb);
- skb = next;
+ for (head = skb;;) {
+ skb = tcp_skb_next(skb, NULL);
- /* Segment is terminated when we see gap or when
- * we are at the end of all the queue. */
+ /* Range is terminated when we see a gap or when
+ * we are at the queue end.
+ */
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, &tp->out_of_order_queue,
+ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
head, skb, start, end);
- head = skb;
- if (!skb)
- break;
- /* Start new segment */
+ goto new_range;
+ }
+
+ if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
+ if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
- } else {
- if (before(TCP_SKB_CB(skb)->seq, start))
- start = TCP_SKB_CB(skb)->seq;
- if (after(TCP_SKB_CB(skb)->end_seq, end))
- end = TCP_SKB_CB(skb)->end_seq;
- }
}
}
@@ -4883,20 +4935,24 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb;
+ struct rb_node *node, *prev;
- if (skb_queue_empty(&tp->out_of_order_queue))
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
return false;
NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
-
- while ((skb = __skb_dequeue_tail(&tp->out_of_order_queue)) != NULL) {
- tcp_drop(sk, skb);
+ node = &tp->ooo_last_skb->rbnode;
+ do {
+ prev = rb_prev(node);
+ rb_erase(node, &tp->out_of_order_queue);
+ tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
!tcp_under_memory_pressure(sk))
break;
- }
+ node = prev;
+ } while (node);
+ tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
@@ -4930,7 +4986,7 @@ static int tcp_prune_queue(struct sock *sk)
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
- tcp_collapse(sk, &sk->sk_receive_queue,
+ tcp_collapse(sk, &sk->sk_receive_queue, NULL,
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
@@ -5035,7 +5091,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
- (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
+ (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
@@ -5894,7 +5950,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
* so release it.
*/
if (req) {
- tp->total_retrans = req->num_retrans;
+ inet_csk(sk)->icsk_retransmits = 0;
reqsk_fastopen_remove(sk, req, false);
} else {
/* Make sure socket is routed, for correct metrics. */
@@ -5936,7 +5992,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
} else
tcp_init_metrics(sk);
- tcp_update_pacing_rate(sk);
+ if (!inet_csk(sk)->icsk_ca_ops->cong_control)
+ tcp_update_pacing_rate(sk);
/* Prevent spurious tcp_cwnd_restart() on first data packet */
tp->lsndtime = tcp_time_stamp;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13b05adf9d3e..7ac37c314312 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1844,7 +1844,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
tcp_write_queue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4b95ec4ed2c8..6234ebaa7db1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -464,7 +464,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->srtt_us = 0;
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
- newtp->rtt_min[0].rtt = ~0U;
+ minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
@@ -487,8 +487,10 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->snd_cwnd = TCP_INIT_CWND;
newtp->snd_cwnd_cnt = 0;
+ /* There's a bubble in the pipe until at least the first ACK. */
+ newtp->app_limited = ~0U;
+
tcp_init_xmit_timers(newsk);
- __skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 5c5964962d0c..bc68da38ea86 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -90,12 +90,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
goto out;
}
- /* GSO partial only requires splitting the frame into an MSS
- * multiple and possibly a remainder. So update the mss now.
- */
- if (features & NETIF_F_GSO_PARTIAL)
- mss = skb->len - (skb->len % mss);
-
copy_destructor = gso_skb->destructor == tcp_wfree;
ooo_okay = gso_skb->ooo_okay;
/* All segments but the first should have ooo_okay cleared */
@@ -108,6 +102,13 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
/* Only first segment might have ooo_okay set */
segs->ooo_okay = ooo_okay;
+	/* GSO partial and frag_list segmentation only require splitting
+	 * the frame into an MSS multiple and possibly a remainder; both
+	 * cases return a GSO skb. So update the mss now.
+ */
+ if (skb_is_gso(segs))
+ mss *= skb_shinfo(segs)->gso_segs;
+
delta = htonl(oldlen + (thlen + mss));
skb = segs;
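Worked numbers (assumed) for the mss scaling just above:

/* Assumed example: gso_size (mss) = 1448, gso_segs = 4 per output skb.
 * mss *= gso_segs  =>  mss = 5792, so the checksum delta computed from
 * (thlen + mss) covers the whole 4-segment super-segment, matching the
 * length a GSO-partial skb actually carries, rather than a single MSS.
 */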
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8b45794eb6b2..7c777089a4d6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -734,9 +734,16 @@ static void tcp_tsq_handler(struct sock *sk)
{
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
- TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
- tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->lost_out > tp->retrans_out &&
+ tp->snd_cwnd > tcp_packets_in_flight(tp))
+ tcp_xmit_retransmit_queue(sk);
+
+ tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
0, GFP_ATOMIC);
+ }
}
/*
* One tasklet per cpu tries to send more skbs.
@@ -918,6 +925,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb_mstamp_get(&skb->skb_mstamp);
TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
- tp->snd_una;
+ tcp_rate_skb_sent(sk, skb);
if (unlikely(skb_cloned(skb)))
skb = pskb_copy(skb, gfp_mask);
@@ -1213,6 +1221,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
tcp_set_skb_tso_segs(skb, mss_now);
tcp_set_skb_tso_segs(buff, mss_now);
+ /* Update delivered info for the new segment */
+ TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
+
/* If this packet has been sent out already, we must
* adjust the various packet counters.
*/
@@ -1358,6 +1369,7 @@ int tcp_mss_to_mtu(struct sock *sk, int mss)
}
return mtu;
}
+EXPORT_SYMBOL(tcp_mss_to_mtu);
/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
@@ -1545,7 +1557,8 @@ static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
/* Return how many segs we'd like on a TSO packet,
* to send one TSO packet per ms
*/
-static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs)
{
u32 bytes, segs;
@@ -1557,10 +1570,23 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
* This preserves ACK clocking and is consistent
* with tcp_tso_should_defer() heuristic.
*/
- segs = max_t(u32, bytes / mss_now, sysctl_tcp_min_tso_segs);
+ segs = max_t(u32, bytes / mss_now, min_tso_segs);
return min_t(u32, segs, sk->sk_gso_max_segs);
}
+EXPORT_SYMBOL(tcp_tso_autosize);
+
+/* Return the number of segments we want in the skb we are transmitting.
+ * See if congestion control module wants to decide; otherwise, autosize.
+ */
+static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
+{
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+ u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
+
+ return tso_segs ? :
+ tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
+}
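A worked example of the autosizing math, with assumed values:

/* Assumed example: sk_pacing_rate = 12,500,000 B/s (~100 Mbit/s),
 * mss_now = 1448, sysctl_tcp_min_tso_segs = 2.
 *   bytes = 12500000 >> 10 = 12207     (~1 ms worth of payload)
 *   segs  = max(12207 / 1448, 2) = 8   (then clamped to sk_gso_max_segs)
 * so each TSO skb carries ~8 segments, preserving ~1 ms ACK clocking.
 */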
/* Returns the portion of skb which can be sent right away */
static unsigned int tcp_mss_split_point(const struct sock *sk,
@@ -2020,6 +2046,39 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
+/* TCP Small Queues :
+ * Control the number of packets in qdisc/device queues to two packets,
+ * or ~1 ms of data.
+ * (These limits are doubled for retransmits)
+ * This allows for :
+ * - better RTT estimation and ACK scheduling
+ * - faster recovery
+ * - high rates
+ * Alas, some drivers / subsystems require a fair amount
+ * of queued bytes to ensure line rate.
+ * One example is wifi aggregation (802.11 AMPDU)
+ */
+static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
+ unsigned int factor)
+{
+ unsigned int limit;
+
+ limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
+ limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
+ limit <<= factor;
+
+ if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+ set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+		/* It is possible that TX completion already happened
+		 * before we set TSQ_THROTTLED, so we must
+		 * test the condition again.
+ */
+ smp_mb__after_atomic();
+ if (atomic_read(&sk->sk_wmem_alloc) > limit)
+ return true;
+ }
+ return false;
+}
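A worked example of the limit computation, with assumed values (the sysctl default shown is an assumption of the usual 256 KB setting):

/* Assumed example: skb->truesize = 4096, sk_pacing_rate = 12,500,000 B/s,
 * sysctl_tcp_limit_output_bytes = 262144, factor = 0.
 *   limit = max(2 * 4096, 12500000 >> 10) = max(8192, 12207) = 12207
 *   limit = min(12207, 262144) = 12207 bytes  (~1 ms at the pacing rate)
 * With factor = 1 (the retransmit path) the limit doubles to 24414.
 */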
+
/* This routine writes packets to the network. It advances the
* send_head. This happens as incoming acks open up the remote
* window for us.
@@ -2057,7 +2116,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
}
- max_segs = tcp_tso_autosize(sk, mss_now);
+ max_segs = tcp_tso_segs(sk, mss_now);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@@ -2106,29 +2165,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
- /* TCP Small Queues :
- * Control number of packets in qdisc/devices to two packets / or ~1 ms.
- * This allows for :
- * - better RTT estimation and ACK scheduling
- * - faster recovery
- * - high rates
- * Alas, some drivers / subsystems require a fair amount
- * of queued bytes to ensure line rate.
- * One example is wifi aggregation (802.11 AMPDU)
- */
- limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
- limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
-
- if (atomic_read(&sk->sk_wmem_alloc) > limit) {
- set_bit(TSQ_THROTTLED, &tp->tsq_flags);
- /* It is possible TX completion already happened
- * before we set TSQ_THROTTLED, so we must
- * test again the condition.
- */
- smp_mb__after_atomic();
- if (atomic_read(&sk->sk_wmem_alloc) > limit)
- break;
- }
+ if (tcp_small_queue_check(sk, skb, 0))
+ break;
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
@@ -2605,7 +2643,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
* copying overhead: fragmentation, tunneling, mangling etc.
*/
if (atomic_read(&sk->sk_wmem_alloc) >
- min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+ min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+ sk->sk_sndbuf))
return -EAGAIN;
if (skb_still_in_host_queue(sk, skb))
@@ -2774,7 +2813,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
last_lost = tp->snd_una;
}
- max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
+ max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
tcp_for_write_queue_from(skb, sk) {
__u8 sacked;
int segs;
@@ -2828,10 +2867,13 @@ begin_fwd:
if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
continue;
+ if (tcp_small_queue_check(sk, skb, 1))
+ return;
+
if (tcp_retransmit_skb(sk, skb, segs))
return;
- NET_INC_STATS(sock_net(sk), mib_idx);
+ NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
if (tcp_in_cwnd_reduction(sk))
tp->prr_out += tcp_skb_pcount(skb);
@@ -3568,6 +3610,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
if (!res) {
__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+ if (unlikely(tcp_passive_fastopen(sk)))
+ tcp_sk(sk)->total_retrans++;
}
return res;
}
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
new file mode 100644
index 000000000000..9be1581a5a08
--- /dev/null
+++ b/net/ipv4/tcp_rate.c
@@ -0,0 +1,186 @@
+#include <net/tcp.h>
+
+/* The bandwidth estimator estimates the rate at which the network
+ * can currently deliver outbound data packets for this flow. At a high
+ * level, it operates by taking a delivery rate sample for each ACK.
+ *
+ * A rate sample records the rate at which the network delivered packets
+ * for this flow, calculated over the time interval between the transmission
+ * of a data packet and the acknowledgment of that packet.
+ *
+ * Specifically, over the interval between each transmit and corresponding ACK,
+ * the estimator generates a delivery rate sample. Typically it uses the rate
+ * at which packets were acknowledged. However, the approach of using only the
+ * acknowledgment rate faces a challenge under prevalent ACK decimation or
+ * compression: packets can temporarily appear to be delivered much faster
+ * than the bottleneck rate. Since it is physically impossible to do that in a
+ * sustained fashion, when the estimator notices that the ACK rate is faster
+ * than the transmit rate, it uses the latter:
+ *
+ * send_rate = #pkts_delivered/(last_snd_time - first_snd_time)
+ * ack_rate = #pkts_delivered/(last_ack_time - first_ack_time)
+ * bw = min(send_rate, ack_rate)
+ *
+ * Note that the estimator essentially estimates goodput, which is not
+ * always the bottleneck link rate: when sending or receiving is limited
+ * by other factors, such as the application or the receiver window, the
+ * sample reflects that lower rate. The estimator deliberately avoids the
+ * inter-packet-spacing approach because that approach requires a large
+ * number of samples and sophisticated filtering.
+ *
+ * TCP flows can often be application-limited in request/response workloads.
+ * The estimator marks a bandwidth sample as application-limited if there
+ * was some moment during the sampled window of packets when there was no data
+ * ready to send in the write queue.
+ */
+
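The bw = min(send_rate, ack_rate) rule reduces to dividing delivered packets by the longer of the two phases. A tiny illustrative helper in that spirit (not kernel code; tcp_rate_gen() below stores the raw delivered/interval pair instead of dividing):

/* Illustrative only: turn one (send-phase, ack-phase) pair into a
 * packets-per-second estimate, taking the slower phase as the divisor
 * the way tcp_rate_gen() does.
 */
static u64 bw_pkts_per_sec(u32 delivered, u32 snd_us, u32 ack_us)
{
	u32 interval_us = max(snd_us, ack_us);

	if (!interval_us)
		return 0;
	return div_u64((u64)delivered * USEC_PER_SEC, interval_us);
}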
+/* Snapshot the current delivery information in the skb, to generate
+ * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
+ */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* In general we need to start delivery rate samples from the
+ * time we received the most recent ACK, to ensure we include
+ * the full time the network needs to deliver all in-flight
+ * packets. If there are no packets in flight yet, then we
+ * know that any ACKs after now indicate that the network was
+ * able to deliver those packets completely in the sampling
+ * interval between now and the next ACK.
+ *
+ * Note that we use packets_out instead of tcp_packets_in_flight(tp)
+ * because the latter is a guess based on RTO and loss-marking
+ * heuristics. We don't want spurious RTOs or loss markings to cause
+ * a spuriously small time interval, causing a spuriously high
+ * bandwidth estimate.
+ */
+ if (!tp->packets_out) {
+ tp->first_tx_mstamp = skb->skb_mstamp;
+ tp->delivered_mstamp = skb->skb_mstamp;
+ }
+
+ TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
+ TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
+ TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
+ TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
+}
+
+/* When an skb is sacked or acked, we fill in the rate sample with the (prior)
+ * delivery information when the skb was last transmitted.
+ *
+ * If an ACK (s)acks multiple skbs (e.g., stretched-acks), this function is
+ * called multiple times. We favor the information from the most recently
+ * sent skb, i.e., the skb with the highest prior_delivered count.
+ */
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+
+ if (!scb->tx.delivered_mstamp.v64)
+ return;
+
+ if (!rs->prior_delivered ||
+ after(scb->tx.delivered, rs->prior_delivered)) {
+ rs->prior_delivered = scb->tx.delivered;
+ rs->prior_mstamp = scb->tx.delivered_mstamp;
+ rs->is_app_limited = scb->tx.is_app_limited;
+ rs->is_retrans = scb->sacked & TCPCB_RETRANS;
+
+ /* Find the duration of the "send phase" of this window: */
+ rs->interval_us = skb_mstamp_us_delta(
+ &skb->skb_mstamp,
+ &scb->tx.first_tx_mstamp);
+
+ /* Record send time of most recently ACKed packet: */
+ tp->first_tx_mstamp = skb->skb_mstamp;
+ }
+	/* Mark the skb's delivery info as consumed once it is SACKed, so
+	 * it is not used again when the skb is later cumulatively ACKed.
+	 * For ACKed packets we don't need to reset, since the skb will be
+	 * freed soon.
+	 */
+ if (scb->sacked & TCPCB_SACKED_ACKED)
+ scb->tx.delivered_mstamp.v64 = 0;
+}
+
+/* Update the connection delivery information and generate a rate sample. */
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 snd_us, ack_us;
+
+ /* Clear app limited if bubble is acked and gone. */
+ if (tp->app_limited && after(tp->delivered, tp->app_limited))
+ tp->app_limited = 0;
+
+ /* TODO: there are multiple places throughout tcp_ack() to get
+ * current time. Refactor the code using a new "tcp_acktag_state"
+ * to carry current time, flags, stats like "tcp_sacktag_state".
+ */
+ if (delivered)
+ tp->delivered_mstamp = *now;
+
+ rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
+ rs->losses = lost; /* freshly marked lost */
+ /* Return an invalid sample if no timing information is available. */
+ if (!rs->prior_mstamp.v64) {
+ rs->delivered = -1;
+ rs->interval_us = -1;
+ return;
+ }
+ rs->delivered = tp->delivered - rs->prior_delivered;
+
+ /* Model sending data and receiving ACKs as separate pipeline phases
+ * for a window. Usually the ACK phase is longer, but with ACK
+ * compression the send phase can be longer. To be safe we use the
+ * longer phase.
+ */
+ snd_us = rs->interval_us; /* send phase */
+ ack_us = skb_mstamp_us_delta(now, &rs->prior_mstamp); /* ack phase */
+ rs->interval_us = max(snd_us, ack_us);
+
+ /* Normally we expect interval_us >= min-rtt.
+	 * Note that the rate may still be over-estimated when a spuriously
+	 * retransmitted skb was first (s)acked, because "interval_us"
+	 * is under-estimated (by up to an RTT). However, continuously
+	 * measuring the delivery rate during loss recovery is crucial
+	 * for connections that suffer heavy or prolonged losses.
+ */
+ if (unlikely(rs->interval_us < tcp_min_rtt(tp))) {
+ if (!rs->is_retrans)
+ pr_debug("tcp rate: %ld %d %u %u %u\n",
+ rs->interval_us, rs->delivered,
+ inet_csk(sk)->icsk_ca_state,
+ tp->rx_opt.sack_ok, tcp_min_rtt(tp));
+ rs->interval_us = -1;
+ return;
+ }
+
+ /* Record the last non-app-limited or the highest app-limited bw */
+ if (!rs->is_app_limited ||
+ ((u64)rs->delivered * tp->rate_interval_us >=
+ (u64)tp->rate_delivered * rs->interval_us)) {
+ tp->rate_delivered = rs->delivered;
+ tp->rate_interval_us = rs->interval_us;
+ tp->rate_app_limited = rs->is_app_limited;
+ }
+}
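Reading a finished sample, with assumed numbers:

/* Assumed sample: rs->delivered = 10 pkts, send phase 8000 us,
 * ack phase 12000 us  =>  rs->interval_us = max(8000, 12000) = 12000.
 * Delivery rate ~= 10 pkts / 12 ms ~= 833 pkts/s; with 1448-byte
 * segments that is roughly 1.2 MB/s (~9.7 Mbit/s).
 */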
+
+/* If a gap is detected between sends, mark the socket application-limited. */
+void tcp_rate_check_app_limited(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (/* We have less than one packet to send. */
+ tp->write_seq - tp->snd_nxt < tp->mss_cache &&
+ /* Nothing in sending host's qdisc queues or NIC tx queue. */
+ sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
+ /* We are not limited by CWND. */
+ tcp_packets_in_flight(tp) < tp->snd_cwnd &&
+ /* All lost packets have been retransmitted. */
+ tp->lost_out <= tp->retrans_out)
+ tp->app_limited =
+ (tp->delivered + tcp_packets_in_flight(tp)) ? : 1;
+}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d84930b2dd95..f712b411f6ed 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -384,6 +384,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
*/
inet_rtx_syn_ack(sk, req);
req->num_timeout++;
+ icsk->icsk_retransmits++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 028eb046ea40..9c5fc973267f 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -76,7 +76,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
if (!tcp_is_cwnd_limited(sk))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh)
+ if (tcp_in_slow_start(tp))
tcp_slow_start(tp, acked);
else if (!yeah->doing_reno_now) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 058c31286ce1..7d96dc2d3d08 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1021,12 +1021,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
flow_flags,
faddr, saddr, dport, inet->inet_sport);
- if (!saddr && ipc.oif) {
- err = l3mdev_get_saddr(net, ipc.oif, fl4);
- if (err < 0)
- goto out;
- }
-
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 8a9f6e535caa..9a89c10a55f0 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -20,7 +20,7 @@
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *req,
- struct nlattr *bc)
+ struct nlattr *bc, bool net_admin)
{
if (!inet_diag_bc_sk(bc, sk))
return 0;
@@ -28,7 +28,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
return inet_sk_diag_fill(sk, NULL, skb, req,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh, net_admin);
}
static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
@@ -76,7 +76,8 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
err = inet_sk_diag_fill(sk, NULL, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh);
+ nlh->nlmsg_seq, 0, nlh,
+ netlink_net_capable(in_skb, CAP_NET_ADMIN));
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(rep);
@@ -97,6 +98,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
struct netlink_callback *cb,
const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
+ bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
struct net *net = sock_net(skb->sk);
int num, s_num, slot, s_slot;
@@ -132,7 +134,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb,
r->id.idiag_dport)
goto next;
- if (sk_diag_dump(sk, skb, cb, r, bc) < 0) {
+ if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
spin_unlock_bh(&hslot->lock);
goto done;
}
@@ -186,8 +188,8 @@ static int __udp_diag_destroy(struct sk_buff *in_skb,
if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
sk = __udp4_lib_lookup(net,
- req->id.idiag_dst[0], req->id.idiag_dport,
- req->id.idiag_src[0], req->id.idiag_sport,
+ req->id.idiag_dst[3], req->id.idiag_dport,
+ req->id.idiag_src[3], req->id.idiag_sport,
req->id.idiag_if, tbl, NULL);
else
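For context on the [0] -> [3] change above: a v4-mapped IPv6 address keeps the IPv4 address in the last of its four 32-bit words, so word 3 is the one to pass to the IPv4 lookup:

/* Layout of a v4-mapped address ::ffff:a.b.c.d as four __be32 words:
 *   [0] = 0x00000000
 *   [1] = 0x00000000
 *   [2] = 0x0000ffff
 *   [3] = the IPv4 address a.b.c.d
 * idiag_dst/idiag_src use the same four-word layout, hence index 3.
 */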
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 81f253b6ff36..f9333c963607 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -21,7 +21,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
__be16 new_protocol, bool is_ipv6)
{
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
- bool remcsum, need_csum, offload_csum, ufo;
+ bool remcsum, need_csum, offload_csum, ufo, gso_partial;
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct udphdr *uh = udp_hdr(skb);
u16 mac_offset = skb->mac_header;
@@ -88,6 +88,8 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
goto out;
}
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
outer_hlen = skb_tnl_header_len(skb);
udp_offset = outer_hlen - tnl_hlen;
skb = segs;
@@ -117,7 +119,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
* will be using a length value equal to only one MSS sized
* segment instead of the entire frame.
*/
- if (skb_is_gso(skb)) {
+ if (gso_partial) {
uh->len = htons(skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)uh);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b644a23c3db0..6a7ff6957535 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -29,7 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
- fl4->flowi4_oif = oif;
+ fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
if (saddr)
fl4->saddr = saddr->a4;
@@ -112,7 +112,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
int oif = 0;
if (skb_dst(skb))
- oif = l3mdev_fib_oif(skb_dst(skb)->dev);
+ oif = skb_dst(skb)->dev->ifindex;
memset(fl4, 0, sizeof(struct flowi4));
fl4->flowi4_mark = skb->mark;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f418d2eaeddd..2f1f5d439788 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -778,7 +778,14 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
}
if (p == &net->ipv6.devconf_all->forwarding) {
+ int old_dflt = net->ipv6.devconf_dflt->forwarding;
+
net->ipv6.devconf_dflt->forwarding = newf;
+ if ((!newf) ^ (!old_dflt))
+ inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
+ NETCONFA_IFINDEX_DEFAULT,
+ net->ipv6.devconf_dflt);
+
addrconf_forward_change(net, newf);
if ((!newf) ^ (!old))
inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
@@ -1941,6 +1948,7 @@ errdad:
spin_unlock_bh(&ifp->lock);
addrconf_mod_dad_work(ifp, 0);
+ in6_ifa_put(ifp);
}
/* Join to solicited addr multicast group.
@@ -3850,6 +3858,7 @@ static void addrconf_dad_work(struct work_struct *w)
addrconf_dad_begin(ifp);
goto out;
} else if (action == DAD_ABORT) {
+ in6_ifa_hold(ifp);
addrconf_dad_stop(ifp, 1);
if (disable_ipv6)
addrconf_ifdown(idev->dev, 0);
@@ -6025,7 +6034,7 @@ static const struct ctl_table addrconf_sysctl[] = {
static int __addrconf_sysctl_register(struct net *net, char *dev_name,
struct inet6_dev *idev, struct ipv6_devconf *p)
{
- int i;
+ int i, ifindex;
struct ctl_table *table;
char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
@@ -6045,6 +6054,13 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
if (!p->sysctl_header)
goto free;
+ if (!strcmp(dev_name, "all"))
+ ifindex = NETCONFA_IFINDEX_ALL;
+ else if (!strcmp(dev_name, "default"))
+ ifindex = NETCONFA_IFINDEX_DEFAULT;
+ else
+ ifindex = idev->dev->ifindex;
+ inet6_netconf_notify_devconf(net, NETCONFA_ALL, ifindex, p);
return 0;
free:
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 5857c1fc8b67..eea23b57c6a5 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -38,6 +38,9 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
.flags = FIB_LOOKUP_NOREF,
};
+ /* update flow if oif or iif point to device enslaved to l3mdev */
+ l3mdev_update_flow(net, flowi6_to_flowi(fl6));
+
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 771be1fa4176..ef5485204522 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -743,6 +743,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
+ u16 nlflags = NLM_F_EXCL;
int err;
ins = &fn->leaf;
@@ -759,6 +760,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
if (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_EXCL))
return -EEXIST;
+
+ nlflags &= ~NLM_F_EXCL;
if (replace) {
if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
found++;
@@ -856,6 +859,7 @@ next_iter:
pr_warn("NLM_F_CREATE should be set when creating new route\n");
add:
+ nlflags |= NLM_F_CREATE;
err = fib6_commit_metrics(&rt->dst, mxc);
if (err)
return err;
@@ -864,7 +868,7 @@ add:
*ins = rt;
rt->rt6i_node = fn;
atomic_inc(&rt->rt6i_ref);
- inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
if (!(fn->fn_flags & RTN_RTINFO)) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 397e1ed3daa3..4ce74f86291b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1239,7 +1239,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
if (data[IFLA_GRE_FLOWINFO])
- parms->flowinfo = nla_get_u32(data[IFLA_GRE_FLOWINFO]);
+ parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
if (data[IFLA_GRE_FLAGS])
parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 22e90e56b5a9..e7bfd55899a3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -69,6 +69,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
int offset = 0;
bool encap, udpfrag;
int nhoff;
+ bool gso_partial;
skb_reset_network_header(skb);
nhoff = skb_network_header(skb) - skb_mac_header(skb);
@@ -101,9 +102,11 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (IS_ERR(segs))
goto out;
+ gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
+
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
- if (skb_is_gso(skb))
+ if (gso_partial)
payload_len = skb_shinfo(skb)->gso_size +
SKB_GSO_CB(skb)->data_offset +
skb->head - (unsigned char *)(ipv6h + 1);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 993fd9666f1b..6001e781164e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -236,6 +236,14 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
+
+ /* if egress device is enslaved to an L3 master device pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_out((struct sock *)sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
@@ -918,13 +926,6 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
int err;
int flags = 0;
- if (ipv6_addr_any(&fl6->saddr) && fl6->flowi6_oif &&
- (!*dst || !(*dst)->error)) {
- err = l3mdev_get_saddr6(net, sk, fl6);
- if (err)
- goto out_err;
- }
-
/* The correct way to handle this would be to do
* ip6_route_get_saddr, and then ip6_route_output; however,
* the route-specific preferred source forces the
@@ -1016,7 +1017,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
out_err_release:
dst_release(*dst);
*dst = NULL;
-out_err:
+
if (err == -ENETUNREACH)
IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
return err;
@@ -1062,8 +1063,6 @@ struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
return ERR_PTR(err);
if (final_dst)
fl6->daddr = *final_dst;
- if (!fl6->flowi6_oif)
- fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2050217df565..6a66adba0c22 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/dst_metadata.h>
MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
@@ -90,6 +91,7 @@ struct ip6_tnl_net {
struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
struct ip6_tnl __rcu *tnls_wc[1];
struct ip6_tnl __rcu **tnls[2];
+ struct ip6_tnl __rcu *collect_md_tun;
};
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
@@ -166,6 +168,10 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
return t;
}
+ t = rcu_dereference(ip6n->collect_md_tun);
+ if (t)
+ return t;
+
t = rcu_dereference(ip6n->tnls_wc[0]);
if (t && (t->dev->flags & IFF_UP))
return t;
@@ -209,6 +215,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
+ if (t->parms.collect_md)
+ rcu_assign_pointer(ip6n->collect_md_tun, t);
rcu_assign_pointer(t->next , rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
}
@@ -224,6 +232,9 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
struct ip6_tnl __rcu **tp;
struct ip6_tnl *iter;
+ if (t->parms.collect_md)
+ rcu_assign_pointer(ip6n->collect_md_tun, NULL);
+
for (tp = ip6_tnl_bucket(ip6n, &t->parms);
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
@@ -829,6 +840,9 @@ static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
+ if (tun_dst)
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -865,6 +879,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
{
struct ip6_tnl *t;
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct metadata_dst *tun_dst = NULL;
int ret = -1;
rcu_read_lock();
@@ -881,7 +896,12 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
goto drop;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
goto drop;
- ret = __ip6_tnl_rcv(t, skb, tpi, NULL, dscp_ecn_decapsulate,
+ if (t->parms.collect_md) {
+ tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
+ if (!tun_dst)
+ return 0;
+ }
+ ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
log_ecn_error);
}
@@ -1012,8 +1032,16 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
int mtu;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
+ u8 hop_limit;
int err = -1;
+ if (t->parms.collect_md) {
+ hop_limit = skb_tunnel_info(skb)->key.ttl;
+ goto route_lookup;
+ } else {
+ hop_limit = t->parms.hop_limit;
+ }
+
/* NBMA tunnel */
if (ipv6_addr_any(&t->parms.raddr)) {
struct in6_addr *addr6;
@@ -1043,6 +1071,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
goto tx_err_link_failure;
if (!dst) {
+route_lookup:
dst = ip6_route_output(net, NULL, fl6);
if (dst->error)
@@ -1053,6 +1082,10 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
dst = NULL;
goto tx_err_link_failure;
}
+ if (t->parms.collect_md &&
+ ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
+ &fl6->daddr, 0, &fl6->saddr))
+ goto tx_err_link_failure;
ndst = dst;
}
@@ -1071,7 +1104,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
}
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- if (skb_dst(skb))
+ if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
@@ -1111,8 +1144,13 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
skb = new_skb;
}
- if (!fl6->flowi6_mark && ndst)
- dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
+ if (t->parms.collect_md) {
+ if (t->encap.type != TUNNEL_ENCAP_NONE)
+ goto tx_err_dst_release;
+ } else {
+ if (!fl6->flowi6_mark && ndst)
+ dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
+ }
skb_dst_set(skb, dst);
if (encap_limit >= 0) {
@@ -1137,7 +1175,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
ipv6h = ipv6_hdr(skb);
ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
- ipv6h->hop_limit = t->parms.hop_limit;
+ ipv6h->hop_limit = hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
ipv6h->daddr = fl6->daddr;
@@ -1170,18 +1208,34 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if (tproto != IPPROTO_IPIP && tproto != 0)
return -1;
- if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
+ dsfield = ipv4_get_dsfield(iph);
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ if (t->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
- dsfield = ipv4_get_dsfield(iph);
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
+ return -1;
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPIP;
+ fl6.daddr = key->u.ipv6.dst;
+ fl6.flowlabel = key->label;
+ } else {
+ if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ encap_limit = t->parms.encap_limit;
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
- & IPV6_TCLASS_MASK;
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
- fl6.flowi6_mark = skb->mark;
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPIP;
+
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
+ & IPV6_TCLASS_MASK;
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
+ }
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
@@ -1219,28 +1273,47 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
ip6_tnl_addr_conflict(t, ipv6h))
return -1;
- offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
- if (offset > 0) {
- struct ipv6_tlv_tnl_enc_lim *tel;
- tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
- if (tel->encap_limit == 0) {
- icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_HDR_FIELD, offset + 2);
+ dsfield = ipv6_get_dsfield(ipv6h);
+
+ if (t->parms.collect_md) {
+ struct ip_tunnel_info *tun_info;
+ const struct ip_tunnel_key *key;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+ ip_tunnel_info_af(tun_info) != AF_INET6))
return -1;
+ key = &tun_info->key;
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPV6;
+ fl6.daddr = key->u.ipv6.dst;
+ fl6.flowlabel = key->label;
+ } else {
+ offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ if (offset > 0) {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+
+ tel = (void *)&skb_network_header(skb)[offset];
+ if (tel->encap_limit == 0) {
+ icmpv6_send(skb, ICMPV6_PARAMPROB,
+ ICMPV6_HDR_FIELD, offset + 2);
+ return -1;
+ }
+ encap_limit = tel->encap_limit - 1;
+ } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
+ encap_limit = t->parms.encap_limit;
}
- encap_limit = tel->encap_limit - 1;
- } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- encap_limit = t->parms.encap_limit;
- memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_IPV6;
- dsfield = ipv6_get_dsfield(ipv6h);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
- fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
- fl6.flowlabel |= ip6_flowlabel(ipv6h);
- if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
- fl6.flowi6_mark = skb->mark;
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
+ fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
+ fl6.flowlabel |= ip6_flowlabel(ipv6h);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
+ }
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
@@ -1739,6 +1812,10 @@ static int ip6_tnl_dev_init(struct net_device *dev)
if (err)
return err;
ip6_tnl_link_config(t);
+ if (t->parms.collect_md) {
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ netif_keep_dst(dev);
+ }
return 0;
}
@@ -1809,6 +1886,9 @@ static void ip6_tnl_netlink_parms(struct nlattr *data[],
if (data[IFLA_IPTUN_PROTO])
parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
+
+ if (data[IFLA_IPTUN_COLLECT_METADATA])
+ parms->collect_md = true;
}
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
@@ -1848,6 +1928,7 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct net *net = dev_net(dev);
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct ip6_tnl *nt, *t;
struct ip_tunnel_encap ipencap;
@@ -1862,9 +1943,14 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
ip6_tnl_netlink_parms(data, &nt->parms);
- t = ip6_tnl_locate(net, &nt->parms, 0);
- if (!IS_ERR(t))
- return -EEXIST;
+ if (nt->parms.collect_md) {
+ if (rtnl_dereference(ip6n->collect_md_tun))
+ return -EEXIST;
+ } else {
+ t = ip6_tnl_locate(net, &nt->parms, 0);
+ if (!IS_ERR(t))
+ return -EEXIST;
+ }
return ip6_tnl_create2(dev);
}
@@ -1888,6 +1974,8 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
return err;
}
ip6_tnl_netlink_parms(data, &p);
+ if (p.collect_md)
+ return -EINVAL;
t = ip6_tnl_locate(net, &p, 0);
if (!IS_ERR(t)) {
@@ -1935,6 +2023,8 @@ static size_t ip6_tnl_get_size(const struct net_device *dev)
nla_total_size(2) +
/* IFLA_IPTUN_ENCAP_DPORT */
nla_total_size(2) +
+ /* IFLA_IPTUN_COLLECT_METADATA */
+ nla_total_size(0) +
0;
}
@@ -1953,16 +2043,15 @@ static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
- tunnel->encap.type) ||
- nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
- tunnel->encap.sport) ||
- nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
- tunnel->encap.dport) ||
- nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
- tunnel->encap.flags))
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
+ nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
goto nla_put_failure;
+ if (parm->collect_md)
+ if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -1990,6 +2079,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
[IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
};
static struct rtnl_link_ops ip6_link_ops __read_mostly = {
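
A note on the new IFLA_IPTUN_COLLECT_METADATA attribute above: it switches an ip6tnl device into metadata-based (collect_md) mode, where per-packet tunnel addresses come from skb_tunnel_info() instead of t->parms. Below is a minimal, hedged userspace sketch of creating such a device over rtnetlink; the name "tnl0" is arbitrary, error and ACK handling are elided, and iproute2's "ip link add tnl0 type ip6tnl external" issues an equivalent request (root required).

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_tunnel.h>

struct nlreq {
	struct nlmsghdr nh;
	struct ifinfomsg ifi;
	char buf[256];
};

/* append one attribute; len == 0 yields an NLA_FLAG such as
 * IFLA_IPTUN_COLLECT_METADATA, or an empty nest to be patched later */
static struct rtattr *add_attr(struct nlmsghdr *nh, unsigned short type,
			       const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	if (len)
		memcpy(RTA_DATA(rta), data, len);
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	return rta;
}

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct rtattr *linkinfo, *infodata;
	struct nlreq req;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
	req.ifi.ifi_family = AF_UNSPEC;

	add_attr(&req.nh, IFLA_IFNAME, "tnl0", 5);
	linkinfo = add_attr(&req.nh, IFLA_LINKINFO, NULL, 0);
	add_attr(&req.nh, IFLA_INFO_KIND, "ip6tnl", 7);
	infodata = add_attr(&req.nh, IFLA_INFO_DATA, NULL, 0);
	add_attr(&req.nh, IFLA_IPTUN_COLLECT_METADATA, NULL, 0);
	/* close both nests by patching their lengths to the message end */
	infodata->rta_len = (unsigned short)((char *)&req.nh + req.nh.nlmsg_len -
					     (char *)infodata);
	linkinfo->rta_len = (unsigned short)((char *)&req.nh + req.nh.nlmsg_len -
					     (char *)linkinfo);

	sendto(fd, &req, req.nh.nlmsg_len, 0, (struct sockaddr *)&sa, sizeof(sa));
	close(fd);
	return 0;
}
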
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index cc7e05898307..8a02ca8a11af 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -321,11 +321,9 @@ static int vti6_rcv(struct sk_buff *skb)
goto discard;
}
- XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-
rcu_read_unlock();
- return xfrm6_rcv(skb);
+ return xfrm6_rcv_tnl(skb, t);
}
rcu_read_unlock();
return -EINVAL;
@@ -340,6 +338,7 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
struct net_device *dev;
struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
+ struct xfrm_mode *inner_mode;
struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
u32 orig_mark = skb->mark;
int ret;
@@ -357,7 +356,19 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
}
x = xfrm_input_state(skb);
- family = x->inner_mode->afinfo->family;
+
+ inner_mode = x->inner_mode;
+
+ if (x->sel.family == AF_UNSPEC) {
+ inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
+ if (inner_mode == NULL) {
+ XFRM_INC_STATS(dev_net(skb->dev),
+ LINUX_MIB_XFRMINSTATEMODEERROR);
+ return -EINVAL;
+ }
+ }
+
+ family = inner_mode->afinfo->family;
skb->mark = be32_to_cpu(t->parms.i_key);
ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6122f9c5cc49..fccb5dd91902 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2239,6 +2239,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
struct rta_mfc_stats mfcs;
struct nlattr *mp_attr;
struct rtnexthop *nhp;
+ unsigned long lastuse;
int ct;
/* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2269,12 +2270,14 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
nla_nest_end(skb, mp_attr);
+ lastuse = READ_ONCE(c->mfc_un.res.lastuse);
+ lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
+
mfcs.mfcs_packets = c->mfc_un.res.pkt;
mfcs.mfcs_bytes = c->mfc_un.res.bytes;
mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
- nla_put_u64_64bit(skb, RTA_EXPIRES,
- jiffies_to_clock_t(c->mfc_un.res.lastuse),
+ nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
RTA_PAD))
return -EMSGSIZE;
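
The lastuse clamp above guards against a stale or wrapped counter: time_after_eq() performs the comparison in signed arithmetic so that wraparound of the unsigned jiffies value is handled correctly. A self-contained sketch of that idiom, with the kernel macro reproduced for illustration:

#include <stdio.h>
#include <limits.h>

/* same trick as include/linux/jiffies.h: subtract, then test the sign,
 * so the result stays correct across unsigned wraparound */
#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long lastuse = ULONG_MAX - 5;	/* shortly before wrap */
	unsigned long jiffies = 10;		/* shortly after wrap */

	if (time_after_eq(jiffies, lastuse))
		printf("idle for %lu ticks\n", jiffies - lastuse);	/* 16 */
	else
		printf("lastuse in the future, report 0\n");
	return 0;
}
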
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index fe65cdc28a45..d8e671457d10 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -67,7 +67,6 @@
#include <net/flow.h>
#include <net/ip6_checksum.h>
#include <net/inet_common.h>
-#include <net/l3mdev.h>
#include <linux/proc_fs.h>
#include <linux/netfilter.h>
@@ -457,11 +456,9 @@ static void ndisc_send_skb(struct sk_buff *skb,
if (!dst) {
struct flowi6 fl6;
- int oif = l3mdev_fib_oif(skb->dev);
+ int oif = skb->dev->ifindex;
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
- if (oif != skb->dev->ifindex)
- fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
dst = icmp6_dst_alloc(skb->dev, &fl6);
if (IS_ERR(dst)) {
kfree_skb(skb);
@@ -1538,7 +1535,6 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
int rd_len;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL,
ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
- int oif = l3mdev_fib_oif(dev);
bool ret;
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
@@ -1555,10 +1551,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
}
icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT,
- &saddr_buf, &ipv6_hdr(skb)->saddr, oif);
-
- if (oif != skb->dev->ifindex)
- fl6.flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC;
+ &saddr_buf, &ipv6_hdr(skb)->saddr, dev->ifindex);
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 01eb0f658366..f2727475895e 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
struct in6_addr saddr, daddr;
u_int8_t hop_limit;
u32 mark, flowlabel;
+ int err;
nft_set_pktinfo_ipv6(&pkt, skb, state);
@@ -44,13 +45,16 @@ static unsigned int nf_route_table_hook(void *priv,
flowlabel = *((u32 *)ipv6_hdr(skb));
ret = nft_do_chain(&pkt, priv);
- if (ret != NF_DROP && ret != NF_QUEUE &&
+ if (ret != NF_DROP && ret != NF_STOLEN &&
(memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
skb->mark != mark ||
ipv6_hdr(skb)->hop_limit != hop_limit ||
- flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
- return ip6_route_me_harder(state->net, skb) == 0 ? ret : NF_DROP;
+ flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
+ err = ip6_route_me_harder(state->net, skb);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
return ret;
}
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index 533cd5719c59..92bda9908bb9 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -47,6 +47,7 @@ static const struct nft_expr_ops nft_reject_ipv6_ops = {
.eval = nft_reject_ipv6_eval,
.init = nft_reject_init,
.dump = nft_reject_dump,
+ .validate = nft_reject_validate,
};
static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 462f2a76b5c2..7cca8ac66fe9 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -148,6 +148,13 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
ipv6_hdr(skb)->payload_len = htons(len);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+ /* if the egress device is enslaved to an L3 master device, pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_out(sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, sk, skb, NULL, skb_dst(skb)->dev,
dst_output);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0900352c924c..0e983b694ee8 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -126,8 +126,10 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
rt = (struct rt6_info *) dst;
np = inet6_sk(sk);
- if (!np)
- return -EBADF;
+ if (!np) {
+ err = -EBADF;
+ goto dst_err_out;
+ }
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
@@ -163,6 +165,9 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
release_sock(sk);
+dst_err_out:
+ dst_release(dst);
+
if (err)
return err;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 590dd1f7746f..54404f08efcc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -653,6 +653,13 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
if (err)
goto error_fault;
+ /* if the egress device is enslaved to an L3 master device, pass the
+ * skb to its handler for processing
+ */
+ skb = l3mdev_ip6_out(sk, skb);
+ if (unlikely(!skb))
+ return 0;
+
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 09d43ff11a8d..5a5aeb92b4ec 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1147,15 +1147,16 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}
-static struct dst_entry *ip6_route_input_lookup(struct net *net,
- struct net_device *dev,
- struct flowi6 *fl6, int flags)
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags)
{
if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
flags |= RT6_LOOKUP_F_IFACE;
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
+EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
void ip6_route_input(struct sk_buff *skb)
{
@@ -1164,7 +1165,7 @@ void ip6_route_input(struct sk_buff *skb)
int flags = RT6_LOOKUP_F_HAS_SADDR;
struct ip_tunnel_info *tun_info;
struct flowi6 fl6 = {
- .flowi6_iif = l3mdev_fib_oif(skb->dev),
+ .flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowlabel = ip6_flowinfo(iph),
@@ -1188,12 +1189,15 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags)
{
- struct dst_entry *dst;
bool any_src;
- dst = l3mdev_get_rt6_dst(net, fl6);
- if (dst)
- return dst;
+ if (rt6_need_strict(&fl6->daddr)) {
+ struct dst_entry *dst;
+
+ dst = l3mdev_link_scope_lookup(net, fl6);
+ if (dst)
+ return dst;
+ }
fl6->flowi6_iif = LOOPBACK_IFINDEX;
@@ -1988,9 +1992,18 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
if (!(gwa_type & IPV6_ADDR_UNICAST))
goto out;
- if (cfg->fc_table)
+ if (cfg->fc_table) {
grt = ip6_nh_lookup_table(net, cfg, gw_addr);
+ if (grt) {
+ if (grt->rt6i_flags & RTF_GATEWAY ||
+ (dev && dev != grt->dst.dev)) {
+ ip6_rt_put(grt);
+ grt = NULL;
+ }
+ }
+ }
+
if (!grt)
grt = rt6_lookup(net, gw_addr, NULL,
cfg->fc_ifindex, 1);
@@ -2558,8 +2571,16 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
{
u32 tb_id;
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
- DST_NOCOUNT);
+ struct net_device *dev = net->loopback_dev;
+ struct rt6_info *rt;
+
+ /* use the L3 master device as loopback for host routes if the device
+ * is enslaved and the address is not link-local or multicast
+ */
+ if (!rt6_need_strict(addr))
+ dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;
+
+ rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
if (!rt)
return ERR_PTR(-ENOMEM);
@@ -3338,11 +3359,6 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
} else {
fl6.flowi6_oif = oif;
- if (netif_index_is_l3_master(net, oif)) {
- fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
- FLOWI_FLAG_SKIP_NH_OIF;
- }
-
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 04529a3d42cb..54cf7197c7ab 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -818,12 +818,8 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
fl6.flowi6_proto = IPPROTO_TCP;
if (rt6_need_strict(&fl6.daddr) && !oif)
fl6.flowi6_oif = tcp_v6_iif(skb);
- else {
- if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
- oif = skb->skb_iif;
-
- fl6.flowi6_oif = oif;
- }
+ else
+ fl6.flowi6_oif = oif ? : skb->skb_iif;
fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
fl6.fl6_dport = t1->dest;
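
The replacement line "fl6.flowi6_oif = oif ? : skb->skb_iif;" uses GCC's conditional-with-omitted-middle-operand extension, which the kernel relies on throughout: "x ?: y" evaluates x once and yields it when nonzero, otherwise y. A standalone illustration:

#include <stdio.h>

int main(void)
{
	int oif = 0, skb_iif = 7;

	/* equivalent to oif ? oif : skb_iif, but oif is evaluated once */
	printf("%d\n", oif ?: skb_iif);		/* prints 7 */
	oif = 3;
	printf("%d\n", oif ?: skb_iif);		/* prints 3 */
	return 0;
}
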
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 0eaab1fa6be5..b5789562aded 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -21,8 +21,10 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
return xfrm6_extract_header(skb);
}
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+ struct ip6_tnl *t)
{
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
return xfrm_input(skb, nexthdr, spi, 0);
@@ -48,13 +50,18 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
return -1;
}
-int xfrm6_rcv(struct sk_buff *skb)
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
{
return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
- 0);
+ 0, t);
}
-EXPORT_SYMBOL(xfrm6_rcv);
+EXPORT_SYMBOL(xfrm6_rcv_tnl);
+int xfrm6_rcv(struct sk_buff *skb)
+{
+ return xfrm6_rcv_tnl(skb, NULL);
+}
+EXPORT_SYMBOL(xfrm6_rcv);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto)
{
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6cc97003e4a9..e0f71c01d728 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -36,7 +36,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
int err;
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = oif;
+ fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif);
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)
@@ -134,7 +134,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
nexthdr = nh[nhoff];
if (skb_dst(skb))
- oif = l3mdev_fib_oif(skb_dst(skb)->dev);
+ oif = skb_dst(skb)->dev->ifindex;
memset(fl6, 0, sizeof(struct flowi6));
fl6->flowi6_mark = skb->mark;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 5743044cd660..e1c0bbe7996c 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -236,7 +236,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
+ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index db639690c205..391c3cbd2eed 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -832,7 +832,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
struct sock *sk = sock->sk;
struct irda_sock *new, *self = irda_sk(sk);
struct sock *newsk;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
int err;
err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
@@ -897,7 +897,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
err = -EPERM; /* value does not seem to make sense. -arnd */
if (!new->tsap) {
pr_debug("%s(), dup failed!\n", __func__);
- kfree_skb(skb);
goto out;
}
@@ -916,7 +915,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
/* Clean up the original one to keep it in listen state */
irttp_listen(self->tsap);
- kfree_skb(skb);
sk->sk_ack_backlog--;
newsock->state = SS_CONNECTED;
@@ -924,6 +922,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
irda_connect_response(new);
err = 0;
out:
+ kfree_skb(skb);
release_sock(sk);
return err;
}
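
The irda_accept() change works because kfree_skb(NULL) is a no-op: initializing skb to NULL and freeing once at the common exit label replaces the scattered kfree_skb() calls without any double-free risk. The same idiom in plain C, since free(NULL) is likewise defined to do nothing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int do_accept(int fail_early)
{
	char *buf = NULL;	/* freed unconditionally at "out" */
	int err = -1;

	if (fail_early)
		goto out;	/* buf still NULL: free(NULL) is a no-op */

	buf = malloc(64);
	if (!buf)
		goto out;

	strcpy(buf, "connection data");
	printf("%s\n", buf);
	err = 0;
out:
	free(buf);		/* single cleanup point, no double free */
	return err;
}

int main(void)
{
	do_accept(1);
	return do_accept(0);
}
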
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 2632ac748371..b7f869a85ab7 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -23,6 +23,7 @@
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
+#include <linux/syscalls.h>
#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
@@ -1721,7 +1722,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (copy_to_user((void __user *)arg, &info,
sizeof(info))) {
err = -EFAULT;
- sock_release(newsock);
+ sys_close(info.fd);
}
}
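
The kcm change matters because by this point the accepted socket's fd has already been installed in the caller's file table; sock_release() would free the socket while the live fd still referenced it. A hedged kernel-context sketch of the general pattern (the fd APIs are real, the surrounding function is illustrative and not the actual kcm code):

/* illustrative fragment: error handling after the fd is published */
static int clone_and_report(struct socket *newsock,
			    struct kcm_clone __user *uarg)
{
	struct kcm_clone info = {};
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = sock_alloc_file(newsock, 0, NULL);
	if (IS_ERR(file)) {
		put_unused_fd(fd);	/* fd never became visible: just undo */
		return PTR_ERR(file);
	}

	info.fd = fd;
	fd_install(fd, file);		/* from here on, the task owns the fd */

	if (copy_to_user(uarg, &info, sizeof(info))) {
		/* too late for sock_release(): the installed fd still points
		 * at the socket, so close the fd, as the fix above does */
		sys_close(fd);
		return -EFAULT;
	}
	return 0;
}
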
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1e40dacaa137..a2ed3bda4ddc 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1855,6 +1855,9 @@ static __net_exit void l2tp_exit_net(struct net *net)
(void)l2tp_tunnel_delete(tunnel);
}
rcu_read_unlock_bh();
+
+ flush_workqueue(l2tp_wq);
+ rcu_barrier();
}
static struct pernet_operations l2tp_net_ops = {
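
The added flush_workqueue()/rcu_barrier() pair enforces teardown ordering: queued tunnel-destroy work must finish, and any call_rcu() callbacks that work scheduled must run, before the per-net state goes away. A hedged, minimal sketch of the same ordering (kernel context; names are illustrative, not the l2tp code itself):

#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	struct work_struct del_work;
	struct rcu_head rcu;
};

static struct workqueue_struct *my_wq;

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

static void obj_del_work(struct work_struct *work)
{
	struct obj *obj = container_of(work, struct obj, del_work);

	/* unpublish obj from whatever structure made it visible, then
	 * defer the free until current RCU readers are done */
	call_rcu(&obj->rcu, obj_free_rcu);
}

static void my_net_exit(void)
{
	flush_workqueue(my_wq);	/* 1: every queued obj_del_work() finished */
	rcu_barrier();		/* 2: every obj_free_rcu() it scheduled ran */
}
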
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 5871537af387..2599af6378e4 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -139,7 +139,7 @@ struct l2tp_session {
void (*session_close)(struct l2tp_session *session);
void (*ref)(struct l2tp_session *session);
void (*deref)(struct l2tp_session *session);
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
void (*show)(struct seq_file *m, void *priv);
#endif
uint8_t priv[0]; /* private data */
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 57fc5a46ce06..965f7e344cef 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -121,7 +121,7 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
}
-static struct net_device_ops l2tp_eth_netdev_ops = {
+static const struct net_device_ops l2tp_eth_netdev_ops = {
.ndo_init = l2tp_eth_dev_init,
.ndo_uninit = l2tp_eth_dev_uninit,
.ndo_start_xmit = l2tp_eth_dev_xmit,
@@ -195,7 +195,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
}
}
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
@@ -268,7 +268,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
priv->tunnel_sock = tunnel->sock;
session->recv_skb = l2tp_eth_dev_recv;
session->session_close = l2tp_eth_delete;
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
session->show = l2tp_eth_show;
#endif
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 34eff77982cf..41d47bfda15c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -552,7 +552,7 @@ out:
return error;
}
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
static void pppol2tp_show(struct seq_file *m, void *arg)
{
struct l2tp_session *session = arg;
@@ -723,7 +723,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
session->recv_skb = pppol2tp_recv;
session->session_close = pppol2tp_session_close;
-#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
+#if IS_ENABLED(CONFIG_L2TP_DEBUGFS)
session->show = pppol2tp_show;
#endif
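
IS_ENABLED(CONFIG_L2TP_DEBUGFS) replaces the open-coded defined() pair because it is true for both built-in (=y) and modular (=m) configurations. The preprocessor trick behind it can be exercised standalone; this is a condensed copy of the machinery in include/linux/kconfig.h:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_FOO 1		/* built-in: =y */
#define CONFIG_BAR_MODULE 1	/* modular:  =m */

int main(void)
{
	printf("FOO %d BAR %d BAZ %d\n",
	       IS_ENABLED(CONFIG_FOO),	/* 1 */
	       IS_ENABLED(CONFIG_BAR),	/* 1, via CONFIG_BAR_MODULE */
	       IS_ENABLED(CONFIG_BAZ));	/* 0, not set at all */
	return 0;
}
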
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index c4a1c3e84e12..8da86ceca33d 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -100,15 +100,14 @@ u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
/**
- * l3mdev_get_rt6_dst - IPv6 route lookup based on flow. Returns
- * cached route for L3 master device if relevant
- * to flow
+ * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
+ * local and multicast addresses
* @net: network namespace for device index lookup
* @fl6: IPv6 flow struct for lookup
*/
-struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
- struct flowi6 *fl6)
+struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
+ struct flowi6 *fl6)
{
struct dst_entry *dst = NULL;
struct net_device *dev;
@@ -121,70 +120,15 @@ struct dst_entry *l3mdev_get_rt6_dst(struct net *net,
dev = netdev_master_upper_dev_get_rcu(dev);
if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_rt6_dst)
- dst = dev->l3mdev_ops->l3mdev_get_rt6_dst(dev, fl6);
+ dev->l3mdev_ops->l3mdev_link_scope_lookup)
+ dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
rcu_read_unlock();
}
return dst;
}
-EXPORT_SYMBOL_GPL(l3mdev_get_rt6_dst);
-
-/**
- * l3mdev_get_saddr - get source address for a flow based on an interface
- * enslaved to an L3 master device
- * @net: network namespace for device index lookup
- * @ifindex: Interface index
- * @fl4: IPv4 flow struct
- */
-
-int l3mdev_get_saddr(struct net *net, int ifindex, struct flowi4 *fl4)
-{
- struct net_device *dev;
- int rc = 0;
-
- if (ifindex) {
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, ifindex);
- if (dev && netif_is_l3_slave(dev))
- dev = netdev_master_upper_dev_get_rcu(dev);
-
- if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_saddr)
- rc = dev->l3mdev_ops->l3mdev_get_saddr(dev, fl4);
-
- rcu_read_unlock();
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(l3mdev_get_saddr);
-
-int l3mdev_get_saddr6(struct net *net, const struct sock *sk,
- struct flowi6 *fl6)
-{
- struct net_device *dev;
- int rc = 0;
-
- if (fl6->flowi6_oif) {
- rcu_read_lock();
-
- dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
- if (dev && netif_is_l3_slave(dev))
- dev = netdev_master_upper_dev_get_rcu(dev);
-
- if (dev && netif_is_l3_master(dev) &&
- dev->l3mdev_ops->l3mdev_get_saddr6)
- rc = dev->l3mdev_ops->l3mdev_get_saddr6(dev, sk, fl6);
-
- rcu_read_unlock();
- }
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(l3mdev_get_saddr6);
+EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);
/**
* l3mdev_fib_rule_match - Determine if flowi references an
@@ -222,3 +166,38 @@ out:
return rc;
}
+
+void l3mdev_update_flow(struct net *net, struct flowi *fl)
+{
+ struct net_device *dev;
+ int ifindex;
+
+ rcu_read_lock();
+
+ if (fl->flowi_oif) {
+ dev = dev_get_by_index_rcu(net, fl->flowi_oif);
+ if (dev) {
+ ifindex = l3mdev_master_ifindex_rcu(dev);
+ if (ifindex) {
+ fl->flowi_oif = ifindex;
+ fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+ goto out;
+ }
+ }
+ }
+
+ if (fl->flowi_iif) {
+ dev = dev_get_by_index_rcu(net, fl->flowi_iif);
+ if (dev) {
+ ifindex = l3mdev_master_ifindex_rcu(dev);
+ if (ifindex) {
+ fl->flowi_iif = ifindex;
+ fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
+ }
+ }
+ }
+
+out:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(l3mdev_update_flow);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8ae3ed97d95c..db916cf51ffe 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -38,7 +38,7 @@ static u16 llc_ui_sap_link_no_max[256];
static struct sockaddr_llc llc_ui_addrnull;
static const struct proto_ops llc_ui_ops;
-static long llc_ui_wait_for_conn(struct sock *sk, long timeout);
+static bool llc_ui_wait_for_conn(struct sock *sk, long timeout);
static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
@@ -551,7 +551,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
return rc;
}
-static long llc_ui_wait_for_conn(struct sock *sk, long timeout)
+static bool llc_ui_wait_for_conn(struct sock *sk, long timeout)
{
DEFINE_WAIT(wait);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a9aff6079c42..f6749dced021 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -261,10 +261,16 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
.timeout = timeout,
.ssn = start_seq_num,
};
-
int i, ret = -EOPNOTSUPP;
u16 status = WLAN_STATUS_REQUEST_DECLINED;
+ if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
+ ht_dbg(sta->sdata,
+ "STA %pM requests BA session on unsupported tid %d\n",
+ sta->sta.addr, tid);
+ goto end_no_lock;
+ }
+
if (!sta->sta.ht_cap.ht_supported) {
ht_dbg(sta->sdata,
"STA %pM erroneously requests BA session on tid %d w/o QoS\n",
@@ -298,10 +304,13 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
buf_size = IEEE80211_MAX_AMPDU_BUF;
/* make sure the size doesn't exceed the maximum supported by the hw */
- if (buf_size > local->hw.max_rx_aggregation_subframes)
- buf_size = local->hw.max_rx_aggregation_subframes;
+ if (buf_size > sta->sta.max_rx_aggregation_subframes)
+ buf_size = sta->sta.max_rx_aggregation_subframes;
params.buf_size = buf_size;
+ ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n",
+ buf_size, sta->sta.addr);
+
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -406,8 +415,10 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
}
end:
- if (status == WLAN_STATUS_SUCCESS)
+ if (status == WLAN_STATUS_SUCCESS) {
__set_bit(tid, sta->ampdu_mlme.agg_session_valid);
+ __clear_bit(tid, sta->ampdu_mlme.unexpected_agg);
+ }
mutex_unlock(&sta->ampdu_mlme.mtx);
end_no_lock:
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5650c46bf91a..45319cc01121 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -584,6 +584,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
+ if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
+ return -EINVAL;
+
ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
pubsta->addr, tid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 543b1d4fc33d..e29ff5749944 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -39,7 +39,7 @@ static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_MONITOR && flags) {
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- sdata->u.mntr_flags = *flags;
+ sdata->u.mntr.flags = *flags;
}
return wdev;
@@ -73,8 +73,29 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
sdata->u.mgd.use_4addr = params->use_4addr;
}
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *monitor_sdata;
+ u32 mu_mntr_cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER;
+
+ monitor_sdata = rtnl_dereference(local->monitor_sdata);
+ if (monitor_sdata &&
+ wiphy_ext_feature_isset(wiphy, mu_mntr_cap_flag)) {
+ memcpy(monitor_sdata->vif.bss_conf.mu_group.membership,
+ params->vht_mumimo_groups, WLAN_MEMBERSHIP_LEN);
+ memcpy(monitor_sdata->vif.bss_conf.mu_group.position,
+ params->vht_mumimo_groups + WLAN_MEMBERSHIP_LEN,
+ WLAN_USER_POSITION_LEN);
+ monitor_sdata->vif.mu_mimo_owner = true;
+ ieee80211_bss_info_change_notify(monitor_sdata,
+ BSS_CHANGED_MU_GROUPS);
+
+ ether_addr_copy(monitor_sdata->u.mntr.mu_follow_addr,
+ params->macaddr);
+ }
+
+ if (!flags)
+ return 0;
if (ieee80211_sdata_running(sdata)) {
u32 mask = MONITOR_FLAG_COOK_FRAMES |
@@ -89,11 +110,11 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
* cooked_mntrs, monitor and all fif_* counters
* reconfigure hardware
*/
- if ((*flags & mask) != (sdata->u.mntr_flags & mask))
+ if ((*flags & mask) != (sdata->u.mntr.flags & mask))
return -EBUSY;
ieee80211_adjust_monitor_flags(sdata, -1);
- sdata->u.mntr_flags = *flags;
+ sdata->u.mntr.flags = *flags;
ieee80211_adjust_monitor_flags(sdata, 1);
ieee80211_configure_filter(local);
@@ -103,7 +124,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
* and ieee80211_do_open take care of "everything"
* mentioned in the comment above.
*/
- sdata->u.mntr_flags = *flags;
+ sdata->u.mntr.flags = *flags;
}
}
@@ -2940,10 +2961,6 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
}
chanctx = container_of(conf, struct ieee80211_chanctx, conf);
- if (!chanctx) {
- err = -EBUSY;
- goto out;
- }
ch_switch.timestamp = 0;
ch_switch.device_timestamp = 0;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 2906c1004e1a..8ca62b6bb02a 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -71,138 +71,39 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
-struct aqm_info {
- struct ieee80211_local *local;
- size_t size;
- size_t len;
- unsigned char buf[0];
-};
-
-#define AQM_HDR_LEN 200
-#define AQM_HW_ENTRY_LEN 40
-#define AQM_TXQ_ENTRY_LEN 110
-
-static int aqm_open(struct inode *inode, struct file *file)
+static ssize_t aqm_read(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
{
- struct ieee80211_local *local = inode->i_private;
- struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta;
- struct txq_info *txqi;
+ struct ieee80211_local *local = file->private_data;
struct fq *fq = &local->fq;
- struct aqm_info *info = NULL;
+ char buf[200];
int len = 0;
- int i;
-
- if (!local->ops->wake_tx_queue)
- return -EOPNOTSUPP;
-
- len += AQM_HDR_LEN;
- len += 6 * AQM_HW_ENTRY_LEN;
-
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- len += AQM_TXQ_ENTRY_LEN;
- list_for_each_entry_rcu(sta, &local->sta_list, list)
- len += AQM_TXQ_ENTRY_LEN * ARRAY_SIZE(sta->sta.txq);
- rcu_read_unlock();
-
- info = vmalloc(len);
- if (!info)
- return -ENOMEM;
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
- file->private_data = info;
- info->local = local;
- info->size = len;
- len = 0;
-
- len += scnprintf(info->buf + len, info->size - len,
- "* hw\n"
- "access name value\n"
- "R fq_flows_cnt %u\n"
- "R fq_backlog %u\n"
- "R fq_overlimit %u\n"
- "R fq_collisions %u\n"
- "RW fq_limit %u\n"
- "RW fq_quantum %u\n",
- fq->flows_cnt,
- fq->backlog,
- fq->overlimit,
- fq->collisions,
- fq->limit,
- fq->quantum);
-
- len += scnprintf(info->buf + len,
- info->size - len,
- "* vif\n"
- "ifname addr ac backlog-bytes backlog-packets flows overlimit collisions tx-bytes tx-packets\n");
-
- list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- txqi = to_txq_info(sdata->vif.txq);
- len += scnprintf(info->buf + len, info->size - len,
- "%s %pM %u %u %u %u %u %u %u %u\n",
- sdata->name,
- sdata->vif.addr,
- txqi->txq.ac,
- txqi->tin.backlog_bytes,
- txqi->tin.backlog_packets,
- txqi->tin.flows,
- txqi->tin.overlimit,
- txqi->tin.collisions,
- txqi->tin.tx_bytes,
- txqi->tin.tx_packets);
- }
-
- len += scnprintf(info->buf + len,
- info->size - len,
- "* sta\n"
- "ifname addr tid ac backlog-bytes backlog-packets flows overlimit collisions tx-bytes tx-packets\n");
-
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
- sdata = sta->sdata;
- for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
- txqi = to_txq_info(sta->sta.txq[i]);
- len += scnprintf(info->buf + len, info->size - len,
- "%s %pM %d %d %u %u %u %u %u %u %u\n",
- sdata->name,
- sta->sta.addr,
- txqi->txq.tid,
- txqi->txq.ac,
- txqi->tin.backlog_bytes,
- txqi->tin.backlog_packets,
- txqi->tin.flows,
- txqi->tin.overlimit,
- txqi->tin.collisions,
- txqi->tin.tx_bytes,
- txqi->tin.tx_packets);
- }
- }
-
- info->len = len;
+ len = scnprintf(buf, sizeof(buf),
+ "access name value\n"
+ "R fq_flows_cnt %u\n"
+ "R fq_backlog %u\n"
+ "R fq_overlimit %u\n"
+ "R fq_collisions %u\n"
+ "RW fq_limit %u\n"
+ "RW fq_quantum %u\n",
+ fq->flows_cnt,
+ fq->backlog,
+ fq->overlimit,
+ fq->collisions,
+ fq->limit,
+ fq->quantum);
rcu_read_unlock();
spin_unlock_bh(&local->fq.lock);
- return 0;
-}
-
-static int aqm_release(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
- return 0;
-}
-
-static ssize_t aqm_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- struct aqm_info *info = file->private_data;
-
return simple_read_from_buffer(user_buf, count, ppos,
- info->buf, info->len);
+ buf, len);
}
static ssize_t aqm_write(struct file *file,
@@ -210,8 +111,7 @@ static ssize_t aqm_write(struct file *file,
size_t count,
loff_t *ppos)
{
- struct aqm_info *info = file->private_data;
- struct ieee80211_local *local = info->local;
+ struct ieee80211_local *local = file->private_data;
char buf[100];
size_t len;
@@ -237,8 +137,7 @@ static ssize_t aqm_write(struct file *file,
static const struct file_operations aqm_ops = {
.write = aqm_write,
.read = aqm_read,
- .open = aqm_open,
- .release = aqm_release,
+ .open = simple_open,
.llseek = default_llseek,
};
@@ -302,6 +201,7 @@ static const char *hw_flag_names[] = {
FLAG(USES_RSS),
FLAG(TX_AMSDU),
FLAG(TX_FRAG_LIST),
+ FLAG(REPORTS_LOW_ACK),
#undef FLAG
};
@@ -428,7 +328,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(hwflags);
DEBUGFS_ADD(user_power);
DEBUGFS_ADD(power);
- DEBUGFS_ADD_MODE(aqm, 0600);
+
+ if (local->ops->wake_tx_queue)
+ DEBUGFS_ADD_MODE(aqm, 0600);
statsd = debugfs_create_dir("statistics", phyd);
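
The aqm rewrite drops the vmalloc'd snapshot object in favour of formatting into a small buffer at read time, with simple_open() stashing inode->i_private into file->private_data. A hedged, minimal sketch of that debugfs pattern (the APIs are real; the module and names are illustrative):

#include <linux/debugfs.h>
#include <linux/module.h>

static u32 demo_value = 42;

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	u32 *val = file->private_data;	/* simple_open() copied i_private */
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "value %u\n", *val);

	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= demo_read,
	.llseek	= default_llseek,
};

static struct dentry *demo_dir;

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("value", 0400, demo_dir, &demo_value, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
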
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index a5ba739cd2a7..5d35c0f37bb7 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -30,7 +30,7 @@ static ssize_t ieee80211_if_read(
size_t count, loff_t *ppos,
ssize_t (*format)(const struct ieee80211_sub_if_data *, char *, int))
{
- char buf[70];
+ char buf[200];
ssize_t ret = -EINVAL;
read_lock(&dev_base_lock);
@@ -486,6 +486,38 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
}
IEEE80211_IF_FILE_R(num_buffered_multicast);
+static ssize_t ieee80211_if_fmt_aqm(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ int len;
+
+ spin_lock_bh(&local->fq.lock);
+ rcu_read_lock();
+
+ len = scnprintf(buf,
+ buflen,
+ "ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n"
+ "%u %u %u %u %u %u %u %u %u %u\n",
+ txqi->txq.ac,
+ txqi->tin.backlog_bytes,
+ txqi->tin.backlog_packets,
+ txqi->tin.flows,
+ txqi->cstats.drop_count,
+ txqi->cstats.ecn_mark,
+ txqi->tin.overlimit,
+ txqi->tin.collisions,
+ txqi->tin.tx_bytes,
+ txqi->tin.tx_packets);
+
+ rcu_read_unlock();
+ spin_unlock_bh(&local->fq.lock);
+
+ return len;
+}
+IEEE80211_IF_FILE_R(aqm);
+
/* IBSS attributes */
static ssize_t ieee80211_if_fmt_tsf(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -618,6 +650,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
+
+ if (sdata->local->ops->wake_tx_queue)
+ DEBUGFS_ADD(aqm);
}
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index fd334133ff45..a2fcdb47a0e6 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -133,6 +133,55 @@ static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
}
STA_OPS(last_seq_ctrl);
+#define AQM_TXQ_ENTRY_LEN 130
+
+static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct sta_info *sta = file->private_data;
+ struct ieee80211_local *local = sta->local;
+ size_t bufsz = AQM_TXQ_ENTRY_LEN*(IEEE80211_NUM_TIDS+1);
+ char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf;
+ struct txq_info *txqi;
+ ssize_t rv;
+ int i;
+
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&local->fq.lock);
+ rcu_read_lock();
+
+ p += scnprintf(p,
+ bufsz+buf-p,
+ "tid ac backlog-bytes backlog-packets new-flows drops marks overlimit collisions tx-bytes tx-packets\n");
+
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
+ txqi = to_txq_info(sta->sta.txq[i]);
+ p += scnprintf(p, bufsz+buf-p,
+ "%d %d %u %u %u %u %u %u %u %u %u\n",
+ txqi->txq.tid,
+ txqi->txq.ac,
+ txqi->tin.backlog_bytes,
+ txqi->tin.backlog_packets,
+ txqi->tin.flows,
+ txqi->cstats.drop_count,
+ txqi->cstats.ecn_mark,
+ txqi->tin.overlimit,
+ txqi->tin.collisions,
+ txqi->tin.tx_bytes,
+ txqi->tin.tx_packets);
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&local->fq.lock);
+
+ rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ kfree(buf);
+ return rv;
+}
+STA_OPS(aqm);
+
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -478,6 +527,9 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments);
DEBUGFS_ADD_COUNTER(tx_filtered, status_stats.filtered);
+ if (local->ops->wake_tx_queue)
+ DEBUGFS_ADD(aqm);
+
if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
debugfs_create_x32("driver_buffered_tids", 0400,
sta->debugfs_dir,
@@ -492,10 +544,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
{
- struct ieee80211_local *local = sta->local;
- struct ieee80211_sub_if_data *sdata = sta->sdata;
-
- drv_sta_remove_debugfs(local, sdata, &sta->sta, sta->debugfs_dir);
debugfs_remove_recursive(sta->debugfs_dir);
sta->debugfs_dir = NULL;
}
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index c258f1041d33..c701b6438bd9 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -62,7 +62,7 @@ int drv_add_interface(struct ieee80211_local *local,
if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
(sdata->vif.type == NL80211_IFTYPE_MONITOR &&
!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
- !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))))
+ !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))))
return -EINVAL;
trace_drv_add_interface(local, sdata);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 42a41ae405ba..fe35a1c0dc86 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -162,7 +162,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
return;
if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
- sdata->vif.type == NL80211_IFTYPE_MONITOR))
+ (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ !sdata->vif.mu_mimo_owner)))
return;
if (!check_sdata_in_driver(sdata))
@@ -498,21 +499,6 @@ static inline void drv_sta_add_debugfs(struct ieee80211_local *local,
local->ops->sta_add_debugfs(&local->hw, &sdata->vif,
sta, dir);
}
-
-static inline void drv_sta_remove_debugfs(struct ieee80211_local *local,
- struct ieee80211_sub_if_data *sdata,
- struct ieee80211_sta *sta,
- struct dentry *dir)
-{
- might_sleep();
-
- sdata = get_bss_sdata(sdata);
- check_sdata_in_driver(sdata);
-
- if (local->ops->sta_remove_debugfs)
- local->ops->sta_remove_debugfs(&local->hw, &sdata->vif,
- sta, dir);
-}
#endif
static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f56d342c31b8..e496dee5af08 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -3,7 +3,7 @@
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2013-2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -818,12 +818,18 @@ struct txq_info {
struct fq_tin tin;
struct fq_flow def_flow;
struct codel_vars def_cvars;
+ struct codel_stats cstats;
unsigned long flags;
/* keep last! */
struct ieee80211_txq txq;
};
+struct ieee80211_if_mntr {
+ u32 flags;
+ u8 mu_follow_addr[ETH_ALEN] __aligned(2);
+};
+
struct ieee80211_sub_if_data {
struct list_head list;
@@ -922,7 +928,7 @@ struct ieee80211_sub_if_data {
struct ieee80211_if_ibss ibss;
struct ieee80211_if_mesh mesh;
struct ieee80211_if_ocb ocb;
- u32 mntr_flags;
+ struct ieee80211_if_mntr mntr;
} u;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -1112,7 +1118,6 @@ struct ieee80211_local {
struct fq fq;
struct codel_vars *cvars;
struct codel_params cparams;
- struct codel_stats cstats;
const struct ieee80211_ops *ops;
@@ -1208,7 +1213,7 @@ struct ieee80211_local {
spinlock_t tim_lock;
unsigned long num_sta;
struct list_head sta_list;
- struct rhashtable sta_hash;
+ struct rhltable sta_hash;
struct timer_list sta_cleanup;
int sta_generation;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b123a9e325b3..b0abddc714ef 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -43,6 +43,8 @@
* by either the RTNL, the iflist_mtx or RCU.
*/
+static void ieee80211_iface_work(struct work_struct *work);
+
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -188,7 +190,7 @@ static int ieee80211_verify_mac(struct ieee80211_sub_if_data *sdata, u8 *addr,
continue;
if (iter->vif.type == NL80211_IFTYPE_MONITOR &&
- !(iter->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ !(iter->u.mntr.flags & MONITOR_FLAG_ACTIVE))
continue;
m = iter->vif.addr;
@@ -217,7 +219,7 @@ static int ieee80211_change_mac(struct net_device *dev, void *addr)
return -EBUSY;
if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
- !(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ !(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
check_dup = false;
ret = ieee80211_verify_mac(sdata, sa->sa_data, check_dup);
@@ -357,7 +359,7 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
const int offset)
{
struct ieee80211_local *local = sdata->local;
- u32 flags = sdata->u.mntr_flags;
+ u32 flags = sdata->u.mntr.flags;
#define ADJUST(_f, _s) do { \
if (flags & MONITOR_FLAG_##_f) \
@@ -448,6 +450,9 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
+ skb_queue_head_init(&sdata->skb_queue);
+ INIT_WORK(&sdata->work, ieee80211_iface_work);
+
return 0;
}
@@ -589,12 +594,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
}
break;
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
local->cooked_mntrs++;
break;
}
- if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
@@ -926,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
/* no need to tell driver */
break;
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
local->cooked_mntrs--;
break;
}
@@ -1012,7 +1017,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
- if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
break;
/* fall through */
@@ -1444,7 +1449,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_MONITOR:
sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
sdata->dev->netdev_ops = &ieee80211_monitorif_ops;
- sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
+ sdata->u.mntr.flags = MONITOR_FLAG_CONTROL |
MONITOR_FLAG_OTHER_BSS;
break;
case NL80211_IFTYPE_WDS:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d00ea9b13f49..ac053a9df36d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -660,6 +660,9 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
ieee80211_roc_setup(local);
+ local->hw.radiotap_timestamp.units_pos = -1;
+ local->hw.radiotap_timestamp.accuracy = -1;
+
return &local->hw;
err_free:
wiphy_free(wiphy);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fa7d37cf0351..b747c9645e43 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -757,6 +757,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
sta = next_hop_deref_protected(mpath);
if (mpath->flags & MESH_PATH_ACTIVE &&
ether_addr_equal(ta, sta->sta.addr) &&
+ !(mpath->flags & MESH_PATH_FIXED) &&
(!(mpath->flags & MESH_PATH_SN_VALID) ||
SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -1023,7 +1024,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
goto enddiscovery;
spin_lock_bh(&mpath->state_lock);
- if (mpath->flags & MESH_PATH_DELETED) {
+ if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
spin_unlock_bh(&mpath->state_lock);
goto enddiscovery;
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6db2ddfa0695..f0e6175a9821 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -826,7 +826,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mpath->metric = 0;
mpath->hop_count = 0;
mpath->exp_time = 0;
- mpath->flags |= MESH_PATH_FIXED;
+ mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
mesh_path_activate(mpath);
spin_unlock_bh(&mpath->state_lock);
mesh_path_tx_pending(mpath);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 8d426f637f58..7486f2dab4ba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1672,11 +1672,15 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
non_acm_ac++)
if (!(sdata->wmm_acm & BIT(7 - 2 * non_acm_ac)))
break;
- /* The loop will result in using BK even if it requires
- * admission control, such configuration makes no sense
- * and we have to transmit somehow - the AC selection
- * does the same thing.
+ /* Usually the loop will result in using BK even if it
+ * requires admission control, but such a configuration
+ * makes no sense and we have to transmit somehow - the
+ * AC selection does the same thing.
+ * If we started out trying to downgrade from BK, then
+ * the extra condition here might be needed.
*/
+ if (non_acm_ac >= IEEE80211_NUM_ACS)
+ non_acm_ac = IEEE80211_AC_BK;
if (drv_conf_tx(local, sdata, ac,
&sdata->tx_conf[non_acm_ac]))
sdata_err(sdata,
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 00a43a70e1fc..28a3a0957c9e 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -178,8 +178,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
WARN_ON(!list_empty(&local->chanctx_list));
/* stop hardware - this must stop RX */
- if (local->open_count)
- ieee80211_stop_device(local);
+ ieee80211_stop_device(local);
suspend:
local->suspended = true;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9dce3b157908..f7cf342bab52 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -180,6 +180,11 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
len += 12;
}
+ if (local->hw.radiotap_timestamp.units_pos >= 0) {
+ len = ALIGN(len, 8);
+ len += 12;
+ }
+
if (status->chains) {
/* antenna and antenna signal fields */
len += 2 * hweight8(status->chains);
@@ -447,6 +452,31 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += 2;
}
+ if (local->hw.radiotap_timestamp.units_pos >= 0) {
+ u16 accuracy = 0;
+ u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
+
+ rthdr->it_present |=
+ cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);
+
+ /* ensure 8 byte alignment */
+ while ((pos - (u8 *)rthdr) & 7)
+ pos++;
+
+ put_unaligned_le64(status->device_timestamp, pos);
+ pos += sizeof(u64);
+
+ if (local->hw.radiotap_timestamp.accuracy >= 0) {
+ accuracy = local->hw.radiotap_timestamp.accuracy;
+ flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
+ }
+ put_unaligned_le16(accuracy, pos);
+ pos += sizeof(u16);
+
+ *pos++ = local->hw.radiotap_timestamp.units_pos;
+ *pos++ = flags;
+ }
+
for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
*pos++ = status->chain_signal[chain];
*pos++ = chain;
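
The byte-stepping loop in the radiotap hunk above is just an 8-byte ALIGN on the running offset, written so pos can advance through the zero padding. The equivalence is easy to check standalone, with ALIGN copied in the kernel's power-of-two form:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* power-of-two alignment, as in the kernel's ALIGN() */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
	for (uintptr_t off = 0; off < 32; off++) {
		uintptr_t pos = off;

		while (pos & 7)		/* the radiotap-style stepping loop */
			pos++;
		assert(pos == ALIGN(off, 8));
	}
	printf("stepping loop == ALIGN(x, 8)\n");
	return 0;
}
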
@@ -485,6 +515,9 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct net_device *prev_dev = NULL;
int present_fcs_len = 0;
unsigned int rtap_vendor_space = 0;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_sub_if_data *monitor_sdata =
+ rcu_dereference(local->monitor_sdata);
if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
@@ -567,7 +600,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
continue;
- if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
continue;
if (!ieee80211_sdata_running(sdata))
@@ -585,6 +618,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
ieee80211_rx_stats(sdata->dev, skb->len);
}
+ mgmt = (void *)skb->data;
+ if (monitor_sdata &&
+ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + VHT_MUMIMO_GROUPS_DATA_LEN &&
+ ieee80211_is_action(mgmt->frame_control) &&
+ mgmt->u.action.category == WLAN_CATEGORY_VHT &&
+ mgmt->u.action.u.vht_group_notif.action_code == WLAN_VHT_ACTION_GROUPID_MGMT &&
+ is_valid_ether_addr(monitor_sdata->u.mntr.mu_follow_addr) &&
+ ether_addr_equal(mgmt->da, monitor_sdata->u.mntr.mu_follow_addr)) {
+ struct sk_buff *mu_skb = skb_copy(skb, GFP_ATOMIC);
+
+ if (mu_skb) {
+ mu_skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
+ skb_queue_tail(&monitor_sdata->skb_queue, mu_skb);
+ ieee80211_queue_work(&local->hw, &monitor_sdata->work);
+ }
+ }
+
if (prev_dev) {
skb->dev = prev_dev;
netif_receive_skb(skb);
@@ -1072,8 +1122,15 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
- if (!tid_agg_rx)
+ if (!tid_agg_rx) {
+ if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
+ !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
+ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
+ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
+ WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
goto dont_reorder;
+ }
/* qos null data frames are excluded */
if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
@@ -2535,6 +2592,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
tid = le16_to_cpu(bar_data.control) >> 12;
+ if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
+ !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
+ ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
+ WLAN_BACK_RECIPIENT,
+ WLAN_REASON_QSTA_REQUIRE_SETUP);
+
tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
if (!tid_agg_rx)
return RX_DROP_MONITOR;
@@ -3147,7 +3210,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
- !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
+ !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES))
continue;
if (prev_dev) {
@@ -3940,7 +4003,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
__le16 fc;
struct ieee80211_rx_data rx;
struct ieee80211_sub_if_data *prev;
- struct rhash_head *tmp;
+ struct rhlist_head *tmp;
int err = 0;
fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
@@ -3983,13 +4046,10 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
goto out;
} else if (ieee80211_is_data(fc)) {
struct sta_info *sta, *prev_sta;
- const struct bucket_table *tbl;
prev_sta = NULL;
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) {
+ for_each_sta_info(local, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 070b40f15850..23d8ac829279 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -420,7 +420,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw,
{
struct ieee80211_local *local = hw_to_local(hw);
- trace_api_scan_completed(local, info);
+ trace_api_scan_completed(local, info->aborted);
set_bit(SCAN_COMPLETED, &local->scanning);
if (info->aborted)
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 19f14c907d74..011880d633b4 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -67,12 +67,10 @@
static const struct rhashtable_params sta_rht_params = {
.nelem_hint = 3, /* start small */
- .insecure_elasticity = true, /* Disable chain-length checks. */
.automatic_shrinking = true,
.head_offset = offsetof(struct sta_info, hash_node),
.key_offset = offsetof(struct sta_info, addr),
.key_len = ETH_ALEN,
- .hashfn = sta_addr_hash,
.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
};
@@ -80,8 +78,8 @@ static const struct rhashtable_params sta_rht_params = {
static int sta_info_hash_del(struct ieee80211_local *local,
struct sta_info *sta)
{
- return rhashtable_remove_fast(&local->sta_hash, &sta->hash_node,
- sta_rht_params);
+ return rhltable_remove(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void __cleanup_single_sta(struct sta_info *sta)
@@ -157,19 +155,22 @@ static void cleanup_single_sta(struct sta_info *sta)
sta_info_free(local, sta);
}
+struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
+ const u8 *addr)
+{
+ return rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
+}
+
/* protected by RCU */
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct ieee80211_local *local = sdata->local;
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
- const struct bucket_table *tbl;
rcu_read_lock();
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, addr, sta, tmp) {
+ for_each_sta_info(local, addr, sta, tmp) {
if (sta->sdata == sdata) {
rcu_read_unlock();
/* this is safe as the caller must already hold
@@ -190,14 +191,11 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr)
{
struct ieee80211_local *local = sdata->local;
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
- const struct bucket_table *tbl;
rcu_read_lock();
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, addr, sta, tmp) {
+ for_each_sta_info(local, addr, sta, tmp) {
if (sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) {
rcu_read_unlock();
@@ -263,8 +261,8 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
static int sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
{
- return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
- sta_rht_params);
+ return rhltable_insert(&local->sta_hash, &sta->hash_node,
+ sta_rht_params);
}
static void sta_deliver_ps_frames(struct work_struct *wk)
@@ -340,6 +338,9 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
memcpy(sta->addr, addr, ETH_ALEN);
memcpy(sta->sta.addr, addr, ETH_ALEN);
+ sta->sta.max_rx_aggregation_subframes =
+ local->hw.max_rx_aggregation_subframes;
+
sta->local = local;
sta->sdata = sdata;
sta->rx_stats.last_rx = jiffies;
@@ -450,9 +451,9 @@ static int sta_info_insert_check(struct sta_info *sta)
is_multicast_ether_addr(sta->sta.addr)))
return -EINVAL;
- /* Strictly speaking this isn't necessary as we hold the mutex, but
- * the rhashtable code can't really deal with that distinction. We
- * do require the mutex for correctness though.
+ /* The RCU read lock is required by rhashtable due to
+ * asynchronous resize/rehash. We also require the mutex
+ * for correctness.
*/
rcu_read_lock();
lockdep_assert_held(&sdata->local->sta_mtx);
@@ -687,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending)
}
/* No need to do anything if the driver does all */
- if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
+ if (!local->ops->set_tim)
return;
if (sta->dead)
@@ -1040,16 +1041,11 @@ static void sta_info_cleanup(unsigned long data)
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
}
-u32 sta_addr_hash(const void *key, u32 length, u32 seed)
-{
- return jhash(key, ETH_ALEN, seed);
-}
-
int sta_info_init(struct ieee80211_local *local)
{
int err;
- err = rhashtable_init(&local->sta_hash, &sta_rht_params);
+ err = rhltable_init(&local->sta_hash, &sta_rht_params);
if (err)
return err;
@@ -1065,7 +1061,7 @@ int sta_info_init(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
del_timer_sync(&local->sta_cleanup);
- rhashtable_destroy(&local->sta_hash);
+ rhltable_destroy(&local->sta_hash);
}
@@ -1135,17 +1131,14 @@ struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw,
const u8 *localaddr)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
- const struct bucket_table *tbl;
-
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
/*
 * If localaddr is NULL, just return the first matching station
 * in the list (i.e. an effectively random one).
*/
- for_each_sta_info(local, tbl, addr, sta, tmp) {
+ for_each_sta_info(local, addr, sta, tmp) {
if (localaddr &&
!ether_addr_equal(sta->sdata->vif.addr, localaddr))
continue;
@@ -1616,7 +1609,6 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
sta_info_recalc_tim(sta);
} else {
- unsigned long tids = sta->txq_buffered_tids & driver_release_tids;
int tid;
/*
@@ -1648,7 +1640,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
- if (!(tids & BIT(tid)) || txqi->tin.backlog_packets)
+ if (!(driver_release_tids & BIT(tid)) ||
+ txqi->tin.backlog_packets)
continue;
sta_info_recalc_tim(sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 0556be3e3628..ed5fcb984a01 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -230,6 +230,8 @@ struct tid_ampdu_rx {
* @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
* driver requested to close until the work for it runs
* @agg_session_valid: bitmap indicating which TID has a rx BA session open on
+ * @unexpected_agg: bitmap indicating which TID already sent a delBA due to
+ * unexpected aggregation-related frames outside a session
* @work: work struct for starting/stopping aggregation
* @tid_tx: aggregation info for Tx per TID
* @tid_start_tx: sessions where start was requested
@@ -244,6 +246,7 @@ struct sta_ampdu_mlme {
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+ unsigned long unexpected_agg[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
/* tx */
struct work_struct work;
struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
@@ -452,7 +455,7 @@ struct sta_info {
/* General information, mostly static */
struct list_head list, free_list;
struct rcu_head rcu_head;
- struct rhash_head hash_node;
+ struct rhlist_head hash_node;
u8 addr[ETH_ALEN];
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
@@ -635,6 +638,9 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
*/
#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
+struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local,
+ const u8 *addr);
+
/*
* Get a STA info, must be under RCU read lock.
*/
@@ -644,17 +650,9 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-u32 sta_addr_hash(const void *key, u32 length, u32 seed);
-
-#define _sta_bucket_idx(_tbl, _a) \
- rht_bucket_index(_tbl, sta_addr_hash(_a, ETH_ALEN, (_tbl)->hash_rnd))
-
-#define for_each_sta_info(local, tbl, _addr, _sta, _tmp) \
- rht_for_each_entry_rcu(_sta, _tmp, tbl, \
- _sta_bucket_idx(tbl, _addr), \
- hash_node) \
- /* compare address and run code only if it matches */ \
- if (ether_addr_equal(_sta->addr, (_addr)))
+#define for_each_sta_info(local, _addr, _sta, _tmp) \
+ rhl_for_each_entry_rcu(_sta, _tmp, \
+ sta_info_hash_lookup(local, _addr), hash_node)
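/*
 * Editor's note: a minimal usage sketch for the rhltable conversion above,
 * not part of the patch. count_stations_for_addr() is hypothetical; it only
 * assumes the for_each_sta_info() macro just defined and the rhltable API
 * from <linux/rhashtable.h>.
 */
static int count_stations_for_addr(struct ieee80211_local *local,
				   const u8 *addr)
{
	struct rhlist_head *tmp;
	struct sta_info *sta;
	int n = 0;

	rcu_read_lock();
	/* rhltable chains all entries that share a key, so duplicate MAC
	 * addresses across interfaces are walked directly instead of
	 * filtering a whole hash bucket by address */
	for_each_sta_info(local, addr, sta, tmp)
		n++;
	rcu_read_unlock();

	return n;
}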
/*
* Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index a2a68269675d..ddf71c648cab 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -557,6 +557,12 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
static void ieee80211_lost_packet(struct sta_info *sta,
struct ieee80211_tx_info *info)
{
+ /* If the driver relies on its own algorithm for station kickout, skip
+ * the mac80211 packet loss mechanism.
+ */
+ if (ieee80211_hw_check(&sta->local->hw, REPORTS_LOW_ACK))
+ return;
+
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
@@ -709,7 +715,7 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
if (!ieee80211_sdata_running(sdata))
continue;
- if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
+ if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
!send_to_cooked)
continue;
@@ -740,8 +746,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
__le16 fc;
struct ieee80211_supported_band *sband;
+ struct rhlist_head *tmp;
struct sta_info *sta;
- struct rhash_head *tmp;
int retry_count;
int rates_idx;
bool send_to_cooked;
@@ -749,7 +755,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
- const struct bucket_table *tbl;
rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
@@ -758,9 +763,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
sband = local->hw.wiphy->bands[info->band];
fc = hdr->frame_control;
- tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
-
- for_each_sta_info(local, tbl, hdr->addr1, sta, tmp) {
+ for_each_sta_info(local, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index b5d28f14b9cf..afca7d103684 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -333,10 +333,11 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
if (!uc.center_freq1)
return;
- /* proceed to downgrade the chandef until usable or the same */
+ /* proceed to downgrade the chandef until usable or the same as AP BW */
while (uc.width > max_width ||
- !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
- sdata->wdev.iftype))
+ (uc.width > sta->tdls_chandef.width &&
+ !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc,
+ sdata->wdev.iftype)))
ieee80211_chandef_downgrade(&uc);
if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 1d0746dfea57..1ff08be90a98 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -796,6 +796,36 @@ static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
return ret;
}
+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *pubsta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_txq *txq = NULL;
+
+ if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
+ return NULL;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return NULL;
+
+ if (pubsta) {
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+
+ txq = pubsta->txq[tid];
+ } else if (vif) {
+ txq = vif->txq;
+ }
+
+ if (!txq)
+ return NULL;
+
+ return to_txq_info(txq);
+}
+
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
@@ -853,7 +883,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
tx->sta->tx_stats.msdu[tid]++;
- if (!tx->sta->sta.txq[0])
+ if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta,
+ tx->skb))
hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
return TX_CONTINUE;
@@ -1243,36 +1274,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
return TX_CONTINUE;
}
-static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *pubsta,
- struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_txq *txq = NULL;
-
- if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
- (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
- return NULL;
-
- if (!ieee80211_is_data(hdr->frame_control))
- return NULL;
-
- if (pubsta) {
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
- txq = pubsta->txq[tid];
- } else if (vif) {
- txq = vif->txq;
- }
-
- if (!txq)
- return NULL;
-
- return to_txq_info(txq);
-}
-
static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
{
IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
@@ -1343,7 +1344,7 @@ static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
local = container_of(fq, struct ieee80211_local, fq);
txqi = container_of(tin, struct txq_info, tin);
cparams = &local->cparams;
- cstats = &local->cstats;
+ cstats = &txqi->cstats;
if (flow == &txqi->def_flow)
cvars = &txqi->def_cvars;
@@ -1403,6 +1404,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
fq_tin_init(&txqi->tin);
fq_flow_init(&txqi->def_flow);
codel_vars_init(&txqi->def_cvars);
+ codel_stats_init(&txqi->cstats);
txqi->txq.vif = &sdata->vif;
@@ -1441,7 +1443,6 @@ int ieee80211_txq_setup_flows(struct ieee80211_local *local)
return ret;
codel_params_init(&local->cparams);
- codel_stats_init(&local->cstats);
local->cparams.interval = MS2TIME(100);
local->cparams.target = MS2TIME(20);
local->cparams.ecn = true;
@@ -1514,8 +1515,12 @@ out:
spin_unlock_bh(&fq->lock);
if (skb && skb_has_frag_list(skb) &&
- !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
- skb_linearize(skb);
+ !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
+ if (skb_linearize(skb)) {
+ ieee80211_free_txskb(&local->hw, skb);
+ return NULL;
+ }
+ }
return skb;
}
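/*
 * Editor's note: the general pattern behind the hunk above, not part of the
 * patch. skb_linearize() allocates memory and can fail with -ENOMEM, so a
 * caller must check its result and drop the skb instead of transmitting a
 * half-linearized frame. example_linearize_or_drop() is hypothetical.
 */
static struct sk_buff *example_linearize_or_drop(struct sk_buff *skb)
{
	if (skb_linearize(skb)) {
		kfree_skb(skb);		/* mac80211 uses ieee80211_free_txskb()
					 * here so tx status is still reported */
		return NULL;
	}
	return skb;
}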
@@ -1643,7 +1648,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
vif = &sdata->vif;
break;
}
@@ -2263,15 +2268,9 @@ static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_STATION:
if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
sta = sta_info_get(sdata, skb->data);
- if (sta) {
- bool tdls_peer, tdls_auth;
-
- tdls_peer = test_sta_flag(sta,
- WLAN_STA_TDLS_PEER);
- tdls_auth = test_sta_flag(sta,
- WLAN_STA_TDLS_PEER_AUTH);
-
- if (tdls_peer && tdls_auth) {
+ if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ if (test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER_AUTH)) {
*sta_out = sta;
return 0;
}
@@ -2283,8 +2282,7 @@ static int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata,
* after a TDLS sta is removed due to being
* unreachable.
*/
- if (tdls_peer && !tdls_auth &&
- !ieee80211_is_tdls_setup(skb))
+ if (!ieee80211_is_tdls_setup(skb))
return -EINVAL;
}
@@ -3243,7 +3241,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
*ieee80211_get_qos_ctl(hdr) = tid;
- if (!sta->sta.txq[0])
+ if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
} else {
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 42bf0b6685e8..b6865d884487 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -598,7 +598,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
- if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
+ if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
continue;
break;
case NL80211_IFTYPE_AP_VLAN:
@@ -2555,7 +2555,6 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
- rate = sband->bitrates[i].bitrate;
rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
5 * (1 << shift));
*pos++ = basic | (u8) rate;
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 7079cd32a7ad..06019dba4b10 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -663,6 +663,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
/* TODO check this */
SET_NETDEV_DEV(ndev, &local->phy->dev);
+ dev_net_set(ndev, wpan_phy_net(local->hw.phy));
sdata = netdev_priv(ndev);
ndev->ieee802154_ptr = &sdata->wpan_dev;
memcpy(sdata->name, ndev->name, IFNAMSIZ);
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 446e1300383e..4dcf6e18563a 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -101,11 +101,16 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
sdata->dev->stats.rx_bytes += skb->len;
switch (mac_cb(skb)->type) {
+ case IEEE802154_FC_TYPE_BEACON:
+ case IEEE802154_FC_TYPE_ACK:
+ case IEEE802154_FC_TYPE_MAC_CMD:
+ goto fail;
+
case IEEE802154_FC_TYPE_DATA:
return ieee802154_deliver_skb(skb);
default:
- pr_warn("ieee802154: bad frame received (type = %d)\n",
- mac_cb(skb)->type);
+ pr_warn_ratelimited("ieee802154: bad frame received (type = %d)\n",
+ mac_cb(skb)->type);
goto fail;
}
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 72fc514ec676..fa6715db4581 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -231,19 +231,17 @@ EXPORT_SYMBOL(nf_unregister_net_hooks);
static LIST_HEAD(nf_hook_list);
-int nf_register_hook(struct nf_hook_ops *reg)
+static int _nf_register_hook(struct nf_hook_ops *reg)
{
struct net *net, *last;
int ret;
- rtnl_lock();
for_each_net(net) {
ret = nf_register_net_hook(net, reg);
if (ret && ret != -ENOENT)
goto rollback;
}
list_add_tail(&reg->list, &nf_hook_list);
- rtnl_unlock();
return 0;
rollback:
@@ -253,19 +251,34 @@ rollback:
break;
nf_unregister_net_hook(net, reg);
}
+ return ret;
+}
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+ int ret;
+
+ rtnl_lock();
+ ret = _nf_register_hook(reg);
rtnl_unlock();
+
return ret;
}
EXPORT_SYMBOL(nf_register_hook);
-void nf_unregister_hook(struct nf_hook_ops *reg)
+static void _nf_unregister_hook(struct nf_hook_ops *reg)
{
struct net *net;
- rtnl_lock();
list_del(&reg->list);
for_each_net(net)
nf_unregister_net_hook(net, reg);
+}
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+ rtnl_lock();
+ _nf_unregister_hook(reg);
rtnl_unlock();
}
EXPORT_SYMBOL(nf_unregister_hook);
@@ -289,6 +302,26 @@ err:
}
EXPORT_SYMBOL(nf_register_hooks);
+/* Caller MUST take rtnl_lock() */
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
+{
+ unsigned int i;
+ int err = 0;
+
+ for (i = 0; i < n; i++) {
+ err = _nf_register_hook(&reg[i]);
+ if (err)
+ goto err;
+ }
+ return err;
+
+err:
+ if (i > 0)
+ _nf_unregister_hooks(reg, i);
+ return err;
+}
+EXPORT_SYMBOL(_nf_register_hooks);
+
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
while (n-- > 0)
@@ -296,6 +329,14 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
}
EXPORT_SYMBOL(nf_unregister_hooks);
+/* Caller MUST take rtnl_lock() */
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
+{
+ while (n-- > 0)
+ _nf_unregister_hook(&reg[n]);
+}
+EXPORT_SYMBOL(_nf_unregister_hooks);
+
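/*
 * Editor's note: sketch of the intended call site for the underscored
 * variants above, not part of the patch. Code that already holds
 * rtnl_lock() for its own reasons (avoiding a deadlock for such callers
 * is the motivation here) can now register/unregister netfilter hooks
 * without taking the lock twice. example_ops is a placeholder array.
 */
static struct nf_hook_ops example_ops[2];

static int example_register_while_rtnl_held(void)
{
	int err;

	ASSERT_RTNL();	/* the caller is responsible for rtnl_lock() */
	err = _nf_register_hooks(example_ops, ARRAY_SIZE(example_ops));
	if (err)
		return err;
	/* ... later, still under rtnl: */
	_nf_unregister_hooks(example_ops, ARRAY_SIZE(example_ops));
	return 0;
}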
unsigned int nf_iterate(struct sk_buff *skb,
struct nf_hook_state *state,
struct nf_hook_entry **entryp)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c94ec197845c..ba6a1d421222 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1108,9 +1108,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
- if (tmpl && nfct_synproxy(tmpl)) {
- nfct_seqadj_ext_add(ct);
- nfct_synproxy_ext_add(ct);
+ if (!nf_ct_add_synproxy(ct, tmpl)) {
+ nf_conntrack_free(ct);
+ return ERR_PTR(-ENOMEM);
}
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 81ae41f85d3a..bbb8f3df79f7 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -441,7 +441,8 @@ nf_nat_setup_info(struct nf_conn *ct,
ct->status |= IPS_DST_NAT;
if (nfct_help(ct))
- nfct_seqadj_ext_add(ct);
+ if (!nfct_seqadj_ext_add(ct))
+ return NF_DROP;
}
if (maniptype == NF_NAT_MANIP_SRC) {
@@ -801,7 +802,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
if (err < 0)
return err;
- return nf_nat_setup_info(ct, &range, manip);
+ return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index 696fe8f6f2f2..ab695f8e2d29 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -239,7 +239,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
break;
case NFT_TRACETYPE_POLICY:
if (nla_put_be32(skb, NFTA_TRACE_POLICY,
- info->basechain->policy))
+ htonl(info->basechain->policy)))
goto nla_put_failure;
break;
}
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 70eb2f6a3b01..d44d89b56127 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -343,12 +343,12 @@ static int nfnl_acct_del(struct net *net, struct sock *nfnl,
struct sk_buff *skb, const struct nlmsghdr *nlh,
const struct nlattr * const tb[])
{
- char *acct_name;
- struct nf_acct *cur;
+ struct nf_acct *cur, *tmp;
int ret = -ENOENT;
+ char *acct_name;
if (!tb[NFACCT_NAME]) {
- list_for_each_entry(cur, &net->nfnl_acct_list, head)
+ list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head)
nfnl_acct_try_del(cur);
return 0;
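/*
 * Editor's note: the generic pattern behind this fix, not part of the
 * patch. nfnl_acct_try_del() may unlink and free the entry it is handed,
 * so a plain list_for_each_entry() would read the next pointer out of
 * freed memory; the _safe variant caches it first. example_flush() and
 * struct example_entry are hypothetical.
 */
struct example_entry {
	struct list_head head;
};

static void example_flush(struct list_head *list)
{
	struct example_entry *cur, *tmp;

	/* tmp holds the next entry before the body can free cur */
	list_for_each_entry_safe(cur, tmp, list, head) {
		list_del(&cur->head);
		kfree(cur);
	}
}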
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 68216cdc7083..139e0867e56e 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -98,31 +98,28 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl,
break;
}
- l4proto = nf_ct_l4proto_find_get(l3num, l4num);
-
- /* This protocol is not supported, skip. */
- if (l4proto->l4proto != l4num) {
- ret = -EOPNOTSUPP;
- goto err_proto_put;
- }
-
if (matching) {
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
/* You cannot replace one timeout policy by another of
* different kind, sorry.
*/
if (matching->l3num != l3num ||
- matching->l4proto->l4proto != l4num) {
- ret = -EINVAL;
- goto err_proto_put;
- }
-
- ret = ctnl_timeout_parse_policy(&matching->data,
- l4proto, net,
- cda[CTA_TIMEOUT_DATA]);
- return ret;
+ matching->l4proto->l4proto != l4num)
+ return -EINVAL;
+
+ return ctnl_timeout_parse_policy(&matching->data,
+ matching->l4proto, net,
+ cda[CTA_TIMEOUT_DATA]);
}
- ret = -EBUSY;
+
+ return -EBUSY;
+ }
+
+ l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+ /* This protocol is not supported, skip. */
+ if (l4proto->l4proto != l4num) {
+ ret = -EOPNOTSUPP;
goto err_proto_put;
}
@@ -305,7 +302,16 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
const struct hlist_nulls_node *nn;
unsigned int last_hsize;
spinlock_t *lock;
- int i;
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
+
+ spin_lock_bh(&pcpu->lock);
+ hlist_nulls_for_each_entry(h, nn, &pcpu->unconfirmed, hnnode)
+ untimeout(h, timeout);
+ spin_unlock_bh(&pcpu->lock);
+ }
local_bh_disable();
restart:
@@ -350,12 +356,13 @@ static int cttimeout_del_timeout(struct net *net, struct sock *ctnl,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
- struct ctnl_timeout *cur;
+ struct ctnl_timeout *cur, *tmp;
int ret = -ENOENT;
char *name;
if (!cda[CTA_TIMEOUT_NAME]) {
- list_for_each_entry(cur, &net->nfct_timeout_list, head)
+ list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list,
+ head)
ctnl_timeout_try_del(net, cur);
return 0;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 14264edf2d77..6c1e0246706e 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -293,10 +293,16 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
}
EXPORT_SYMBOL_GPL(nft_meta_get_init);
-static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
+int nft_meta_set_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
{
+ struct nft_meta *priv = nft_expr_priv(expr);
unsigned int hooks;
+ if (priv->key != NFT_META_PKTTYPE)
+ return 0;
+
switch (ctx->afi->family) {
case NFPROTO_BRIDGE:
hooks = 1 << NF_BR_PRE_ROUTING;
@@ -310,6 +316,7 @@ static int nft_meta_set_init_pkttype(const struct nft_ctx *ctx)
return nft_chain_validate_hooks(ctx->chain, hooks);
}
+EXPORT_SYMBOL_GPL(nft_meta_set_validate);
int nft_meta_set_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
@@ -329,15 +336,16 @@ int nft_meta_set_init(const struct nft_ctx *ctx,
len = sizeof(u8);
break;
case NFT_META_PKTTYPE:
- err = nft_meta_set_init_pkttype(ctx);
- if (err)
- return err;
len = sizeof(u8);
break;
default:
return -EOPNOTSUPP;
}
+ err = nft_meta_set_validate(ctx, expr, NULL);
+ if (err < 0)
+ return err;
+
priv->sreg = nft_parse_register(tb[NFTA_META_SREG]);
err = nft_validate_register_load(priv->sreg, len);
if (err < 0)
@@ -409,6 +417,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
+ .validate = nft_meta_set_validate,
};
static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 0522fc9bfb0a..c64de3f7379d 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -26,11 +26,27 @@ const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
};
EXPORT_SYMBOL_GPL(nft_reject_policy);
+int nft_reject_validate(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nft_data **data)
+{
+ return nft_chain_validate_hooks(ctx->chain,
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_reject_validate);
+
int nft_reject_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
+ int err;
+
+ err = nft_reject_validate(ctx, expr, NULL);
+ if (err < 0)
+ return err;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 759ca5248a3d..e79d9ca2ffee 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -66,7 +66,11 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
- int icmp_code;
+ int icmp_code, err;
+
+ err = nft_reject_validate(ctx, expr, NULL);
+ if (err < 0)
+ return err;
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
@@ -124,6 +128,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
.eval = nft_reject_inet_eval,
.init = nft_reject_inet_init,
.dump = nft_reject_inet_dump,
+ .validate = nft_reject_validate,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index ef36a56a02c6..4dedb96d1a06 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -68,7 +68,7 @@ match_packet(const struct sk_buff *skb,
++i, offset, sch->type, htons(sch->length),
sch->flags);
#endif
- offset += WORD_ROUND(ntohs(sch->length));
+ offset += SCTP_PAD4(ntohs(sch->length));
pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
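/*
 * Editor's note, not part of the patch: SCTP_PAD4() is the renamed,
 * globally visible replacement for SCTP's internal WORD_ROUND(); both
 * round a chunk length up to the 4-byte multiple that RFC 4960 requires
 * for walking the chunk list. Assuming the usual definition
 *
 *	#define SCTP_PAD4(s) (((s) + 3) & ~3)
 *
 * a 5-byte chunk length advances the offset by 8, an 8-byte one by 8.
 */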
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 3e3e2534478a..b2f0e986a6f4 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -127,7 +127,6 @@ stop:
goto done;
rhashtable_walk_exit(hti);
- cb->args[2] = 0;
num++;
mc_list:
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ca91fc33f8a9..863e992dfbc0 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -71,6 +71,8 @@ struct ovs_frag_data {
static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
+#define OVS_RECURSION_LIMIT 5
+#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
int head;
int tail;
@@ -78,7 +80,12 @@ struct action_fifo {
struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};
+struct recirc_keys {
+ struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
+};
+
static struct action_fifo __percpu *action_fifos;
+static struct recirc_keys __percpu *recirc_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
@@ -246,20 +253,24 @@ static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
int err;
err = skb_vlan_pop(skb);
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
- else
- key->eth.tci = 0;
+ } else {
+ key->eth.vlan.tci = 0;
+ key->eth.vlan.tpid = 0;
+ }
return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_vlan *vlan)
{
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
invalidate_flow_key(key);
- else
- key->eth.tci = vlan->vlan_tci;
+ } else {
+ key->eth.vlan.tci = vlan->vlan_tci;
+ key->eth.vlan.tpid = vlan->vlan_tpid;
+ }
return skb_vlan_push(skb, vlan->vlan_tpid,
ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
@@ -1016,6 +1027,7 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
const struct nlattr *a, int rem)
{
struct deferred_action *da;
+ int level;
if (!is_flow_key_valid(key)) {
int err;
@@ -1039,6 +1051,18 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
return 0;
}
+ level = this_cpu_read(exec_actions_level);
+ if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
+ struct recirc_keys *rks = this_cpu_ptr(recirc_keys);
+ struct sw_flow_key *recirc_key = &rks->key[level - 1];
+
+ *recirc_key = *key;
+ recirc_key->recirc_id = nla_get_u32(a);
+ ovs_dp_process_packet(skb, recirc_key);
+
+ return 0;
+ }
+
da = add_deferred_actions(skb, key, NULL);
if (da) {
da->pkt_key.recirc_id = nla_get_u32(a);
@@ -1205,11 +1229,10 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_actions *acts,
struct sw_flow_key *key)
{
- static const int ovs_recursion_limit = 5;
int err, level;
level = __this_cpu_inc_return(exec_actions_level);
- if (unlikely(level > ovs_recursion_limit)) {
+ if (unlikely(level > OVS_RECURSION_LIMIT)) {
net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
ovs_dp_name(dp));
kfree_skb(skb);
@@ -1234,10 +1257,17 @@ int action_fifos_init(void)
if (!action_fifos)
return -ENOMEM;
+ recirc_keys = alloc_percpu(struct recirc_keys);
+ if (!recirc_keys) {
+ free_percpu(action_fifos);
+ return -ENOMEM;
+ }
+
return 0;
}
void action_fifos_exit(void)
{
free_percpu(action_fifos);
+ free_percpu(recirc_keys);
}
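/*
 * Editor's note: sketch of the new recirculation bookkeeping, not part of
 * the patch. exec_actions_level is 1-based, so with OVS_RECURSION_LIMIT = 5
 * and OVS_DEFERRED_ACTION_THRESHOLD = 3 the first three levels recirculate
 * synchronously on the per-CPU key slots key[0..2], and only deeper levels
 * fall back to the bounded deferred-action FIFO. example_pick_slot() is
 * hypothetical.
 */
static struct sw_flow_key *example_pick_slot(struct recirc_keys *rks,
					     int level)
{
	if (level > OVS_DEFERRED_ACTION_THRESHOLD)
		return NULL;			/* defer instead */
	return &rks->key[level - 1];		/* level 1 -> slot 0 */
}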
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 524c0fd3078e..4d67ea856067 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -928,7 +928,6 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
- struct sw_flow_key key;
struct sw_flow_actions *acts;
struct sw_flow_match match;
u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
@@ -956,20 +955,24 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
}
/* Extract key. */
- ovs_match_init(&match, &key, &mask);
+ ovs_match_init(&match, &new_flow->key, false, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
if (error)
goto err_kfree_flow;
- ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
-
/* Extract flow identifier. */
error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
- &key, log);
+ &new_flow->key, log);
if (error)
goto err_kfree_flow;
+ /* The unmasked key is needed for matching when a ufid is not used. */
+ if (ovs_identifier_is_key(&new_flow->id))
+ match.key = new_flow->id.unmasked_key;
+
+ ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
+
/* Validate actions. */
error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
&new_flow->key, &acts, log);
@@ -996,7 +999,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (ovs_identifier_is_ufid(&new_flow->id))
flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
if (!flow)
- flow = ovs_flow_tbl_lookup(&dp->table, &key);
+ flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
if (likely(!flow)) {
rcu_assign_pointer(new_flow->sf_acts, acts);
@@ -1121,7 +1124,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
- ovs_match_init(&match, &key, &mask);
+ ovs_match_init(&match, &key, true, &mask);
error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
a[OVS_FLOW_ATTR_MASK], log);
} else if (!ufid_present) {
@@ -1238,7 +1241,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
- ovs_match_init(&match, &key, NULL);
+ ovs_match_init(&match, &key, true, NULL);
err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
log);
} else if (!ufid_present) {
@@ -1297,7 +1300,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
if (a[OVS_FLOW_ATTR_KEY]) {
- ovs_match_init(&match, &key, NULL);
+ ovs_match_init(&match, &key, true, NULL);
err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
NULL, log);
if (unlikely(err))
@@ -2437,3 +2440,7 @@ module_exit(dp_cleanup);
MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
+MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
+MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
+MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 0ea128eeeab2..634cc10d6dee 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
+#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -72,32 +73,33 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
{
struct flow_stats *stats;
int node = numa_node_id();
+ int cpu = smp_processor_id();
int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
- stats = rcu_dereference(flow->stats[node]);
+ stats = rcu_dereference(flow->stats[cpu]);
- /* Check if already have node-specific stats. */
+ /* Check if we already have CPU-specific stats. */
if (likely(stats)) {
spin_lock(&stats->lock);
/* Mark if we write on the pre-allocated stats. */
- if (node == 0 && unlikely(flow->stats_last_writer != node))
- flow->stats_last_writer = node;
+ if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
+ flow->stats_last_writer = cpu;
} else {
stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
spin_lock(&stats->lock);
- /* If the current NUMA-node is the only writer on the
+ /* If the current CPU is the only writer on the
* pre-allocated stats keep using them.
*/
- if (unlikely(flow->stats_last_writer != node)) {
+ if (unlikely(flow->stats_last_writer != cpu)) {
/* A previous locker may have already allocated the
- * stats, so we need to check again. If node-specific
+ * stats, so we need to check again. If CPU-specific
* stats were already allocated, we update the pre-
* allocated stats as we have already locked them.
*/
- if (likely(flow->stats_last_writer != NUMA_NO_NODE)
- && likely(!rcu_access_pointer(flow->stats[node]))) {
- /* Try to allocate node-specific stats. */
+ if (likely(flow->stats_last_writer != -1) &&
+ likely(!rcu_access_pointer(flow->stats[cpu]))) {
+ /* Try to allocate CPU-specific stats. */
struct flow_stats *new_stats;
new_stats =
@@ -114,12 +116,12 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
new_stats->tcp_flags = tcp_flags;
spin_lock_init(&new_stats->lock);
- rcu_assign_pointer(flow->stats[node],
+ rcu_assign_pointer(flow->stats[cpu],
new_stats);
goto unlock;
}
}
- flow->stats_last_writer = node;
+ flow->stats_last_writer = cpu;
}
}
@@ -136,14 +138,15 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
- int node;
+ int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
- for_each_node(node) {
- struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);
+ /* We open code this to make sure cpu 0 is always considered */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+ struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
if (stats) {
/* Local CPU may write on non-local stats, so we must
@@ -163,10 +166,11 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
- int node;
+ int cpu;
- for_each_node(node) {
- struct flow_stats *stats = ovsl_dereference(flow->stats[node]);
+ /* We open code this to make sure cpu 0 is always considered */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {
+ struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
if (stats) {
spin_lock_bh(&stats->lock);
@@ -302,24 +306,57 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
sizeof(struct icmp6hdr));
}
-static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+/**
+ * parse_vlan_tag - parse a VLAN tag from the VLAN header
+ * Returns -ENOMEM on memory error.
+ * Returns 0 if it encounters a non-VLAN or incomplete packet.
+ * Returns 1 after successfully parsing the VLAN tag.
+ */
+static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
{
- struct qtag_prefix {
- __be16 eth_type; /* ETH_P_8021Q */
- __be16 tci;
- };
- struct qtag_prefix *qp;
+ struct vlan_head *vh = (struct vlan_head *)skb->data;
+
+ if (likely(!eth_type_vlan(vh->tpid)))
+ return 0;
- if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
+ if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
return 0;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
- sizeof(__be16))))
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
+ sizeof(__be16))))
return -ENOMEM;
- qp = (struct qtag_prefix *) skb->data;
- key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
- __skb_pull(skb, sizeof(struct qtag_prefix));
+ vh = (struct vlan_head *)skb->data;
+ key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
+ key_vh->tpid = vh->tpid;
+
+ __skb_pull(skb, sizeof(struct vlan_head));
+ return 1;
+}
+
+static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ int res;
+
+ key->eth.vlan.tci = 0;
+ key->eth.vlan.tpid = 0;
+ key->eth.cvlan.tci = 0;
+ key->eth.cvlan.tpid = 0;
+
+ if (likely(skb_vlan_tag_present(skb))) {
+ key->eth.vlan.tci = htons(skb->vlan_tci);
+ key->eth.vlan.tpid = skb->vlan_proto;
+ } else {
+ /* Parse outer vlan tag in the non-accelerated case. */
+ res = parse_vlan_tag(skb, &key->eth.vlan);
+ if (res <= 0)
+ return res;
+ }
+
+ /* Parse inner vlan tag. */
+ res = parse_vlan_tag(skb, &key->eth.cvlan);
+ if (res <= 0)
+ return res;
return 0;
}
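/*
 * Editor's note: worked example for the double-tag parsing above, not part
 * of the patch. For an 802.1ad (QinQ) frame the outer 0x88a8 tag lands in
 * key->eth.vlan and the inner 0x8100 tag in key->eth.cvlan, each with
 * VLAN_TAG_PRESENT or'ed into the TCI so that even a zero VLAN ID still
 * reads as "tagged". example_is_qinq() is hypothetical.
 */
static bool example_is_qinq(const struct sw_flow_key *key)
{
	return (key->eth.vlan.tci & htons(VLAN_TAG_PRESENT)) &&
	       (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT));
}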
@@ -480,12 +517,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
* update skb->csum here.
*/
- key->eth.tci = 0;
- if (skb_vlan_tag_present(skb))
- key->eth.tci = htons(skb->vlan_tci);
- else if (eth->h_proto == htons(ETH_P_8021Q))
- if (unlikely(parse_vlan(skb, key)))
- return -ENOMEM;
+ if (unlikely(parse_vlan(skb, key)))
+ return -ENOMEM;
key->eth.type = parse_ethertype(skb);
if (unlikely(key->eth.type == htons(0)))
@@ -734,8 +767,6 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
{
int err;
- memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
-
/* Extract metadata from netlink attributes. */
err = ovs_nla_get_flow_metadata(net, attr, key, log);
if (err)
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 03378e75a67c..ae783f5c6695 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -50,6 +50,11 @@ struct ovs_tunnel_info {
struct metadata_dst *tun_dst;
};
+struct vlan_head {
+ __be16 tpid; /* VLAN type: generally 802.1Q or 802.1ad. */
+ __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+};
+
#define OVS_SW_FLOW_KEY_METADATA_SIZE \
(offsetof(struct sw_flow_key, recirc_id) + \
FIELD_SIZEOF(struct sw_flow_key, recirc_id))
@@ -69,7 +74,8 @@ struct sw_flow_key {
struct {
u8 src[ETH_ALEN]; /* Ethernet source address. */
u8 dst[ETH_ALEN]; /* Ethernet destination address. */
- __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+ struct vlan_head vlan;
+ struct vlan_head cvlan;
__be16 type; /* Ethernet frame type. */
} eth;
union {
@@ -172,14 +178,14 @@ struct sw_flow {
struct hlist_node node[2];
u32 hash;
} flow_table, ufid_table;
- int stats_last_writer; /* NUMA-node id of the last writer on
+ int stats_last_writer; /* CPU id of the last writer on
* 'stats[0]'.
*/
struct sw_flow_key key;
struct sw_flow_id id;
struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
- struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one
+ struct flow_stats __rcu *stats[]; /* One for each CPU. First one
* is allocated at flow creation time,
* the rest are allocated on demand
* while holding the 'stats[0].lock'.
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index c78a6a1476fb..ae25ded82b3b 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -808,6 +808,167 @@ int ovs_nla_put_tunnel_info(struct sk_buff *skb,
ip_tunnel_info_af(tun_info));
}
+static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
+ const struct nlattr *a[],
+ bool is_mask, bool inner)
+{
+ __be16 tci = 0;
+ __be16 tpid = 0;
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (likely(!inner)) {
+ SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
+ SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
+ SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
+ }
+ return 0;
+}
+
+static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
+ u64 key_attrs, bool inner,
+ const struct nlattr **a, bool log)
+{
+ __be16 tci = 0;
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
+ eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
+ /* Not a VLAN. */
+ return 0;
+ }
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
+ OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (tci) {
+ OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ } else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
+ /* Corner case for truncated VLAN header. */
+ OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+ }
+
+ return 1;
+}
+
+static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
+ u64 key_attrs, bool inner,
+ const struct nlattr **a, bool log)
+{
+ __be16 tci = 0;
+ __be16 tpid = 0;
+ bool encap_valid = !!(match->key->eth.vlan.tci &
+ htons(VLAN_TAG_PRESENT));
+ bool i_encap_valid = !!(match->key->eth.cvlan.tci &
+ htons(VLAN_TAG_PRESENT));
+
+ if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
+ /* Not a VLAN. */
+ return 0;
+ }
+
+ if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
+ OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (tpid != htons(0xffff)) {
+ OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
+ (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
+ return -EINVAL;
+ }
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
+ (inner) ? "C-VLAN" : "VLAN");
+ return -EINVAL;
+ }
+
+ return 1;
+}
+
+static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
+ u64 *key_attrs, bool inner,
+ const struct nlattr **a, bool is_mask,
+ bool log)
+{
+ int err;
+ const struct nlattr *encap;
+
+ if (!is_mask)
+ err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
+ a, log);
+ else
+ err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
+ a, log);
+ if (err <= 0)
+ return err;
+
+ err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
+ if (err)
+ return err;
+
+ *key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ *key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ *key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+
+ encap = a[OVS_KEY_ATTR_ENCAP];
+
+ if (!is_mask)
+ err = parse_flow_nlattrs(encap, a, key_attrs, log);
+ else
+ err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);
+
+ return err;
+}
+
+static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
+ u64 *key_attrs, const struct nlattr **a,
+ bool is_mask, bool log)
+{
+ int err;
+ bool encap_valid = false;
+
+ err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
+ is_mask, log);
+ if (err)
+ return err;
+
+ encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
+ if (encap_valid) {
+ err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
+ is_mask, log);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
u64 *attrs, const struct nlattr **a,
bool is_mask, bool log)
@@ -923,20 +1084,11 @@ static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
}
if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
- __be16 tci;
-
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- if (is_mask)
- OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
- else
- OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");
-
- return -EINVAL;
- }
-
- SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
- attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ /* VLAN attribute is always parsed before getting here since it
+ * may occur multiple times.
+ */
+ OVS_NLERR(log, "VLAN attribute unexpected.");
+ return -EINVAL;
}
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
@@ -1182,49 +1334,18 @@ int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
bool log)
{
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
- const struct nlattr *encap;
struct nlattr *newmask = NULL;
u64 key_attrs = 0;
u64 mask_attrs = 0;
- bool encap_valid = false;
int err;
err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
if (err)
return err;
- if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
- (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
- (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
- __be16 tci;
-
- if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
- (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
- OVS_NLERR(log, "Invalid Vlan frame.");
- return -EINVAL;
- }
-
- key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- encap = a[OVS_KEY_ATTR_ENCAP];
- key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
- encap_valid = true;
-
- if (tci & htons(VLAN_TAG_PRESENT)) {
- err = parse_flow_nlattrs(encap, a, &key_attrs, log);
- if (err)
- return err;
- } else if (!tci) {
- /* Corner case for truncated 802.1Q header. */
- if (nla_len(encap)) {
- OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
- return -EINVAL;
- }
- } else {
- OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
- return -EINVAL;
- }
- }
+ err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
+ if (err)
+ return err;
err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
if (err)
@@ -1265,46 +1386,12 @@ int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
goto free_newmask;
/* Always match on tci. */
- SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
-
- if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
- __be16 eth_type = 0;
- __be16 tci = 0;
-
- if (!encap_valid) {
- OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
- err = -EINVAL;
- goto free_newmask;
- }
-
- mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
- if (a[OVS_KEY_ATTR_ETHERTYPE])
- eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-
- if (eth_type == htons(0xffff)) {
- mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- encap = a[OVS_KEY_ATTR_ENCAP];
- err = parse_flow_mask_nlattrs(encap, a,
- &mask_attrs, log);
- if (err)
- goto free_newmask;
- } else {
- OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
- ntohs(eth_type));
- err = -EINVAL;
- goto free_newmask;
- }
-
- if (a[OVS_KEY_ATTR_VLAN])
- tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
+ SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
- ntohs(tci));
- err = -EINVAL;
- goto free_newmask;
- }
- }
+ err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
+ if (err)
+ goto free_newmask;
err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
log);
@@ -1410,12 +1497,25 @@ int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}
+static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
+ bool is_mask)
+{
+ __be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);
+
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
+ nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
+ return -EMSGSIZE;
+ return 0;
+}
+
static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
const struct sw_flow_key *output, bool is_mask,
struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
- struct nlattr *nla, *encap;
+ struct nlattr *nla;
+ struct nlattr *encap = NULL;
+ struct nlattr *in_encap = NULL;
if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
goto nla_put_failure;
@@ -1464,17 +1564,21 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
ether_addr_copy(eth_key->eth_src, output->eth.src);
ether_addr_copy(eth_key->eth_dst, output->eth.dst);
- if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
- __be16 eth_type;
- eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
- nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
+ if (swkey->eth.vlan.tci || eth_type_vlan(swkey->eth.type)) {
+ if (ovs_nla_put_vlan(skb, &output->eth.vlan, is_mask))
goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
- if (!swkey->eth.tci)
+ if (!swkey->eth.vlan.tci)
goto unencap;
- } else
- encap = NULL;
+
+ if (swkey->eth.cvlan.tci || eth_type_vlan(swkey->eth.type)) {
+ if (ovs_nla_put_vlan(skb, &output->eth.cvlan, is_mask))
+ goto nla_put_failure;
+ in_encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
+ if (!swkey->eth.cvlan.tci)
+ goto unencap;
+ }
+ }
if (swkey->eth.type == htons(ETH_P_802_2)) {
/*
@@ -1493,6 +1597,14 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
goto nla_put_failure;
+ if (eth_type_vlan(swkey->eth.type)) {
+ /* There are 3 VLAN tags; we don't know anything about the rest
+ * of the packet, so truncate here.
+ */
+ WARN_ON_ONCE(!(encap && in_encap));
+ goto unencap;
+ }
+
if (swkey->eth.type == htons(ETH_P_IP)) {
struct ovs_key_ipv4 *ipv4_key;
@@ -1619,6 +1731,8 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
}
unencap:
+ if (in_encap)
+ nla_nest_end(skb, in_encap);
if (encap)
nla_nest_end(skb, encap);
@@ -1882,13 +1996,15 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
void ovs_match_init(struct sw_flow_match *match,
struct sw_flow_key *key,
+ bool reset_key,
struct sw_flow_mask *mask)
{
memset(match, 0, sizeof(*match));
match->key = key;
match->mask = mask;
- memset(key, 0, sizeof(*key));
+ if (reset_key)
+ memset(key, 0, sizeof(*key));
if (mask) {
memset(&mask->key, 0, sizeof(mask->key));
@@ -1935,7 +2051,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
struct nlattr *a;
int err = 0, start, opts_type;
- ovs_match_init(&match, &key, NULL);
+ ovs_match_init(&match, &key, true, NULL);
opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
if (opts_type < 0)
return opts_type;
@@ -2283,7 +2399,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
case OVS_ACTION_ATTR_PUSH_VLAN:
vlan = nla_data(a);
- if (vlan->vlan_tpid != htons(ETH_P_8021Q))
+ if (!eth_type_vlan(vlan->vlan_tpid))
return -EINVAL;
if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
return -EINVAL;
@@ -2388,7 +2504,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
(*sfa)->orig_len = nla_len(attr);
err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
- key->eth.tci, log);
+ key->eth.vlan.tci, log);
if (err)
ovs_nla_free_flow_actions(*sfa);
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 47dd142eca1c..45f9769e5aac 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -41,7 +41,8 @@ size_t ovs_tun_key_attr_size(void);
size_t ovs_key_attr_size(void);
void ovs_match_init(struct sw_flow_match *match,
- struct sw_flow_key *key, struct sw_flow_mask *mask);
+ struct sw_flow_key *key, bool reset_key,
+ struct sw_flow_mask *mask);
int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
int attr, bool is_mask, struct sk_buff *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index d073fff82fdb..ea7a8073fa02 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
+#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -79,17 +80,12 @@ struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
struct flow_stats *stats;
- int node;
- flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+ flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
- flow->sf_acts = NULL;
- flow->mask = NULL;
- flow->id.unmasked_key = NULL;
- flow->id.ufid_len = 0;
- flow->stats_last_writer = NUMA_NO_NODE;
+ flow->stats_last_writer = -1;
/* Initialize the default stat node. */
stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -102,10 +98,6 @@ struct sw_flow *ovs_flow_alloc(void)
RCU_INIT_POINTER(flow->stats[0], stats);
- for_each_node(node)
- if (node != 0)
- RCU_INIT_POINTER(flow->stats[node], NULL);
-
return flow;
err:
kmem_cache_free(flow_cache, flow);
@@ -142,16 +134,17 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
static void flow_free(struct sw_flow *flow)
{
- int node;
+ int cpu;
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
if (flow->sf_acts)
ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
- for_each_node(node)
- if (flow->stats[node])
+ /* We open code this to make sure cpu 0 is always considered */
+ for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))
+ if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
- (struct flow_stats __force *)flow->stats[node]);
+ (struct flow_stats __force *)flow->stats[cpu]);
kmem_cache_free(flow_cache, flow);
}
@@ -756,7 +749,7 @@ int ovs_flow_init(void)
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
- + (nr_node_ids
+ + (nr_cpu_ids
* sizeof(struct flow_stats *)),
0, 0, NULL);
if (flow_cache == NULL)
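/*
 * Editor's note on the open-coded CPU walk used throughout this series,
 * not part of the patch: for_each_possible_cpu() starts at the first
 * *possible* CPU, which is not guaranteed to be CPU 0, while stats[0] is
 * the always-pre-allocated slot. Starting at 0 explicitly and then
 * following cpu_possible_mask guarantees the pre-allocated slot is
 * visited exactly once:
 *
 *	for (cpu = 0; cpu < nr_cpu_ids;
 *	     cpu = cpumask_next(cpu, cpu_possible_mask))
 *		... flow->stats[cpu] ...
 */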
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6b21fd068d87..8f198437c724 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -485,9 +485,14 @@ static unsigned int packet_length(const struct sk_buff *skb)
{
unsigned int length = skb->len - ETH_HLEN;
- if (skb->protocol == htons(ETH_P_8021Q))
+ if (skb_vlan_tagged(skb))
length -= VLAN_HLEN;
+ /* Don't subtract for multiple VLAN tags. Most (all?) drivers allow
+ * (ETH_HLEN + VLAN_HLEN) in addition to the MTU value, but almost none
+ * account for 802.1ad, e.g. is_skb_forwardable().
+ */
+
return length;
}
diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig
index 784c53163b7b..86f8853a038c 100644
--- a/net/rxrpc/Kconfig
+++ b/net/rxrpc/Kconfig
@@ -19,6 +19,20 @@ config AF_RXRPC
See Documentation/networking/rxrpc.txt.
+config AF_RXRPC_IPV6
+ bool "IPv6 support for RxRPC"
+ depends on (IPV6 = m && AF_RXRPC = m) || (IPV6 = y && AF_RXRPC)
+ help
+ Say Y here to allow AF_RXRPC to use IPv6 UDP as well as IPv4 UDP as
+ its network transport.
+
+config AF_RXRPC_INJECT_LOSS
+ bool "Inject packet loss into RxRPC packet stream"
+ depends on AF_RXRPC
+ help
+ Say Y here to inject packet loss by discarding some received and some
+ transmitted packets.
+
config AF_RXRPC_DEBUG
bool "RxRPC dynamic debugging"
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index b66a9e6f8d04..8dbf7bed2cc4 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -45,7 +45,7 @@ u32 rxrpc_epoch;
atomic_t rxrpc_debug_id;
/* count of skbs currently in use */
-atomic_t rxrpc_n_skbs;
+atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
struct workqueue_struct *rxrpc_workqueue;
@@ -106,19 +106,25 @@ static int rxrpc_validate_address(struct rxrpc_sock *rx,
case AF_INET:
if (srx->transport_len < sizeof(struct sockaddr_in))
return -EINVAL;
- _debug("INET: %x @ %pI4",
- ntohs(srx->transport.sin.sin_port),
- &srx->transport.sin.sin_addr);
tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
+ if (srx->transport_len < sizeof(struct sockaddr_in6))
+ return -EINVAL;
+ tail = offsetof(struct sockaddr_rxrpc, transport) +
+ sizeof(struct sockaddr_in6);
+ break;
+#endif
+
default:
return -EAFNOSUPPORT;
}
if (tail < len)
memset((void *)srx + tail, 0, len - tail);
+ _debug("INET: %pISp", &srx->transport);
return 0;
}
@@ -155,15 +161,15 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
}
if (rx->srx.srx_service) {
- write_lock_bh(&local->services_lock);
- list_for_each_entry(prx, &local->services, listen_link) {
+ write_lock(&local->services_lock);
+ hlist_for_each_entry(prx, &local->services, listen_link) {
if (prx->srx.srx_service == rx->srx.srx_service)
goto service_in_use;
}
rx->local = local;
- list_add_tail(&rx->listen_link, &local->services);
- write_unlock_bh(&local->services_lock);
+ hlist_add_head_rcu(&rx->listen_link, &local->services);
+ write_unlock(&local->services_lock);
rx->sk.sk_state = RXRPC_SERVER_BOUND;
} else {
@@ -176,7 +182,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
return 0;
service_in_use:
- write_unlock_bh(&local->services_lock);
+ write_unlock(&local->services_lock);
rxrpc_put_local(local);
ret = -EADDRINUSE;
error_unlock:
@@ -193,7 +199,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct rxrpc_sock *rx = rxrpc_sk(sk);
- unsigned int max;
+ unsigned int max, old;
int ret;
_enter("%p,%d", rx, backlog);
@@ -212,9 +218,13 @@ static int rxrpc_listen(struct socket *sock, int backlog)
backlog = max;
else if (backlog < 0 || backlog > max)
break;
+ old = sk->sk_max_ack_backlog;
sk->sk_max_ack_backlog = backlog;
- rx->sk.sk_state = RXRPC_SERVER_LISTENING;
- ret = 0;
+ ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
+ if (ret == 0)
+ rx->sk.sk_state = RXRPC_SERVER_LISTENING;
+ else
+ sk->sk_max_ack_backlog = old;
break;
default:
ret = -EBUSY;
@@ -294,9 +304,8 @@ EXPORT_SYMBOL(rxrpc_kernel_begin_call);
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
- rxrpc_remove_user_ID(rxrpc_sk(sock->sk), call);
- rxrpc_purge_queue(&call->knlrecv_queue);
- rxrpc_put_call(call);
+ rxrpc_release_call(rxrpc_sk(sock->sk), call);
+ rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);
@@ -304,16 +313,19 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
* rxrpc_kernel_new_call_notification - Get notifications of new calls
* @sock: The socket to intercept received messages on
* @notify_new_call: Function to be called when new calls appear
+ * @discard_new_call: Function to discard preallocated calls
*
* Allow a kernel service to be given notifications about new calls.
*/
void rxrpc_kernel_new_call_notification(
struct socket *sock,
- rxrpc_notify_new_call_t notify_new_call)
+ rxrpc_notify_new_call_t notify_new_call,
+ rxrpc_discard_new_call_t discard_new_call)
{
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
rx->notify_new_call = notify_new_call;
+ rx->discard_new_call = discard_new_call;
}
EXPORT_SYMBOL(rxrpc_kernel_new_call_notification);
@@ -395,6 +407,23 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
switch (rx->sk.sk_state) {
case RXRPC_UNBOUND:
+ rx->srx.srx_family = AF_RXRPC;
+ rx->srx.srx_service = 0;
+ rx->srx.transport_type = SOCK_DGRAM;
+ rx->srx.transport.family = rx->family;
+ switch (rx->family) {
+ case AF_INET:
+ rx->srx.transport_len = sizeof(struct sockaddr_in);
+ break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ rx->srx.transport_len = sizeof(struct sockaddr_in6);
+ break;
+#endif
+ default:
+ ret = -EAFNOSUPPORT;
+ goto error_unlock;
+ }
local = rxrpc_lookup_local(&rx->srx);
if (IS_ERR(local)) {
ret = PTR_ERR(local);
@@ -509,15 +538,16 @@ error:
static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
- unsigned int mask;
struct sock *sk = sock->sk;
+ struct rxrpc_sock *rx = rxrpc_sk(sk);
+ unsigned int mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
* queue */
- if (!skb_queue_empty(&sk->sk_receive_queue))
+ if (!list_empty(&rx->recvmsg_q))
mask |= POLLIN | POLLRDNORM;
/* the socket is writable if there is space to add new data to the
@@ -544,7 +574,8 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
return -EAFNOSUPPORT;
/* we support transport protocol UDP/UDP6 only */
- if (protocol != PF_INET)
+ if (protocol != PF_INET &&
+ !(IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol == PF_INET6))
return -EPROTONOSUPPORT;
if (sock->type != SOCK_DGRAM)
@@ -558,6 +589,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
return -ENOMEM;
sock_init_data(sock, sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_state = RXRPC_UNBOUND;
sk->sk_write_space = rxrpc_write_space;
sk->sk_max_ack_backlog = 0;
@@ -567,9 +599,12 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
rx->family = protocol;
rx->calls = RB_ROOT;
- INIT_LIST_HEAD(&rx->listen_link);
- INIT_LIST_HEAD(&rx->secureq);
- INIT_LIST_HEAD(&rx->acceptq);
+ INIT_HLIST_NODE(&rx->listen_link);
+ spin_lock_init(&rx->incoming_lock);
+ INIT_LIST_HEAD(&rx->sock_calls);
+ INIT_LIST_HEAD(&rx->to_be_accepted);
+ INIT_LIST_HEAD(&rx->recvmsg_q);
+ rwlock_init(&rx->recvmsg_lock);
rwlock_init(&rx->call_lock);
memset(&rx->srx, 0, sizeof(rx->srx));
@@ -578,6 +613,39 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
}
/*
+ * Kill all the calls on a socket and shut it down.
+ */
+static int rxrpc_shutdown(struct socket *sock, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct rxrpc_sock *rx = rxrpc_sk(sk);
+ int ret = 0;
+
+ _enter("%p,%d", sk, flags);
+
+ if (flags != SHUT_RDWR)
+ return -EOPNOTSUPP;
+ if (sk->sk_state == RXRPC_CLOSE)
+ return -ESHUTDOWN;
+
+ lock_sock(sk);
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ if (sk->sk_state < RXRPC_CLOSE) {
+ sk->sk_state = RXRPC_CLOSE;
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ } else {
+ ret = -ESHUTDOWN;
+ }
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+
+ rxrpc_discard_prealloc(rx);
+
+ release_sock(sk);
+ return ret;
+}
+
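From userspace the new handler is reached through the ordinary shutdown(2) system call. A minimal sketch - assuming AF_RXRPC support is built into the running kernel (protocol family 33 on Linux) - showing that only SHUT_RDWR is honoured:

#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_RXRPC
#define AF_RXRPC 33		/* not present in all libc headers */
#endif

int main(void)
{
	/* An AF_RXRPC socket takes the transport family as its protocol. */
	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (shutdown(fd, SHUT_RD) < 0)
		perror("shutdown(SHUT_RD)");	/* EOPNOTSUPP expected */
	if (shutdown(fd, SHUT_RDWR) < 0)
		perror("shutdown(SHUT_RDWR)");
	return 0;
}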
+/*
* RxRPC socket destructor
*/
static void rxrpc_sock_destructor(struct sock *sk)
@@ -615,13 +683,14 @@ static int rxrpc_release_sock(struct sock *sk)
ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);
- if (!list_empty(&rx->listen_link)) {
- write_lock_bh(&rx->local->services_lock);
- list_del(&rx->listen_link);
- write_unlock_bh(&rx->local->services_lock);
+ if (!hlist_unhashed(&rx->listen_link)) {
+ write_lock(&rx->local->services_lock);
+ hlist_del_rcu(&rx->listen_link);
+ write_unlock(&rx->local->services_lock);
}
/* try to flush out this socket */
+ rxrpc_discard_prealloc(rx);
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
@@ -670,7 +739,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
.poll = rxrpc_poll,
.ioctl = sock_no_ioctl,
.listen = rxrpc_listen,
- .shutdown = sock_no_shutdown,
+ .shutdown = rxrpc_shutdown,
.setsockopt = rxrpc_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = rxrpc_sendmsg,
@@ -798,7 +867,8 @@ static void __exit af_rxrpc_exit(void)
proto_unregister(&rxrpc_proto);
rxrpc_destroy_all_calls();
rxrpc_destroy_all_connections();
- ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);
+ ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
+ ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
rxrpc_destroy_all_locals();
remove_proc_entry("rxrpc_conns", init_net.proc_net);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index bb342f5fe7e4..ca96e547cb9a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -35,8 +35,6 @@ struct rxrpc_crypt {
#define rxrpc_queue_delayed_work(WS,D) \
queue_delayed_work(rxrpc_workqueue, (WS), (D))
-#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor)
-
struct rxrpc_connection;
/*
@@ -66,19 +64,45 @@ enum {
};
/*
+ * Service backlog preallocation.
+ *
+ * This contains circular buffers of preallocated peers, connections and calls
+ * for incoming service calls and their head and tail pointers. This allows
+ * calls to be set up in the data_ready handler, thereby avoiding the need to
+ * shuffle packets around so much.
+ */
+struct rxrpc_backlog {
+ unsigned short peer_backlog_head;
+ unsigned short peer_backlog_tail;
+ unsigned short conn_backlog_head;
+ unsigned short conn_backlog_tail;
+ unsigned short call_backlog_head;
+ unsigned short call_backlog_tail;
+#define RXRPC_BACKLOG_MAX 32
+ struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX];
+ struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX];
+ struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX];
+};
+
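A minimal userspace sketch of the head/tail arithmetic this structure relies on, assuming (as the kernel's CIRC_CNT() does) a power-of-two ring size like RXRPC_BACKLOG_MAX:

#include <stdio.h>

#define BACKLOG_MAX 32			/* power of two, as above */

/* Number of queued entries between a producer head and a consumer tail. */
static unsigned int ring_count(unsigned short head, unsigned short tail)
{
	return (head - tail) & (BACKLOG_MAX - 1);
}

int main(void)
{
	unsigned short head = 0, tail = 0;

	head = (head + 1) & (BACKLOG_MAX - 1);	/* preallocate one entry */
	printf("queued=%u\n", ring_count(head, tail));	/* 1 */
	tail = (tail + 1) & (BACKLOG_MAX - 1);	/* consume it */
	printf("queued=%u\n", ring_count(head, tail));	/* 0 */
	return 0;
}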
+/*
* RxRPC socket definition
*/
struct rxrpc_sock {
/* WARNING: sk has to be the first member */
struct sock sk;
rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */
+ rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
struct rxrpc_local *local; /* local endpoint */
- struct list_head listen_link; /* link in the local endpoint's listen list */
- struct list_head secureq; /* calls awaiting connection security clearance */
- struct list_head acceptq; /* calls awaiting acceptance */
+ struct hlist_node listen_link; /* link in the local endpoint's listen list */
+ struct rxrpc_backlog *backlog; /* Preallocation for services */
+ spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */
+ struct list_head sock_calls; /* List of calls owned by this socket */
+ struct list_head to_be_accepted; /* calls awaiting acceptance */
+ struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */
+ rwlock_t recvmsg_lock; /* Lock for recvmsg_q */
struct key *key; /* security for this socket */
struct key *securities; /* list of server security descriptors */
- struct rb_root calls; /* outstanding calls on this socket */
+ struct rb_root calls; /* User ID -> call mapping */
unsigned long flags;
#define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */
rwlock_t call_lock; /* lock for calls */
@@ -117,13 +141,13 @@ struct rxrpc_host_header {
* - max 48 bytes (struct sk_buff::cb)
*/
struct rxrpc_skb_priv {
- struct rxrpc_call *call; /* call with which associated */
- unsigned long resend_at; /* time in jiffies at which to resend */
+ union {
+ u8 nr_jumbo; /* Number of jumbo subpackets */
+ };
union {
unsigned int offset; /* offset into buffer of next read */
int remain; /* amount of space remaining for next write */
u32 error; /* network error code */
- bool need_resend; /* T if needs resending */
};
struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
@@ -157,7 +181,12 @@ struct rxrpc_security {
void *);
/* verify the security on a received packet */
- int (*verify_packet)(struct rxrpc_call *, struct sk_buff *, u32 *);
+ int (*verify_packet)(struct rxrpc_call *, struct sk_buff *,
+ unsigned int, unsigned int, rxrpc_seq_t, u16);
+
+ /* Locate the data in a received packet that has been verified. */
+ void (*locate_data)(struct rxrpc_call *, struct sk_buff *,
+ unsigned int *, unsigned int *);
/* issue a challenge */
int (*issue_challenge)(struct rxrpc_connection *);
@@ -187,9 +216,8 @@ struct rxrpc_local {
struct list_head link;
struct socket *socket; /* my UDP socket */
struct work_struct processor;
- struct list_head services; /* services listening on this endpoint */
+ struct hlist_head services; /* services listening on this endpoint */
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
- struct sk_buff_head accept_queue; /* incoming calls awaiting acceptance */
struct sk_buff_head reject_queue; /* packets awaiting rejection */
struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
struct rb_root client_conns; /* Client connections by socket params */
@@ -227,10 +255,12 @@ struct rxrpc_peer {
/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
- suseconds_t rtt; /* current RTT estimate (in uS) */
- unsigned int rtt_point; /* next entry at which to insert */
- unsigned int rtt_usage; /* amount of cache actually used */
- suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
+ ktime_t rtt_last_req; /* Time of last RTT request */
+ u64 rtt; /* Current RTT estimate (in ns) */
+ u64 rtt_sum; /* Sum of cache contents */
+ u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */
+ u8 rtt_cursor; /* next entry at which to insert */
+ u8 rtt_usage; /* amount of cache actually used */
};
/*
@@ -283,6 +313,7 @@ enum rxrpc_conn_cache_state {
RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
+ RXRPC_CONN__NR_CACHE_STATES
};
/*
@@ -291,6 +322,7 @@ enum rxrpc_conn_cache_state {
enum rxrpc_conn_proto_state {
RXRPC_CONN_UNUSED, /* Connection not yet attempted */
RXRPC_CONN_CLIENT, /* Client connection */
+ RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */
RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */
RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
RXRPC_CONN_SERVICE, /* Service secured connection */
@@ -345,17 +377,16 @@ struct rxrpc_connection {
unsigned long events;
unsigned long idle_timestamp; /* Time at which last became idle */
spinlock_t state_lock; /* state-change lock */
- enum rxrpc_conn_cache_state cache_state : 8;
- enum rxrpc_conn_proto_state state : 8; /* current state of connection */
+ enum rxrpc_conn_cache_state cache_state;
+ enum rxrpc_conn_proto_state state; /* current state of connection */
u32 local_abort; /* local abort code */
u32 remote_abort; /* remote abort code */
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
+ u32 security_nonce; /* response re-use preventer */
u8 size_align; /* data size alignment (for security) */
- u8 header_size; /* rxrpc + security header size */
u8 security_size; /* security header size */
- u32 security_nonce; /* response re-use preventer */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
};
@@ -365,39 +396,23 @@ struct rxrpc_connection {
*/
enum rxrpc_call_flag {
RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */
- RXRPC_CALL_TERMINAL_MSG, /* call has given the socket its final message */
- RXRPC_CALL_RCVD_LAST, /* all packets received */
- RXRPC_CALL_RUN_RTIMER, /* Tx resend timer started */
- RXRPC_CALL_TX_SOFT_ACK, /* sent some soft ACKs */
- RXRPC_CALL_INIT_ACCEPT, /* acceptance was initiated */
RXRPC_CALL_HAS_USERID, /* has a user ID attached */
- RXRPC_CALL_EXPECT_OOS, /* expect out of sequence packets */
RXRPC_CALL_IS_SERVICE, /* Call is service call */
RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
- RXRPC_CALL_RX_NO_MORE, /* Don't indicate MSG_MORE from recvmsg() */
+ RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
+ RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
+ RXRPC_CALL_PINGING, /* Ping in process */
+ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
};
/*
* Events that can be raised on a call.
*/
enum rxrpc_call_event {
- RXRPC_CALL_EV_RCVD_ACKALL, /* ACKALL or reply received */
- RXRPC_CALL_EV_RCVD_BUSY, /* busy packet received */
- RXRPC_CALL_EV_RCVD_ABORT, /* abort packet received */
- RXRPC_CALL_EV_RCVD_ERROR, /* network error received */
- RXRPC_CALL_EV_ACK_FINAL, /* need to generate final ACK (and release call) */
RXRPC_CALL_EV_ACK, /* need to generate ACK */
- RXRPC_CALL_EV_REJECT_BUSY, /* need to generate busy message */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
- RXRPC_CALL_EV_CONN_ABORT, /* local connection abort generated */
- RXRPC_CALL_EV_RESEND_TIMER, /* Tx resend timer expired */
+ RXRPC_CALL_EV_TIMER, /* Timer expired */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
- RXRPC_CALL_EV_DRAIN_RX_OOS, /* drain the Rx out of sequence queue */
- RXRPC_CALL_EV_LIFE_TIMER, /* call's lifetimer ran out */
- RXRPC_CALL_EV_ACCEPTED, /* incoming call accepted by userspace app */
- RXRPC_CALL_EV_SECURED, /* incoming call's connection is now secure */
- RXRPC_CALL_EV_POST_ACCEPT, /* need to post an "accept?" message to the app */
- RXRPC_CALL_EV_RELEASE, /* need to release the call's resources */
};
/*
@@ -409,7 +424,7 @@ enum rxrpc_call_state {
RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */
RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */
- RXRPC_CALL_CLIENT_FINAL_ACK, /* - client sending final ACK phase */
+ RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */
RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */
RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */
RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */
@@ -417,7 +432,6 @@ enum rxrpc_call_state {
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
RXRPC_CALL_COMPLETE, /* - call complete */
- RXRPC_CALL_DEAD, /* - call is dead */
NR__RXRPC_CALL_STATES
};
@@ -426,7 +440,6 @@ enum rxrpc_call_state {
*/
enum rxrpc_call_completion {
RXRPC_CALL_SUCCEEDED, /* - Normal termination */
- RXRPC_CALL_SERVER_BUSY, /* - call rejected by busy server */
RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
@@ -435,6 +448,17 @@ enum rxrpc_call_completion {
};
/*
+ * Call Tx congestion management modes.
+ */
+enum rxrpc_congest_mode {
+ RXRPC_CALL_SLOW_START,
+ RXRPC_CALL_CONGEST_AVOIDANCE,
+ RXRPC_CALL_PACKET_LOSS,
+ RXRPC_CALL_FAST_RETRANSMIT,
+ NR__RXRPC_CONGEST_MODES
+};
+
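As an illustrative model only - the names, thresholds and update rules below are textbook RFC5681, not lifted from this patch - the four modes correspond to segment-counted congestion management along these lines:

#include <stdio.h>

enum mode { SLOW_START, CONGEST_AVOIDANCE, PACKET_LOSS, FAST_RETRANSMIT };

struct congest {
	enum mode	mode;
	unsigned int	cwnd;		/* congestion window, in segments */
	unsigned int	ssthresh;	/* slow-start threshold, in segments */
	unsigned int	dup_acks;	/* consecutive duplicate ACKs */
	unsigned int	acks;		/* ACKs accumulated in avoidance */
};

static void on_ack(struct congest *c, unsigned int newly_acked)
{
	if (c->mode == SLOW_START) {
		c->cwnd += newly_acked;		/* exponential growth */
		if (c->cwnd >= c->ssthresh)
			c->mode = CONGEST_AVOIDANCE;
	} else if (c->mode == CONGEST_AVOIDANCE) {
		c->acks += newly_acked;		/* ~1 segment per RTT */
		if (c->acks >= c->cwnd) {
			c->acks -= c->cwnd;
			c->cwnd++;
		}
	}
}

static void on_dup_ack(struct congest *c)
{
	if (++c->dup_acks == 3) {		/* classic triple-dup-ACK */
		c->ssthresh = c->cwnd / 2;
		c->cwnd = c->ssthresh;
		c->mode = FAST_RETRANSMIT;
	}
}

static void on_timeout(struct congest *c)
{
	c->ssthresh = c->cwnd / 2;		/* collapse the window... */
	c->cwnd = 1;
	c->mode = PACKET_LOSS;			/* ...then slow-start again */
}

int main(void)
{
	struct congest c = { SLOW_START, 4, 8, 0, 0 };

	on_ack(&c, 4);				/* reaches ssthresh */
	printf("mode=%d cwnd=%u\n", c.mode, c.cwnd);
	on_dup_ack(&c); on_dup_ack(&c); on_dup_ack(&c);
	printf("mode=%d cwnd=%u\n", c.mode, c.cwnd);
	on_timeout(&c);
	printf("mode=%d cwnd=%u\n", c.mode, c.cwnd);
	return 0;
}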
+/*
* RxRPC call definition
* - matched by { connection, call_id }
*/
@@ -442,78 +466,329 @@ struct rxrpc_call {
struct rcu_head rcu;
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
- struct rxrpc_sock *socket; /* socket responsible */
- struct timer_list lifetimer; /* lifetime remaining on call */
- struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */
- struct timer_list ack_timer; /* ACK generation timer */
- struct timer_list resend_timer; /* Tx resend timer */
- struct work_struct destroyer; /* call destroyer */
- struct work_struct processor; /* packet processor and ACK generator */
+ struct rxrpc_sock __rcu *socket; /* socket responsible */
+ unsigned long ack_at; /* When deferred ACK needs to happen */
+ unsigned long resend_at; /* When next resend needs to happen */
+ unsigned long expire_at; /* When the call times out */
+ struct timer_list timer; /* Combined event timer */
+ struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
struct list_head link; /* link in master call list */
struct list_head chan_wait_link; /* Link in conn->waiting_calls */
struct hlist_node error_link; /* link in error distribution list */
- struct list_head accept_link; /* calls awaiting acceptance */
- struct rb_node sock_node; /* node in socket call tree */
- struct sk_buff_head rx_queue; /* received packets */
- struct sk_buff_head rx_oos_queue; /* packets received out of sequence */
- struct sk_buff_head knlrecv_queue; /* Queue for kernel_recv [TODO: replace this] */
+ struct list_head accept_link; /* Link in rx->acceptq */
+ struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
+ struct list_head sock_link; /* Link in rx->sock_calls */
+ struct rb_node sock_node; /* Node in rx->calls */
struct sk_buff *tx_pending; /* Tx socket buffer being filled */
wait_queue_head_t waitq; /* Wait queue for channel or Tx */
__be32 crypto_buf[2]; /* Temporary packet crypto buffer */
unsigned long user_call_ID; /* user-defined call ID */
- unsigned long creation_jif; /* time of call creation */
unsigned long flags;
unsigned long events;
spinlock_t lock;
rwlock_t state_lock; /* lock for state transition */
u32 abort_code; /* Local/remote abort code */
int error; /* Local error incurred */
- enum rxrpc_call_state state : 8; /* current state of call */
- enum rxrpc_call_completion completion : 8; /* Call completion condition */
+ enum rxrpc_call_state state; /* current state of call */
+ enum rxrpc_call_completion completion; /* Call completion condition */
atomic_t usage;
- atomic_t skb_count; /* Outstanding packets on this call */
- atomic_t sequence; /* Tx data packet sequence counter */
u16 service_id; /* service ID */
+ u8 security_ix; /* Security type */
u32 call_id; /* call ID on connection */
u32 cid; /* connection ID plus channel index */
int debug_id; /* debug ID for printks */
-
- /* transmission-phase ACK management */
- u8 acks_head; /* offset into window of first entry */
- u8 acks_tail; /* offset into window of last entry */
- u8 acks_winsz; /* size of un-ACK'd window */
- u8 acks_unacked; /* lowest unacked packet in last ACK received */
- int acks_latest; /* serial number of latest ACK received */
- rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */
- unsigned long *acks_window; /* sent packet window
- * - elements are pointers with LSB set if ACK'd
+ unsigned short rx_pkt_offset; /* Current recvmsg packet offset */
+ unsigned short rx_pkt_len; /* Current recvmsg packet len */
+
+ /* Rx/Tx circular buffer, depending on phase.
+ *
+ * In the Rx phase, packets are annotated with 0 or the number of the
+ * segment of a jumbo packet each buffer refers to. There can be up to
+ * 47 segments in a maximum-size UDP packet.
+ *
+ * In the Tx phase, packets are annotated with which buffers have been
+ * acked.
+ */
+#define RXRPC_RXTX_BUFF_SIZE 64
+#define RXRPC_RXTX_BUFF_MASK (RXRPC_RXTX_BUFF_SIZE - 1)
+#define RXRPC_INIT_RX_WINDOW_SIZE 32
+ struct sk_buff **rxtx_buffer;
+ u8 *rxtx_annotations;
+#define RXRPC_TX_ANNO_ACK 0
+#define RXRPC_TX_ANNO_UNACK 1
+#define RXRPC_TX_ANNO_NAK 2
+#define RXRPC_TX_ANNO_RETRANS 3
+#define RXRPC_TX_ANNO_MASK 0x03
+#define RXRPC_TX_ANNO_LAST 0x04
+#define RXRPC_TX_ANNO_RESENT 0x08
+
+#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */
+#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
+#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
+ rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
+ * not hard-ACK'd packet follows this.
+ */
+ rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
+
+ /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
+ * is fixed, we keep these numbers in terms of segments (i.e. DATA
+ * packets) rather than bytes.
+ */
+#define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN
+ u8 cong_cwnd; /* Congestion window size */
+ u8 cong_extra; /* Extra to send for congestion management */
+ u8 cong_ssthresh; /* Slow-start threshold */
+ enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */
+ u8 cong_dup_acks; /* Count of ACKs showing missing packets */
+ u8 cong_cumul_acks; /* Cumulative ACK count */
+ ktime_t cong_tstamp; /* Last time cwnd was changed */
+
+ rxrpc_seq_t rx_hard_ack; /* Dead slot in buffer; the first received but not
+ * consumed packet follows this.
*/
+ rxrpc_seq_t rx_top; /* Highest Rx slot allocated. */
+ rxrpc_seq_t rx_expect_next; /* Expected next packet sequence number */
+ u8 rx_winsize; /* Size of Rx window */
+ u8 tx_winsize; /* Maximum size of Tx window */
+ bool tx_phase; /* T if transmission phase, F if receive phase */
+ u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
/* receive-phase ACK management */
- rxrpc_seq_t rx_data_expect; /* next data seq ID expected to be received */
- rxrpc_seq_t rx_data_post; /* next data seq ID expected to be posted */
- rxrpc_seq_t rx_data_recv; /* last data seq ID encountered by recvmsg */
- rxrpc_seq_t rx_data_eaten; /* last data seq ID consumed by recvmsg */
- rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */
- rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */
- rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
u8 ackr_reason; /* reason to ACK */
u16 ackr_skew; /* skew on packet being ACK'd */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
- atomic_t ackr_not_idle; /* number of packets in Rx queue */
+ rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
+ rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
+ rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
+ rxrpc_serial_t ackr_ping; /* Last ping sent */
+ ktime_t ackr_ping_time; /* Time last ping sent */
- /* received packet records, 1 bit per record */
-#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
- unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
+ /* transmission-phase ACK management */
+ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
+ rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
+ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
};
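The annotation bytes above pack a small state value in the low bits and flag bits above it; a standalone sketch exercising the same masks (the values mirror the #defines above):

#include <stdio.h>

#define TX_ANNO_MASK	0x03
#define TX_ANNO_LAST	0x04
#define TX_ANNO_RESENT	0x08

#define RX_ANNO_JUMBO	0x3f	/* jumbo subpacket number + 1, or 0 */
#define RX_ANNO_JLAST	0x40
#define RX_ANNO_VERIFIED 0x80

int main(void)
{
	unsigned char tx = 2 | TX_ANNO_LAST;		/* NAK'd, last in Tx */
	unsigned char rx = 3 | RX_ANNO_VERIFIED;	/* jumbo subpacket 2 */

	printf("tx: state=%u last=%d resent=%d\n",
	       tx & TX_ANNO_MASK, !!(tx & TX_ANNO_LAST),
	       !!(tx & TX_ANNO_RESENT));
	printf("rx: jumbo=%u jlast=%d verified=%d\n",
	       rx & RX_ANNO_JUMBO, !!(rx & RX_ANNO_JLAST),
	       !!(rx & RX_ANNO_VERIFIED));
	return 0;
}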
+/*
+ * Summary of a new ACK and the changes it made to the Tx buffer packet states.
+ */
+struct rxrpc_ack_summary {
+ u8 ack_reason;
+ u8 nr_acks; /* Number of ACKs in packet */
+ u8 nr_nacks; /* Number of NACKs in packet */
+ u8 nr_new_acks; /* Number of new ACKs in packet */
+ u8 nr_new_nacks; /* Number of new NACKs in packet */
+ u8 nr_rot_new_acks; /* Number of rotated new ACKs */
+ bool new_low_nack; /* T if new low NACK found */
+ bool retrans_timeo; /* T if reTx due to timeout happened */
+ u8 flight_size; /* Number of unreceived transmissions */
+ /* Place to stash values for tracing */
+ enum rxrpc_congest_mode mode:8;
+ u8 cwnd;
+ u8 ssthresh;
+ u8 dup_acks;
+ u8 cumulative_acks;
+};
+
+enum rxrpc_skb_trace {
+ rxrpc_skb_rx_cleaned,
+ rxrpc_skb_rx_freed,
+ rxrpc_skb_rx_got,
+ rxrpc_skb_rx_lost,
+ rxrpc_skb_rx_received,
+ rxrpc_skb_rx_rotated,
+ rxrpc_skb_rx_purged,
+ rxrpc_skb_rx_seen,
+ rxrpc_skb_tx_cleaned,
+ rxrpc_skb_tx_freed,
+ rxrpc_skb_tx_got,
+ rxrpc_skb_tx_lost,
+ rxrpc_skb_tx_new,
+ rxrpc_skb_tx_rotated,
+ rxrpc_skb_tx_seen,
+ rxrpc_skb__nr_trace
+};
+
+extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7];
+
+enum rxrpc_conn_trace {
+ rxrpc_conn_new_client,
+ rxrpc_conn_new_service,
+ rxrpc_conn_queued,
+ rxrpc_conn_seen,
+ rxrpc_conn_got,
+ rxrpc_conn_put_client,
+ rxrpc_conn_put_service,
+ rxrpc_conn__nr_trace
+};
+
+extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4];
+
+enum rxrpc_client_trace {
+ rxrpc_client_activate_chans,
+ rxrpc_client_alloc,
+ rxrpc_client_chan_activate,
+ rxrpc_client_chan_disconnect,
+ rxrpc_client_chan_pass,
+ rxrpc_client_chan_unstarted,
+ rxrpc_client_cleanup,
+ rxrpc_client_count,
+ rxrpc_client_discard,
+ rxrpc_client_duplicate,
+ rxrpc_client_exposed,
+ rxrpc_client_replace,
+ rxrpc_client_to_active,
+ rxrpc_client_to_culled,
+ rxrpc_client_to_idle,
+ rxrpc_client_to_inactive,
+ rxrpc_client_to_waiting,
+ rxrpc_client_uncount,
+ rxrpc_client__nr_trace
+};
+
+extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7];
+extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5];
+
+enum rxrpc_call_trace {
+ rxrpc_call_new_client,
+ rxrpc_call_new_service,
+ rxrpc_call_queued,
+ rxrpc_call_queued_ref,
+ rxrpc_call_seen,
+ rxrpc_call_connected,
+ rxrpc_call_release,
+ rxrpc_call_got,
+ rxrpc_call_got_userid,
+ rxrpc_call_got_kernel,
+ rxrpc_call_put,
+ rxrpc_call_put_userid,
+ rxrpc_call_put_kernel,
+ rxrpc_call_put_noqueue,
+ rxrpc_call_error,
+ rxrpc_call__nr_trace
+};
+
+extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];
+
+enum rxrpc_transmit_trace {
+ rxrpc_transmit_wait,
+ rxrpc_transmit_queue,
+ rxrpc_transmit_queue_last,
+ rxrpc_transmit_rotate,
+ rxrpc_transmit_rotate_last,
+ rxrpc_transmit_await_reply,
+ rxrpc_transmit_end,
+ rxrpc_transmit__nr_trace
+};
+
+extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4];
+
+enum rxrpc_receive_trace {
+ rxrpc_receive_incoming,
+ rxrpc_receive_queue,
+ rxrpc_receive_queue_last,
+ rxrpc_receive_front,
+ rxrpc_receive_rotate,
+ rxrpc_receive_end,
+ rxrpc_receive__nr_trace
+};
+
+extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4];
+
+enum rxrpc_recvmsg_trace {
+ rxrpc_recvmsg_enter,
+ rxrpc_recvmsg_wait,
+ rxrpc_recvmsg_dequeue,
+ rxrpc_recvmsg_hole,
+ rxrpc_recvmsg_next,
+ rxrpc_recvmsg_cont,
+ rxrpc_recvmsg_full,
+ rxrpc_recvmsg_data_return,
+ rxrpc_recvmsg_terminal,
+ rxrpc_recvmsg_to_be_accepted,
+ rxrpc_recvmsg_return,
+ rxrpc_recvmsg__nr_trace
+};
+
+extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5];
+
+enum rxrpc_rtt_tx_trace {
+ rxrpc_rtt_tx_ping,
+ rxrpc_rtt_tx_data,
+ rxrpc_rtt_tx__nr_trace
+};
+
+extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5];
+
+enum rxrpc_rtt_rx_trace {
+ rxrpc_rtt_rx_ping_response,
+ rxrpc_rtt_rx_requested_ack,
+ rxrpc_rtt_rx__nr_trace
+};
+
+extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];
+
+enum rxrpc_timer_trace {
+ rxrpc_timer_begin,
+ rxrpc_timer_init_for_reply,
+ rxrpc_timer_expired,
+ rxrpc_timer_set_for_ack,
+ rxrpc_timer_set_for_resend,
+ rxrpc_timer_set_for_send,
+ rxrpc_timer__nr_trace
+};
+
+extern const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8];
+
+enum rxrpc_propose_ack_trace {
+ rxrpc_propose_ack_client_tx_end,
+ rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_lost_ack,
+ rxrpc_propose_ack_ping_for_lost_reply,
+ rxrpc_propose_ack_ping_for_params,
+ rxrpc_propose_ack_respond_to_ack,
+ rxrpc_propose_ack_respond_to_ping,
+ rxrpc_propose_ack_retry_tx,
+ rxrpc_propose_ack_rotate_rx,
+ rxrpc_propose_ack_terminal_ack,
+ rxrpc_propose_ack__nr_trace
+};
+
+enum rxrpc_propose_ack_outcome {
+ rxrpc_propose_ack_use,
+ rxrpc_propose_ack_update,
+ rxrpc_propose_ack_subsume,
+ rxrpc_propose_ack__nr_outcomes
+};
+
+extern const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8];
+extern const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes];
+
+enum rxrpc_congest_change {
+ rxrpc_cong_begin_retransmission,
+ rxrpc_cong_cleared_nacks,
+ rxrpc_cong_new_low_nack,
+ rxrpc_cong_no_change,
+ rxrpc_cong_progress,
+ rxrpc_cong_retransmit_again,
+ rxrpc_cong_rtt_window_end,
+ rxrpc_cong_saw_nack,
+ rxrpc_congest__nr_change
+};
+
+extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10];
+extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9];
+
+extern const char *const rxrpc_pkts[];
+extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
+
#include <trace/events/rxrpc.h>
/*
* af_rxrpc.c
*/
-extern atomic_t rxrpc_n_skbs;
+extern atomic_t rxrpc_n_tx_skbs, rxrpc_n_rx_skbs;
extern u32 rxrpc_epoch;
extern atomic_t rxrpc_debug_id;
extern struct workqueue_struct *rxrpc_workqueue;
@@ -521,6 +796,11 @@ extern struct workqueue_struct *rxrpc_workqueue;
/*
* call_accept.c
*/
+int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
+void rxrpc_discard_prealloc(struct rxrpc_sock *);
+struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+ struct rxrpc_connection *,
+ struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
rxrpc_notify_rx_t);
@@ -529,8 +809,9 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
-void __rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool);
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool);
+void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+ enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
/*
@@ -539,26 +820,26 @@ void rxrpc_process_call(struct work_struct *);
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
-extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
+struct rxrpc_call *rxrpc_alloc_call(gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
unsigned long, gfp_t);
-struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
- struct rxrpc_connection *,
- struct sk_buff *);
-void rxrpc_release_call(struct rxrpc_call *);
+void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
+ struct sk_buff *);
+void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
+bool __rxrpc_queue_call(struct rxrpc_call *);
+bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
-void rxrpc_get_call(struct rxrpc_call *);
-void rxrpc_put_call(struct rxrpc_call *);
-void rxrpc_get_call_for_skb(struct rxrpc_call *, struct sk_buff *);
-void rxrpc_put_call_for_skb(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
+void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
+void rxrpc_cleanup_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
@@ -584,6 +865,7 @@ static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
call->error = error;
call->completion = compl;
call->state = RXRPC_CALL_COMPLETE;
+ wake_up(&call->waitq);
return true;
}
return false;
@@ -594,7 +876,7 @@ static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
u32 abort_code,
int error)
{
- int ret;
+ bool ret;
write_lock_bh(&call->state_lock);
ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
@@ -605,40 +887,41 @@ static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
/*
* Record that a call successfully completed.
*/
-static inline void __rxrpc_call_completed(struct rxrpc_call *call)
+static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
{
- __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
+ return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
}
-static inline void rxrpc_call_completed(struct rxrpc_call *call)
+static inline bool rxrpc_call_completed(struct rxrpc_call *call)
{
+ bool ret;
+
write_lock_bh(&call->state_lock);
- __rxrpc_call_completed(call);
+ ret = __rxrpc_call_completed(call);
write_unlock_bh(&call->state_lock);
+ return ret;
}
/*
* Record that a call is locally aborted.
*/
-static inline bool __rxrpc_abort_call(struct rxrpc_call *call,
+static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
+ rxrpc_seq_t seq,
u32 abort_code, int error)
{
- if (__rxrpc_set_call_completion(call,
- RXRPC_CALL_LOCALLY_ABORTED,
- abort_code, error)) {
- set_bit(RXRPC_CALL_EV_ABORT, &call->events);
- return true;
- }
- return false;
+ trace_rxrpc_abort(why, call->cid, call->call_id, seq,
+ abort_code, error);
+ return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
+ abort_code, error);
}
-static inline bool rxrpc_abort_call(struct rxrpc_call *call,
- u32 abort_code, int error)
+static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
+ rxrpc_seq_t seq, u32 abort_code, int error)
{
bool ret;
write_lock_bh(&call->state_lock);
- ret = __rxrpc_abort_call(call, abort_code, error);
+ ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
write_unlock_bh(&call->state_lock);
return ret;
}
@@ -664,8 +947,6 @@ void __exit rxrpc_destroy_all_client_connections(void);
* conn_event.c
*/
void rxrpc_process_connection(struct work_struct *);
-void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-void rxrpc_reject_packets(struct rxrpc_local *);
/*
* conn_object.c
@@ -682,7 +963,11 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
-void __rxrpc_put_connection(struct rxrpc_connection *);
+bool rxrpc_queue_conn(struct rxrpc_connection *);
+void rxrpc_see_connection(struct rxrpc_connection *);
+void rxrpc_get_connection(struct rxrpc_connection *);
+struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
+void rxrpc_put_service_conn(struct rxrpc_connection *);
void __exit rxrpc_destroy_all_connections(void);
static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
@@ -695,39 +980,15 @@ static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
return !rxrpc_conn_is_client(conn);
}
-static inline void rxrpc_get_connection(struct rxrpc_connection *conn)
-{
- atomic_inc(&conn->usage);
-}
-
-static inline
-struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
-{
- return atomic_inc_not_zero(&conn->usage) ? conn : NULL;
-}
-
static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
{
if (!conn)
return;
- if (rxrpc_conn_is_client(conn)) {
- if (atomic_dec_and_test(&conn->usage))
- rxrpc_put_client_conn(conn);
- } else {
- if (atomic_dec_return(&conn->usage) == 1)
- __rxrpc_put_connection(conn);
- }
-}
-
-
-static inline bool rxrpc_queue_conn(struct rxrpc_connection *conn)
-{
- if (!rxrpc_get_connection_maybe(conn))
- return false;
- if (!rxrpc_queue_work(&conn->processor))
- rxrpc_put_connection(conn);
- return true;
+ if (rxrpc_conn_is_client(conn))
+ rxrpc_put_client_conn(conn);
+ else
+ rxrpc_put_service_conn(conn);
}
/*
@@ -735,17 +996,14 @@ static inline bool rxrpc_queue_conn(struct rxrpc_connection *conn)
*/
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
struct sk_buff *);
-struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *,
- struct sockaddr_rxrpc *,
- struct sk_buff *);
+struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t);
+void rxrpc_new_incoming_connection(struct rxrpc_connection *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
* input.c
*/
void rxrpc_data_ready(struct sock *);
-int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
-void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
/*
* insecure.c
@@ -809,21 +1067,22 @@ extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
extern unsigned int rxrpc_resend_timeout;
-extern const char *const rxrpc_pkts[];
extern const s8 rxrpc_ack_priority[];
-extern const char *rxrpc_acks(u8 reason);
-
/*
* output.c
*/
-int rxrpc_send_data_packet(struct rxrpc_connection *, struct sk_buff *);
+int rxrpc_send_call_packet(struct rxrpc_call *, u8);
+int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_reject_packets(struct rxrpc_local *);
/*
* peer_event.c
*/
void rxrpc_error_report(struct sock *);
void rxrpc_peer_error_distributor(struct work_struct *);
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+ rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
/*
* peer_object.c
@@ -833,6 +1092,8 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
+struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
+ struct rxrpc_peer *);
static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
{
@@ -862,7 +1123,7 @@ extern const struct file_operations rxrpc_connection_seq_fops;
/*
* recvmsg.c
*/
-void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
/*
@@ -879,7 +1140,7 @@ int __init rxrpc_init_security(void);
void rxrpc_exit_security(void);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-
+
/*
* sendmsg.c
*/
@@ -890,10 +1151,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
*/
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_packet_destructor(struct sk_buff *);
-void rxrpc_new_skb(struct sk_buff *);
-void rxrpc_see_skb(struct sk_buff *);
-void rxrpc_get_skb(struct sk_buff *);
-void rxrpc_free_skb(struct sk_buff *);
+void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
+void rxrpc_lose_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);
/*
@@ -912,6 +1174,23 @@ static inline void rxrpc_sysctl_exit(void) {}
*/
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);
+static inline bool before(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) < 0;
+}
+static inline bool before_eq(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) <= 0;
+}
+static inline bool after(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) > 0;
+}
+static inline bool after_eq(u32 seq1, u32 seq2)
+{
+ return (s32)(seq1 - seq2) >= 0;
+}
+
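These helpers use the standard wrapping-sequence trick: subtract in u32 and compare the result as s32, which gives the right ordering across the 2^32 wrap so long as the two values are within 2^31 of each other. A quick userspace check of that property:

#include <assert.h>
#include <stdint.h>

static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	assert(seq_before(1, 2));
	assert(seq_before(0xfffffffe, 2));	/* still true across the wrap */
	assert(!seq_before(2, 0xfffffffe));
	return 0;
}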
/*
* debug tracing
*/
@@ -994,11 +1273,12 @@ do { \
#define ASSERTCMP(X, OP, Y) \
do { \
- unsigned long _x = (unsigned long)(X); \
- unsigned long _y = (unsigned long)(Y); \
+ __typeof__(X) _x = (X); \
+ __typeof__(Y) _y = (__typeof__(X))(Y); \
if (unlikely(!(_x OP _y))) { \
- pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
- _x, _x, #OP, _y, _y); \
+ pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
+ (unsigned long)_x, (unsigned long)_x, #OP, \
+ (unsigned long)_y, (unsigned long)_y); \
BUG(); \
} \
} while (0)
@@ -1013,11 +1293,12 @@ do { \
#define ASSERTIFCMP(C, X, OP, Y) \
do { \
- unsigned long _x = (unsigned long)(X); \
- unsigned long _y = (unsigned long)(Y); \
+ __typeof__(X) _x = (X); \
+ __typeof__(Y) _y = (__typeof__(X))(Y); \
if (unlikely((C) && !(_x OP _y))) { \
pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
- _x, _x, #OP, _y, _y); \
+ (unsigned long)_x, (unsigned long)_x, #OP, \
+ (unsigned long)_y, (unsigned long)_y); \
BUG(); \
} \
} while (0)
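The reworked macros keep _x and _y in X's own type rather than squashing both operands through unsigned long before the comparison, so 64-bit and pointer comparands are no longer truncated on 32-bit builds. A userspace model of the same idea (GCC-style __typeof__ assumed; BUG() replaced by a printf for illustration):

#include <stdio.h>
#include <stdint.h>

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(X) _y = (__typeof__(X))(Y);				\
	if (!(_x OP _y))						\
		printf("Assertion failed: %s\n", #X " " #OP " " #Y);	\
} while (0)

int main(void)
{
	uint64_t big = 0x100000000ULL;	/* would truncate to 0 in a 32-bit long */

	ASSERTCMP(big, >, 0);		/* compared as uint64_t - passes */
	return 0;
}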
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 68a439e30df1..a8d39d7cf42c 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -20,265 +20,409 @@
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
+#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"
/*
- * generate a connection-level abort
+ * Preallocate a single service call, connection and peer and, if possible,
+ * give them a user ID and attach the user's side of the ID to them.
*/
-static int rxrpc_busy(struct rxrpc_local *local, struct sockaddr_rxrpc *srx,
- struct rxrpc_wire_header *whdr)
+static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
+ struct rxrpc_backlog *b,
+ rxrpc_notify_rx_t notify_rx,
+ rxrpc_user_attach_call_t user_attach_call,
+ unsigned long user_call_ID, gfp_t gfp)
{
- struct msghdr msg;
- struct kvec iov[1];
- size_t len;
- int ret;
+ const void *here = __builtin_return_address(0);
+ struct rxrpc_call *call;
+ int max, tmp;
+ unsigned int size = RXRPC_BACKLOG_MAX;
+ unsigned int head, tail, call_head, call_tail;
+
+ max = rx->sk.sk_max_ack_backlog;
+ tmp = rx->sk.sk_ack_backlog;
+ if (tmp >= max) {
+ _leave(" = -ENOBUFS [full %u]", max);
+ return -ENOBUFS;
+ }
+ max -= tmp;
+
+ /* We don't need more conns and peers than we have calls, but on the
+ * other hand, we shouldn't ever use more peers than conns or conns
+ * than calls.
+ */
+ call_head = b->call_backlog_head;
+ call_tail = READ_ONCE(b->call_backlog_tail);
+ tmp = CIRC_CNT(call_head, call_tail, size);
+ if (tmp >= max) {
+ _leave(" = -ENOBUFS [enough %u]", tmp);
+ return -ENOBUFS;
+ }
+ max = tmp + 1;
+
+ head = b->peer_backlog_head;
+ tail = READ_ONCE(b->peer_backlog_tail);
+ if (CIRC_CNT(head, tail, size) < max) {
+ struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
+ if (!peer)
+ return -ENOMEM;
+ b->peer_backlog[head] = peer;
+ smp_store_release(&b->peer_backlog_head,
+ (head + 1) & (size - 1));
+ }
- _enter("%d,,", local->debug_id);
+ head = b->conn_backlog_head;
+ tail = READ_ONCE(b->conn_backlog_tail);
+ if (CIRC_CNT(head, tail, size) < max) {
+ struct rxrpc_connection *conn;
- whdr->type = RXRPC_PACKET_TYPE_BUSY;
- whdr->serial = htonl(1);
+ conn = rxrpc_prealloc_service_connection(gfp);
+ if (!conn)
+ return -ENOMEM;
+ b->conn_backlog[head] = conn;
+ smp_store_release(&b->conn_backlog_head,
+ (head + 1) & (size - 1));
- msg.msg_name = &srx->transport.sin;
- msg.msg_namelen = sizeof(srx->transport.sin);
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ atomic_read(&conn->usage), here);
+ }
- iov[0].iov_base = whdr;
- iov[0].iov_len = sizeof(*whdr);
+ /* Now it gets complicated, because calls get registered with the
+ * socket here, particularly if a user ID is preassigned by the user.
+ */
+ call = rxrpc_alloc_call(gfp);
+ if (!call)
+ return -ENOMEM;
+ call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
+ call->state = RXRPC_CALL_SERVER_PREALLOC;
- len = iov[0].iov_len;
+ trace_rxrpc_call(call, rxrpc_call_new_service,
+ atomic_read(&call->usage),
+ here, (const void *)user_call_ID);
- _proto("Tx BUSY %%1");
+ write_lock(&rx->call_lock);
+ if (user_attach_call) {
+ struct rxrpc_call *xcall;
+ struct rb_node *parent, **pp;
+
+ /* Check the user ID isn't already in use */
+ pp = &rx->calls.rb_node;
+ parent = NULL;
+ while (*pp) {
+ parent = *pp;
+ xcall = rb_entry(parent, struct rxrpc_call, sock_node);
+ if (user_call_ID < xcall->user_call_ID)
+ pp = &(*pp)->rb_left;
+ else if (user_call_ID > xcall->user_call_ID)
+ pp = &(*pp)->rb_right;
+ else
+ goto id_in_use;
+ }
- ret = kernel_sendmsg(local->socket, &msg, iov, 1, len);
- if (ret < 0) {
- _leave(" = -EAGAIN [sendmsg failed: %d]", ret);
- return -EAGAIN;
+ call->user_call_ID = user_call_ID;
+ call->notify_rx = notify_rx;
+ rxrpc_get_call(call, rxrpc_call_got_kernel);
+ user_attach_call(call, user_call_ID);
+ rxrpc_get_call(call, rxrpc_call_got_userid);
+ rb_link_node(&call->sock_node, parent, pp);
+ rb_insert_color(&call->sock_node, &rx->calls);
+ set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
}
- _leave(" = 0");
+ list_add(&call->sock_link, &rx->sock_calls);
+
+ write_unlock(&rx->call_lock);
+
+ write_lock(&rxrpc_call_lock);
+ list_add_tail(&call->link, &rxrpc_calls);
+ write_unlock(&rxrpc_call_lock);
+
+ b->call_backlog[call_head] = call;
+ smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
+ _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
return 0;
+
+id_in_use:
+ write_unlock(&rx->call_lock);
+ rxrpc_cleanup_call(call);
+ _leave(" = -EBADSLT");
+ return -EBADSLT;
}
/*
- * accept an incoming call that needs peer, transport and/or connection setting
- * up
+ * Preallocate sufficient service connections, calls and peers to cover the
+ * entire backlog of a socket. When a new call comes in, if we don't have
+ * enough of each available, the call gets rejected as busy or ignored.
+ *
+ * The backlog is replenished when a call is accepted or rejected.
*/
-static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
- struct rxrpc_sock *rx,
- struct sk_buff *skb,
- struct sockaddr_rxrpc *srx)
+int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
- struct rxrpc_connection *conn;
- struct rxrpc_skb_priv *sp, *nsp;
- struct rxrpc_call *call;
- struct sk_buff *notification;
- int ret;
+ struct rxrpc_backlog *b = rx->backlog;
- _enter("");
+ if (!b) {
+ b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
+ if (!b)
+ return -ENOMEM;
+ rx->backlog = b;
+ }
+
+ if (rx->discard_new_call)
+ return 0;
+
+ while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
+ ;
- sp = rxrpc_skb(skb);
+ return 0;
+}
- /* get a notification message to send to the server app */
- notification = alloc_skb(0, GFP_NOFS);
- if (!notification) {
- _debug("no memory");
- ret = -ENOMEM;
- goto error_nofree;
+/*
+ * Discard the preallocation on a service.
+ */
+void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+{
+ struct rxrpc_backlog *b = rx->backlog;
+ unsigned int size = RXRPC_BACKLOG_MAX, head, tail;
+
+ if (!b)
+ return;
+ rx->backlog = NULL;
+
+ /* Make sure that there aren't any incoming calls in progress before we
+ * clear the preallocation buffers. Taking and dropping incoming_lock
+ * acts as a barrier against rxrpc_new_incoming_call(), which holds it
+ * whilst consuming from these buffers.
+ */
+ spin_lock_bh(&rx->incoming_lock);
+ spin_unlock_bh(&rx->incoming_lock);
+
+ head = b->peer_backlog_head;
+ tail = b->peer_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_peer *peer = b->peer_backlog[tail];
+ kfree(peer);
+ tail = (tail + 1) & (size - 1);
}
- rxrpc_new_skb(notification);
- notification->mark = RXRPC_SKB_MARK_NEW_CALL;
-
- conn = rxrpc_incoming_connection(local, srx, skb);
- if (IS_ERR(conn)) {
- _debug("no conn");
- ret = PTR_ERR(conn);
- goto error;
+
+ head = b->conn_backlog_head;
+ tail = b->conn_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_connection *conn = b->conn_backlog[tail];
+ write_lock(&rxrpc_connection_lock);
+ list_del(&conn->link);
+ list_del(&conn->proc_link);
+ write_unlock(&rxrpc_connection_lock);
+ kfree(conn);
+ tail = (tail + 1) & (size - 1);
}
- call = rxrpc_incoming_call(rx, conn, skb);
- rxrpc_put_connection(conn);
- if (IS_ERR(call)) {
- _debug("no call");
- ret = PTR_ERR(call);
- goto error;
+ head = b->call_backlog_head;
+ tail = b->call_backlog_tail;
+ while (CIRC_CNT(head, tail, size) > 0) {
+ struct rxrpc_call *call = b->call_backlog[tail];
+ if (rx->discard_new_call) {
+ _debug("discard %lx", call->user_call_ID);
+ rx->discard_new_call(call, call->user_call_ID);
+ rxrpc_put_call(call, rxrpc_call_put_kernel);
+ }
+ rxrpc_call_completed(call);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ tail = (tail + 1) & (size - 1);
}
- /* attach the call to the socket */
- read_lock_bh(&local->services_lock);
- if (rx->sk.sk_state == RXRPC_CLOSE)
- goto invalid_service;
+ kfree(b);
+}
- write_lock(&rx->call_lock);
- if (!test_and_set_bit(RXRPC_CALL_INIT_ACCEPT, &call->flags)) {
- rxrpc_get_call(call);
-
- spin_lock(&call->conn->state_lock);
- if (sp->hdr.securityIndex > 0 &&
- call->conn->state == RXRPC_CONN_SERVICE_UNSECURED) {
- _debug("await conn sec");
- list_add_tail(&call->accept_link, &rx->secureq);
- call->conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
- set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
- rxrpc_queue_conn(call->conn);
- } else {
- _debug("conn ready");
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
- list_add_tail(&call->accept_link, &rx->acceptq);
- rxrpc_get_call_for_skb(call, notification);
- nsp = rxrpc_skb(notification);
- nsp->call = call;
-
- ASSERTCMP(atomic_read(&call->usage), >=, 3);
-
- _debug("notify");
- spin_lock(&call->lock);
- ret = rxrpc_queue_rcv_skb(call, notification, true,
- false);
- spin_unlock(&call->lock);
- notification = NULL;
- BUG_ON(ret < 0);
+/*
+ * Allocate a new incoming call from the prealloc pool, along with a connection
+ * and a peer as necessary.
+ */
+static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
+ struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ struct rxrpc_backlog *b = rx->backlog;
+ struct rxrpc_peer *peer, *xpeer;
+ struct rxrpc_call *call;
+ unsigned short call_head, conn_head, peer_head;
+ unsigned short call_tail, conn_tail, peer_tail;
+ unsigned short call_count, conn_count;
+
+ /* For the spares, #peers >= #conns >= #calls must hold true. */
+ call_head = smp_load_acquire(&b->call_backlog_head);
+ call_tail = b->call_backlog_tail;
+ call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
+ conn_head = smp_load_acquire(&b->conn_backlog_head);
+ conn_tail = b->conn_backlog_tail;
+ conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
+ ASSERTCMP(conn_count, >=, call_count);
+ peer_head = smp_load_acquire(&b->peer_backlog_head);
+ peer_tail = b->peer_backlog_tail;
+ ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
+ conn_count);
+
+ if (call_count == 0)
+ return NULL;
+
+ if (!conn) {
+ /* No connection. We're going to need a peer to start off
+ * with. If one doesn't yet exist, use a spare from the
+ * preallocation set. We dump the address into the spare in
+ * anticipation - and to save on stack space.
+ */
+ xpeer = b->peer_backlog[peer_tail];
+ if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
+ return NULL;
+
+ peer = rxrpc_lookup_incoming_peer(local, xpeer);
+ if (peer == xpeer) {
+ b->peer_backlog[peer_tail] = NULL;
+ smp_store_release(&b->peer_backlog_tail,
+ (peer_tail + 1) &
+ (RXRPC_BACKLOG_MAX - 1));
}
- spin_unlock(&call->conn->state_lock);
- _debug("queued");
+ /* Now allocate and set up the connection */
+ conn = b->conn_backlog[conn_tail];
+ b->conn_backlog[conn_tail] = NULL;
+ smp_store_release(&b->conn_backlog_tail,
+ (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
+ rxrpc_get_local(local);
+ conn->params.local = local;
+ conn->params.peer = peer;
+ rxrpc_see_connection(conn);
+ rxrpc_new_incoming_connection(conn, skb);
+ } else {
+ rxrpc_get_connection(conn);
}
- write_unlock(&rx->call_lock);
- _debug("process");
- rxrpc_fast_process_packet(call, skb);
+ /* And now we can allocate and set up a new call */
+ call = b->call_backlog[call_tail];
+ b->call_backlog[call_tail] = NULL;
+ smp_store_release(&b->call_backlog_tail,
+ (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
- _debug("done");
- read_unlock_bh(&local->services_lock);
- rxrpc_free_skb(notification);
- rxrpc_put_call(call);
- _leave(" = 0");
- return 0;
-
-invalid_service:
- _debug("invalid");
- read_unlock_bh(&local->services_lock);
-
- read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
- rxrpc_get_call(call);
- rxrpc_queue_call(call);
- }
- read_unlock_bh(&call->state_lock);
- rxrpc_put_call(call);
- ret = -ECONNREFUSED;
-error:
- rxrpc_free_skb(notification);
-error_nofree:
- _leave(" = %d", ret);
- return ret;
+ rxrpc_see_call(call);
+ call->conn = conn;
+ call->peer = rxrpc_get_peer(conn->params.peer);
+ return call;
}
/*
- * accept incoming calls that need peer, transport and/or connection setting up
- * - the packets we get are all incoming client DATA packets that have seq == 1
+ * Set up a new incoming call. Called in BH context with the RCU read lock
+ * held.
+ *
+ * If this is for a kernel service, when we allocate the call, it will have
+ * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
+ * retainer ref obtained from the backlog buffer. Prealloc calls for userspace
+ * services only have the ref from the backlog buffer. We want to pass this
+ * ref to non-BH context to dispose of.
+ *
+ * If we want to report an error, we mark the skb with the packet type and
+ * abort code and return NULL.
*/
-void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
+struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct rxrpc_connection *conn,
+ struct sk_buff *skb)
{
- struct rxrpc_skb_priv *sp;
- struct sockaddr_rxrpc srx;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_sock *rx;
- struct rxrpc_wire_header whdr;
- struct sk_buff *skb;
- int ret;
-
- _enter("%d", local->debug_id);
+ struct rxrpc_call *call;
- skb = skb_dequeue(&local->accept_queue);
- if (!skb) {
- _leave("\n");
- return;
- }
+ _enter("");
- _net("incoming call skb %p", skb);
-
- rxrpc_see_skb(skb);
- sp = rxrpc_skb(skb);
-
- /* Set up a response packet header in case we need it */
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.seq = htonl(sp->hdr.seq);
- whdr.serial = 0;
- whdr.flags = 0;
- whdr.type = 0;
- whdr.userStatus = 0;
- whdr.securityIndex = sp->hdr.securityIndex;
- whdr._rsvd = 0;
- whdr.serviceId = htons(sp->hdr.serviceId);
-
- if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
- goto drop;
-
- /* get the socket providing the service */
- read_lock_bh(&local->services_lock);
- list_for_each_entry(rx, &local->services, listen_link) {
- if (rx->srx.srx_service == sp->hdr.serviceId &&
- rx->sk.sk_state != RXRPC_CLOSE)
+ /* Get the socket providing the service */
+ hlist_for_each_entry_rcu_bh(rx, &local->services, listen_link) {
+ if (rx->srx.srx_service == sp->hdr.serviceId)
goto found_service;
}
- read_unlock_bh(&local->services_lock);
- goto invalid_service;
+
+ trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EOPNOTSUPP);
+ skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+ _leave(" = NULL [service]");
+ return NULL;
found_service:
- _debug("found service %hd", rx->srx.srx_service);
- if (sk_acceptq_is_full(&rx->sk))
- goto backlog_full;
- sk_acceptq_added(&rx->sk);
- sock_hold(&rx->sk);
- read_unlock_bh(&local->services_lock);
-
- ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
- if (ret < 0)
- sk_acceptq_removed(&rx->sk);
- sock_put(&rx->sk);
- switch (ret) {
- case -ECONNRESET: /* old calls are ignored */
- case -ECONNABORTED: /* aborted calls are reaborted or ignored */
- case 0:
- return;
- case -ECONNREFUSED:
- goto invalid_service;
- case -EBUSY:
- goto busy;
- case -EKEYREJECTED:
- goto security_mismatch;
+ spin_lock(&rx->incoming_lock);
+ if (rx->sk.sk_state == RXRPC_CLOSE) {
+ trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
+ sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
+ skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+ _leave(" = NULL [close]");
+ call = NULL;
+ goto out;
+ }
+
+ call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+ if (!call) {
+ skb->mark = RXRPC_SKB_MARK_BUSY;
+ _leave(" = NULL [busy]");
+ call = NULL;
+ goto out;
+ }
+
+ trace_rxrpc_receive(call, rxrpc_receive_incoming,
+ sp->hdr.serial, sp->hdr.seq);
+
+ /* Make the call live. */
+ rxrpc_incoming_call(rx, call, skb);
+ conn = call->conn;
+
+ if (rx->notify_new_call)
+ rx->notify_new_call(&rx->sk, call, call->user_call_ID);
+ else
+ sk_acceptq_added(&rx->sk);
+
+ spin_lock(&conn->state_lock);
+ switch (conn->state) {
+ case RXRPC_CONN_SERVICE_UNSECURED:
+ conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
+ set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
+ rxrpc_queue_conn(call->conn);
+ break;
+
+ case RXRPC_CONN_SERVICE:
+ write_lock(&call->state_lock);
+ if (rx->discard_new_call)
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+ else
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ write_unlock(&call->state_lock);
+ break;
+
+ case RXRPC_CONN_REMOTELY_ABORTED:
+ rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ conn->remote_abort, ECONNABORTED);
+ break;
+ case RXRPC_CONN_LOCALLY_ABORTED:
+ rxrpc_abort_call("CON", call, sp->hdr.seq,
+ conn->local_abort, ECONNABORTED);
+ break;
default:
BUG();
}
+ spin_unlock(&conn->state_lock);
-backlog_full:
- read_unlock_bh(&local->services_lock);
-busy:
- rxrpc_busy(local, &srx, &whdr);
- rxrpc_free_skb(skb);
- return;
+ if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
+ rxrpc_notify_socket(call);
-drop:
- rxrpc_free_skb(skb);
- return;
+ /* We have to discard the prealloc queue's ref here and rely on a
+ * combination of the RCU read lock and refs held either by the socket
+ * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
+ * service to prevent the call from being deallocated too early.
+ */
+ rxrpc_put_call(call, rxrpc_call_put);
-invalid_service:
- skb->priority = RX_INVALID_OPERATION;
- rxrpc_reject_packet(local, skb);
- return;
-
- /* can't change connection security type mid-flow */
-security_mismatch:
- skb->priority = RX_PROTOCOL_ERROR;
- rxrpc_reject_packet(local, skb);
- return;
+ _leave(" = %p{%d}", call, call->debug_id);
+out:
+ spin_unlock(&rx->incoming_lock);
+ return call;
}
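
The comment above encodes the lifetime rule for the new incoming-call path: the prealloc queue's reference is dropped once the call is published, and the call stays pinned only by the RCU read lock plus whichever holder (recvmsg queue, to-be-accepted queue, user-ID tree or the kernel service) still has a reference. A minimal standalone sketch of that hand-off, in plain C rather than kernel code, with illustrative names:

/* Model of the ref hand-off described above: the queue's reference is
 * dropped once the call is published, and the object survives only while
 * some other holder still has a reference.  Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct call {
	int refcount;
	int id;
};

static struct call *call_get(struct call *c) { c->refcount++; return c; }

static void call_put(struct call *c)
{
	if (--c->refcount == 0) {
		printf("call %d freed\n", c->id);
		free(c);
	}
}

int main(void)
{
	struct call *c = malloc(sizeof(*c));

	c->refcount = 1;	/* the prealloc queue's ref */
	c->id = 1;

	call_get(c);		/* publish: the socket's user-ID tree takes a ref */
	call_put(c);		/* drop the queue's ref - the object survives */
	call_put(c);		/* last holder releases - now it is freed */
	return 0;
}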
/*
@@ -299,12 +443,13 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
write_lock(&rx->call_lock);
- ret = -ENODATA;
- if (list_empty(&rx->acceptq))
- goto out;
+ if (list_empty(&rx->to_be_accepted)) {
+ write_unlock(&rx->call_lock);
+ _leave(" = -ENODATA [empty]");
+ return ERR_PTR(-ENODATA);
+ }
/* check the user ID isn't already in use */
- ret = -EBADSLT;
pp = &rx->calls.rb_node;
parent = NULL;
while (*pp) {
@@ -316,11 +461,14 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
else if (user_call_ID > call->user_call_ID)
pp = &(*pp)->rb_right;
else
- goto out;
+ goto id_in_use;
}
- /* dequeue the first call and check it's still valid */
- call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+ /* Dequeue the first call and check it's still valid. We gain
+ * responsibility for the queue's reference.
+ */
+ call = list_entry(rx->to_be_accepted.next,
+ struct rxrpc_call, accept_link);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
rxrpc_see_call(call);
@@ -333,9 +481,6 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_release;
- case RXRPC_CALL_DEAD:
- ret = -ETIME;
- goto out_discard;
default:
BUG();
}
@@ -343,33 +488,32 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
/* formalise the acceptance */
call->notify_rx = notify_rx;
call->user_call_ID = user_call_ID;
+ rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls);
if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
BUG();
- if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
- BUG();
- rxrpc_queue_call(call);
- rxrpc_get_call(call);
write_unlock_bh(&call->state_lock);
write_unlock(&rx->call_lock);
+ rxrpc_notify_socket(call);
+ rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %p{%d}", call, call->debug_id);
return call;
- /* if the call is already dying or dead, then we leave the socket's ref
- * on it to be released by rxrpc_dead_call_expired() as induced by
- * rxrpc_release_call() */
out_release:
_debug("release %p", call);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
-out_discard:
write_unlock_bh(&call->state_lock);
- _debug("discard %p", call);
-out:
write_unlock(&rx->call_lock);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ goto out;
+
+id_in_use:
+ ret = -EBADSLT;
+ write_unlock(&rx->call_lock);
+out:
+ rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %d", ret);
return ERR_PTR(ret);
}
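
The user-ID check above walks rx->calls, an rbtree keyed by user_call_ID, and bails out with -EBADSLT on a duplicate. A plain-C sketch of the same walk-then-insert shape, with an ordinary binary search tree standing in for the kernel rbtree API:

/* Walk a tree keyed by user_call_ID and refuse duplicates, as
 * rxrpc_accept_call() does with -EBADSLT.  Illustrative sketch only.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long user_call_ID;
	struct node *left, *right;
};

static int insert_id(struct node **root, unsigned long id)
{
	struct node **pp = root, *n;

	while (*pp) {
		if (id < (*pp)->user_call_ID)
			pp = &(*pp)->left;
		else if (id > (*pp)->user_call_ID)
			pp = &(*pp)->right;
		else
			return -1;	/* ID already in use (-EBADSLT) */
	}

	n = calloc(1, sizeof(*n));
	n->user_call_ID = id;
	*pp = n;		/* link at the leaf position we found */
	return 0;
}

int main(void)
{
	struct node *root = NULL;

	printf("%d\n", insert_id(&root, 42));	/* 0: inserted */
	printf("%d\n", insert_id(&root, 42));	/* -1: duplicate */
	return 0;
}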
@@ -381,6 +525,7 @@ out:
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
struct rxrpc_call *call;
+ bool abort = false;
int ret;
_enter("");
@@ -389,12 +534,16 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
write_lock(&rx->call_lock);
- ret = -ENODATA;
- if (list_empty(&rx->acceptq))
- goto out;
+ if (list_empty(&rx->to_be_accepted)) {
+ write_unlock(&rx->call_lock);
+ return -ENODATA;
+ }
- /* dequeue the first call and check it's still valid */
- call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
+ /* Dequeue the first call and check it's still valid. We gain
+ * responsibility for the queue's reference.
+ */
+ call = list_entry(rx->to_be_accepted.next,
+ struct rxrpc_call, accept_link);
list_del_init(&call->accept_link);
sk_acceptq_removed(&rx->sk);
rxrpc_see_call(call);
@@ -402,76 +551,56 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
write_lock_bh(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
- __rxrpc_set_call_completion(call, RXRPC_CALL_SERVER_BUSY,
- 0, ECONNABORTED);
- if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
- rxrpc_queue_call(call);
- ret = 0;
- goto out_release;
+ __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
+ abort = true;
+ /* fall through */
case RXRPC_CALL_COMPLETE:
ret = call->error;
- goto out_release;
- case RXRPC_CALL_DEAD:
- ret = -ETIME;
goto out_discard;
default:
BUG();
}
- /* if the call is already dying or dead, then we leave the socket's ref
- * on it to be released by rxrpc_dead_call_expired() as induced by
- * rxrpc_release_call() */
-out_release:
- _debug("release %p", call);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
- _debug("discard %p", call);
-out:
write_unlock(&rx->call_lock);
+ if (abort) {
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ }
+ rxrpc_service_prealloc(rx, GFP_KERNEL);
_leave(" = %d", ret);
return ret;
}
-/**
- * rxrpc_kernel_accept_call - Allow a kernel service to accept an incoming call
- * @sock: The socket on which the impending call is waiting
- * @user_call_ID: The tag to attach to the call
- * @notify_rx: Where to send notifications instead of socket queue
+/*
+ * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
+ * @sock: The socket on which to preallocate
+ * @notify_rx: Event notification function for the call
+ * @user_attach_call: Func to attach call to user_call_ID
+ * @user_call_ID: The tag to attach to the preallocated call
+ * @gfp: The allocation conditions.
*
- * Allow a kernel service to accept an incoming call, assuming the incoming
- * call is still valid. The caller should immediately trigger their own
- * notification as there must be data waiting.
- */
-struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *sock,
- unsigned long user_call_ID,
- rxrpc_notify_rx_t notify_rx)
-{
- struct rxrpc_call *call;
-
- _enter(",%lx", user_call_ID);
- call = rxrpc_accept_call(rxrpc_sk(sock->sk), user_call_ID, notify_rx);
- _leave(" = %p", call);
- return call;
-}
-EXPORT_SYMBOL(rxrpc_kernel_accept_call);
-
-/**
- * rxrpc_kernel_reject_call - Allow a kernel service to reject an incoming call
- * @sock: The socket on which the impending call is waiting
+ * Charge up the socket with preallocated calls, each with a user ID. A
+ * function should be provided to effect the attachment from the user's side.
+ * The user is given a ref to hold on the call.
*
- * Allow a kernel service to reject an incoming call with a BUSY message,
- * assuming the incoming call is still valid.
+ * Note that the call may become connected before this function returns.
*/
-int rxrpc_kernel_reject_call(struct socket *sock)
+int rxrpc_kernel_charge_accept(struct socket *sock,
+ rxrpc_notify_rx_t notify_rx,
+ rxrpc_user_attach_call_t user_attach_call,
+ unsigned long user_call_ID, gfp_t gfp)
{
- int ret;
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+ struct rxrpc_backlog *b = rx->backlog;
- _enter("");
- ret = rxrpc_reject_call(rxrpc_sk(sock->sk));
- _leave(" = %d", ret);
- return ret;
+ if (sock->sk->sk_state == RXRPC_CLOSE)
+ return -ESHUTDOWN;
+
+ return rxrpc_service_prealloc_one(rx, b, notify_rx,
+ user_attach_call, user_call_ID,
+ gfp);
}
-EXPORT_SYMBOL(rxrpc_kernel_reject_call);
+EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
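
A sketch of how a kernel service might keep the backlog topped up with rxrpc_kernel_charge_accept(), loosely modelled on the preallocation loop AFS uses; struct my_call, my_alloc_call(), my_free_call() and the two callbacks are illustrative placeholders for the service's own bookkeeping, not real kernel symbols:

/* Keep charging preallocated calls until allocation or charging fails.
 * The callback signatures follow rxrpc_notify_rx_t and
 * rxrpc_user_attach_call_t as used in the export above.
 */
static void my_wake_call(struct sock *sk, struct rxrpc_call *rxcall,
			 unsigned long user_call_ID)
{
	/* Notification: data or events are waiting on the call. */
}

static void my_attach_call(struct rxrpc_call *rxcall,
			   unsigned long user_call_ID)
{
	/* Bind the service's own record (user_call_ID) to rxcall. */
}

static void my_charge_preallocation(struct socket *rxrpc_socket)
{
	struct my_call *call;

	for (;;) {
		call = my_alloc_call(GFP_KERNEL);
		if (!call)
			break;
		if (rxrpc_kernel_charge_accept(rxrpc_socket,
					       my_wake_call, my_attach_call,
					       (unsigned long)call,
					       GFP_KERNEL) < 0) {
			my_free_call(call);
			break;	/* backlog is full or socket is closing */
		}
	}
}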
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 4754c7fb6242..0e8478012212 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -22,1270 +22,336 @@
#include "ar-internal.h"
/*
- * propose an ACK be sent
+ * Set the timer
*/
-void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u16 skew, u32 serial, bool immediate)
+void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why)
{
- unsigned long expiry;
- s8 prior = rxrpc_ack_priority[ack_reason];
+ unsigned long t, now = jiffies;
- ASSERTCMP(prior, >, 0);
+ read_lock_bh(&call->state_lock);
- _enter("{%d},%s,%%%x,%u",
- call->debug_id, rxrpc_acks(ack_reason), serial, immediate);
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ t = call->expire_at;
+ if (time_before_eq(t, now))
+ goto out;
- if (prior < rxrpc_ack_priority[call->ackr_reason]) {
- if (immediate)
- goto cancel_timer;
- return;
- }
+ if (time_after(call->resend_at, now) &&
+ time_before(call->resend_at, t))
+ t = call->resend_at;
- /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
- * numbers */
- if (prior == rxrpc_ack_priority[call->ackr_reason]) {
- if (prior <= 4) {
- call->ackr_skew = skew;
- call->ackr_serial = serial;
- }
- if (immediate)
- goto cancel_timer;
- return;
- }
-
- call->ackr_reason = ack_reason;
- call->ackr_serial = serial;
+ if (time_after(call->ack_at, now) &&
+ time_before(call->ack_at, t))
+ t = call->ack_at;
- switch (ack_reason) {
- case RXRPC_ACK_DELAY:
- _debug("run delay timer");
- expiry = rxrpc_soft_ack_delay;
- goto run_timer;
-
- case RXRPC_ACK_IDLE:
- if (!immediate) {
- _debug("run defer timer");
- expiry = rxrpc_idle_ack_delay;
- goto run_timer;
- }
- goto cancel_timer;
-
- case RXRPC_ACK_REQUESTED:
- expiry = rxrpc_requested_ack_delay;
- if (!expiry)
- goto cancel_timer;
- if (!immediate || serial == 1) {
- _debug("run defer timer");
- goto run_timer;
+ if (call->timer.expires != t || !timer_pending(&call->timer)) {
+ mod_timer(&call->timer, t);
+ trace_rxrpc_timer(call, why, now);
}
-
- default:
- _debug("immediate ACK");
- goto cancel_timer;
}
-run_timer:
- expiry += jiffies;
- if (!timer_pending(&call->ack_timer) ||
- time_after(call->ack_timer.expires, expiry))
- mod_timer(&call->ack_timer, expiry);
- return;
-
-cancel_timer:
- _debug("cancel timer %%%u", serial);
- try_to_del_timer_sync(&call->ack_timer);
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- rxrpc_queue_call(call);
+out:
read_unlock_bh(&call->state_lock);
}
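
rxrpc_set_timer() above folds three per-call deadlines (expire_at, resend_at, ack_at) into the single call timer, always arming it for the earliest deadline that still lies in the future. A standalone model of that selection, with a wrap-safe comparison standing in for the kernel's time_before():

#include <stdio.h>

static int time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;	/* wrap-safe, like the kernel macro */
}

static unsigned long pick_deadline(unsigned long now, unsigned long expire_at,
				   unsigned long resend_at, unsigned long ack_at)
{
	unsigned long t = expire_at;	/* hard expiry is the upper bound */

	if (time_before(now, resend_at) && time_before(resend_at, t))
		t = resend_at;
	if (time_before(now, ack_at) && time_before(ack_at, t))
		t = ack_at;
	return t;
}

int main(void)
{
	/* ACK due soonest: the timer is armed for the ACK deadline. */
	printf("%lu\n", pick_deadline(100, 1000, 500, 250));	/* 250 */
	return 0;
}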
/*
- * propose an ACK be sent, locking the call structure
+ * propose an ACK be sent
*/
-void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u16 skew, u32 serial, bool immediate)
+static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
+ u16 skew, u32 serial, bool immediate,
+ bool background,
+ enum rxrpc_propose_ack_trace why)
{
+ enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
+ unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
s8 prior = rxrpc_ack_priority[ack_reason];
- if (prior > rxrpc_ack_priority[call->ackr_reason]) {
- spin_lock_bh(&call->lock);
- __rxrpc_propose_ACK(call, ack_reason, skew, serial, immediate);
- spin_unlock_bh(&call->lock);
- }
-}
-
-/*
- * set the resend timer
- */
-static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
- unsigned long resend_at)
-{
- read_lock_bh(&call->state_lock);
- if (call->state == RXRPC_CALL_COMPLETE)
- resend = 0;
-
- if (resend & 1) {
- _debug("SET RESEND");
- set_bit(RXRPC_CALL_EV_RESEND, &call->events);
- }
-
- if (resend & 2) {
- _debug("MODIFY RESEND TIMER");
- set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- mod_timer(&call->resend_timer, resend_at);
- } else {
- _debug("KILL RESEND TIMER");
- del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- }
- read_unlock_bh(&call->state_lock);
-}
-
-/*
- * resend packets
- */
-static void rxrpc_resend(struct rxrpc_call *call)
-{
- struct rxrpc_wire_header *whdr;
- struct rxrpc_skb_priv *sp;
- struct sk_buff *txb;
- unsigned long *p_txb, resend_at;
- bool stop;
- int loop;
- u8 resend;
-
- _enter("{%d,%d,%d,%d},",
- call->acks_hard, call->acks_unacked,
- atomic_read(&call->sequence),
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
-
- stop = false;
- resend = 0;
- resend_at = 0;
-
- for (loop = call->acks_tail;
- loop != call->acks_head || stop;
- loop = (loop + 1) & (call->acks_winsz - 1)
- ) {
- p_txb = call->acks_window + loop;
- smp_read_barrier_depends();
- if (*p_txb & 1)
- continue;
-
- txb = (struct sk_buff *) *p_txb;
- sp = rxrpc_skb(txb);
-
- if (sp->need_resend) {
- sp->need_resend = false;
-
- /* each Tx packet has a new serial number */
- sp->hdr.serial = atomic_inc_return(&call->conn->serial);
-
- whdr = (struct rxrpc_wire_header *)txb->head;
- whdr->serial = htonl(sp->hdr.serial);
-
- _proto("Tx DATA %%%u { #%d }",
- sp->hdr.serial, sp->hdr.seq);
- if (rxrpc_send_data_packet(call->conn, txb) < 0) {
- stop = true;
- sp->resend_at = jiffies + 3;
- } else {
- if (rxrpc_is_client_call(call))
- rxrpc_expose_client_call(call);
- sp->resend_at =
- jiffies + rxrpc_resend_timeout;
- }
- }
-
- if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = true;
- resend |= 1;
- } else if (resend & 2) {
- if (time_before(sp->resend_at, resend_at))
- resend_at = sp->resend_at;
- } else {
- resend_at = sp->resend_at;
- resend |= 2;
- }
- }
-
- rxrpc_set_resend(call, resend, resend_at);
- _leave("");
-}
-
-/*
- * handle resend timer expiry
- */
-static void rxrpc_resend_timer(struct rxrpc_call *call)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *txb;
- unsigned long *p_txb, resend_at;
- int loop;
- u8 resend;
-
- _enter("%d,%d,%d",
- call->acks_tail, call->acks_unacked, call->acks_head);
-
- if (call->state == RXRPC_CALL_COMPLETE)
- return;
-
- resend = 0;
- resend_at = 0;
-
- for (loop = call->acks_unacked;
- loop != call->acks_head;
- loop = (loop + 1) & (call->acks_winsz - 1)
- ) {
- p_txb = call->acks_window + loop;
- smp_read_barrier_depends();
- txb = (struct sk_buff *) (*p_txb & ~1);
- sp = rxrpc_skb(txb);
-
- ASSERT(!(*p_txb & 1));
-
- if (sp->need_resend) {
- ;
- } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = true;
- resend |= 1;
- } else if (resend & 2) {
- if (time_before(sp->resend_at, resend_at))
- resend_at = sp->resend_at;
- } else {
- resend_at = sp->resend_at;
- resend |= 2;
- }
- }
-
- rxrpc_set_resend(call, resend, resend_at);
- _leave("");
-}
-
-/*
- * process soft ACKs of our transmitted packets
- * - these indicate packets the peer has or has not received, but hasn't yet
- * given to the consumer, and so can still be discarded and re-requested
- */
-static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
- struct rxrpc_ackpacket *ack,
- struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *txb;
- unsigned long *p_txb, resend_at;
- int loop;
- u8 sacks[RXRPC_MAXACKS], resend;
-
- _enter("{%d,%d},{%d},",
- call->acks_hard,
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
- ack->nAcks);
-
- if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
- goto protocol_error;
-
- resend = 0;
- resend_at = 0;
- for (loop = 0; loop < ack->nAcks; loop++) {
- p_txb = call->acks_window;
- p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
- smp_read_barrier_depends();
- txb = (struct sk_buff *) (*p_txb & ~1);
- sp = rxrpc_skb(txb);
-
- switch (sacks[loop]) {
- case RXRPC_ACK_TYPE_ACK:
- sp->need_resend = false;
- *p_txb |= 1;
- break;
- case RXRPC_ACK_TYPE_NACK:
- sp->need_resend = true;
- *p_txb &= ~1;
- resend = 1;
- break;
- default:
- _debug("Unsupported ACK type %d", sacks[loop]);
- goto protocol_error;
- }
- }
-
- smp_mb();
- call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);
-
- /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
- * have been received or processed yet by the far end */
- for (loop = call->acks_unacked;
- loop != call->acks_head;
- loop = (loop + 1) & (call->acks_winsz - 1)
- ) {
- p_txb = call->acks_window + loop;
- smp_read_barrier_depends();
- txb = (struct sk_buff *) (*p_txb & ~1);
- sp = rxrpc_skb(txb);
-
- if (*p_txb & 1) {
- /* packet must have been discarded */
- sp->need_resend = true;
- *p_txb &= ~1;
- resend |= 1;
- } else if (sp->need_resend) {
- ;
- } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
- sp->need_resend = true;
- resend |= 1;
- } else if (resend & 2) {
- if (time_before(sp->resend_at, resend_at))
- resend_at = sp->resend_at;
- } else {
- resend_at = sp->resend_at;
- resend |= 2;
+ /* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
+ * numbers, but we don't alter the timeout.
+ */
+ _debug("prior %u %u vs %u %u",
+ ack_reason, prior,
+ call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
+ if (ack_reason == call->ackr_reason) {
+ if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
+ outcome = rxrpc_propose_ack_update;
+ call->ackr_serial = serial;
+ call->ackr_skew = skew;
}
+ if (!immediate)
+ goto trace;
+ } else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
+ call->ackr_reason = ack_reason;
+ call->ackr_serial = serial;
+ call->ackr_skew = skew;
+ } else {
+ outcome = rxrpc_propose_ack_subsume;
}
- rxrpc_set_resend(call, resend, resend_at);
- _leave(" = 0");
- return 0;
-
-protocol_error:
- _leave(" = -EPROTO");
- return -EPROTO;
-}
-
-/*
- * discard hard-ACK'd packets from the Tx window
- */
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
-{
- unsigned long _skb;
- int tail = call->acks_tail, old_tail;
- int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
+ switch (ack_reason) {
+ case RXRPC_ACK_REQUESTED:
+ if (rxrpc_requested_ack_delay < expiry)
+ expiry = rxrpc_requested_ack_delay;
+ if (serial == 1)
+ immediate = false;
+ break;
- _enter("{%u,%u},%u", call->acks_hard, win, hard);
+ case RXRPC_ACK_DELAY:
+ if (rxrpc_soft_ack_delay < expiry)
+ expiry = rxrpc_soft_ack_delay;
+ break;
- ASSERTCMP(hard - call->acks_hard, <=, win);
+ case RXRPC_ACK_PING:
+ case RXRPC_ACK_IDLE:
+ if (rxrpc_idle_ack_delay < expiry)
+ expiry = rxrpc_idle_ack_delay;
+ break;
- while (call->acks_hard < hard) {
- smp_read_barrier_depends();
- _skb = call->acks_window[tail] & ~1;
- rxrpc_free_skb((struct sk_buff *) _skb);
- old_tail = tail;
- tail = (tail + 1) & (call->acks_winsz - 1);
- call->acks_tail = tail;
- if (call->acks_unacked == old_tail)
- call->acks_unacked = tail;
- call->acks_hard++;
+ default:
+ immediate = true;
+ break;
}
- wake_up(&call->waitq);
-}
-
-/*
- * clear the Tx window in the event of a failure
- */
-static void rxrpc_clear_tx_window(struct rxrpc_call *call)
-{
- rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
-}
-
-/*
- * drain the out of sequence received packet queue into the packet Rx queue
- */
-static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- bool terminal;
- int ret;
-
- _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);
-
- spin_lock_bh(&call->lock);
-
- ret = -ECONNRESET;
- if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
- goto socket_unavailable;
-
- skb = skb_dequeue(&call->rx_oos_queue);
- if (skb) {
- rxrpc_see_skb(skb);
- sp = rxrpc_skb(skb);
-
- _debug("drain OOS packet %d [%d]",
- sp->hdr.seq, call->rx_first_oos);
-
- if (sp->hdr.seq != call->rx_first_oos) {
- skb_queue_head(&call->rx_oos_queue, skb);
- call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
- _debug("requeue %p {%u}", skb, call->rx_first_oos);
- } else {
- skb->mark = RXRPC_SKB_MARK_DATA;
- terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
- !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
- ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
- BUG_ON(ret < 0);
- _debug("drain #%u", call->rx_data_post);
- call->rx_data_post++;
-
- /* find out what the next packet is */
- skb = skb_peek(&call->rx_oos_queue);
- rxrpc_see_skb(skb);
- if (skb)
- call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
- else
- call->rx_first_oos = 0;
- _debug("peek %p {%u}", skb, call->rx_first_oos);
+ now = jiffies;
+ if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+ _debug("already scheduled");
+ } else if (immediate || expiry == 0) {
+ _debug("immediate ACK %lx", call->events);
+ if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
+ background)
+ rxrpc_queue_call(call);
+ } else {
+ ack_at = now + expiry;
+ _debug("deferred ACK %ld < %ld", expiry, call->ack_at - now);
+ if (time_before(ack_at, call->ack_at)) {
+ call->ack_at = ack_at;
+ rxrpc_set_timer(call, rxrpc_timer_set_for_ack);
}
}
- ret = 0;
-socket_unavailable:
- spin_unlock_bh(&call->lock);
- _leave(" = %d", ret);
- return ret;
+trace:
+ trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
+ background, outcome);
}
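
The decision structure in __rxrpc_propose_ACK() is three-way: re-proposing the pending reason may refresh the recorded serial and skew if that reason is in the updateable set; a higher-priority reason displaces the pending one; a lower-priority reason is subsumed by it. A standalone model under assumed priority values (the kernel's real table is rxrpc_ack_priority[]):

#include <stdio.h>

enum outcome { USE, UPDATE, SUBSUME };

static enum outcome propose(int *pending, int reason,
			    const int *prio, unsigned int updateable_mask)
{
	if (reason == *pending)
		return (updateable_mask & (1u << reason)) ? UPDATE : USE;
	if (prio[reason] > prio[*pending]) {
		*pending = reason;	/* higher priority wins */
		return USE;
	}
	return SUBSUME;			/* lower priority is absorbed */
}

int main(void)
{
	const int prio[4] = { 0, 1, 2, 3 };	/* index = ack reason */
	int pending = 1;

	printf("%d\n", propose(&pending, 3, prio, 0x2));	/* 0: replaces */
	printf("%d\n", propose(&pending, 1, prio, 0x2));	/* 2: subsumed */
	printf("%d\n", propose(&pending, 3, prio, 0x8));	/* 1: updated */
	return 0;
}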
/*
- * insert an out of sequence packet into the buffer
+ * propose an ACK be sent, locking the call structure
*/
-static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
- struct sk_buff *skb)
+void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
+ u16 skew, u32 serial, bool immediate, bool background,
+ enum rxrpc_propose_ack_trace why)
{
- struct rxrpc_skb_priv *sp, *psp;
- struct sk_buff *p;
- u32 seq;
-
- sp = rxrpc_skb(skb);
- seq = sp->hdr.seq;
- _enter(",,{%u}", seq);
-
- skb->destructor = rxrpc_packet_destructor;
- ASSERTCMP(sp->call, ==, NULL);
- sp->call = call;
- rxrpc_get_call_for_skb(call, skb);
-
- /* insert into the buffer in sequence order */
spin_lock_bh(&call->lock);
-
- skb_queue_walk(&call->rx_oos_queue, p) {
- psp = rxrpc_skb(p);
- if (psp->hdr.seq > seq) {
- _debug("insert oos #%u before #%u", seq, psp->hdr.seq);
- skb_insert(p, skb, &call->rx_oos_queue);
- goto inserted;
- }
- }
-
- _debug("append oos #%u", seq);
- skb_queue_tail(&call->rx_oos_queue, skb);
-inserted:
-
- /* we might now have a new front to the queue */
- if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
- call->rx_first_oos = seq;
-
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- call->rx_data_post == call->rx_first_oos) {
- _debug("drain rx oos now");
- set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
- }
- read_unlock(&call->state_lock);
-
+ __rxrpc_propose_ACK(call, ack_reason, skew, serial,
+ immediate, background, why);
spin_unlock_bh(&call->lock);
- _leave(" [stored #%u]", call->rx_first_oos);
-}
-
-/*
- * clear the Tx window on final ACK reception
- */
-static void rxrpc_zap_tx_window(struct rxrpc_call *call)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- unsigned long _skb, *acks_window;
- u8 winsz = call->acks_winsz;
- int tail;
-
- acks_window = call->acks_window;
- call->acks_window = NULL;
-
- while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
- tail = call->acks_tail;
- smp_read_barrier_depends();
- _skb = acks_window[tail] & ~1;
- smp_mb();
- call->acks_tail = (call->acks_tail + 1) & (winsz - 1);
-
- skb = (struct sk_buff *) _skb;
- sp = rxrpc_skb(skb);
- _debug("+++ clear Tx %u", sp->hdr.seq);
- rxrpc_free_skb(skb);
- }
-
- kfree(acks_window);
}
/*
- * process the extra information that may be appended to an ACK packet
+ * Handle congestion being detected by the retransmit timeout.
*/
-static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
- unsigned int latest, int nAcks)
+static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
- struct rxrpc_ackinfo ackinfo;
- struct rxrpc_peer *peer;
- unsigned int mtu;
-
- if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
- _leave(" [no ackinfo]");
- return;
- }
-
- _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
- latest,
- ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
- ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));
-
- mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));
-
- peer = call->peer;
- if (mtu < peer->maxdata) {
- spin_lock_bh(&peer->lock);
- peer->maxdata = mtu;
- peer->mtu = mtu + peer->hdrsize;
- spin_unlock_bh(&peer->lock);
- _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
- }
+ set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}
/*
- * process packets in the reception queue
+ * Perform retransmission of NAK'd and unack'd packets.
*/
-static int rxrpc_process_rx_queue(struct rxrpc_call *call,
- u32 *_abort_code)
+static void rxrpc_resend(struct rxrpc_call *call)
{
- struct rxrpc_ackpacket ack;
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
- bool post_ACK;
- int latest;
- u32 hard, tx;
-
- _enter("");
-
-process_further:
- skb = skb_dequeue(&call->rx_queue);
- if (!skb)
- return -EAGAIN;
-
- rxrpc_see_skb(skb);
- _net("deferred skb %p", skb);
-
- sp = rxrpc_skb(skb);
+ rxrpc_seq_t cursor, seq, top;
+ ktime_t now = ktime_get_real(), max_age, oldest, resend_at, ack_ts;
+ int ix;
+ u8 annotation, anno_type, retrans = 0, unacked = 0;
- _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);
+ _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
- post_ACK = false;
+ max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
- switch (sp->hdr.type) {
- /* data packets that wind up here have been received out of
- * order, need security processing or are jumbo packets */
- case RXRPC_PACKET_TYPE_DATA:
- _proto("OOSQ DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
+ spin_lock_bh(&call->lock);
- /* secured packets must be verified and possibly decrypted */
- if (call->conn->security->verify_packet(call, skb,
- _abort_code) < 0)
- goto protocol_error;
+ cursor = call->tx_hard_ack;
+ top = call->tx_top;
+ ASSERT(before_eq(cursor, top));
+ if (cursor == top)
+ goto out_unlock;
+
+ /* Scan the packet list without dropping the lock and decide which of
+ * the packets in the Tx buffer we're going to resend and what the new
+ * resend timeout will be.
+ */
+ oldest = now;
+ for (seq = cursor + 1; before_eq(seq, top); seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ if (anno_type == RXRPC_TX_ANNO_ACK)
+ continue;
- rxrpc_insert_oos_packet(call, skb);
- goto process_further;
+ skb = call->rxtx_buffer[ix];
+ rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
+ sp = rxrpc_skb(skb);
- /* partial ACK to process */
- case RXRPC_PACKET_TYPE_ACK:
- if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
- _debug("extraction failure");
- goto protocol_error;
+ if (anno_type == RXRPC_TX_ANNO_UNACK) {
+ if (ktime_after(skb->tstamp, max_age)) {
+ if (ktime_before(skb->tstamp, oldest))
+ oldest = skb->tstamp;
+ continue;
+ }
+ if (!(annotation & RXRPC_TX_ANNO_RESENT))
+ unacked++;
}
- if (!skb_pull(skb, sizeof(ack)))
- BUG();
-
- latest = sp->hdr.serial;
- hard = ntohl(ack.firstPacket);
- tx = atomic_read(&call->sequence);
- _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- latest,
- ntohs(ack.maxSkew),
- hard,
- ntohl(ack.previousPacket),
- ntohl(ack.serial),
- rxrpc_acks(ack.reason),
- ack.nAcks);
-
- rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);
+ /* Okay, we need to retransmit a packet. */
+ call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
+ retrans++;
+ trace_rxrpc_retransmit(call, seq, annotation | anno_type,
+ ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
+ }
+
+ resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+ call->resend_at = jiffies +
+ nsecs_to_jiffies(ktime_to_ns(ktime_sub(resend_at, now))) +
+ 1; /* We have to make sure that the calculated jiffies value
+ * falls at or after the nsec value, or we shall loop
+ * ceaselessly because the timer times out, but we haven't
+ * reached the nsec timeout yet.
+ */
+
+ if (unacked)
+ rxrpc_congestion_timeout(call);
+
+ /* If there was nothing that needed retransmission then it's likely
+ * that an ACK got lost somewhere. Send a ping to find out instead of
+ * retransmitting data.
+ */
+ if (!retrans) {
+ rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+ spin_unlock_bh(&call->lock);
+ ack_ts = ktime_sub(now, call->acks_latest_ts);
+ if (ktime_to_ns(ack_ts) < call->peer->rtt)
+ goto out;
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ack_ping_for_lost_ack);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ goto out;
+ }
+
+ /* Now go through the Tx window and perform the retransmissions. We
+ * have to drop the lock for each send. If an ACK comes in whilst the
+ * lock is dropped, it may clear some of the retransmission markers for
+ * packets that it soft-ACKs.
+ */
+ for (seq = cursor + 1; before_eq(seq, top); seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ if (anno_type != RXRPC_TX_ANNO_RETRANS)
+ continue;
- if (ack.reason == RXRPC_ACK_PING) {
- _proto("Rx ACK %%%u PING Request", latest);
- rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- skb->priority, sp->hdr.serial, true);
- }
+ skb = call->rxtx_buffer[ix];
+ rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+ spin_unlock_bh(&call->lock);
- /* discard any out-of-order or duplicate ACKs */
- if (latest - call->acks_latest <= 0) {
- _debug("discard ACK %d <= %d",
- latest, call->acks_latest);
- goto discard;
+ if (rxrpc_send_data_packet(call, skb) < 0) {
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ return;
}
- call->acks_latest = latest;
- if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
- call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
- call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
- call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
- goto discard;
+ if (rxrpc_is_client_call(call))
+ rxrpc_expose_client_call(call);
- _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);
-
- if (hard > 0) {
- if (hard - 1 > tx) {
- _debug("hard-ACK'd packet %d not transmitted"
- " (%d top)",
- hard - 1, tx);
- goto protocol_error;
- }
-
- if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
- call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
- hard > tx) {
- call->acks_hard = tx;
- goto all_acked;
- }
-
- smp_rmb();
- rxrpc_rotate_tx_window(call, hard - 1);
- }
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ spin_lock_bh(&call->lock);
- if (ack.nAcks > 0) {
- if (hard - 1 + ack.nAcks > tx) {
- _debug("soft-ACK'd packet %d+%d not"
- " transmitted (%d top)",
- hard - 1, ack.nAcks, tx);
- goto protocol_error;
+ /* We need to clear the retransmit state, but there are two
+ * things we need to be aware of: A new ACK/NAK might have been
+ * received and the packet might have been hard-ACK'd (in which
+ * case it will no longer be in the buffer).
+ */
+ if (after(seq, call->tx_hard_ack)) {
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ if (anno_type == RXRPC_TX_ANNO_RETRANS ||
+ anno_type == RXRPC_TX_ANNO_NAK) {
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ annotation |= RXRPC_TX_ANNO_UNACK;
}
-
- if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
- goto protocol_error;
+ annotation |= RXRPC_TX_ANNO_RESENT;
+ call->rxtx_annotations[ix] = annotation;
}
- goto discard;
-
- /* complete ACK to process */
- case RXRPC_PACKET_TYPE_ACKALL:
- goto all_acked;
-
- /* abort and busy are handled elsewhere */
- case RXRPC_PACKET_TYPE_BUSY:
- case RXRPC_PACKET_TYPE_ABORT:
- BUG();
-
- /* connection level events - also handled elsewhere */
- case RXRPC_PACKET_TYPE_CHALLENGE:
- case RXRPC_PACKET_TYPE_RESPONSE:
- case RXRPC_PACKET_TYPE_DEBUG:
- BUG();
- }
-
- /* if we've had a hard ACK that covers all the packets we've sent, then
- * that ends that phase of the operation */
-all_acked:
- write_lock_bh(&call->state_lock);
- _debug("ack all %d", call->state);
-
- switch (call->state) {
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
- break;
- case RXRPC_CALL_SERVER_AWAIT_ACK:
- _debug("srv complete");
- __rxrpc_call_completed(call);
- post_ACK = true;
- break;
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- goto protocol_error_unlock; /* can't occur yet */
- default:
- write_unlock_bh(&call->state_lock);
- goto discard; /* assume packet left over from earlier phase */
- }
-
- write_unlock_bh(&call->state_lock);
-
- /* if all the packets we sent are hard-ACK'd, then we can discard
- * whatever we've got left */
- _debug("clear Tx %d",
- CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
- del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
-
- if (call->acks_window)
- rxrpc_zap_tx_window(call);
-
- if (post_ACK) {
- /* post the final ACK message for userspace to pick up */
- _debug("post ACK");
- skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
- sp->call = call;
- rxrpc_get_call_for_skb(call, skb);
- spin_lock_bh(&call->lock);
- if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
- BUG();
- spin_unlock_bh(&call->lock);
- goto process_further;
+ if (after(call->tx_hard_ack, seq))
+ seq = call->tx_hard_ack;
}
-discard:
- rxrpc_free_skb(skb);
- goto process_further;
-
-protocol_error_unlock:
- write_unlock_bh(&call->state_lock);
-protocol_error:
- rxrpc_free_skb(skb);
- _leave(" = -EPROTO");
- return -EPROTO;
-}
-
-/*
- * post a message to the socket Rx queue for recvmsg() to pick up
- */
-static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
- bool fatal)
-{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- int ret;
-
- _enter("{%d,%lx},%u,%u,%d",
- call->debug_id, call->flags, mark, error, fatal);
-
- /* remove timers and things for fatal messages */
- if (fatal) {
- del_timer_sync(&call->resend_timer);
- del_timer_sync(&call->ack_timer);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- }
-
- if (mark != RXRPC_SKB_MARK_NEW_CALL &&
- !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
- _leave("[no userid]");
- return 0;
- }
-
- if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
- skb = alloc_skb(0, GFP_NOFS);
- if (!skb)
- return -ENOMEM;
-
- rxrpc_new_skb(skb);
-
- skb->mark = mark;
-
- sp = rxrpc_skb(skb);
- memset(sp, 0, sizeof(*sp));
- sp->error = error;
- sp->call = call;
- rxrpc_get_call_for_skb(call, skb);
-
- spin_lock_bh(&call->lock);
- ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
- spin_unlock_bh(&call->lock);
- BUG_ON(ret < 0);
- }
-
- return 0;
+out_unlock:
+ spin_unlock_bh(&call->lock);
+out:
+ _leave("");
}
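
The +1 added to the computed resend_at above matters because the nanosecond-to-jiffies conversion truncates: without it the timer can fire a fraction of a jiffy before the nanosecond deadline, rxrpc_resend() then finds no packet old enough, re-arms the timer, and the cycle repeats. A worked standalone example, assuming HZ=250 (4ms per jiffy):

#include <stdio.h>

#define HZ 250UL

static unsigned long nsecs_to_jiffies_trunc(unsigned long long ns)
{
	return (unsigned long)(ns * HZ / 1000000000ULL);	/* truncates */
}

int main(void)
{
	unsigned long long timeout_ns = 10 * 1000000ULL;	/* 10ms */
	unsigned long j = nsecs_to_jiffies_trunc(timeout_ns);

	/* 10ms is 2.5 jiffies; truncation gives 2 (8ms), which is too
	 * early, so the rounded-up value of 3 (12ms) must be used.
	 */
	printf("truncated: %lu jiffies (%lums)\n", j, j * 1000 / HZ);
	printf("safe:      %lu jiffies (%lums)\n", j + 1, (j + 1) * 1000 / HZ);
	return 0;
}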
/*
- * handle background processing of incoming call packets and ACK / abort
- * generation
+ * Handle retransmission and deferred ACK/abort generation.
*/
void rxrpc_process_call(struct work_struct *work)
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
- struct rxrpc_wire_header whdr;
- struct rxrpc_ackpacket ack;
- struct rxrpc_ackinfo ackinfo;
- struct msghdr msg;
- struct kvec iov[5];
- enum rxrpc_call_event genbit;
- unsigned long bits;
- __be32 data, pad;
- size_t len;
- int loop, nbit, ioc, ret, mtu;
- u32 serial, abort_code = RX_PROTOCOL_ERROR;
- u8 *acks = NULL;
+ unsigned long now;
rxrpc_see_call(call);
//printk("\n--------------------\n");
- _enter("{%d,%s,%lx} [%lu]",
- call->debug_id, rxrpc_call_states[call->state], call->events,
- (jiffies - call->creation_jif) / (HZ / 10));
-
- if (!call->conn)
- goto skip_msg_init;
-
- /* there's a good chance we're going to have to send a message, so set
- * one up in advance */
- msg.msg_name = &call->peer->srx.transport;
- msg.msg_namelen = call->peer->srx.transport_len;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
+ _enter("{%d,%s,%lx}",
+ call->debug_id, rxrpc_call_states[call->state], call->events);
- whdr.epoch = htonl(call->conn->proto.epoch);
- whdr.cid = htonl(call->cid);
- whdr.callNumber = htonl(call->call_id);
- whdr.seq = 0;
- whdr.type = RXRPC_PACKET_TYPE_ACK;
- whdr.flags = call->conn->out_clientflag;
- whdr.userStatus = 0;
- whdr.securityIndex = call->conn->security_ix;
- whdr._rsvd = 0;
- whdr.serviceId = htons(call->service_id);
-
- memset(iov, 0, sizeof(iov));
- iov[0].iov_base = &whdr;
- iov[0].iov_len = sizeof(whdr);
-skip_msg_init:
-
- /* deal with events of a final nature */
- if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
- enum rxrpc_skb_mark mark;
-
- clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
- clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
-
- if (call->completion == RXRPC_CALL_NETWORK_ERROR) {
- mark = RXRPC_SKB_MARK_NET_ERROR;
- _debug("post net error %d", call->error);
- } else {
- mark = RXRPC_SKB_MARK_LOCAL_ERROR;
- _debug("post net local error %d", call->error);
- }
-
- if (rxrpc_post_message(call, mark, call->error, true) < 0)
- goto no_mem;
- clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
- goto kill_ACKs;
+recheck_state:
+ if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+ goto recheck_state;
}
- if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-
- clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
-
- _debug("post conn abort");
-
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
- call->error, true) < 0)
- goto no_mem;
- clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
- goto kill_ACKs;
+ if (call->state == RXRPC_CALL_COMPLETE) {
+ del_timer_sync(&call->timer);
+ goto out_put;
}
- if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
- whdr.type = RXRPC_PACKET_TYPE_BUSY;
- genbit = RXRPC_CALL_EV_REJECT_BUSY;
- goto send_message;
+ now = jiffies;
+ if (time_after_eq(now, call->expire_at)) {
+ rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
+ goto recheck_state;
}
- if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
- ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
- call->error, true) < 0)
- goto no_mem;
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
- data = htonl(call->abort_code);
- iov[1].iov_base = &data;
- iov[1].iov_len = sizeof(data);
- genbit = RXRPC_CALL_EV_ABORT;
- goto send_message;
- }
-
- if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
- genbit = RXRPC_CALL_EV_ACK_FINAL;
-
- ack.bufferSpace = htons(8);
- ack.maxSkew = 0;
- ack.serial = 0;
- ack.reason = RXRPC_ACK_IDLE;
- ack.nAcks = 0;
- call->ackr_reason = 0;
-
- spin_lock_bh(&call->lock);
- ack.serial = htonl(call->ackr_serial);
- ack.previousPacket = htonl(call->ackr_prev_seq);
- ack.firstPacket = htonl(call->rx_data_eaten + 1);
- spin_unlock_bh(&call->lock);
-
- pad = 0;
-
- iov[1].iov_base = &ack;
- iov[1].iov_len = sizeof(ack);
- iov[2].iov_base = &pad;
- iov[2].iov_len = 3;
- iov[3].iov_base = &ackinfo;
- iov[3].iov_len = sizeof(ackinfo);
- goto send_ACK;
- }
-
- if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
- (1 << RXRPC_CALL_EV_RCVD_ABORT))
- ) {
- u32 mark;
-
- if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
- mark = RXRPC_SKB_MARK_REMOTE_ABORT;
- else
- mark = RXRPC_SKB_MARK_BUSY;
-
- _debug("post abort/busy");
- rxrpc_clear_tx_window(call);
- if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
- goto no_mem;
-
- clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
- clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- goto kill_ACKs;
- }
-
- if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
- _debug("do implicit ackall");
- rxrpc_clear_tx_window(call);
- }
-
- if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
- rxrpc_abort_call(call, RX_CALL_TIMEOUT, ETIME);
-
- _debug("post timeout");
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
- ETIME, true) < 0)
- goto no_mem;
-
- clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
- goto kill_ACKs;
- }
-
- /* deal with assorted inbound messages */
- if (!skb_queue_empty(&call->rx_queue)) {
- ret = rxrpc_process_rx_queue(call, &abort_code);
- switch (ret) {
- case 0:
- case -EAGAIN:
- break;
- case -ENOMEM:
- goto no_mem;
- case -EKEYEXPIRED:
- case -EKEYREJECTED:
- case -EPROTO:
- rxrpc_abort_call(call, abort_code, -ret);
- goto kill_ACKs;
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+ time_after_eq(now, call->ack_at)) {
+ call->ack_at = call->expire_at;
+ if (call->ackr_reason) {
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ goto recheck_state;
}
}
- /* handle resending */
- if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
- rxrpc_resend_timer(call);
- if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
+ time_after_eq(now, call->resend_at)) {
rxrpc_resend(call);
-
- /* consider sending an ordinary ACK */
- if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
- _debug("send ACK: window: %d - %d { %lx }",
- call->rx_data_eaten, call->ackr_win_top,
- call->ackr_window[0]);
-
- if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
- call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
- /* ACK by sending reply DATA packet in this state */
- clear_bit(RXRPC_CALL_EV_ACK, &call->events);
- goto maybe_reschedule;
- }
-
- genbit = RXRPC_CALL_EV_ACK;
-
- acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
- GFP_NOFS);
- if (!acks)
- goto no_mem;
-
- //hdr.flags = RXRPC_SLOW_START_OK;
- ack.bufferSpace = htons(8);
- ack.maxSkew = 0;
-
- spin_lock_bh(&call->lock);
- ack.reason = call->ackr_reason;
- ack.serial = htonl(call->ackr_serial);
- ack.previousPacket = htonl(call->ackr_prev_seq);
- ack.firstPacket = htonl(call->rx_data_eaten + 1);
-
- ack.nAcks = 0;
- for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
- nbit = loop * BITS_PER_LONG;
- for (bits = call->ackr_window[loop]; bits; bits >>= 1
- ) {
- _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
- if (bits & 1) {
- acks[nbit] = RXRPC_ACK_TYPE_ACK;
- ack.nAcks = nbit + 1;
- }
- nbit++;
- }
- }
- call->ackr_reason = 0;
- spin_unlock_bh(&call->lock);
-
- pad = 0;
-
- iov[1].iov_base = &ack;
- iov[1].iov_len = sizeof(ack);
- iov[2].iov_base = acks;
- iov[2].iov_len = ack.nAcks;
- iov[3].iov_base = &pad;
- iov[3].iov_len = 3;
- iov[4].iov_base = &ackinfo;
- iov[4].iov_len = sizeof(ackinfo);
-
- switch (ack.reason) {
- case RXRPC_ACK_REQUESTED:
- case RXRPC_ACK_DUPLICATE:
- case RXRPC_ACK_OUT_OF_SEQUENCE:
- case RXRPC_ACK_EXCEEDS_WINDOW:
- case RXRPC_ACK_NOSPACE:
- case RXRPC_ACK_PING:
- case RXRPC_ACK_PING_RESPONSE:
- goto send_ACK_with_skew;
- case RXRPC_ACK_DELAY:
- case RXRPC_ACK_IDLE:
- goto send_ACK;
- }
+ goto recheck_state;
}
- /* handle completion of security negotiations on an incoming
- * connection */
- if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
- _debug("secured");
- spin_lock_bh(&call->lock);
-
- if (call->state == RXRPC_CALL_SERVER_SECURING) {
- _debug("securing");
- write_lock(&call->socket->call_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
- _debug("not released");
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
- list_move_tail(&call->accept_link,
- &call->socket->acceptq);
- }
- write_unlock(&call->socket->call_lock);
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
- read_unlock(&call->state_lock);
- }
-
- spin_unlock_bh(&call->lock);
- if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
- goto maybe_reschedule;
- }
-
- /* post a notification of an acceptable connection to the app */
- if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
- _debug("post accept");
- if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
- 0, false) < 0)
- goto no_mem;
- clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
- goto maybe_reschedule;
- }
-
- /* handle incoming call acceptance */
- if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
- _debug("accepted");
- ASSERTCMP(call->rx_data_post, ==, 0);
- call->rx_data_post = 1;
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
- read_unlock_bh(&call->state_lock);
- }
-
- /* drain the out of sequence received packet queue into the packet Rx
- * queue */
- if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
- while (call->rx_data_post == call->rx_first_oos)
- if (rxrpc_drain_rx_oos_queue(call) < 0)
- break;
- goto maybe_reschedule;
- }
-
- if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
- rxrpc_release_call(call);
- clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
- }
+ rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
/* other events may have been raised since we started checking */
- goto maybe_reschedule;
-
-send_ACK_with_skew:
- ack.maxSkew = htons(call->ackr_skew);
-send_ACK:
- mtu = call->peer->if_mtu;
- mtu -= call->peer->hdrsize;
- ackinfo.maxMTU = htonl(mtu);
- ackinfo.rwind = htonl(rxrpc_rx_window_size);
-
- /* permit the peer to send us jumbo packets if it wants to */
- ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
- ackinfo.jumbo_max = htonl(rxrpc_rx_jumbo_max);
-
- serial = atomic_inc_return(&call->conn->serial);
- whdr.serial = htonl(serial);
- _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
- serial,
- ntohs(ack.maxSkew),
- ntohl(ack.firstPacket),
- ntohl(ack.previousPacket),
- ntohl(ack.serial),
- rxrpc_acks(ack.reason),
- ack.nAcks);
-
- del_timer_sync(&call->ack_timer);
- if (ack.nAcks > 0)
- set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
- goto send_message_2;
-
-send_message:
- _debug("send message");
-
- serial = atomic_inc_return(&call->conn->serial);
- whdr.serial = htonl(serial);
- _proto("Tx %s %%%u", rxrpc_pkts[whdr.type], serial);
-send_message_2:
-
- len = iov[0].iov_len;
- ioc = 1;
- if (iov[4].iov_len) {
- ioc = 5;
- len += iov[4].iov_len;
- len += iov[3].iov_len;
- len += iov[2].iov_len;
- len += iov[1].iov_len;
- } else if (iov[3].iov_len) {
- ioc = 4;
- len += iov[3].iov_len;
- len += iov[2].iov_len;
- len += iov[1].iov_len;
- } else if (iov[2].iov_len) {
- ioc = 3;
- len += iov[2].iov_len;
- len += iov[1].iov_len;
- } else if (iov[1].iov_len) {
- ioc = 2;
- len += iov[1].iov_len;
- }
-
- ret = kernel_sendmsg(call->conn->params.local->socket,
- &msg, iov, ioc, len);
- if (ret < 0) {
- _debug("sendmsg failed: %d", ret);
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD)
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
- goto error;
- }
-
- switch (genbit) {
- case RXRPC_CALL_EV_ABORT:
- clear_bit(genbit, &call->events);
- clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- goto kill_ACKs;
-
- case RXRPC_CALL_EV_ACK_FINAL:
- rxrpc_call_completed(call);
- goto kill_ACKs;
-
- default:
- clear_bit(genbit, &call->events);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- case RXRPC_CALL_SERVER_ACK_REQUEST:
- _debug("start ACK timer");
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
- call->ackr_skew, call->ackr_serial,
- false);
- default:
- break;
- }
- goto maybe_reschedule;
- }
-
-kill_ACKs:
- del_timer_sync(&call->ack_timer);
- if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
- rxrpc_put_call(call);
- clear_bit(RXRPC_CALL_EV_ACK, &call->events);
-
-maybe_reschedule:
- if (call->events || !skb_queue_empty(&call->rx_queue)) {
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD)
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
- }
-
- /* don't leave aborted connections on the accept queue */
- if (call->state >= RXRPC_CALL_COMPLETE &&
- !list_empty(&call->accept_link)) {
- _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
- call, call->events, call->flags, call->conn->proto.cid);
-
- read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
- }
-
-error:
- kfree(acks);
-
- /* because we don't want two CPUs both processing the work item for one
- * call at the same time, we use a flag to note when it's busy; however
- * this means there's a race between clearing the flag and setting the
- * work pending bit and the work item being processed again */
- if (call->events && !work_pending(&call->processor)) {
- _debug("jumpstart %x", call->conn->proto.cid);
- rxrpc_queue_call(call);
+ if (call->events && call->state < RXRPC_CALL_COMPLETE) {
+ __rxrpc_queue_call(call);
+ goto out;
}
+out_put:
+ rxrpc_put_call(call, rxrpc_call_put);
+out:
_leave("");
- return;
-
-no_mem:
- _debug("out of memory");
- goto maybe_reschedule;
}
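
rxrpc_process_call() now follows a test-and-clear event-loop shape: each pass atomically consumes one event bit, acts on it, and jumps back to recheck_state, because handling one event may raise another (the timeout path, for instance, sets the abort event). A standalone model of the pattern; the bitop here is a plain-C stand-in and, unlike the kernel's, is not atomic:

#include <stdio.h>

enum { EV_ABORT, EV_ACK, EV_RESEND };

static int test_and_clear_bit(int bit, unsigned long *events)
{
	unsigned long mask = 1UL << bit;
	int was_set = !!(*events & mask);

	*events &= ~mask;
	return was_set;
}

static void process_call(unsigned long *events)
{
recheck_state:
	if (test_and_clear_bit(EV_ABORT, events)) {
		printf("send ABORT\n");
		goto recheck_state;
	}
	if (test_and_clear_bit(EV_ACK, events)) {
		printf("send ACK\n");
		goto recheck_state;
	}
	if (test_and_clear_bit(EV_RESEND, events)) {
		printf("resend\n");
		*events |= 1UL << EV_ACK;	/* handling may raise new events */
		goto recheck_state;
	}
}

int main(void)
{
	unsigned long events = 1UL << EV_RESEND;

	process_call(&events);	/* prints "resend" then "send ACK" */
	return 0;
}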
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 65691742199b..d4b3293b78fa 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -24,18 +24,13 @@
*/
unsigned int rxrpc_max_call_lifetime = 60 * HZ;
-/*
- * Time till dead call expires after last use (in jiffies).
- */
-unsigned int rxrpc_dead_call_expiry = 2 * HZ;
-
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_UNINITIALISED] = "Uninit ",
[RXRPC_CALL_CLIENT_AWAIT_CONN] = "ClWtConn",
[RXRPC_CALL_CLIENT_SEND_REQUEST] = "ClSndReq",
[RXRPC_CALL_CLIENT_AWAIT_REPLY] = "ClAwtRpl",
[RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl",
- [RXRPC_CALL_CLIENT_FINAL_ACK] = "ClFnlACK",
+ [RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc",
[RXRPC_CALL_SERVER_SECURING] = "SvSecure",
[RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept",
[RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq",
@@ -43,27 +38,49 @@ const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
[RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl",
[RXRPC_CALL_SERVER_AWAIT_ACK] = "SvAwtACK",
[RXRPC_CALL_COMPLETE] = "Complete",
- [RXRPC_CALL_DEAD] = "Dead ",
};
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
[RXRPC_CALL_SUCCEEDED] = "Complete",
- [RXRPC_CALL_SERVER_BUSY] = "SvBusy ",
[RXRPC_CALL_REMOTELY_ABORTED] = "RmtAbort",
[RXRPC_CALL_LOCALLY_ABORTED] = "LocAbort",
[RXRPC_CALL_LOCAL_ERROR] = "LocError",
[RXRPC_CALL_NETWORK_ERROR] = "NetError",
};
+const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
+ [rxrpc_call_new_client] = "NWc",
+ [rxrpc_call_new_service] = "NWs",
+ [rxrpc_call_queued] = "QUE",
+ [rxrpc_call_queued_ref] = "QUR",
+ [rxrpc_call_connected] = "CON",
+ [rxrpc_call_release] = "RLS",
+ [rxrpc_call_seen] = "SEE",
+ [rxrpc_call_got] = "GOT",
+ [rxrpc_call_got_userid] = "Gus",
+ [rxrpc_call_got_kernel] = "Gke",
+ [rxrpc_call_put] = "PUT",
+ [rxrpc_call_put_userid] = "Pus",
+ [rxrpc_call_put_kernel] = "Pke",
+ [rxrpc_call_put_noqueue] = "PNQ",
+ [rxrpc_call_error] = "*E*",
+};
+
struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);
-static void rxrpc_destroy_call(struct work_struct *work);
-static void rxrpc_call_life_expired(unsigned long _call);
-static void rxrpc_dead_call_expired(unsigned long _call);
-static void rxrpc_ack_time_expired(unsigned long _call);
-static void rxrpc_resend_time_expired(unsigned long _call);
+static void rxrpc_call_timer_expired(unsigned long _call)
+{
+ struct rxrpc_call *call = (struct rxrpc_call *)_call;
+
+ _enter("%d", call->debug_id);
+
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+ rxrpc_queue_call(call);
+ }
+}
/*
* find an extant server call
@@ -96,7 +113,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
return NULL;
found_extant_call:
- rxrpc_get_call(call);
+ rxrpc_get_call(call, rxrpc_call_got);
read_unlock(&rx->call_lock);
_leave(" = %p [%d]", call, atomic_read(&call->usage));
return call;
@@ -105,7 +122,7 @@ found_extant_call:
/*
* allocate a new call
*/
-static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
struct rxrpc_call *call;
@@ -113,30 +130,24 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
if (!call)
return NULL;
- call->acks_winsz = 16;
- call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
+ call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
+ sizeof(struct sk_buff *),
gfp);
- if (!call->acks_window) {
- kmem_cache_free(rxrpc_call_jar, call);
- return NULL;
- }
+ if (!call->rxtx_buffer)
+ goto nomem;
+
+ call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
+ if (!call->rxtx_annotations)
+ goto nomem_2;
- setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
- (unsigned long) call);
- setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
- (unsigned long) call);
- setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
- (unsigned long) call);
- setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
- (unsigned long) call);
- INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
+ setup_timer(&call->timer, rxrpc_call_timer_expired,
+ (unsigned long)call);
INIT_WORK(&call->processor, &rxrpc_process_call);
INIT_LIST_HEAD(&call->link);
INIT_LIST_HEAD(&call->chan_wait_link);
INIT_LIST_HEAD(&call->accept_link);
- skb_queue_head_init(&call->rx_queue);
- skb_queue_head_init(&call->rx_oos_queue);
- skb_queue_head_init(&call->knlrecv_queue);
+ INIT_LIST_HEAD(&call->recvmsg_link);
+ INIT_LIST_HEAD(&call->sock_link);
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
rwlock_init(&call->state_lock);
@@ -145,65 +156,65 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
- call->rx_data_expect = 1;
- call->rx_data_eaten = 0;
- call->rx_first_oos = 0;
- call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
- call->creation_jif = jiffies;
+ /* Leave space in the ring to handle a maxed-out jumbo packet */
+ call->rx_winsize = rxrpc_rx_window_size;
+ call->tx_winsize = 16;
+ call->rx_expect_next = 1;
+
+ if (RXRPC_TX_SMSS > 2190)
+ call->cong_cwnd = 2;
+ else if (RXRPC_TX_SMSS > 1095)
+ call->cong_cwnd = 3;
+ else
+ call->cong_cwnd = 4;
+ call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
return call;
+
+nomem_2:
+ kfree(call->rxtx_buffer);
+nomem:
+ kmem_cache_free(rxrpc_call_jar, call);
+ return NULL;
}
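
The cong_cwnd initialisation in rxrpc_alloc_call() above mirrors RFC 5681's initial-window rule: two segments when the sender MSS exceeds 2190 bytes, three when it exceeds 1095, four otherwise. A standalone check of the same thresholds:

#include <stdio.h>

static int initial_cwnd(unsigned int smss)
{
	if (smss > 2190)
		return 2;	/* large segments: start with 2 */
	if (smss > 1095)
		return 3;	/* mid-size segments: start with 3 */
	return 4;		/* small segments: start with 4 */
}

int main(void)
{
	printf("%d %d %d\n",
	       initial_cwnd(4096), initial_cwnd(1500), initial_cwnd(536));
	/* prints: 2 3 4 */
	return 0;
}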
/*
* Allocate a new client call.
*/
-static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
- struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
struct rxrpc_call *call;
+ ktime_t now;
_enter("");
- ASSERT(rx->local != NULL);
-
call = rxrpc_alloc_call(gfp);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
-
- sock_hold(&rx->sk);
- call->socket = rx;
- call->rx_data_post = 1;
call->service_id = srx->srx_service;
+ call->tx_phase = true;
+ now = ktime_get_real();
+ call->acks_latest_ts = now;
+ call->cong_tstamp = now;
_leave(" = %p", call);
return call;
}
/*
- * Begin client call.
+ * Initiate the call ack/resend/expiry timer.
*/
-static int rxrpc_begin_client_call(struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- int ret;
-
- /* Set up or get a connection record and set the protocol parameters,
- * including channel number and call ID.
- */
- ret = rxrpc_connect_call(call, cp, srx, gfp);
- if (ret < 0)
- return ret;
-
- spin_lock(&call->conn->params.peer->lock);
- hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
- spin_unlock(&call->conn->params.peer->lock);
-
- call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
- add_timer(&call->lifetimer);
- return 0;
+ unsigned long expire_at;
+
+ expire_at = jiffies + rxrpc_max_call_lifetime;
+ call->expire_at = expire_at;
+ call->ack_at = expire_at;
+ call->resend_at = expire_at;
+ call->timer.expires = expire_at + 1;
+ rxrpc_set_timer(call, rxrpc_timer_begin);
}
/*
@@ -223,19 +234,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
_enter("%p,%lx", rx, user_call_ID);
- call = rxrpc_alloc_client_call(rx, srx, gfp);
+ call = rxrpc_alloc_client_call(srx, gfp);
if (IS_ERR(call)) {
_leave(" = %ld", PTR_ERR(call));
return call;
}
- trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
- (const void *)user_call_ID);
+ trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+ here, (const void *)user_call_ID);
/* Publish the call, even though it is incompletely set up as yet */
- call->user_call_ID = user_call_ID;
- __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
-
write_lock(&rx->call_lock);
pp = &rx->calls.rb_node;
@@ -249,192 +257,138 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
else if (user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
- goto found_user_ID_now_present;
+ goto error_dup_user_ID;
}
- rxrpc_get_call(call);
-
+ rcu_assign_pointer(call->socket, rx);
+ call->user_call_ID = user_call_ID;
+ __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+ rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
rb_insert_color(&call->sock_node, &rx->calls);
+ list_add(&call->sock_link, &rx->sock_calls);
+
write_unlock(&rx->call_lock);
- write_lock_bh(&rxrpc_call_lock);
+ write_lock(&rxrpc_call_lock);
list_add_tail(&call->link, &rxrpc_calls);
- write_unlock_bh(&rxrpc_call_lock);
+ write_unlock(&rxrpc_call_lock);
- ret = rxrpc_begin_client_call(call, cp, srx, gfp);
+ /* Set up or get a connection record and set the protocol parameters,
+ * including channel number and call ID.
+ */
+ ret = rxrpc_connect_call(call, cp, srx, gfp);
if (ret < 0)
goto error;
- _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
+ trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
+ here, ERR_PTR(ret));
- _leave(" = %p [new]", call);
- return call;
+ spin_lock_bh(&call->conn->params.peer->lock);
+ hlist_add_head(&call->error_link,
+ &call->conn->params.peer->error_targets);
+ spin_unlock_bh(&call->conn->params.peer->lock);
-error:
- write_lock(&rx->call_lock);
- rb_erase(&call->sock_node, &rx->calls);
- write_unlock(&rx->call_lock);
- rxrpc_put_call(call);
+ rxrpc_start_call_timer(call);
- write_lock_bh(&rxrpc_call_lock);
- list_del_init(&call->link);
- write_unlock_bh(&rxrpc_call_lock);
+ _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
- set_bit(RXRPC_CALL_RELEASED, &call->flags);
- call->state = RXRPC_CALL_DEAD;
- rxrpc_put_call(call);
- _leave(" = %d", ret);
- return ERR_PTR(ret);
+ _leave(" = %p [new]", call);
+ return call;
/* We unexpectedly found the user ID in the list after taking
* the call_lock. This shouldn't happen unless the user races
* with itself and tries to add the same user ID twice at the
* same time in different threads.
*/
-found_user_ID_now_present:
+error_dup_user_ID:
write_unlock(&rx->call_lock);
- set_bit(RXRPC_CALL_RELEASED, &call->flags);
- call->state = RXRPC_CALL_DEAD;
- rxrpc_put_call(call);
- _leave(" = -EEXIST [%p]", call);
- return ERR_PTR(-EEXIST);
+ ret = -EEXIST;
+
+error:
+ __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_CALL_DEAD, ret);
+ trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
+ here, ERR_PTR(ret));
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
+ _leave(" = %d", ret);
+ return ERR_PTR(ret);
}
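
The user_call_ID publication in rxrpc_new_client_call is the classic rbtree insert-walk: descend while remembering the link slot, bail out if the key already exists (the -EEXIST path above), otherwise link the new node where the walk stopped. The same shape on a plain, unbalanced binary tree, purely for illustration:

    #include <stddef.h>

    struct node {
        unsigned long key;
        struct node *left, *right;
    };

    /* Insert n, or fail if key is already present (cf. -EEXIST above). */
    static int tree_insert(struct node **root, struct node *n)
    {
        struct node **pp = root;

        while (*pp) {
            struct node *x = *pp;

            if (n->key < x->key)
                pp = &x->left;
            else if (n->key > x->key)
                pp = &x->right;
            else
                return -1;    /* duplicate user ID */
        }
        n->left = n->right = NULL;
        *pp = n;              /* link at the slot the walk ended on */
        return 0;
    }
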
/*
- * set up an incoming call
- * - called in process context with IRQs enabled
+ * Set up an incoming call. call->conn points to the connection.
+ * This is called in BH context and isn't allowed to fail.
*/
-struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
- struct rxrpc_connection *conn,
- struct sk_buff *skb)
+void rxrpc_incoming_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct sk_buff *skb)
{
+ struct rxrpc_connection *conn = call->conn;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_call *call, *candidate;
- const void *here = __builtin_return_address(0);
- u32 call_id, chan;
-
- _enter(",%d", conn->debug_id);
-
- ASSERT(rx != NULL);
-
- candidate = rxrpc_alloc_call(GFP_NOIO);
- if (!candidate)
- return ERR_PTR(-EBUSY);
-
- trace_rxrpc_call(candidate, 1, atomic_read(&candidate->usage),
- 0, here, NULL);
-
- chan = sp->hdr.cid & RXRPC_CHANNELMASK;
- candidate->socket = rx;
- candidate->conn = conn;
- candidate->peer = conn->params.peer;
- candidate->cid = sp->hdr.cid;
- candidate->call_id = sp->hdr.callNumber;
- candidate->rx_data_post = 0;
- candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
- candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
- if (conn->security_ix > 0)
- candidate->state = RXRPC_CALL_SERVER_SECURING;
-
- spin_lock(&conn->channel_lock);
-
- /* set the channel for this call */
- call = rcu_dereference_protected(conn->channels[chan].call,
- lockdep_is_held(&conn->channel_lock));
-
- _debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
- if (call && call->call_id == sp->hdr.callNumber) {
- /* already set; must've been a duplicate packet */
- _debug("extant call [%d]", call->state);
- ASSERTCMP(call->conn, ==, conn);
-
- read_lock(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
- rxrpc_queue_call(call);
- case RXRPC_CALL_REMOTELY_ABORTED:
- read_unlock(&call->state_lock);
- goto aborted_call;
- default:
- rxrpc_get_call(call);
- read_unlock(&call->state_lock);
- goto extant_call;
- }
- }
-
- if (call) {
- /* it seems the channel is still in use from the previous call
- * - ditch the old binding if its call is now complete */
- _debug("CALL: %u { %s }",
- call->debug_id, rxrpc_call_states[call->state]);
-
- if (call->state == RXRPC_CALL_COMPLETE) {
- __rxrpc_disconnect_call(conn, call);
- } else {
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = -EBUSY");
- return ERR_PTR(-EBUSY);
- }
- }
-
- /* check the call number isn't duplicate */
- _debug("check dup");
- call_id = sp->hdr.callNumber;
-
- /* We just ignore calls prior to the current call ID. Terminated calls
- * are handled via the connection.
+ u32 chan;
+
+ _enter(",%d", call->conn->debug_id);
+
+ rcu_assign_pointer(call->socket, rx);
+ call->call_id = sp->hdr.callNumber;
+ call->service_id = sp->hdr.serviceId;
+ call->cid = sp->hdr.cid;
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ if (sp->hdr.securityIndex > 0)
+ call->state = RXRPC_CALL_SERVER_SECURING;
+ call->cong_tstamp = skb->tstamp;
+
+ /* Set the channel for this call. We don't get channel_lock as we're
+ * only defending against the data_ready handler (which we're called
+ * from) and the RESPONSE packet parser (which is only really
+ * interested in call_counter and can cope with a disagreement with the
+ * call pointer).
*/
- if (call_id <= conn->channels[chan].call_counter)
- goto old_call; /* TODO: Just drop packet */
-
- /* make the call available */
- _debug("new call");
- call = candidate;
- candidate = NULL;
- conn->channels[chan].call_counter = call_id;
+ chan = sp->hdr.cid & RXRPC_CHANNELMASK;
+ conn->channels[chan].call_counter = call->call_id;
+ conn->channels[chan].call_id = call->call_id;
rcu_assign_pointer(conn->channels[chan].call, call);
- sock_hold(&rx->sk);
- rxrpc_get_connection(conn);
- rxrpc_get_peer(call->peer);
- spin_unlock(&conn->channel_lock);
spin_lock(&conn->params.peer->lock);
hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
spin_unlock(&conn->params.peer->lock);
- write_lock_bh(&rxrpc_call_lock);
- list_add_tail(&call->link, &rxrpc_calls);
- write_unlock_bh(&rxrpc_call_lock);
-
- call->service_id = conn->params.service_id;
-
_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
- call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
- add_timer(&call->lifetimer);
- _leave(" = %p {%d} [new]", call, call->debug_id);
- return call;
+ rxrpc_start_call_timer(call);
+ _leave("");
+}
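
rxrpc_incoming_call derives the channel from the low bits of the cid: a connection multiplexes four calls, so the bottom two bits select the channel and the remainder name the connection. A tiny decomposition example (the mask values follow the rx wire format; treat them as assumptions here):

    #include <stdio.h>

    #define RXRPC_CHANNELMASK 0x3u          /* low 2 bits: channel 0-3 */
    #define RXRPC_CIDMASK     (~RXRPC_CHANNELMASK)

    int main(void)
    {
        unsigned int cid = 0x1234567b;

        printf("conn=%#x chan=%u\n",
               cid & RXRPC_CIDMASK, cid & RXRPC_CHANNELMASK);
        /* prints: conn=0x12345678 chan=3 */
        return 0;
    }
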
-extant_call:
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
- return call;
+/*
+ * Queue a call's work processor, getting a ref to pass to the work queue.
+ */
+bool rxrpc_queue_call(struct rxrpc_call *call)
+{
+ const void *here = __builtin_return_address(0);
+ int n = __atomic_add_unless(&call->usage, 1, 0);
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&call->processor))
+ trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+ else
+ rxrpc_put_call(call, rxrpc_call_put_noqueue);
+ return true;
+}
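
rxrpc_queue_call only takes a ref when the usage count is still non-zero; __atomic_add_unless(&call->usage, 1, 0) refuses to resurrect a dying call. With C11 atomics the same guard is a compare-exchange loop; this sketches the pattern, not the kernel primitive:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the object is still live (count != 0). */
    static bool get_unless_zero(atomic_int *usage)
    {
        int n = atomic_load(usage);

        do {
            if (n == 0)
                return false;    /* already dying; don't resurrect */
        } while (!atomic_compare_exchange_weak(usage, &n, n + 1));
        return true;
    }
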
-aborted_call:
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = -ECONNABORTED");
- return ERR_PTR(-ECONNABORTED);
-
-old_call:
- spin_unlock(&conn->channel_lock);
- kmem_cache_free(rxrpc_call_jar, candidate);
- _leave(" = -ECONNRESET [old]");
- return ERR_PTR(-ECONNRESET);
+/*
+ * Queue a call's work processor, passing the caller's ref to the work queue.
+ */
+bool __rxrpc_queue_call(struct rxrpc_call *call)
+{
+ const void *here = __builtin_return_address(0);
+ int n = atomic_read(&call->usage);
+ ASSERTCMP(n, >=, 1);
+ if (rxrpc_queue_work(&call->processor))
+ trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+ else
+ rxrpc_put_call(call, rxrpc_call_put_noqueue);
+ return true;
}
/*
@@ -445,157 +399,88 @@ void rxrpc_see_call(struct rxrpc_call *call)
const void *here = __builtin_return_address(0);
if (call) {
int n = atomic_read(&call->usage);
- int m = atomic_read(&call->skb_count);
- trace_rxrpc_call(call, 2, n, m, here, 0);
+ trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
}
}
/*
* Note the addition of a ref on a call.
*/
-void rxrpc_get_call(struct rxrpc_call *call)
+void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&call->usage);
- int m = atomic_read(&call->skb_count);
- trace_rxrpc_call(call, 3, n, m, here, 0);
+ trace_rxrpc_call(call, op, n, here, NULL);
}
/*
- * Note the addition of a ref on a call for a socket buffer.
+ * Detach a call from its owning socket.
*/
-void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
+void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(&call->usage);
- int m = atomic_inc_return(&call->skb_count);
-
- trace_rxrpc_call(call, 4, n, m, here, skb);
-}
-
-/*
- * detach a call from a socket and set up for release
- */
-void rxrpc_release_call(struct rxrpc_call *call)
-{
struct rxrpc_connection *conn = call->conn;
- struct rxrpc_sock *rx = call->socket;
+ bool put = false;
+ int i;
- _enter("{%d,%d,%d,%d}",
- call->debug_id, atomic_read(&call->usage),
- atomic_read(&call->ackr_not_idle),
- call->rx_first_oos);
+ _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
- rxrpc_see_call(call);
+ trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+ here, (const void *)call->flags);
+
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
spin_lock_bh(&call->lock);
if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
BUG();
spin_unlock_bh(&call->lock);
- /* dissociate from the socket
- * - the socket's ref on the call is passed to the death timer
- */
- _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
+ del_timer_sync(&call->timer);
- spin_lock(&conn->params.peer->lock);
- hlist_del_init(&call->error_link);
- spin_unlock(&conn->params.peer->lock);
+ /* Make sure we don't get any more notifications */
+ write_lock_bh(&rx->recvmsg_lock);
- write_lock_bh(&rx->call_lock);
- if (!list_empty(&call->accept_link)) {
+ if (!list_empty(&call->recvmsg_link)) {
_debug("unlinking once-pending call %p { e=%lx f=%lx }",
call, call->events, call->flags);
- ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
- list_del_init(&call->accept_link);
- sk_acceptq_removed(&rx->sk);
- } else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
- rb_erase(&call->sock_node, &rx->calls);
- memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
- clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
+ list_del(&call->recvmsg_link);
+ put = true;
}
- write_unlock_bh(&rx->call_lock);
- /* free up the channel for reuse */
- write_lock_bh(&call->state_lock);
+ /* list_empty() must return false in rxrpc_notify_socket() */
+ call->recvmsg_link.next = NULL;
+ call->recvmsg_link.prev = NULL;
- if (call->state < RXRPC_CALL_COMPLETE &&
- call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
- _debug("+++ ABORTING STATE %d +++\n", call->state);
- __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
- }
- write_unlock_bh(&call->state_lock);
-
- rxrpc_disconnect_call(call);
-
- /* clean up the Rx queue */
- if (!skb_queue_empty(&call->rx_queue) ||
- !skb_queue_empty(&call->rx_oos_queue)) {
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
-
- _debug("purge Rx queues");
-
- spin_lock_bh(&call->lock);
- while ((skb = skb_dequeue(&call->rx_queue)) ||
- (skb = skb_dequeue(&call->rx_oos_queue))) {
- spin_unlock_bh(&call->lock);
-
- sp = rxrpc_skb(skb);
- _debug("- zap %s %%%u #%u",
- rxrpc_pkts[sp->hdr.type],
- sp->hdr.serial, sp->hdr.seq);
- rxrpc_free_skb(skb);
- spin_lock_bh(&call->lock);
- }
- spin_unlock_bh(&call->lock);
- }
+ write_unlock_bh(&rx->recvmsg_lock);
+ if (put)
+ rxrpc_put_call(call, rxrpc_call_put);
- del_timer_sync(&call->resend_timer);
- del_timer_sync(&call->ack_timer);
- del_timer_sync(&call->lifetimer);
- call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
- add_timer(&call->deadspan);
+ write_lock(&rx->call_lock);
- _leave("");
-}
+ if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+ rb_erase(&call->sock_node, &rx->calls);
+ memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
+ rxrpc_put_call(call, rxrpc_call_put_userid);
+ }
-/*
- * handle a dead call being ready for reaping
- */
-static void rxrpc_dead_call_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
+ list_del(&call->sock_link);
+ write_unlock(&rx->call_lock);
- _enter("{%d}", call->debug_id);
+ _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
- rxrpc_see_call(call);
- write_lock_bh(&call->state_lock);
- call->state = RXRPC_CALL_DEAD;
- write_unlock_bh(&call->state_lock);
- rxrpc_put_call(call);
-}
+ if (conn)
+ rxrpc_disconnect_call(call);
-/*
- * mark a call as to be released, aborting it if it's still in progress
- * - called with softirqs disabled
- */
-static void rxrpc_mark_call_released(struct rxrpc_call *call)
-{
- bool sched = false;
-
- rxrpc_see_call(call);
- write_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD) {
- sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
- if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- sched = true;
+ for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
+ rxrpc_free_skb(call->rxtx_buffer[i],
+ (call->tx_phase ? rxrpc_skb_tx_cleaned :
+ rxrpc_skb_rx_cleaned));
+ call->rxtx_buffer[i] = NULL;
}
- write_unlock(&call->state_lock);
- if (sched)
- rxrpc_queue_call(call);
+
+ _leave("");
}
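
Note the put flag idiom in rxrpc_release_call above: the recvmsg_link unlink happens under rx->recvmsg_lock, but the matching ref drop is deferred until after the unlock, since the final put can do heavyweight teardown that must not run under a BH-disabled spinlock. A self-contained sketch of that shape (all names here are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct obj {
        atomic_int usage;
        bool on_queue;    /* stands in for the recvmsg_link listing */
    };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->usage, 1) == 1)
            free(o);    /* heavyweight teardown lives here */
    }

    /* Unlink under the lock, but defer the ref drop until after the
     * unlock: the final put must not run with the lock held.
     */
    static void detach(struct obj *o)
    {
        bool put = false;

        pthread_mutex_lock(&q_lock);
        if (o->on_queue) {
            o->on_queue = false;    /* cf. list_del() */
            put = true;             /* the queue's ref becomes ours */
        }
        pthread_mutex_unlock(&q_lock);

        if (put)
            obj_put(o);
    }
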
/*
@@ -604,70 +489,52 @@ static void rxrpc_mark_call_released(struct rxrpc_call *call)
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
struct rxrpc_call *call;
- struct rb_node *p;
_enter("%p", rx);
- read_lock_bh(&rx->call_lock);
-
- /* kill the not-yet-accepted incoming calls */
- list_for_each_entry(call, &rx->secureq, accept_link) {
- rxrpc_mark_call_released(call);
- }
-
- list_for_each_entry(call, &rx->acceptq, accept_link) {
- rxrpc_mark_call_released(call);
+ while (!list_empty(&rx->to_be_accepted)) {
+ call = list_entry(rx->to_be_accepted.next,
+ struct rxrpc_call, accept_link);
+ list_del(&call->accept_link);
+ rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, ECONNRESET);
+ rxrpc_put_call(call, rxrpc_call_put);
}
- /* mark all the calls as no longer wanting incoming packets */
- for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
- call = rb_entry(p, struct rxrpc_call, sock_node);
- rxrpc_mark_call_released(call);
+ while (!list_empty(&rx->sock_calls)) {
+ call = list_entry(rx->sock_calls.next,
+ struct rxrpc_call, sock_link);
+ rxrpc_get_call(call, rxrpc_call_got);
+ rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, ECONNRESET);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
+ rxrpc_release_call(rx, call);
+ rxrpc_put_call(call, rxrpc_call_put);
}
- read_unlock_bh(&rx->call_lock);
_leave("");
}
/*
* release a call
*/
-void rxrpc_put_call(struct rxrpc_call *call)
+void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
const void *here = __builtin_return_address(0);
- int n, m;
+ int n;
ASSERT(call != NULL);
n = atomic_dec_return(&call->usage);
- m = atomic_read(&call->skb_count);
- trace_rxrpc_call(call, 5, n, m, here, NULL);
+ trace_rxrpc_call(call, op, n, here, NULL);
ASSERTCMP(n, >=, 0);
if (n == 0) {
_debug("call %d dead", call->debug_id);
- WARN_ON(m != 0);
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
- rxrpc_queue_work(&call->destroyer);
- }
-}
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-/*
- * Release a call ref held by a socket buffer.
- */
-void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
-{
- const void *here = __builtin_return_address(0);
- int n, m;
+ write_lock(&rxrpc_call_lock);
+ list_del_init(&call->link);
+ write_unlock(&rxrpc_call_lock);
- n = atomic_dec_return(&call->usage);
- m = atomic_dec_return(&call->skb_count);
- trace_rxrpc_call(call, 6, n, m, here, skb);
- ASSERTCMP(n, >=, 0);
- if (n == 0) {
- _debug("call %d dead", call->debug_id);
- WARN_ON(m != 0);
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
- rxrpc_queue_work(&call->destroyer);
+ rxrpc_cleanup_call(call);
}
}
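
rxrpc_put_call is the complementary side of the refcounting: drop a ref and let exactly the thread that reaches zero unlink and destroy. A minimal C11 sketch, with a cleanup callback standing in for rxrpc_cleanup_call:

    #include <assert.h>
    #include <stdatomic.h>

    struct counted {
        atomic_int usage;
    };

    /* Drop a ref; exactly one caller observes the count hit zero and
     * runs the cleanup.
     */
    static void put_counted(struct counted *c,
                            void (*cleanup)(struct counted *))
    {
        int n = atomic_fetch_sub(&c->usage, 1) - 1;

        assert(n >= 0);
        if (n == 0)
            cleanup(c);
    }
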
@@ -678,99 +545,53 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
- rxrpc_purge_queue(&call->rx_queue);
- rxrpc_purge_queue(&call->knlrecv_queue);
rxrpc_put_peer(call->peer);
+ kfree(call->rxtx_buffer);
+ kfree(call->rxtx_annotations);
kmem_cache_free(rxrpc_call_jar, call);
}
/*
* clean up a call
*/
-static void rxrpc_cleanup_call(struct rxrpc_call *call)
+void rxrpc_cleanup_call(struct rxrpc_call *call)
{
- _net("DESTROY CALL %d", call->debug_id);
+ int i;
- ASSERT(call->socket);
+ _net("DESTROY CALL %d", call->debug_id);
memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
- del_timer_sync(&call->lifetimer);
- del_timer_sync(&call->deadspan);
- del_timer_sync(&call->ack_timer);
- del_timer_sync(&call->resend_timer);
+ del_timer_sync(&call->timer);
+ ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
- ASSERTCMP(call->events, ==, 0);
- if (work_pending(&call->processor)) {
- _debug("defer destroy");
- rxrpc_queue_work(&call->destroyer);
- return;
- }
-
ASSERTCMP(call->conn, ==, NULL);
- if (call->acks_window) {
- _debug("kill Tx window %d",
- CIRC_CNT(call->acks_head, call->acks_tail,
- call->acks_winsz));
- smp_mb();
- while (CIRC_CNT(call->acks_head, call->acks_tail,
- call->acks_winsz) > 0) {
- struct rxrpc_skb_priv *sp;
- unsigned long _skb;
-
- _skb = call->acks_window[call->acks_tail] & ~1;
- sp = rxrpc_skb((struct sk_buff *)_skb);
- _debug("+++ clear Tx %u", sp->hdr.seq);
- rxrpc_free_skb((struct sk_buff *)_skb);
- call->acks_tail =
- (call->acks_tail + 1) & (call->acks_winsz - 1);
- }
-
- kfree(call->acks_window);
- }
+ /* Clean up the Rx/Tx buffer */
+ for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
+ rxrpc_free_skb(call->rxtx_buffer[i],
+ (call->tx_phase ? rxrpc_skb_tx_cleaned :
+ rxrpc_skb_rx_cleaned));
- rxrpc_free_skb(call->tx_pending);
+ rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
- rxrpc_purge_queue(&call->rx_queue);
- ASSERT(skb_queue_empty(&call->rx_oos_queue));
- rxrpc_purge_queue(&call->knlrecv_queue);
- sock_put(&call->socket->sk);
call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
/*
- * destroy a call
- */
-static void rxrpc_destroy_call(struct work_struct *work)
-{
- struct rxrpc_call *call =
- container_of(work, struct rxrpc_call, destroyer);
-
- _enter("%p{%d,%x,%p}",
- call, atomic_read(&call->usage), call->cid, call->conn);
-
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
-
- write_lock_bh(&rxrpc_call_lock);
- list_del_init(&call->link);
- write_unlock_bh(&rxrpc_call_lock);
-
- rxrpc_cleanup_call(call);
- _leave("");
-}
-
-/*
- * preemptively destroy all the call records from a transport endpoint rather
- * than waiting for them to time out
+ * Make sure that all calls are gone.
*/
void __exit rxrpc_destroy_all_calls(void)
{
struct rxrpc_call *call;
_enter("");
- write_lock_bh(&rxrpc_call_lock);
+
+ if (list_empty(&rxrpc_calls))
+ return;
+
+ write_lock(&rxrpc_call_lock);
while (!list_empty(&rxrpc_calls)) {
call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
@@ -779,87 +600,15 @@ void __exit rxrpc_destroy_all_calls(void)
rxrpc_see_call(call);
list_del_init(&call->link);
- switch (atomic_read(&call->usage)) {
- case 0:
- ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
- break;
- case 1:
- if (del_timer_sync(&call->deadspan) != 0 &&
- call->state != RXRPC_CALL_DEAD)
- rxrpc_dead_call_expired((unsigned long) call);
- if (call->state != RXRPC_CALL_DEAD)
- break;
- default:
- pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
- call, atomic_read(&call->usage),
- atomic_read(&call->ackr_not_idle),
- rxrpc_call_states[call->state],
- call->flags, call->events);
- if (!skb_queue_empty(&call->rx_queue))
- pr_err("Rx queue occupied\n");
- if (!skb_queue_empty(&call->rx_oos_queue))
- pr_err("OOS queue occupied\n");
- break;
- }
-
- write_unlock_bh(&rxrpc_call_lock);
+ pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
+ call, atomic_read(&call->usage),
+ rxrpc_call_states[call->state],
+ call->flags, call->events);
+
+ write_unlock(&rxrpc_call_lock);
cond_resched();
- write_lock_bh(&rxrpc_call_lock);
+ write_lock(&rxrpc_call_lock);
}
- write_unlock_bh(&rxrpc_call_lock);
- _leave("");
-}
-
-/*
- * handle call lifetime being exceeded
- */
-static void rxrpc_call_life_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
- _enter("{%d}", call->debug_id);
-
- rxrpc_see_call(call);
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
- rxrpc_queue_call(call);
-}
-
-/*
- * handle resend timer expiry
- * - may not take call->state_lock as this can deadlock against del_timer_sync()
- */
-static void rxrpc_resend_time_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
- _enter("{%d}", call->debug_id);
-
- rxrpc_see_call(call);
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
- rxrpc_queue_call(call);
-}
-
-/*
- * handle ACK timer expiry
- */
-static void rxrpc_ack_time_expired(unsigned long _call)
-{
- struct rxrpc_call *call = (struct rxrpc_call *) _call;
-
- _enter("{%d}", call->debug_id);
-
- rxrpc_see_call(call);
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- rxrpc_queue_call(call);
+ write_unlock(&rxrpc_call_lock);
}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 82de1aeaef21..c76a125df891 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -105,6 +105,14 @@ static void rxrpc_discard_expired_client_conns(struct work_struct *);
static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
rxrpc_discard_expired_client_conns);
+const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5] = {
+ [RXRPC_CONN_CLIENT_INACTIVE] = "Inac",
+ [RXRPC_CONN_CLIENT_WAITING] = "Wait",
+ [RXRPC_CONN_CLIENT_ACTIVE] = "Actv",
+ [RXRPC_CONN_CLIENT_CULLED] = "Cull",
+ [RXRPC_CONN_CLIENT_IDLE] = "Idle",
+};
+
/*
* Get a connection ID and epoch for a client connection from the global pool.
* The connection struct pointer is then recorded in the idr radix tree. The
@@ -220,6 +228,9 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
rxrpc_get_local(conn->params.local);
key_get(conn->params.key);
+ trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
+ __builtin_return_address(0));
+ trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
_leave(" = %p", conn);
return conn;
@@ -348,6 +359,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
if (cp->exclusive) {
call->conn = candidate;
+ call->security_ix = candidate->security_ix;
_leave(" = 0 [exclusive %d]", candidate->debug_id);
return 0;
}
@@ -384,6 +396,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
rb_replace_node(&conn->client_node,
&candidate->client_node,
&local->client_conns);
+ trace_rxrpc_client(conn, -1, rxrpc_client_replace);
goto candidate_published;
}
}
@@ -395,6 +408,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
candidate_published:
set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
call->conn = candidate;
+ call->security_ix = candidate->security_ix;
spin_unlock(&local->client_conns_lock);
_leave(" = 0 [new %d]", candidate->debug_id);
return 0;
@@ -407,11 +421,15 @@ found_extant_conn:
_debug("found conn");
spin_unlock(&local->client_conns_lock);
- rxrpc_put_connection(candidate);
- candidate = NULL;
+ if (candidate) {
+ trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
+ rxrpc_put_connection(candidate);
+ candidate = NULL;
+ }
spin_lock(&conn->channel_lock);
call->conn = conn;
+ call->security_ix = conn->security_ix;
list_add(&call->chan_wait_link, &conn->waiting_calls);
spin_unlock(&conn->channel_lock);
_leave(" = 0 [extant %d]", conn->debug_id);
@@ -430,6 +448,7 @@ error:
*/
static void rxrpc_activate_conn(struct rxrpc_connection *conn)
{
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
rxrpc_nr_active_client_conns++;
list_move_tail(&conn->cache_link, &rxrpc_active_client_conns);
@@ -459,8 +478,10 @@ static void rxrpc_animate_client_conn(struct rxrpc_connection *conn)
spin_lock(&rxrpc_client_conn_cache_lock);
nr_conns = rxrpc_nr_client_conns;
- if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags))
+ if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_count);
rxrpc_nr_client_conns = nr_conns + 1;
+ }
switch (conn->cache_state) {
case RXRPC_CONN_CLIENT_ACTIVE:
@@ -491,6 +512,7 @@ activate_conn:
wait_for_capacity:
_debug("wait");
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
list_move_tail(&conn->cache_link, &rxrpc_waiting_client_conns);
goto out_unlock;
@@ -521,6 +543,8 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
struct rxrpc_call, chan_wait_link);
u32 call_id = chan->call_counter + 1;
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
+
write_lock_bh(&call->state_lock);
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
write_unlock_bh(&call->state_lock);
@@ -560,6 +584,8 @@ static void rxrpc_activate_channels(struct rxrpc_connection *conn)
_enter("%d", conn->debug_id);
+ trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);
+
if (conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE ||
conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
return;
@@ -654,10 +680,13 @@ int rxrpc_connect_call(struct rxrpc_call *call,
* had a chance at re-use (the per-connection security negotiation is
* expensive).
*/
-static void rxrpc_expose_client_conn(struct rxrpc_connection *conn)
+static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
+ unsigned int channel)
{
- if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags))
+ if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
+ trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
rxrpc_get_connection(conn);
+ }
}
/*
@@ -666,9 +695,9 @@ static void rxrpc_expose_client_conn(struct rxrpc_connection *conn)
*/
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
+ unsigned int channel = call->cid & RXRPC_CHANNELMASK;
struct rxrpc_connection *conn = call->conn;
- struct rxrpc_channel *chan =
- &conn->channels[call->cid & RXRPC_CHANNELMASK];
+ struct rxrpc_channel *chan = &conn->channels[channel];
if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
/* Mark the call ID as being used. If the callNumber counter
@@ -679,7 +708,7 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
chan->call_counter++;
if (chan->call_counter >= INT_MAX)
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
- rxrpc_expose_client_conn(conn);
+ rxrpc_expose_client_conn(conn, channel);
}
}
@@ -692,6 +721,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
struct rxrpc_connection *conn = call->conn;
struct rxrpc_channel *chan = &conn->channels[channel];
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
spin_lock(&conn->channel_lock);
@@ -706,6 +736,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
list_del_init(&call->chan_wait_link);
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);
+
/* We must deactivate or idle the connection if it's now
* waiting for nothing.
*/
@@ -718,7 +750,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
}
ASSERTCMP(rcu_access_pointer(chan->call), ==, call);
- ASSERTCMP(atomic_read(&conn->usage), >=, 2);
/* If a client call was exposed to the world, we save the result for
* retransmission.
@@ -737,7 +768,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
/* See if we can pass the channel directly to another call. */
if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
!list_empty(&conn->waiting_calls)) {
- _debug("pass chan");
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
rxrpc_activate_one_channel(conn, channel);
goto out_2;
}
@@ -760,7 +791,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
goto out;
}
- _debug("pass chan 2");
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
rxrpc_activate_one_channel(conn, channel);
goto out;
@@ -792,7 +823,7 @@ idle_connection:
* immediately or moved to the idle list for a short while.
*/
if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
- _debug("make idle");
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
conn->idle_timestamp = jiffies;
conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
list_move_tail(&conn->cache_link, &rxrpc_idle_client_conns);
@@ -802,7 +833,7 @@ idle_connection:
&rxrpc_client_conn_reap,
rxrpc_conn_idle_client_expiry);
} else {
- _debug("make inactive");
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
list_del_init(&conn->cache_link);
}
@@ -815,10 +846,12 @@ idle_connection:
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_connection *next;
+ struct rxrpc_connection *next = NULL;
struct rxrpc_local *local = conn->params.local;
unsigned int nr_conns;
+ trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
+
if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
spin_lock(&local->client_conns_lock);
if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
@@ -831,24 +864,23 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);
- if (!test_bit(RXRPC_CONN_COUNTED, &conn->flags))
- return NULL;
-
- spin_lock(&rxrpc_client_conn_cache_lock);
- nr_conns = --rxrpc_nr_client_conns;
+ if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
+ spin_lock(&rxrpc_client_conn_cache_lock);
+ nr_conns = --rxrpc_nr_client_conns;
+
+ if (nr_conns < rxrpc_max_client_connections &&
+ !list_empty(&rxrpc_waiting_client_conns)) {
+ next = list_entry(rxrpc_waiting_client_conns.next,
+ struct rxrpc_connection, cache_link);
+ rxrpc_get_connection(next);
+ rxrpc_activate_conn(next);
+ }
- next = NULL;
- if (nr_conns < rxrpc_max_client_connections &&
- !list_empty(&rxrpc_waiting_client_conns)) {
- next = list_entry(rxrpc_waiting_client_conns.next,
- struct rxrpc_connection, cache_link);
- rxrpc_get_connection(next);
- rxrpc_activate_conn(next);
+ spin_unlock(&rxrpc_client_conn_cache_lock);
}
- spin_unlock(&rxrpc_client_conn_cache_lock);
rxrpc_kill_connection(conn);
-
if (next)
rxrpc_activate_channels(next);
@@ -863,20 +895,18 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
*/
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_connection *next;
+ const void *here = __builtin_return_address(0);
+ int n;
do {
- _enter("%p{u=%d,d=%d}",
- conn, atomic_read(&conn->usage), conn->debug_id);
-
- next = rxrpc_put_one_client_conn(conn);
-
- if (!next)
- break;
- conn = next;
- } while (atomic_dec_and_test(&conn->usage));
-
- _leave("");
+ n = atomic_dec_return(&conn->usage);
+ trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
+ if (n > 0)
+ return;
+ ASSERTCMP(n, >=, 0);
+
+ conn = rxrpc_put_one_client_conn(conn);
+ } while (conn);
}
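
The do/while in rxrpc_put_client_conn exists because rxrpc_put_one_client_conn can hand back a further connection whose ref must also be dropped; iterating keeps the teardown chain off the stack where recursing through put would not. The control shape, with hypothetical names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct conn {
        atomic_int usage;
        struct conn *handoff;    /* hypothetical: a conn owed a put */
    };

    /* Tear down c; return any further object whose ref we now owe. */
    static struct conn *teardown(struct conn *c)
    {
        struct conn *next = c->handoff;

        free(c);
        return next;
    }

    /* Iterative put: each teardown may yield another object to put.
     * Looping bounds the stack depth.
     */
    static void put_conn(struct conn *c)
    {
        do {
            if (atomic_fetch_sub(&c->usage, 1) - 1 > 0)
                return;    /* other holders remain */
            c = teardown(c);
        } while (c);
    }
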
/*
@@ -907,9 +937,11 @@ static void rxrpc_cull_active_client_conns(void)
ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_ACTIVE);
if (list_empty(&conn->waiting_calls)) {
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
list_del_init(&conn->cache_link);
} else {
+ trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
list_move_tail(&conn->cache_link,
&rxrpc_waiting_client_conns);
@@ -983,7 +1015,7 @@ next:
goto not_yet_expired;
}
- _debug("discard conn %d", conn->debug_id);
+ trace_rxrpc_client(conn, -1, rxrpc_client_discard);
if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
BUG();
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 9db90f4f768d..37609ce89f52 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -15,10 +15,6 @@
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
-#include <linux/udp.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
@@ -101,6 +97,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
pkt.info.maxMTU = htonl(mtu);
pkt.info.rwind = htonl(rxrpc_rx_window_size);
pkt.info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
+ pkt.whdr.flags |= RXRPC_SLOW_START_OK;
len += sizeof(pkt.ack) + sizeof(pkt.info);
break;
}
@@ -123,6 +120,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
_proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
break;
case RXRPC_PACKET_TYPE_ACK:
+ trace_rxrpc_tx_ack(NULL, serial, chan->last_seq, 0,
+ RXRPC_ACK_DUPLICATE, 0);
_proto("Tx ACK %%%u [re]", serial);
break;
}
@@ -140,16 +139,10 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
u32 abort_code, int error)
{
struct rxrpc_call *call;
- bool queue;
- int i, bit;
+ int i;
_enter("{%d},%x", conn->debug_id, abort_code);
- if (compl == RXRPC_CALL_LOCALLY_ABORTED)
- bit = RXRPC_CALL_EV_CONN_ABORT;
- else
- bit = RXRPC_CALL_EV_RCVD_ABORT;
-
spin_lock(&conn->channel_lock);
for (i = 0; i < RXRPC_MAXCALLS; i++) {
@@ -157,16 +150,13 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
conn->channels[i].call,
lockdep_is_held(&conn->channel_lock));
if (call) {
- rxrpc_see_call(call);
- write_lock_bh(&call->state_lock);
- if (rxrpc_set_call_completion(call, compl, abort_code,
- error)) {
- set_bit(bit, &call->events);
- queue = true;
- }
- write_unlock_bh(&call->state_lock);
- if (queue)
- rxrpc_queue_call(call);
+ if (compl == RXRPC_CALL_LOCALLY_ABORTED)
+ trace_rxrpc_abort("CON", call->cid,
+ call->call_id, 0,
+ abort_code, error);
+ if (rxrpc_set_call_completion(call, compl,
+ abort_code, error))
+ rxrpc_notify_socket(call);
}
}
@@ -245,17 +235,18 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
/*
* mark a call as being on a now-secured channel
- * - must be called with softirqs disabled
+ * - must be called with BHs disabled.
*/
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
_enter("%p", call);
if (call) {
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_SECURED, &call->events))
- rxrpc_queue_call(call);
- read_unlock(&call->state_lock);
+ write_lock_bh(&call->state_lock);
+ if (call->state == RXRPC_CALL_SERVER_SECURING) {
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ rxrpc_notify_socket(call);
+ }
+ write_unlock_bh(&call->state_lock);
}
}
@@ -272,7 +263,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
int loop, ret;
if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
- kleave(" = -ECONNABORTED [%u]", conn->state);
+ _leave(" = -ECONNABORTED [%u]", conn->state);
return -ECONNABORTED;
}
@@ -285,14 +276,14 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
return 0;
case RXRPC_PACKET_TYPE_ABORT:
- if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
+ if (skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) < 0)
return -EPROTO;
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- rxrpc_abort_calls(conn, 0, RXRPC_CALL_REMOTELY_ABORTED,
- abort_code);
+ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
+ abort_code, ECONNABORTED);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
@@ -317,14 +308,16 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
conn->state = RXRPC_CONN_SERVICE;
+ spin_unlock(&conn->state_lock);
for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
rxrpc_call_is_secure(
rcu_dereference_protected(
conn->channels[loop].call,
lockdep_is_held(&conn->channel_lock)));
+ } else {
+ spin_unlock(&conn->state_lock);
}
- spin_unlock(&conn->state_lock);
spin_unlock(&conn->channel_lock);
return 0;
@@ -387,7 +380,7 @@ void rxrpc_process_connection(struct work_struct *work)
u32 abort_code = RX_PROTOCOL_ERROR;
int ret;
- _enter("{%d}", conn->debug_id);
+ rxrpc_see_connection(conn);
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
@@ -395,7 +388,7 @@ void rxrpc_process_connection(struct work_struct *work)
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
- rxrpc_see_skb(skb);
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
ret = rxrpc_process_event(conn, skb, &abort_code);
switch (ret) {
case -EPROTO:
@@ -406,7 +399,7 @@ void rxrpc_process_connection(struct work_struct *work)
goto requeue_and_leave;
case -ECONNABORTED:
default:
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
break;
}
}
@@ -423,92 +416,7 @@ requeue_and_leave:
protocol_error:
if (rxrpc_abort_connection(conn, -ret, abort_code) < 0)
goto requeue_and_leave;
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
_leave(" [EPROTO]");
goto out;
}
-
-/*
- * put a packet up for transport-level abort
- */
-void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
-{
- CHECK_SLAB_OKAY(&local->usage);
-
- skb_queue_tail(&local->reject_queue, skb);
- rxrpc_queue_local(local);
-}
-
-/*
- * reject packets through the local endpoint
- */
-void rxrpc_reject_packets(struct rxrpc_local *local)
-{
- union {
- struct sockaddr sa;
- struct sockaddr_in sin;
- } sa;
- struct rxrpc_skb_priv *sp;
- struct rxrpc_wire_header whdr;
- struct sk_buff *skb;
- struct msghdr msg;
- struct kvec iov[2];
- size_t size;
- __be32 code;
-
- _enter("%d", local->debug_id);
-
- iov[0].iov_base = &whdr;
- iov[0].iov_len = sizeof(whdr);
- iov[1].iov_base = &code;
- iov[1].iov_len = sizeof(code);
- size = sizeof(whdr) + sizeof(code);
-
- msg.msg_name = &sa;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = 0;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa.sa_family = local->srx.transport.family;
- switch (sa.sa.sa_family) {
- case AF_INET:
- msg.msg_namelen = sizeof(sa.sin);
- break;
- default:
- msg.msg_namelen = 0;
- break;
- }
-
- memset(&whdr, 0, sizeof(whdr));
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
-
- while ((skb = skb_dequeue(&local->reject_queue))) {
- rxrpc_see_skb(skb);
- sp = rxrpc_skb(skb);
- switch (sa.sa.sa_family) {
- case AF_INET:
- sa.sin.sin_port = udp_hdr(skb)->source;
- sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
- code = htonl(skb->priority);
-
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.serviceId = htons(sp->hdr.serviceId);
- whdr.flags = sp->hdr.flags;
- whdr.flags ^= RXRPC_CLIENT_INITIATED;
- whdr.flags &= RXRPC_CLIENT_INITIATED;
-
- kernel_sendmsg(local->socket, &msg, iov, 2, size);
- break;
-
- default:
- break;
- }
-
- rxrpc_free_skb(skb);
- }
-
- _leave("");
-}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 9c6685b97e70..e1e83af47866 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -53,7 +53,6 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
spin_lock_init(&conn->state_lock);
conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
conn->size_align = 4;
- conn->header_size = sizeof(struct rxrpc_wire_header);
conn->idle_timestamp = jiffies;
}
@@ -134,6 +133,16 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
srx.transport.sin.sin_addr.s_addr)
goto not_found;
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ if (peer->srx.transport.sin6.sin6_port !=
+ srx.transport.sin6.sin6_port ||
+ memcmp(&peer->srx.transport.sin6.sin6_addr,
+ &srx.transport.sin6.sin6_addr,
+ sizeof(struct in6_addr)) != 0)
+ goto not_found;
+ break;
+#endif
default:
BUG();
}
@@ -169,7 +178,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
chan->last_abort = call->abort_code;
chan->last_type = RXRPC_PACKET_TYPE_ABORT;
} else {
- chan->last_seq = call->rx_data_eaten;
+ chan->last_seq = call->rx_hard_ack;
chan->last_type = RXRPC_PACKET_TYPE_ACK;
}
/* Sync with rxrpc_conn_retransmit(). */
@@ -191,6 +200,10 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
+ spin_lock_bh(&conn->params.peer->lock);
+ hlist_del_init(&call->error_link);
+ spin_unlock_bh(&conn->params.peer->lock);
+
if (rxrpc_is_client_call(call))
return rxrpc_disconnect_client_call(call);
@@ -232,11 +245,77 @@ void rxrpc_kill_connection(struct rxrpc_connection *conn)
}
/*
- * release a virtual connection
+ * Queue a connection's work processor, getting a ref to pass to the work
+ * queue.
*/
-void __rxrpc_put_connection(struct rxrpc_connection *conn)
+bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
- rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+ const void *here = __builtin_return_address(0);
+ int n = __atomic_add_unless(&conn->usage, 1, 0);
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&conn->processor))
+ trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
+ else
+ rxrpc_put_connection(conn);
+ return true;
+}
+
+/*
+ * Note the re-emergence of a connection.
+ */
+void rxrpc_see_connection(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ if (conn) {
+ int n = atomic_read(&conn->usage);
+
+ trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
+ }
+}
+
+/*
+ * Get a ref on a connection.
+ */
+void rxrpc_get_connection(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&conn->usage);
+
+ trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
+}
+
+/*
+ * Try to get a ref on a connection.
+ */
+struct rxrpc_connection *
+rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+
+ if (conn) {
+ int n = __atomic_add_unless(&conn->usage, 1, 0);
+ if (n > 0)
+ trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
+ else
+ conn = NULL;
+ }
+ return conn;
+}
+
+/*
+ * Release a service connection
+ */
+void rxrpc_put_service_conn(struct rxrpc_connection *conn)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_dec_return(&conn->usage);
+ trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+ ASSERTCMP(n, >=, 0);
+ if (n == 0)
+ rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
}
/*
@@ -286,6 +365,8 @@ static void rxrpc_connection_reaper(struct work_struct *work)
ASSERTCMP(atomic_read(&conn->usage), >, 0);
if (likely(atomic_read(&conn->usage) > 1))
continue;
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
+ continue;
idle_timestamp = READ_ONCE(conn->idle_timestamp);
_debug("reap CONN %d { u=%d,t=%ld }",
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 316a92107fee..eef551f40dc2 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -65,9 +65,8 @@ done:
* Insert a service connection into a peer's tree, thereby making it a target
* for incoming packets.
*/
-static struct rxrpc_connection *
-rxrpc_publish_service_conn(struct rxrpc_peer *peer,
- struct rxrpc_connection *conn)
+static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
+ struct rxrpc_connection *conn)
{
struct rxrpc_connection *cursor = NULL;
struct rxrpc_conn_proto k = conn->proto;
@@ -96,7 +95,7 @@ conn_published:
set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
write_sequnlock_bh(&peer->service_conn_lock);
_leave(" = %d [new]", conn->debug_id);
- return conn;
+ return;
found_extant_conn:
if (atomic_read(&cursor->usage) == 0)
@@ -119,106 +118,58 @@ replace_old_connection:
}
/*
- * get a record of an incoming connection
+ * Preallocate a service connection. The connection is placed on the proc and
+ * reap lists so that we don't have to get the lock from BH context.
*/
-struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
- struct sockaddr_rxrpc *srx,
- struct sk_buff *skb)
+struct rxrpc_connection *rxrpc_prealloc_service_connection(gfp_t gfp)
{
- struct rxrpc_connection *conn;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_peer *peer;
- const char *new = "old";
-
- _enter("");
+ struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
- peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
- if (!peer) {
- _debug("no peer");
- return ERR_PTR(-EBUSY);
- }
+ if (conn) {
+ /* We maintain an extra ref on the connection whilst it is on
+ * the rxrpc_connections list.
+ */
+ conn->state = RXRPC_CONN_SERVICE_PREALLOC;
+ atomic_set(&conn->usage, 2);
- ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);
-
- rcu_read_lock();
- peer = rxrpc_lookup_peer_rcu(local, srx);
- if (peer) {
- conn = rxrpc_find_service_conn_rcu(peer, skb);
- if (conn) {
- if (sp->hdr.securityIndex != conn->security_ix)
- goto security_mismatch_rcu;
- if (rxrpc_get_connection_maybe(conn))
- goto found_extant_connection_rcu;
-
- /* The conn has expired but we can't remove it without
- * the appropriate lock, so we attempt to replace it
- * when we have a new candidate.
- */
- }
+ write_lock(&rxrpc_connection_lock);
+ list_add_tail(&conn->link, &rxrpc_connections);
+ list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
+ write_unlock(&rxrpc_connection_lock);
- if (!rxrpc_get_peer_maybe(peer))
- peer = NULL;
+ trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+ atomic_read(&conn->usage),
+ __builtin_return_address(0));
}
- rcu_read_unlock();
- if (!peer) {
- peer = rxrpc_lookup_peer(local, srx, GFP_NOIO);
- if (!peer)
- goto enomem;
- }
+ return conn;
+}
- /* We don't have a matching record yet. */
- conn = rxrpc_alloc_connection(GFP_NOIO);
- if (!conn)
- goto enomem_peer;
+/*
+ * Set up an incoming connection. This is called in BH context with the RCU
+ * read lock held.
+ */
+void rxrpc_new_incoming_connection(struct rxrpc_connection *conn,
+ struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+ _enter("");
conn->proto.epoch = sp->hdr.epoch;
conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
- conn->params.local = local;
- conn->params.peer = peer;
conn->params.service_id = sp->hdr.serviceId;
conn->security_ix = sp->hdr.securityIndex;
conn->out_clientflag = 0;
- conn->state = RXRPC_CONN_SERVICE;
- if (conn->params.service_id)
+ if (conn->security_ix)
conn->state = RXRPC_CONN_SERVICE_UNSECURED;
-
- rxrpc_get_local(local);
-
- /* We maintain an extra ref on the connection whilst it is on
- * the rxrpc_connections list.
- */
- atomic_set(&conn->usage, 2);
-
- write_lock(&rxrpc_connection_lock);
- list_add_tail(&conn->link, &rxrpc_connections);
- list_add_tail(&conn->proc_link, &rxrpc_connection_proc_list);
- write_unlock(&rxrpc_connection_lock);
+ else
+ conn->state = RXRPC_CONN_SERVICE;
/* Make the connection a target for incoming packets. */
- rxrpc_publish_service_conn(peer, conn);
-
- new = "new";
-
-success:
- _net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);
- _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
- return conn;
-
-found_extant_connection_rcu:
- rcu_read_unlock();
- goto success;
-
-security_mismatch_rcu:
- rcu_read_unlock();
- _leave(" = -EKEYREJECTED");
- return ERR_PTR(-EKEYREJECTED);
+ rxrpc_publish_service_conn(conn->params.peer, conn);
-enomem_peer:
- rxrpc_put_peer(peer);
-enomem:
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ _net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}
/*
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 72f016cfaaf5..094720dd1eaf 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1,6 +1,6 @@
/* RxRPC packet reception
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -27,545 +27,908 @@
#include <net/net_namespace.h>
#include "ar-internal.h"
+static void rxrpc_proto_abort(const char *why,
+ struct rxrpc_call *call, rxrpc_seq_t seq)
+{
+ if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, EBADMSG)) {
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
+ rxrpc_queue_call(call);
+ }
+}
+
/*
- * queue a packet for recvmsg to pass to userspace
- * - the caller must hold a lock on call->lock
- * - must not be called with interrupts disabled (sk_filter() disables BH's)
- * - eats the packet whether successful or not
- * - there must be just one reference to the packet, which the caller passes to
- * this function
+ * Do TCP-style congestion management [RFC 5681].
*/
-int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
- bool force, bool terminal)
+static void rxrpc_congestion_management(struct rxrpc_call *call,
+ struct sk_buff *skb,
+ struct rxrpc_ack_summary *summary)
{
- struct rxrpc_skb_priv *sp;
- struct rxrpc_sock *rx = call->socket;
- struct sock *sk;
- int ret;
+ enum rxrpc_congest_change change = rxrpc_cong_no_change;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned int cumulative_acks = call->cong_cumul_acks;
+ unsigned int cwnd = call->cong_cwnd;
+ bool resend = false;
+
+ summary->flight_size =
+ (call->tx_top - call->tx_hard_ack) - summary->nr_acks;
+
+ if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
+ summary->retrans_timeo = true;
+ call->cong_ssthresh = max_t(unsigned int,
+ summary->flight_size / 2, 2);
+ cwnd = 1;
+ if (cwnd > call->cong_ssthresh &&
+ call->cong_mode == RXRPC_CALL_SLOW_START) {
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ call->cong_tstamp = skb->tstamp;
+ cumulative_acks = 0;
+ }
+ }
- _enter(",,%d,%d", force, terminal);
+ cumulative_acks += summary->nr_new_acks;
+ cumulative_acks += summary->nr_rot_new_acks;
+ if (cumulative_acks > 255)
+ cumulative_acks = 255;
+
+ summary->mode = call->cong_mode;
+ summary->cwnd = call->cong_cwnd;
+ summary->ssthresh = call->cong_ssthresh;
+ summary->cumulative_acks = cumulative_acks;
+ summary->dup_acks = call->cong_dup_acks;
+
+ switch (call->cong_mode) {
+ case RXRPC_CALL_SLOW_START:
+ if (summary->nr_nacks > 0)
+ goto packet_loss_detected;
+ if (summary->cumulative_acks > 0)
+ cwnd += 1;
+ if (cwnd > call->cong_ssthresh) {
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ call->cong_tstamp = skb->tstamp;
+ }
+ goto out;
- ASSERT(!irqs_disabled());
+ case RXRPC_CALL_CONGEST_AVOIDANCE:
+ if (summary->nr_nacks > 0)
+ goto packet_loss_detected;
- sp = rxrpc_skb(skb);
- ASSERTCMP(sp->call, ==, call);
-
- /* if we've already posted the terminal message for a call, then we
- * don't post any more */
- if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
- _debug("already terminated");
- ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
- rxrpc_free_skb(skb);
- return 0;
- }
-
- sk = &rx->sk;
-
- if (!force) {
- /* cast skb->rcvbuf to unsigned... It's pointless, but
- * reduces number of warnings when compiling with -W
- * --ANK */
-// ret = -ENOBUFS;
-// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-// (unsigned int) sk->sk_rcvbuf)
-// goto out;
-
- ret = sk_filter(sk, skb);
- if (ret < 0)
+ /* We analyse the number of packets that get ACK'd per RTT
+ * period and increase the window if we managed to fill it.
+ */
+ if (call->peer->rtt_usage == 0)
goto out;
- }
+ if (ktime_before(skb->tstamp,
+ ktime_add_ns(call->cong_tstamp,
+ call->peer->rtt)))
+ goto out_no_clear_ca;
+ change = rxrpc_cong_rtt_window_end;
+ call->cong_tstamp = skb->tstamp;
+ if (cumulative_acks >= cwnd)
+ cwnd++;
+ goto out;
- spin_lock_bh(&sk->sk_receive_queue.lock);
- if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
- !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- call->socket->sk.sk_state != RXRPC_CLOSE) {
- skb->destructor = rxrpc_packet_destructor;
- skb->dev = NULL;
- skb->sk = sk;
- atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+ case RXRPC_CALL_PACKET_LOSS:
+ if (summary->nr_nacks == 0)
+ goto resume_normality;
- if (terminal) {
- _debug("<<<< TERMINAL MESSAGE >>>>");
- set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
+ if (summary->new_low_nack) {
+ change = rxrpc_cong_new_low_nack;
+ call->cong_dup_acks = 1;
+ if (call->cong_extra > 1)
+ call->cong_extra = 1;
+ goto send_extra_data;
}
- /* allow interception by a kernel service */
- if (skb->mark == RXRPC_SKB_MARK_NEW_CALL &&
- rx->notify_new_call) {
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- skb_queue_tail(&call->knlrecv_queue, skb);
- rx->notify_new_call(&rx->sk);
- } else if (call->notify_rx) {
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- skb_queue_tail(&call->knlrecv_queue, skb);
- call->notify_rx(&rx->sk, call, call->user_call_ID);
- } else {
- _net("post skb %p", skb);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- spin_unlock_bh(&sk->sk_receive_queue.lock);
+ call->cong_dup_acks++;
+ if (call->cong_dup_acks < 3)
+ goto send_extra_data;
+
+ change = rxrpc_cong_begin_retransmission;
+ call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
+ call->cong_ssthresh = max_t(unsigned int,
+ summary->flight_size / 2, 2);
+ cwnd = call->cong_ssthresh + 3;
+ call->cong_extra = 0;
+ call->cong_dup_acks = 0;
+ resend = true;
+ goto out;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ case RXRPC_CALL_FAST_RETRANSMIT:
+ if (!summary->new_low_nack) {
+ if (summary->nr_new_acks == 0)
+ cwnd += 1;
+ call->cong_dup_acks++;
+ if (call->cong_dup_acks == 2) {
+ change = rxrpc_cong_retransmit_again;
+ call->cong_dup_acks = 0;
+ resend = true;
+ }
+ } else {
+ change = rxrpc_cong_progress;
+ cwnd = call->cong_ssthresh;
+ if (summary->nr_nacks == 0)
+ goto resume_normality;
}
- skb = NULL;
- } else {
- spin_unlock_bh(&sk->sk_receive_queue.lock);
+ goto out;
+
+ default:
+ BUG();
+ goto out;
}
- ret = 0;
+resume_normality:
+ change = rxrpc_cong_cleared_nacks;
+ call->cong_dup_acks = 0;
+ call->cong_extra = 0;
+ call->cong_tstamp = skb->tstamp;
+ if (cwnd <= call->cong_ssthresh)
+ call->cong_mode = RXRPC_CALL_SLOW_START;
+ else
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
- rxrpc_free_skb(skb);
+ cumulative_acks = 0;
+out_no_clear_ca:
+ if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
+ cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
+ call->cong_cwnd = cwnd;
+ call->cong_cumul_acks = cumulative_acks;
+ trace_rxrpc_congest(call, summary, sp->hdr.serial, change);
+ if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ rxrpc_queue_call(call);
+ return;
- _leave(" = %d", ret);
- return ret;
+packet_loss_detected:
+ change = rxrpc_cong_saw_nack;
+ call->cong_mode = RXRPC_CALL_PACKET_LOSS;
+ call->cong_dup_acks = 0;
+ goto send_extra_data;
+
+send_extra_data:
+ /* Send some previously unsent DATA if we have some to advance the ACK
+ * state.
+ */
+ if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
+ RXRPC_TX_ANNO_LAST ||
+ summary->nr_acks != call->tx_top - call->tx_hard_ack) {
+ call->cong_extra++;
+ wake_up(&call->waitq);
+ }
+ goto out_no_clear_ca;
}
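
The function above is a compact rendition of RFC 5681: slow start grows cwnd per ACK event until ssthresh, congestion avoidance grows it roughly once per filled RTT window, a NACK drops into the packet-loss state, and repeated duplicate indications trigger fast retransmit with ssthresh = max(flight/2, 2). A reduced sketch of just the window arithmetic, which deliberately collapses the PACKET_LOSS/FAST_RETRANSMIT duplicate-ACK machinery into a single saw_loss flag:

    enum cong_mode { SLOW_START, CONGEST_AVOIDANCE };

    struct cong_state {
        enum cong_mode mode;
        unsigned int cwnd;      /* congestion window, in packets */
        unsigned int ssthresh;  /* slow-start threshold */
    };

    /* One ACK event: grow per event in slow start, or once per filled
     * RTT window in avoidance; on loss, halve the flight and restart.
     */
    static void on_ack_event(struct cong_state *c,
                             unsigned int cumulative_acks,
                             unsigned int flight_size, int saw_loss)
    {
        if (saw_loss) {
            c->ssthresh = flight_size / 2 > 2 ? flight_size / 2 : 2;
            c->cwnd = 1;
            c->mode = SLOW_START;
            return;
        }
        if (c->mode == SLOW_START) {
            if (++c->cwnd > c->ssthresh)
                c->mode = CONGEST_AVOIDANCE;
        } else if (cumulative_acks >= c->cwnd) {
            c->cwnd++;    /* window was filled this RTT: grow by one */
        }
    }
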
/*
- * process a DATA packet, posting the packet to the appropriate queue
- * - eats the packet if successful
+ * Ping the other end to fill our RTT cache and to retrieve the rwind
+ * and MTU parameters.
*/
-static int rxrpc_fast_process_data(struct rxrpc_call *call,
- struct sk_buff *skb, u32 seq)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
+ int skew)
{
- struct rxrpc_skb_priv *sp;
- bool terminal;
- int ret, ackbit, ack;
- u32 serial;
- u16 skew;
- u8 flags;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ ktime_t now = skb->tstamp;
- _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
+ if (call->peer->rtt_usage < 3 ||
+ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ true, true,
+ rxrpc_propose_ack_ping_for_params);
+}
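
The ping gate in rxrpc_send_ping fires when the RTT cache is cold (fewer than three samples) or stale (the last probe is over a second old). The same predicate in plain nanosecond arithmetic, with hypothetical field names:

    #include <stdbool.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Ping when the RTT cache is cold or the last probe is stale. */
    static bool should_ping(unsigned int rtt_samples,
                            uint64_t last_ping_ns, uint64_t now_ns)
    {
        return rtt_samples < 3 || last_ping_ns + NSEC_PER_SEC < now_ns;
    }
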
- sp = rxrpc_skb(skb);
- ASSERTCMP(sp->call, ==, NULL);
- flags = sp->hdr.flags;
- serial = sp->hdr.serial;
- skew = skb->priority;
+/*
+ * Apply a hard ACK by advancing the Tx window.
+ */
+static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+ struct rxrpc_ack_summary *summary)
+{
+ struct sk_buff *skb, *list = NULL;
+ int ix;
+ u8 annotation;
+
+ if (call->acks_lowest_nak == call->tx_hard_ack) {
+ call->acks_lowest_nak = to;
+ } else if (before_eq(call->acks_lowest_nak, to)) {
+ summary->new_low_nack = true;
+ call->acks_lowest_nak = to;
+ }
spin_lock(&call->lock);
- if (call->state > RXRPC_CALL_COMPLETE)
- goto discard;
+ while (before(call->tx_hard_ack, to)) {
+ call->tx_hard_ack++;
+ ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
+ skb = call->rxtx_buffer[ix];
+ annotation = call->rxtx_annotations[ix];
+ rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
+ call->rxtx_buffer[ix] = NULL;
+ call->rxtx_annotations[ix] = 0;
+ skb->next = list;
+ list = skb;
+
+ if (annotation & RXRPC_TX_ANNO_LAST)
+ set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
+ summary->nr_rot_new_acks++;
+ }
- ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
- ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
- ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
+ spin_unlock(&call->lock);
- if (seq < call->rx_data_post) {
- _debug("dup #%u [-%u]", seq, call->rx_data_post);
- ack = RXRPC_ACK_DUPLICATE;
- ret = -ENOBUFS;
- goto discard_and_ack;
- }
+ trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+ rxrpc_transmit_rotate_last :
+ rxrpc_transmit_rotate));
+ wake_up(&call->waitq);
- /* we may already have the packet in the out of sequence queue */
- ackbit = seq - (call->rx_data_eaten + 1);
- ASSERTCMP(ackbit, >=, 0);
- if (__test_and_set_bit(ackbit, call->ackr_window)) {
- _debug("dup oos #%u [%u,%u]",
- seq, call->rx_data_eaten, call->rx_data_post);
- ack = RXRPC_ACK_DUPLICATE;
- goto discard_and_ack;
+ while (list) {
+ skb = list;
+ list = skb->next;
+ skb->next = NULL;
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
+}
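
Both rotation here and queueing below index the shared rxtx ring by masking the sequence number, which is why the ring size must be a power of two (the static check added to misc.c further down enforces the matching window limit). The idiom on its own:

    #define RING_SIZE 64                    /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)

    static void *ring[RING_SIZE];

    /* Map an ever-increasing 32-bit sequence number onto a ring slot;
     * masking is modular arithmetic, so it also survives seq wrap.
     */
    static void **slot_for_seq(unsigned int seq)
    {
            return &ring[seq & RING_MASK];
    }
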
- if (seq >= call->ackr_win_top) {
- _debug("exceed #%u [%u]", seq, call->ackr_win_top);
- __clear_bit(ackbit, call->ackr_window);
- ack = RXRPC_ACK_EXCEEDS_WINDOW;
- goto discard_and_ack;
- }
+/*
+ * End the transmission phase of a call.
+ *
+ * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
+ * or a final ACK packet.
+ */
+static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
+ const char *abort_why)
+{
- if (seq == call->rx_data_expect) {
- clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
- call->rx_data_expect++;
- } else if (seq > call->rx_data_expect) {
- _debug("oos #%u [%u]", seq, call->rx_data_expect);
- call->rx_data_expect = seq + 1;
- if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
- ack = RXRPC_ACK_OUT_OF_SEQUENCE;
- goto enqueue_and_ack;
- }
- goto enqueue_packet;
- }
+ ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
- if (seq != call->rx_data_post) {
- _debug("ahead #%u [%u]", seq, call->rx_data_post);
- goto enqueue_packet;
- }
+ write_lock(&call->state_lock);
- if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
- goto protocol_error;
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+ if (reply_begun)
+ call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+ else
+ call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ break;
- /* if the packet need security things doing to it, then it goes down
- * the slow path */
- if (call->conn->security_ix)
- goto enqueue_packet;
+ case RXRPC_CALL_SERVER_AWAIT_ACK:
+ __rxrpc_call_completed(call);
+ rxrpc_notify_socket(call);
+ break;
- sp->call = call;
- rxrpc_get_call_for_skb(call, skb);
- terminal = ((flags & RXRPC_LAST_PACKET) &&
- !(flags & RXRPC_CLIENT_INITIATED));
- ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
- if (ret < 0) {
- if (ret == -ENOMEM || ret == -ENOBUFS) {
- __clear_bit(ackbit, call->ackr_window);
- ack = RXRPC_ACK_NOSPACE;
- goto discard_and_ack;
- }
- goto out;
+ default:
+ goto bad_state;
}
- skb = NULL;
- sp = NULL;
-
- _debug("post #%u", seq);
- ASSERTCMP(call->rx_data_post, ==, seq);
- call->rx_data_post++;
+ write_unlock(&call->state_lock);
+ if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, 0, false, true,
+ rxrpc_propose_ack_client_tx_end);
+ trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
+ } else {
+ trace_rxrpc_transmit(call, rxrpc_transmit_end);
+ }
+ _leave(" = ok");
+ return true;
+
+bad_state:
+ write_unlock(&call->state_lock);
+ kdebug("end_tx %s", rxrpc_call_states[call->state]);
+ rxrpc_proto_abort(abort_why, call, call->tx_top);
+ return false;
+}
- if (flags & RXRPC_LAST_PACKET)
- set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
+/*
+ * Begin the reply reception phase of a call.
+ */
+static bool rxrpc_receiving_reply(struct rxrpc_call *call)
+{
+ struct rxrpc_ack_summary summary = { 0 };
+ rxrpc_seq_t top = READ_ONCE(call->tx_top);
+
+ if (call->ackr_reason) {
+ spin_lock_bh(&call->lock);
+ call->ackr_reason = 0;
+ call->resend_at = call->expire_at;
+ call->ack_at = call->expire_at;
+ spin_unlock_bh(&call->lock);
+ rxrpc_set_timer(call, rxrpc_timer_init_for_reply);
+ }
- /* if we've reached an out of sequence packet then we need to drain
- * that queue into the socket Rx queue now */
- if (call->rx_data_post == call->rx_first_oos) {
- _debug("drain rx oos now");
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
- rxrpc_queue_call(call);
- read_unlock(&call->state_lock);
+ if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ rxrpc_rotate_tx_window(call, top, &summary);
+ if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
+ rxrpc_proto_abort("TXL", call, top);
+ return false;
}
+ if (!rxrpc_end_tx_phase(call, true, "ETD"))
+ return false;
+ call->tx_phase = false;
+ return true;
+}
- spin_unlock(&call->lock);
- atomic_inc(&call->ackr_not_idle);
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, false);
- _leave(" = 0 [posted]");
- return 0;
+/*
+ * Scan a jumbo packet to validate its structure and to work out how many
+ * subpackets it contains.
+ *
+ * A jumbo packet is a collection of consecutive packets glued together with
+ * little headers between that indicate how to change the initial header for
+ * each subpacket.
+ *
+ * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
+ * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
+ * size.
+ */
+static bool rxrpc_validate_jumbo(struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned int offset = sp->offset;
+ unsigned int len = skb->len;
+ int nr_jumbo = 1;
+ u8 flags = sp->hdr.flags;
-protocol_error:
- ret = -EBADMSG;
-out:
- spin_unlock(&call->lock);
- _leave(" = %d", ret);
- return ret;
+ do {
+ nr_jumbo++;
+ if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
+ goto protocol_error;
+ if (flags & RXRPC_LAST_PACKET)
+ goto protocol_error;
+ offset += RXRPC_JUMBO_DATALEN;
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0)
+ goto protocol_error;
+ offset += sizeof(struct rxrpc_jumbo_header);
+ } while (flags & RXRPC_JUMBO_PACKET);
-discard_and_ack:
- _debug("discard and ACK packet %p", skb);
- __rxrpc_propose_ACK(call, ack, skew, serial, true);
-discard:
- spin_unlock(&call->lock);
- rxrpc_free_skb(skb);
- _leave(" = 0 [discarded]");
- return 0;
+ sp->nr_jumbo = nr_jumbo;
+ return true;
-enqueue_and_ack:
- __rxrpc_propose_ACK(call, ack, skew, serial, true);
-enqueue_packet:
- _net("defer skb %p", skb);
- spin_unlock(&call->lock);
- skb_queue_tail(&call->rx_queue, skb);
- atomic_inc(&call->ackr_not_idle);
- read_lock(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD)
- rxrpc_queue_call(call);
- read_unlock(&call->state_lock);
- _leave(" = 0 [queued]");
- return 0;
+protocol_error:
+ return false;
}
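
For reference, the layout being validated: every subpacket except the last occupies exactly RXRPC_JUMBO_DATALEN bytes and is followed by a small jumbo header whose flags byte says whether another subpacket follows. The same walk over a plain buffer, with assumed sizes standing in for the wire constants:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define JUMBO_DATALEN 1412      /* payload per non-final subpacket */
    #define JUMBO_HDRLEN  4         /* little header between subpackets */
    #define JUMBO_FLAG    0x20      /* "another subpacket follows" */

    /* Count the subpackets in a jumbo payload; false if truncated. */
    static bool count_subpackets(const uint8_t *p, size_t len, int *nr)
    {
            size_t offset = 0;
            uint8_t flags = JUMBO_FLAG; /* the outer header had the flag set */
            int n = 1;

            while (flags & JUMBO_FLAG) {
                    n++;
                    if (len - offset < JUMBO_DATALEN + JUMBO_HDRLEN)
                            return false;
                    offset += JUMBO_DATALEN;
                    flags = p[offset];  /* flags byte of the jumbo header */
                    offset += JUMBO_HDRLEN;
            }
            *nr = n;
            return true;
    }
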
/*
- * assume an implicit ACKALL of the transmission phase of a client socket upon
- * reception of the first reply packet
+ * Handle reception of a duplicate packet.
+ *
+ * We have to take care to avoid an attack here whereby we're given a series of
+ * jumbograms, each with a sequence number one before the preceding one and
+ * filled up to maximum UDP size. If they never send us the first packet in
+ * the sequence, they can cause us to have to hold on to around 2MiB of kernel
+ * space until the call times out.
+ *
+ * We limit the space usage by only accepting three duplicate jumbo packets per
+ * call. After that, we tell the other side we're no longer accepting jumbos
+ * (that information is encoded in the ACK packet).
*/
-static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
+static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
+ u8 annotation, bool *_jumbo_bad)
{
- write_lock_bh(&call->state_lock);
-
- switch (call->state) {
- case RXRPC_CALL_CLIENT_AWAIT_REPLY:
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
- call->acks_latest = serial;
-
- _debug("implicit ACKALL %%%u", call->acks_latest);
- set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
- write_unlock_bh(&call->state_lock);
-
- if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- }
- break;
+ /* Discard normal packets that are duplicates. */
+ if (annotation == 0)
+ return;
- default:
- write_unlock_bh(&call->state_lock);
- break;
+ /* Skip jumbo subpackets that are duplicates. When we've had three or
+ * more partially duplicate jumbo packets, we refuse to take any more
+ * jumbos for this call.
+ */
+ if (!*_jumbo_bad) {
+ call->nr_jumbo_bad++;
+ *_jumbo_bad = true;
}
}
/*
- * post an incoming packet to the nominated call to deal with
- * - must get rid of the sk_buff, either by freeing it or by queuing it
+ * Process a DATA packet, adding the packet to the Rx ring.
*/
-void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
+ u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- __be32 wtmp;
- u32 abort_code;
+ unsigned int offset = sp->offset;
+ unsigned int ix;
+ rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
+ rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
+ bool immediate_ack = false, jumbo_bad = false, queued;
+ u16 len;
+ u8 ack = 0, flags, annotation = 0;
- _enter("%p,%p", call, skb);
+ _enter("{%u,%u},{%u,%u}",
+ call->rx_hard_ack, call->rx_top, skb->len, seq);
- ASSERT(!irqs_disabled());
+ _proto("Rx DATA %%%u { #%u f=%02x }",
+ sp->hdr.serial, seq, sp->hdr.flags);
-#if 0 // INJECT RX ERROR
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
- static int skip = 0;
- if (++skip == 3) {
- printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
- skip = 0;
- goto free_packet;
- }
+ if (call->state >= RXRPC_CALL_COMPLETE)
+ return;
+
+ /* Received data implicitly ACKs all of the request packets we sent
+ * when we're acting as a client.
+ */
+ if ((call->state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
+ call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
+ !rxrpc_receiving_reply(call))
+ return;
+
+ call->ackr_prev_seq = seq;
+
+ hard_ack = READ_ONCE(call->rx_hard_ack);
+ if (after(seq, hard_ack + call->rx_winsize)) {
+ ack = RXRPC_ACK_EXCEEDS_WINDOW;
+ ack_serial = serial;
+ goto ack;
}
-#endif
- /* request ACK generation for any ACK or DATA packet that requests
- * it */
- if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
- _proto("ACK Requested on %%%u", sp->hdr.serial);
- rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- skb->priority, sp->hdr.serial, false);
+ flags = sp->hdr.flags;
+ if (flags & RXRPC_JUMBO_PACKET) {
+ if (call->nr_jumbo_bad > 3) {
+ ack = RXRPC_ACK_NOSPACE;
+ ack_serial = serial;
+ goto ack;
+ }
+ annotation = 1;
}
- switch (sp->hdr.type) {
- case RXRPC_PACKET_TYPE_ABORT:
- _debug("abort");
+next_subpacket:
+ queued = false;
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ len = skb->len;
+ if (flags & RXRPC_JUMBO_PACKET)
+ len = RXRPC_JUMBO_DATALEN;
+
+ if (flags & RXRPC_LAST_PACKET) {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ seq != call->rx_top)
+ return rxrpc_proto_abort("LSN", call, seq);
+ } else {
+ if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
+ after_eq(seq, call->rx_top))
+ return rxrpc_proto_abort("LSA", call, seq);
+ }
- if (skb_copy_bits(skb, 0, &wtmp, sizeof(wtmp)) < 0)
- goto protocol_error;
+ if (before_eq(seq, hard_ack)) {
+ ack = RXRPC_ACK_DUPLICATE;
+ ack_serial = serial;
+ goto skip;
+ }
- abort_code = ntohl(wtmp);
- _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
+ if (flags & RXRPC_REQUEST_ACK && !ack) {
+ ack = RXRPC_ACK_REQUESTED;
+ ack_serial = serial;
+ }
- if (__rxrpc_set_call_completion(call,
- RXRPC_CALL_REMOTELY_ABORTED,
- abort_code, ECONNABORTED)) {
- set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
- rxrpc_queue_call(call);
+ if (call->rxtx_buffer[ix]) {
+ rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
+ if (ack != RXRPC_ACK_DUPLICATE) {
+ ack = RXRPC_ACK_DUPLICATE;
+ ack_serial = serial;
}
- goto free_packet;
+ immediate_ack = true;
+ goto skip;
+ }
- case RXRPC_PACKET_TYPE_BUSY:
- _proto("Rx BUSY %%%u", sp->hdr.serial);
+ /* Queue the packet. We use a couple of memory barriers here as we need
+ * to make sure that rx_top is perceived to be set after the buffer
+ * pointer and that the buffer pointer is set after the annotation and
+ * the skb data.
+ *
+ * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
+ * and also rxrpc_fill_out_ack().
+ */
+ rxrpc_get_skb(skb, rxrpc_skb_rx_got);
+ call->rxtx_annotations[ix] = annotation;
+ smp_wmb();
+ call->rxtx_buffer[ix] = skb;
+ if (after(seq, call->rx_top)) {
+ smp_store_release(&call->rx_top, seq);
+ } else if (before(seq, call->rx_top)) {
+ /* Send an immediate ACK if we fill in a hole */
+ if (!ack) {
+ ack = RXRPC_ACK_DELAY;
+ ack_serial = serial;
+ }
+ immediate_ack = true;
+ }
+ if (flags & RXRPC_LAST_PACKET) {
+ set_bit(RXRPC_CALL_RX_LAST, &call->flags);
+ trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
+ } else {
+ trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
+ }
+ queued = true;
- if (rxrpc_conn_is_service(call->conn))
- goto protocol_error;
+ if (after_eq(seq, call->rx_expect_next)) {
+ if (after(seq, call->rx_expect_next)) {
+ _net("OOS %u > %u", seq, call->rx_expect_next);
+ ack = RXRPC_ACK_OUT_OF_SEQUENCE;
+ ack_serial = serial;
+ }
+ call->rx_expect_next = seq + 1;
+ }
- write_lock_bh(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_CLIENT_SEND_REQUEST:
- __rxrpc_set_call_completion(call,
- RXRPC_CALL_SERVER_BUSY,
- 0, EBUSY);
- set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
- rxrpc_queue_call(call);
- case RXRPC_CALL_SERVER_BUSY:
- goto free_packet_unlock;
- default:
- goto protocol_error_locked;
+skip:
+ offset += len;
+ if (flags & RXRPC_JUMBO_PACKET) {
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0)
+ return rxrpc_proto_abort("XJF", call, seq);
+ offset += sizeof(struct rxrpc_jumbo_header);
+ seq++;
+ serial++;
+ annotation++;
+ if (flags & RXRPC_JUMBO_PACKET)
+ annotation |= RXRPC_RX_ANNO_JLAST;
+ if (after(seq, hard_ack + call->rx_winsize)) {
+ ack = RXRPC_ACK_EXCEEDS_WINDOW;
+ ack_serial = serial;
+ if (!jumbo_bad) {
+ call->nr_jumbo_bad++;
+ jumbo_bad = true;
+ }
+ goto ack;
}
- default:
- _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
- goto protocol_error;
+ _proto("Rx DATA Jumbo %%%u", serial);
+ goto next_subpacket;
+ }
- case RXRPC_PACKET_TYPE_DATA:
- _proto("Rx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
+ if (queued && flags & RXRPC_LAST_PACKET && !ack) {
+ ack = RXRPC_ACK_DELAY;
+ ack_serial = serial;
+ }
- if (sp->hdr.seq == 0)
- goto protocol_error;
+ack:
+ if (ack)
+ rxrpc_propose_ACK(call, ack, skew, ack_serial,
+ immediate_ack, true,
+ rxrpc_propose_ack_input_data);
+
+ if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1)
+ rxrpc_notify_socket(call);
+ _leave(" [queued]");
+}
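
The barrier chain used when queueing into the Rx ring publishes the annotation, then the buffer pointer, then rx_top, each ordered after the one before, so a reader that observes the new rx_top is guaranteed to see everything it covers. The same ordering expressed with C11 atomics — a sketch of the intent, not the kernel's primitives:

    #include <stdatomic.h>
    #include <stdint.h>

    #define RING_SIZE 64
    #define RING_MASK (RING_SIZE - 1)

    static _Atomic(void *) buffer[RING_SIZE];
    static uint8_t annotations[RING_SIZE];
    static _Atomic uint32_t rx_top;

    /* Producer: annotation before buffer pointer before top. */
    static void publish(void *pkt, uint32_t seq, uint8_t anno)
    {
            uint32_t ix = seq & RING_MASK;

            annotations[ix] = anno;
            atomic_store_explicit(&buffer[ix], pkt, memory_order_release);
            atomic_store_explicit(&rx_top, seq, memory_order_release);
    }

    /* Consumer: acquire-load top, then acquire-load the slot. */
    static void *consume(uint32_t seq)
    {
            uint32_t top = atomic_load_explicit(&rx_top, memory_order_acquire);

            if ((int32_t)(seq - top) > 0)
                    return NULL;    /* not yet published */
            return atomic_load_explicit(&buffer[seq & RING_MASK],
                                        memory_order_acquire);
    }
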
+
+/*
+ * Process a requested ACK.
+ */
+static void rxrpc_input_requested_ack(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t orig_serial,
+ rxrpc_serial_t ack_serial)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ ktime_t sent_at;
+ int ix;
+
+ for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
+ skb = call->rxtx_buffer[ix];
+ if (!skb)
+ continue;
+
+ sp = rxrpc_skb(skb);
+ if (sp->hdr.serial != orig_serial)
+ continue;
+ smp_rmb();
+ sent_at = skb->tstamp;
+ goto found;
+ }
+ return;
- call->ackr_prev_seq = sp->hdr.seq;
+found:
+ rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
+ orig_serial, ack_serial, sent_at, resp_time);
+}
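
The RTT sample recovered here is simply the ACK's arrival time minus the send timestamp still held on the matching Tx buffer, located by serial number. In sketch form, with an assumed flat record array standing in for the skb ring:

    #include <stddef.h>
    #include <stdint.h>

    struct tx_record {
            uint32_t serial;        /* wire serial of the transmission */
            uint64_t sent_at_ns;    /* stamped at kernel_sendmsg() time */
    };

    /* Match a REQUESTED ACK's reference serial to a buffered transmission
     * and derive the RTT; negative if the packet already left the ring.
     */
    static int64_t rtt_from_requested_ack(const struct tx_record *ring,
                                          size_t n, uint32_t orig_serial,
                                          uint64_t resp_ns)
    {
            for (size_t ix = 0; ix < n; ix++)
                    if (ring[ix].serial == orig_serial)
                            return (int64_t)(resp_ns - ring[ix].sent_at_ns);
            return -1;
    }
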
- /* received data implicitly ACKs all of the request packets we
- * sent when we're acting as a client */
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
- rxrpc_assume_implicit_ackall(call, sp->hdr.serial);
+/*
+ * Process a ping response.
+ */
+static void rxrpc_input_ping_response(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t orig_serial,
+ rxrpc_serial_t ack_serial)
+{
+ rxrpc_serial_t ping_serial;
+ ktime_t ping_time;
- switch (rxrpc_fast_process_data(call, skb, sp->hdr.seq)) {
- case 0:
- skb = NULL;
- goto done;
+ ping_time = call->ackr_ping_time;
+ smp_rmb();
+ ping_serial = call->ackr_ping;
- default:
- BUG();
+ if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
+ before(orig_serial, ping_serial))
+ return;
+ clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ if (after(orig_serial, ping_serial))
+ return;
- /* data packet received beyond the last packet */
- case -EBADMSG:
- goto protocol_error;
- }
+ rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
+ orig_serial, ack_serial, ping_time, resp_time);
+}
- case RXRPC_PACKET_TYPE_ACKALL:
- case RXRPC_PACKET_TYPE_ACK:
- /* ACK processing is done in process context */
- read_lock_bh(&call->state_lock);
- if (call->state < RXRPC_CALL_DEAD) {
- skb_queue_tail(&call->rx_queue, skb);
- rxrpc_queue_call(call);
- skb = NULL;
- }
- read_unlock_bh(&call->state_lock);
- goto free_packet;
+/*
+ * Process the extra information that may be appended to an ACK packet
+ */
+static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
+ struct rxrpc_ackinfo *ackinfo)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_peer *peer;
+ unsigned int mtu;
+ u32 rwind = ntohl(ackinfo->rwind);
+
+ _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
+ sp->hdr.serial,
+ ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
+ rwind, ntohl(ackinfo->jumbo_max));
+
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
+ call->tx_winsize = rwind;
+
+ mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
+
+ peer = call->peer;
+ if (mtu < peer->maxdata) {
+ spin_lock_bh(&peer->lock);
+ peer->maxdata = mtu;
+ peer->mtu = mtu + peer->hdrsize;
+ spin_unlock_bh(&peer->lock);
+ _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
}
+}
-protocol_error:
- _debug("protocol error");
- write_lock_bh(&call->state_lock);
-protocol_error_locked:
- if (__rxrpc_abort_call(call, RX_PROTOCOL_ERROR, EPROTO))
- rxrpc_queue_call(call);
-free_packet_unlock:
- write_unlock_bh(&call->state_lock);
-free_packet:
- rxrpc_free_skb(skb);
-done:
- _leave("");
+/*
+ * Process individual soft ACKs.
+ *
+ * Each ACK in the array corresponds to one packet and can be either an ACK or
+ * a NAK. If we find an explicitly NAK'd packet we resend immediately;
+ * packets that lie beyond the end of the ACK list are scheduled for resend by
+ * the timer on the basis that the peer might just not have processed them at
+ * the time the ACK was sent.
+ */
+static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
+ rxrpc_seq_t seq, int nr_acks,
+ struct rxrpc_ack_summary *summary)
+{
+ int ix;
+ u8 annotation, anno_type;
+
+ for (; nr_acks > 0; nr_acks--, seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ annotation = call->rxtx_annotations[ix];
+ anno_type = annotation & RXRPC_TX_ANNO_MASK;
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ switch (*acks++) {
+ case RXRPC_ACK_TYPE_ACK:
+ summary->nr_acks++;
+ if (anno_type == RXRPC_TX_ANNO_ACK)
+ continue;
+ summary->nr_new_acks++;
+ call->rxtx_annotations[ix] =
+ RXRPC_TX_ANNO_ACK | annotation;
+ break;
+ case RXRPC_ACK_TYPE_NACK:
+ if (!summary->nr_nacks &&
+ call->acks_lowest_nak != seq) {
+ call->acks_lowest_nak = seq;
+ summary->new_low_nack = true;
+ }
+ summary->nr_nacks++;
+ if (anno_type == RXRPC_TX_ANNO_NAK)
+ continue;
+ summary->nr_new_nacks++;
+ if (anno_type == RXRPC_TX_ANNO_RETRANS)
+ continue;
+ call->rxtx_annotations[ix] =
+ RXRPC_TX_ANNO_NAK | annotation;
+ break;
+ default:
+ return rxrpc_proto_abort("SFT", call, 0);
+ }
+ }
}
/*
- * split up a jumbo data packet
+ * Process an ACK packet.
+ *
+ * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
+ * in the ACK array. Anything before that is hard-ACK'd and may be discarded.
+ *
+ * A hard-ACK means that a packet has been processed and may be discarded; a
+ * soft-ACK means that the packet may be discarded and retransmission
+ * requested. A phase is complete when all packets are hard-ACK'd.
*/
-static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
- struct sk_buff *jumbo)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ u16 skew)
{
- struct rxrpc_jumbo_header jhdr;
- struct rxrpc_skb_priv *sp;
- struct sk_buff *part;
+ struct rxrpc_ack_summary summary = { 0 };
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ union {
+ struct rxrpc_ackpacket ack;
+ struct rxrpc_ackinfo info;
+ u8 acks[RXRPC_MAXACKS];
+ } buf;
+ rxrpc_serial_t acked_serial;
+ rxrpc_seq_t first_soft_ack, hard_ack;
+ int nr_acks, offset;
+
+ _enter("");
+
+ if (skb_copy_bits(skb, sp->offset, &buf.ack, sizeof(buf.ack)) < 0) {
+ _debug("extraction failure");
+ return rxrpc_proto_abort("XAK", call, 0);
+ }
+ sp->offset += sizeof(buf.ack);
+
+ acked_serial = ntohl(buf.ack.serial);
+ first_soft_ack = ntohl(buf.ack.firstPacket);
+ hard_ack = first_soft_ack - 1;
+ nr_acks = buf.ack.nAcks;
+ summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
+ buf.ack.reason : RXRPC_ACK__INVALID);
+
+ trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks);
+
+ _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
+ sp->hdr.serial,
+ ntohs(buf.ack.maxSkew),
+ first_soft_ack,
+ ntohl(buf.ack.previousPacket),
+ acked_serial,
+ rxrpc_ack_names[summary.ack_reason],
+ buf.ack.nAcks);
+
+ if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
+ sp->hdr.serial);
+ if (buf.ack.reason == RXRPC_ACK_REQUESTED)
+ rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
+ sp->hdr.serial);
+
+ if (buf.ack.reason == RXRPC_ACK_PING) {
+ _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
+ skew, sp->hdr.serial, true, true,
+ rxrpc_propose_ack_respond_to_ping);
+ } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
+ rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
+ skew, sp->hdr.serial, true, true,
+ rxrpc_propose_ack_respond_to_ack);
+ }
- _enter(",{%u,%u}", jumbo->data_len, jumbo->len);
+ offset = sp->offset + nr_acks + 3;
+ if (skb->len >= offset + sizeof(buf.info)) {
+ if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
+ return rxrpc_proto_abort("XAI", call, 0);
+ rxrpc_input_ackinfo(call, skb, &buf.info);
+ }
- sp = rxrpc_skb(jumbo);
+ if (first_soft_ack == 0)
+ return rxrpc_proto_abort("AK0", call, 0);
- do {
- sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;
-
- /* make a clone to represent the first subpacket in what's left
- * of the jumbo packet */
- part = skb_clone(jumbo, GFP_ATOMIC);
- if (!part) {
- /* simply ditch the tail in the event of ENOMEM */
- pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
- break;
- }
- rxrpc_new_skb(part);
+ /* Ignore ACKs unless we are or have just been transmitting. */
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_SEND_REQUEST:
+ case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+ case RXRPC_CALL_SERVER_SEND_REPLY:
+ case RXRPC_CALL_SERVER_AWAIT_ACK:
+ break;
+ default:
+ return;
+ }
- pskb_trim(part, RXRPC_JUMBO_DATALEN);
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest)) {
+ _debug("discard ACK %d <= %d",
+ sp->hdr.serial, call->acks_latest);
+ return;
+ }
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_latest = sp->hdr.serial;
+
+ if (before(hard_ack, call->tx_hard_ack) ||
+ after(hard_ack, call->tx_top))
+ return rxrpc_proto_abort("AKW", call, 0);
+ if (nr_acks > call->tx_top - hard_ack)
+ return rxrpc_proto_abort("AKN", call, 0);
+
+ if (after(hard_ack, call->tx_hard_ack))
+ rxrpc_rotate_tx_window(call, hard_ack, &summary);
+
+ if (nr_acks > 0) {
+ if (skb_copy_bits(skb, sp->offset, buf.acks, nr_acks) < 0)
+ return rxrpc_proto_abort("XSA", call, 0);
+ rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
+ &summary);
+ }
- if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
- goto protocol_error;
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
+ rxrpc_end_tx_phase(call, false, "ETA");
+ return;
+ }
- if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
- goto protocol_error;
- if (!pskb_pull(jumbo, sizeof(jhdr)))
- BUG();
+ if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
+ RXRPC_TX_ANNO_LAST &&
+ summary.nr_acks == call->tx_top - hard_ack)
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ false, true,
+ rxrpc_propose_ack_ping_for_lost_reply);
- sp->hdr.seq += 1;
- sp->hdr.serial += 1;
- sp->hdr.flags = jhdr.flags;
- sp->hdr._rsvd = ntohs(jhdr._rsvd);
+ return rxrpc_congestion_management(call, skb, &summary);
+}
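
The sanity checks above — reject a hard-ACK before tx_hard_ack or beyond tx_top, and an ACK list longer than the un-hard-ACK'd span — rely on the wrap-safe before()/after() helpers. Their usual definition, for reference:

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrap-safe sequence comparison: valid while live sequence numbers
     * stay within 2^31 of each other.
     */
    static bool seq_before(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    static bool seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }
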
- _proto("Rx DATA Jumbo %%%u", sp->hdr.serial - 1);
+/*
+ * Process an ACKALL packet.
+ */
+static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_ack_summary summary = { 0 };
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- rxrpc_fast_process_packet(call, part);
- part = NULL;
+ _proto("Rx ACKALL %%%u", sp->hdr.serial);
- } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);
+ rxrpc_rotate_tx_window(call, call->tx_top, &summary);
+ if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ rxrpc_end_tx_phase(call, false, "ETL");
+}
- rxrpc_fast_process_packet(call, jumbo);
- _leave("");
- return;
+/*
+ * Process an ABORT packet.
+ */
+static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ __be32 wtmp;
+ u32 abort_code = RX_CALL_DEAD;
-protocol_error:
- _debug("protocol error");
- rxrpc_free_skb(part);
- rxrpc_free_skb(jumbo);
- if (rxrpc_abort_call(call, RX_PROTOCOL_ERROR, EPROTO))
- rxrpc_queue_call(call);
- _leave("");
+ _enter("");
+
+ if (skb->len >= 4 &&
+ skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) >= 0)
+ abort_code = ntohl(wtmp);
+
+ _proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
+
+ if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
+ abort_code, ECONNABORTED))
+ rxrpc_notify_socket(call);
}
/*
- * post an incoming packet to the appropriate call/socket to deal with
- * - must get rid of the sk_buff, either by freeing it or by queuing it
+ * Process an incoming call packet.
*/
-static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
- struct sk_buff *skb)
+static void rxrpc_input_call_packet(struct rxrpc_call *call,
+ struct sk_buff *skb, u16 skew)
{
- struct rxrpc_skb_priv *sp;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
_enter("%p,%p", call, skb);
- sp = rxrpc_skb(skb);
+ switch (sp->hdr.type) {
+ case RXRPC_PACKET_TYPE_DATA:
+ rxrpc_input_data(call, skb, skew);
+ break;
- _debug("extant call [%d]", call->state);
+ case RXRPC_PACKET_TYPE_ACK:
+ rxrpc_input_ack(call, skb, skew);
+ break;
- read_lock(&call->state_lock);
- switch (call->state) {
- case RXRPC_CALL_DEAD:
- goto dead_call;
-
- case RXRPC_CALL_COMPLETE:
- switch (call->completion) {
- case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_EV_ABORT,
- &call->events)) {
- rxrpc_queue_call(call);
- goto free_unlock;
- }
- default:
- goto dead_call;
- case RXRPC_CALL_SUCCEEDED:
- if (rxrpc_conn_is_service(call->conn))
- goto dead_call;
- goto resend_final_ack;
- }
+ case RXRPC_PACKET_TYPE_BUSY:
+ _proto("Rx BUSY %%%u", sp->hdr.serial);
- case RXRPC_CALL_CLIENT_FINAL_ACK:
- goto resend_final_ack;
+ /* Just ignore BUSY packets from the server; the retry and
+ * lifespan timers will take care of business. BUSY packets
+ * from the client don't make sense.
+ */
+ break;
+
+ case RXRPC_PACKET_TYPE_ABORT:
+ rxrpc_input_abort(call, skb);
+ break;
+
+ case RXRPC_PACKET_TYPE_ACKALL:
+ rxrpc_input_ackall(call, skb);
+ break;
default:
+ _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
break;
}
- read_unlock(&call->state_lock);
- rxrpc_get_call(call);
-
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
- sp->hdr.flags & RXRPC_JUMBO_PACKET)
- rxrpc_process_jumbo_packet(call, skb);
- else
- rxrpc_fast_process_packet(call, skb);
-
- rxrpc_put_call(call);
- goto done;
-
-resend_final_ack:
- _debug("final ack again");
- rxrpc_get_call(call);
- set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
- rxrpc_queue_call(call);
- goto free_unlock;
-
-dead_call:
- if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
- skb->priority = RX_CALL_DEAD;
- rxrpc_reject_packet(call->conn->params.local, skb);
- goto unlock;
- }
-free_unlock:
- rxrpc_free_skb(skb);
-unlock:
- read_unlock(&call->state_lock);
-done:
_leave("");
}
@@ -597,6 +960,17 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
}
/*
+ * put a packet up for transport-level abort
+ */
+static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
+{
+ CHECK_SLAB_OKAY(&local->usage);
+
+ skb_queue_tail(&local->reject_queue, skb);
+ rxrpc_queue_local(local);
+}
+
+/*
* Extract the wire header from a packet and translate the byte order.
*/
static noinline
@@ -607,8 +981,6 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
/* dig out the RxRPC connection details */
if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
return -EBADMSG;
- if (!pskb_pull(skb, sizeof(whdr)))
- BUG();
memset(sp, 0, sizeof(*sp));
sp->hdr.epoch = ntohl(whdr.epoch);
@@ -622,6 +994,7 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
sp->hdr.securityIndex = whdr.securityIndex;
sp->hdr._rsvd = ntohs(whdr._rsvd);
sp->hdr.serviceId = ntohs(whdr.serviceId);
+ sp->offset = sizeof(whdr);
return 0;
}
@@ -633,19 +1006,22 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* shut down and the local endpoint from going away, thus sk_user_data will not
* be cleared until this function returns.
*/
-void rxrpc_data_ready(struct sock *sk)
+void rxrpc_data_ready(struct sock *udp_sk)
{
struct rxrpc_connection *conn;
+ struct rxrpc_channel *chan;
+ struct rxrpc_call *call;
struct rxrpc_skb_priv *sp;
- struct rxrpc_local *local = sk->sk_user_data;
+ struct rxrpc_local *local = udp_sk->sk_user_data;
struct sk_buff *skb;
+ unsigned int channel;
int ret, skew;
- _enter("%p", sk);
+ _enter("%p", udp_sk);
ASSERT(!irqs_disabled());
- skb = skb_recv_datagram(sk, 0, 1, &ret);
+ skb = skb_recv_datagram(udp_sk, 0, 1, &ret);
if (!skb) {
if (ret == -EAGAIN)
return;
@@ -653,13 +1029,13 @@ void rxrpc_data_ready(struct sock *sk)
return;
}
- rxrpc_new_skb(skb);
+ rxrpc_new_skb(skb, rxrpc_skb_rx_received);
_net("recv skb %p", skb);
/* we'll probably need to checksum it (didn't call sock_recvmsg) */
if (skb_checksum_complete(skb)) {
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
_leave(" [CSUM failed]");
return;
@@ -673,13 +1049,21 @@ void rxrpc_data_ready(struct sock *sk)
skb_orphan(skb);
sp = rxrpc_skb(skb);
- _net("Rx UDP packet from %08x:%04hu",
- ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
-
/* dig out the RxRPC connection details */
if (rxrpc_extract_header(sp, skb) < 0)
goto bad_message;
+ if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
+ static int lose;
+ if ((lose++ & 7) == 7) {
+ trace_rxrpc_rx_lose(sp);
+ rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
+ return;
+ }
+ }
+
+ trace_rxrpc_rx_packet(sp);
+
_net("Rx RxRPC %s ep=%x call=%x:%x",
sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
@@ -690,110 +1074,125 @@ void rxrpc_data_ready(struct sock *sk)
goto bad_message;
}
- if (sp->hdr.type == RXRPC_PACKET_TYPE_VERSION) {
+ switch (sp->hdr.type) {
+ case RXRPC_PACKET_TYPE_VERSION:
rxrpc_post_packet_to_local(local, skb);
goto out;
- }
- if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
- (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
- goto bad_message;
+ case RXRPC_PACKET_TYPE_BUSY:
+ if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+ goto discard;
+
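+ /* Fall through: BUSY shares DATA's call-number sanity check. */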
+ case RXRPC_PACKET_TYPE_DATA:
+ if (sp->hdr.callNumber == 0)
+ goto bad_message;
+ if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
+ !rxrpc_validate_jumbo(skb))
+ goto bad_message;
+ break;
+ }
rcu_read_lock();
conn = rxrpc_find_connection_rcu(local, skb);
- if (!conn) {
- skb->priority = 0;
- goto cant_route_call;
- }
+ if (conn) {
+ if (sp->hdr.securityIndex != conn->security_ix)
+ goto wrong_security;
- /* Note the serial number skew here */
- skew = (int)sp->hdr.serial - (int)conn->hi_serial;
- if (skew >= 0) {
- if (skew > 0)
- conn->hi_serial = sp->hdr.serial;
- skb->priority = 0;
- } else {
- skew = -skew;
- skb->priority = min(skew, 65535);
- }
+ if (sp->hdr.callNumber == 0) {
+ /* Connection-level packet */
+ _debug("CONN %p {%d}", conn, conn->debug_id);
+ rxrpc_post_packet_to_conn(conn, skb);
+ goto out_unlock;
+ }
+
+ /* Note the serial number skew here */
+ skew = (int)sp->hdr.serial - (int)conn->hi_serial;
+ if (skew >= 0) {
+ if (skew > 0)
+ conn->hi_serial = sp->hdr.serial;
+ } else {
+ skew = -skew;
+ skew = min(skew, 65535);
+ }
- if (sp->hdr.callNumber == 0) {
- /* Connection-level packet */
- _debug("CONN %p {%d}", conn, conn->debug_id);
- rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
- } else {
/* Call-bound packets are routed by connection channel. */
- unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK;
- struct rxrpc_channel *chan = &conn->channels[channel];
- struct rxrpc_call *call;
+ channel = sp->hdr.cid & RXRPC_CHANNELMASK;
+ chan = &conn->channels[channel];
/* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call)
goto discard_unlock;
if (sp->hdr.callNumber == chan->last_call) {
- /* For the previous service call, if completed
- * successfully, we discard all further packets.
+ /* For the previous service call, if completed successfully, we
+ * discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
(chan->last_type == RXRPC_PACKET_TYPE_ACK ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT))
goto discard_unlock;
- /* But otherwise we need to retransmit the final packet
- * from data cached in the connection record.
+ /* But otherwise we need to retransmit the final packet from
+ * data cached in the connection record.
*/
rxrpc_post_packet_to_conn(conn, skb);
goto out_unlock;
}
call = rcu_dereference(chan->call);
- if (!call || atomic_read(&call->usage) == 0)
- goto cant_route_call;
+ } else {
+ skew = 0;
+ call = NULL;
+ }
- rxrpc_see_call(call);
- rxrpc_post_packet_to_call(call, skb);
- goto out_unlock;
+ if (!call || atomic_read(&call->usage) == 0) {
+ if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED) ||
+ sp->hdr.callNumber == 0 ||
+ sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
+ goto bad_message_unlock;
+ if (sp->hdr.seq != 1)
+ goto discard_unlock;
+ call = rxrpc_new_incoming_call(local, conn, skb);
+ if (!call) {
+ rcu_read_unlock();
+ goto reject_packet;
+ }
+ rxrpc_send_ping(call, skb, skew);
}
+ rxrpc_input_call_packet(call, skb, skew);
+ goto discard_unlock;
+
discard_unlock:
- rxrpc_free_skb(skb);
-out_unlock:
rcu_read_unlock();
+discard:
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
+ trace_rxrpc_rx_done(0, 0);
return;
-cant_route_call:
+out_unlock:
rcu_read_unlock();
+ goto out;
- _debug("can't route call");
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
- sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
- if (sp->hdr.seq == 1) {
- _debug("first packet");
- skb_queue_tail(&local->accept_queue, skb);
- rxrpc_queue_work(&local->processor);
- _leave(" [incoming]");
- return;
- }
- skb->priority = RX_INVALID_OPERATION;
- } else {
- skb->priority = RX_CALL_DEAD;
- }
-
- if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
- _debug("reject type %d",sp->hdr.type);
- rxrpc_reject_packet(local, skb);
- } else {
- rxrpc_free_skb(skb);
- }
- _leave(" [no call]");
- return;
+wrong_security:
+ rcu_read_unlock();
+ trace_rxrpc_abort("SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RXKADINCONSISTENCY, EBADMSG);
+ skb->priority = RXKADINCONSISTENCY;
+ goto post_abort;
+bad_message_unlock:
+ rcu_read_unlock();
bad_message:
+ trace_rxrpc_abort("BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_PROTOCOL_ERROR, EBADMSG);
skb->priority = RX_PROTOCOL_ERROR;
+post_abort:
+ skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+reject_packet:
+ trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb);
_leave(" [badmsg]");
}
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index c21ad213b337..7d4375e557e6 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -23,31 +23,36 @@ static int none_prime_packet_security(struct rxrpc_connection *conn)
}
static int none_secure_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- size_t data_size,
- void *sechdr)
+ struct sk_buff *skb,
+ size_t data_size,
+ void *sechdr)
{
return 0;
}
-static int none_verify_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq, u16 expected_cksum)
{
return 0;
}
+static void none_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+}
+
static int none_respond_to_challenge(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb,
+ u32 *_abort_code)
{
*_abort_code = RX_PROTOCOL_ERROR;
return -EPROTO;
}
static int none_verify_response(struct rxrpc_connection *conn,
- struct sk_buff *skb,
- u32 *_abort_code)
+ struct sk_buff *skb,
+ u32 *_abort_code)
{
*_abort_code = RX_PROTOCOL_ERROR;
return -EPROTO;
@@ -78,6 +83,7 @@ const struct rxrpc_security rxrpc_no_security = {
.prime_packet_security = none_prime_packet_security,
.secure_packet = none_secure_packet,
.verify_packet = none_verify_packet,
+ .locate_data = none_locate_data,
.respond_to_challenge = none_respond_to_challenge,
.verify_response = none_verify_response,
.clear = none_clear,
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index bcc6593b4cdb..190f68bd9e27 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -15,8 +15,6 @@
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
-#include <linux/udp.h>
-#include <linux/ip.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <generated/utsrelease.h>
@@ -33,7 +31,7 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
{
struct rxrpc_wire_header whdr;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct sockaddr_in sin;
+ struct sockaddr_rxrpc srx;
struct msghdr msg;
struct kvec iov[2];
size_t len;
@@ -41,12 +39,11 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
_enter("");
- sin.sin_family = AF_INET;
- sin.sin_port = udp_hdr(skb)->source;
- sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
+ if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
+ return;
- msg.msg_name = &sin;
- msg.msg_namelen = sizeof(sin);
+ msg.msg_name = &srx.transport;
+ msg.msg_namelen = srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
@@ -93,12 +90,12 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- rxrpc_see_skb(skb);
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
_debug("{%d},{%u}", local->debug_id, sp->hdr.type);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
- if (skb_copy_bits(skb, 0, &v, 1) < 0)
+ if (skb_copy_bits(skb, sp->offset, &v, 1) < 0)
return;
_proto("Rx VERSION { %02x }", v);
if (v == 0)
@@ -110,7 +107,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
break;
}
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
}
_leave("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index a753796fbe8f..e3fad80b0795 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -58,6 +58,17 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
memcmp(&local->srx.transport.sin.sin_addr,
&srx->transport.sin.sin_addr,
sizeof(struct in_addr));
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ /* If the choice of UDP6 port is left up to the transport, then
+ * the endpoint record doesn't match.
+ */
+ return ((u16 __force)local->srx.transport.sin6.sin6_port -
+ (u16 __force)srx->transport.sin6.sin6_port) ?:
+ memcmp(&local->srx.transport.sin6.sin6_addr,
+ &srx->transport.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+#endif
default:
BUG();
}
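
The a ?: b form used in these comparators is the GNU "elvis" extension: it yields a when a is non-zero, otherwise b, which makes compare-by-field chaining compact. A self-contained illustration (gcc/clang only):

    #include <string.h>

    struct endpoint {
            unsigned short port;
            unsigned char addr[4];
    };

    /* Compare by port first, then by address, via ?: chaining. */
    static long endpoint_cmp(const struct endpoint *a,
                             const struct endpoint *b)
    {
            return ((long)a->port - (long)b->port) ?:
                   memcmp(a->addr, b->addr, sizeof(a->addr));
    }
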
@@ -75,9 +86,8 @@ static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
atomic_set(&local->usage, 1);
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
- INIT_LIST_HEAD(&local->services);
+ INIT_HLIST_HEAD(&local->services);
init_rwsem(&local->defrag_sem);
- skb_queue_head_init(&local->accept_queue);
skb_queue_head_init(&local->reject_queue);
skb_queue_head_init(&local->event_queue);
local->client_conns = RB_ROOT;
@@ -101,11 +111,12 @@ static int rxrpc_open_socket(struct rxrpc_local *local)
struct sock *sock;
int ret, opt;
- _enter("%p{%d}", local, local->srx.transport_type);
+ _enter("%p{%d,%d}",
+ local, local->srx.transport_type, local->srx.transport.family);
/* create a socket to represent the local endpoint */
- ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
- IPPROTO_UDP, &local->socket);
+ ret = sock_create_kern(&init_net, local->srx.transport.family,
+ local->srx.transport_type, 0, &local->socket);
if (ret < 0) {
_leave(" = %d [socket]", ret);
return ret;
@@ -170,18 +181,8 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
long diff;
int ret;
- if (srx->transport.family == AF_INET) {
- _enter("{%d,%u,%pI4+%hu}",
- srx->transport_type,
- srx->transport.family,
- &srx->transport.sin.sin_addr,
- ntohs(srx->transport.sin.sin_port));
- } else {
- _enter("{%d,%u}",
- srx->transport_type,
- srx->transport.family);
- return ERR_PTR(-EAFNOSUPPORT);
- }
+ _enter("{%d,%d,%pISp}",
+ srx->transport_type, srx->transport.family, &srx->transport);
mutex_lock(&rxrpc_local_mutex);
@@ -234,13 +235,8 @@ struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
found:
mutex_unlock(&rxrpc_local_mutex);
- _net("LOCAL %s %d {%d,%u,%pI4+%hu}",
- age,
- local->debug_id,
- local->srx.transport_type,
- local->srx.transport.family,
- &local->srx.transport.sin.sin_addr,
- ntohs(local->srx.transport.sin.sin_port));
+ _net("LOCAL %s %d {%pISp}",
+ age, local->debug_id, &local->srx.transport);
_leave(" = %p", local);
return local;
@@ -296,7 +292,7 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
mutex_unlock(&rxrpc_local_mutex);
ASSERT(RB_EMPTY_ROOT(&local->client_conns));
- ASSERT(list_empty(&local->services));
+ ASSERT(hlist_empty(&local->services));
if (socket) {
local->socket = NULL;
@@ -308,7 +304,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
/* At this point, there should be no more packets coming in to the
* local endpoint.
*/
- rxrpc_purge_queue(&local->accept_queue);
rxrpc_purge_queue(&local->reject_queue);
rxrpc_purge_queue(&local->event_queue);
@@ -332,11 +327,6 @@ static void rxrpc_local_processor(struct work_struct *work)
if (atomic_read(&local->usage) == 0)
return rxrpc_local_destroyer(local);
- if (!skb_queue_empty(&local->accept_queue)) {
- rxrpc_accept_incoming_calls(local);
- again = true;
- }
-
if (!skb_queue_empty(&local->reject_queue)) {
rxrpc_reject_packets(local);
again = true;
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 39e7cc37c392..aedb8978226d 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -50,7 +50,10 @@ unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
* limit is hit, we should generate an EXCEEDS_WINDOW ACK and discard further
* packets.
*/
-unsigned int rxrpc_rx_window_size = 32;
+unsigned int rxrpc_rx_window_size = RXRPC_INIT_RX_WINDOW_SIZE;
+#if (RXRPC_RXTX_BUFF_SIZE - 1) < RXRPC_INIT_RX_WINDOW_SIZE
+#error Need to reduce RXRPC_INIT_RX_WINDOW_SIZE
+#endif
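
The #if/#error pair is a preprocessor-time assertion that the initial window fits the ring. C11 offers the same guarantee with nicer diagnostics; a sketch using stand-in constants:

    #include <assert.h>

    #define RXTX_BUFF_SIZE      64  /* stand-ins for the kernel constants */
    #define INIT_RX_WINDOW_SIZE 63

    static_assert(INIT_RX_WINDOW_SIZE <= RXTX_BUFF_SIZE - 1,
                  "Need to reduce INIT_RX_WINDOW_SIZE");
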
/*
* Maximum Rx MTU size. This indicates to the sender the size of jumbo packet
@@ -65,9 +68,9 @@ unsigned int rxrpc_rx_mtu = 5692;
unsigned int rxrpc_rx_jumbo_max = 4;
/*
- * Time till packet resend (in jiffies).
+ * Time till packet resend (in milliseconds).
*/
-unsigned int rxrpc_resend_timeout = 4 * HZ;
+unsigned int rxrpc_resend_timeout = 4 * 1000;
const char *const rxrpc_pkts[] = {
"?00",
@@ -80,21 +83,153 @@ const s8 rxrpc_ack_priority[] = {
[RXRPC_ACK_DELAY] = 1,
[RXRPC_ACK_REQUESTED] = 2,
[RXRPC_ACK_IDLE] = 3,
- [RXRPC_ACK_PING_RESPONSE] = 4,
- [RXRPC_ACK_DUPLICATE] = 5,
- [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
- [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
- [RXRPC_ACK_NOSPACE] = 8,
-};
-
-const char *rxrpc_acks(u8 reason)
-{
- static const char *const str[] = {
- "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
- "IDL", "-?-"
- };
-
- if (reason >= ARRAY_SIZE(str))
- reason = ARRAY_SIZE(str) - 1;
- return str[reason];
-}
+ [RXRPC_ACK_DUPLICATE] = 4,
+ [RXRPC_ACK_OUT_OF_SEQUENCE] = 5,
+ [RXRPC_ACK_EXCEEDS_WINDOW] = 6,
+ [RXRPC_ACK_NOSPACE] = 7,
+ [RXRPC_ACK_PING_RESPONSE] = 8,
+ [RXRPC_ACK_PING] = 9,
+};
+
+const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
+ "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
+ "IDL", "-?-"
+};
+
+const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7] = {
+ [rxrpc_skb_rx_cleaned] = "Rx CLN",
+ [rxrpc_skb_rx_freed] = "Rx FRE",
+ [rxrpc_skb_rx_got] = "Rx GOT",
+ [rxrpc_skb_rx_lost] = "Rx *L*",
+ [rxrpc_skb_rx_received] = "Rx RCV",
+ [rxrpc_skb_rx_purged] = "Rx PUR",
+ [rxrpc_skb_rx_rotated] = "Rx ROT",
+ [rxrpc_skb_rx_seen] = "Rx SEE",
+ [rxrpc_skb_tx_cleaned] = "Tx CLN",
+ [rxrpc_skb_tx_freed] = "Tx FRE",
+ [rxrpc_skb_tx_got] = "Tx GOT",
+ [rxrpc_skb_tx_lost] = "Tx *L*",
+ [rxrpc_skb_tx_new] = "Tx NEW",
+ [rxrpc_skb_tx_rotated] = "Tx ROT",
+ [rxrpc_skb_tx_seen] = "Tx SEE",
+};
+
+const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4] = {
+ [rxrpc_conn_new_client] = "NWc",
+ [rxrpc_conn_new_service] = "NWs",
+ [rxrpc_conn_queued] = "QUE",
+ [rxrpc_conn_seen] = "SEE",
+ [rxrpc_conn_got] = "GOT",
+ [rxrpc_conn_put_client] = "PTc",
+ [rxrpc_conn_put_service] = "PTs",
+};
+
+const char rxrpc_client_traces[rxrpc_client__nr_trace][7] = {
+ [rxrpc_client_activate_chans] = "Activa",
+ [rxrpc_client_alloc] = "Alloc ",
+ [rxrpc_client_chan_activate] = "ChActv",
+ [rxrpc_client_chan_disconnect] = "ChDisc",
+ [rxrpc_client_chan_pass] = "ChPass",
+ [rxrpc_client_chan_unstarted] = "ChUnst",
+ [rxrpc_client_cleanup] = "Clean ",
+ [rxrpc_client_count] = "Count ",
+ [rxrpc_client_discard] = "Discar",
+ [rxrpc_client_duplicate] = "Duplic",
+ [rxrpc_client_exposed] = "Expose",
+ [rxrpc_client_replace] = "Replac",
+ [rxrpc_client_to_active] = "->Actv",
+ [rxrpc_client_to_culled] = "->Cull",
+ [rxrpc_client_to_idle] = "->Idle",
+ [rxrpc_client_to_inactive] = "->Inac",
+ [rxrpc_client_to_waiting] = "->Wait",
+ [rxrpc_client_uncount] = "Uncoun",
+};
+
+const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4] = {
+ [rxrpc_transmit_wait] = "WAI",
+ [rxrpc_transmit_queue] = "QUE",
+ [rxrpc_transmit_queue_last] = "QLS",
+ [rxrpc_transmit_rotate] = "ROT",
+ [rxrpc_transmit_rotate_last] = "RLS",
+ [rxrpc_transmit_await_reply] = "AWR",
+ [rxrpc_transmit_end] = "END",
+};
+
+const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4] = {
+ [rxrpc_receive_incoming] = "INC",
+ [rxrpc_receive_queue] = "QUE",
+ [rxrpc_receive_queue_last] = "QLS",
+ [rxrpc_receive_front] = "FRN",
+ [rxrpc_receive_rotate] = "ROT",
+ [rxrpc_receive_end] = "END",
+};
+
+const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5] = {
+ [rxrpc_recvmsg_enter] = "ENTR",
+ [rxrpc_recvmsg_wait] = "WAIT",
+ [rxrpc_recvmsg_dequeue] = "DEQU",
+ [rxrpc_recvmsg_hole] = "HOLE",
+ [rxrpc_recvmsg_next] = "NEXT",
+ [rxrpc_recvmsg_cont] = "CONT",
+ [rxrpc_recvmsg_full] = "FULL",
+ [rxrpc_recvmsg_data_return] = "DATA",
+ [rxrpc_recvmsg_terminal] = "TERM",
+ [rxrpc_recvmsg_to_be_accepted] = "TBAC",
+ [rxrpc_recvmsg_return] = "RETN",
+};
+
+const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5] = {
+ [rxrpc_rtt_tx_ping] = "PING",
+ [rxrpc_rtt_tx_data] = "DATA",
+};
+
+const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5] = {
+ [rxrpc_rtt_rx_ping_response] = "PONG",
+ [rxrpc_rtt_rx_requested_ack] = "RACK",
+};
+
+const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8] = {
+ [rxrpc_timer_begin] = "Begin ",
+ [rxrpc_timer_expired] = "*EXPR*",
+ [rxrpc_timer_init_for_reply] = "IniRpl",
+ [rxrpc_timer_set_for_ack] = "SetAck",
+ [rxrpc_timer_set_for_send] = "SetTx ",
+ [rxrpc_timer_set_for_resend] = "SetRTx",
+};
+
+const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8] = {
+ [rxrpc_propose_ack_client_tx_end] = "ClTxEnd",
+ [rxrpc_propose_ack_input_data] = "DataIn ",
+ [rxrpc_propose_ack_ping_for_lost_ack] = "LostAck",
+ [rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl",
+ [rxrpc_propose_ack_ping_for_params] = "Params ",
+ [rxrpc_propose_ack_respond_to_ack] = "Rsp2Ack",
+ [rxrpc_propose_ack_respond_to_ping] = "Rsp2Png",
+ [rxrpc_propose_ack_retry_tx] = "RetryTx",
+ [rxrpc_propose_ack_rotate_rx] = "RxAck ",
+ [rxrpc_propose_ack_terminal_ack] = "ClTerm ",
+};
+
+const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes] = {
+ [rxrpc_propose_ack_use] = "",
+ [rxrpc_propose_ack_update] = " Update",
+ [rxrpc_propose_ack_subsume] = " Subsume",
+};
+
+const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10] = {
+ [RXRPC_CALL_SLOW_START] = "SlowStart",
+ [RXRPC_CALL_CONGEST_AVOIDANCE] = "CongAvoid",
+ [RXRPC_CALL_PACKET_LOSS] = "PktLoss ",
+ [RXRPC_CALL_FAST_RETRANSMIT] = "FastReTx ",
+};
+
+const char rxrpc_congest_changes[rxrpc_congest__nr_change][9] = {
+ [rxrpc_cong_begin_retransmission] = " Retrans",
+ [rxrpc_cong_cleared_nacks] = " Cleared",
+ [rxrpc_cong_new_low_nack] = " NewLowN",
+ [rxrpc_cong_no_change] = "",
+ [rxrpc_cong_progress] = " Progres",
+ [rxrpc_cong_retransmit_again] = " ReTxAgn",
+ [rxrpc_cong_rtt_window_end] = " RttWinE",
+ [rxrpc_cong_saw_nack] = " SawNack",
+};
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 5b5508f6fc2a..cf43a715685e 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -19,46 +19,319 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
+struct rxrpc_pkt_buffer {
+ struct rxrpc_wire_header whdr;
+ union {
+ struct {
+ struct rxrpc_ackpacket ack;
+ u8 acks[255];
+ u8 pad[3];
+ };
+ __be32 abort_code;
+ };
+ struct rxrpc_ackinfo ackinfo;
+};
+
+/*
+ * Fill out an ACK packet.
+ */
+static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
+ struct rxrpc_pkt_buffer *pkt,
+ rxrpc_seq_t *_hard_ack,
+ rxrpc_seq_t *_top)
+{
+ rxrpc_serial_t serial;
+ rxrpc_seq_t hard_ack, top, seq;
+ int ix;
+ u32 mtu, jmax;
+ u8 *ackp = pkt->acks;
+
+ /* Barrier against rxrpc_input_data(). */
+ serial = call->ackr_serial;
+ hard_ack = READ_ONCE(call->rx_hard_ack);
+ top = smp_load_acquire(&call->rx_top);
+ *_hard_ack = hard_ack;
+ *_top = top;
+
+ pkt->ack.bufferSpace = htons(8);
+ pkt->ack.maxSkew = htons(call->ackr_skew);
+ pkt->ack.firstPacket = htonl(hard_ack + 1);
+ pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
+ pkt->ack.serial = htonl(serial);
+ pkt->ack.reason = call->ackr_reason;
+ pkt->ack.nAcks = top - hard_ack;
+
+ if (pkt->ack.reason == RXRPC_ACK_PING)
+ pkt->whdr.flags |= RXRPC_REQUEST_ACK;
+
+ if (after(top, hard_ack)) {
+ seq = hard_ack + 1;
+ do {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ if (call->rxtx_buffer[ix])
+ *ackp++ = RXRPC_ACK_TYPE_ACK;
+ else
+ *ackp++ = RXRPC_ACK_TYPE_NACK;
+ seq++;
+ } while (before_eq(seq, top));
+ }
+
+ mtu = call->conn->params.peer->if_mtu;
+ mtu -= call->conn->params.peer->hdrsize;
+ jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
+ pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
+ pkt->ackinfo.maxMTU = htonl(mtu);
+ pkt->ackinfo.rwind = htonl(call->rx_winsize);
+ pkt->ackinfo.jumbo_max = htonl(jmax);
+
+ *ackp++ = 0;
+ *ackp++ = 0;
+ *ackp++ = 0;
+ return top - hard_ack + 3;
+}
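
The ACK body built here carries one byte per sequence number from hard_ack+1 to top (ACK if the ring slot is filled, NACK for a hole) followed by three pad bytes before the ackinfo trailer — hence the "top - hard_ack + 3" return. Sketched over a plain ring, ignoring sequence wrap:

    #include <stddef.h>
    #include <stdint.h>

    #define ACK_TYPE_ACK  1         /* illustrative, not the wire values */
    #define ACK_TYPE_NACK 0

    /* Emit one soft-ACK byte per in-flight seq plus 3 bytes of padding;
     * returns the number of bytes written.
     */
    static size_t fill_sack_table(uint8_t *out, void *const *ring,
                                  unsigned int mask,
                                  unsigned int hard_ack, unsigned int top)
    {
            uint8_t *p = out;

            for (unsigned int seq = hard_ack + 1; seq <= top; seq++)
                    *p++ = ring[seq & mask] ? ACK_TYPE_ACK : ACK_TYPE_NACK;
            *p++ = 0;
            *p++ = 0;
            *p++ = 0;               /* pad before the ackinfo trailer */
            return (size_t)(p - out);
    }
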
+
+/*
+ * Send an ACK or ABORT call packet.
+ */
+int rxrpc_send_call_packet(struct rxrpc_call *call, u8 type)
+{
+ struct rxrpc_connection *conn = NULL;
+ struct rxrpc_pkt_buffer *pkt;
+ struct msghdr msg;
+ struct kvec iov[2];
+ rxrpc_serial_t serial;
+ rxrpc_seq_t hard_ack, top;
+ size_t len, n;
+ bool ping = false;
+ int ioc, ret;
+ u32 abort_code;
+
+ _enter("%u,%s", call->debug_id, rxrpc_pkts[type]);
+
+ spin_lock_bh(&call->lock);
+ if (call->conn)
+ conn = rxrpc_get_connection_maybe(call->conn);
+ spin_unlock_bh(&call->lock);
+ if (!conn)
+ return -ECONNRESET;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt) {
+ rxrpc_put_connection(conn);
+ return -ENOMEM;
+ }
+
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ pkt->whdr.epoch = htonl(conn->proto.epoch);
+ pkt->whdr.cid = htonl(call->cid);
+ pkt->whdr.callNumber = htonl(call->call_id);
+ pkt->whdr.seq = 0;
+ pkt->whdr.type = type;
+ pkt->whdr.flags = conn->out_clientflag;
+ pkt->whdr.userStatus = 0;
+ pkt->whdr.securityIndex = call->security_ix;
+ pkt->whdr._rsvd = 0;
+ pkt->whdr.serviceId = htons(call->service_id);
+
+ iov[0].iov_base = pkt;
+ iov[0].iov_len = sizeof(pkt->whdr);
+ len = sizeof(pkt->whdr);
+
+ switch (type) {
+ case RXRPC_PACKET_TYPE_ACK:
+ spin_lock_bh(&call->lock);
+ if (!call->ackr_reason) {
+ spin_unlock_bh(&call->lock);
+ ret = 0;
+ goto out;
+ }
+ ping = (call->ackr_reason == RXRPC_ACK_PING);
+ n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top);
+ call->ackr_reason = 0;
+
+ spin_unlock_bh(&call->lock);
+
+ pkt->whdr.flags |= RXRPC_SLOW_START_OK;
+
+ iov[0].iov_len += sizeof(pkt->ack) + n;
+ iov[1].iov_base = &pkt->ackinfo;
+ iov[1].iov_len = sizeof(pkt->ackinfo);
+ len += sizeof(pkt->ack) + n + sizeof(pkt->ackinfo);
+ ioc = 2;
+ break;
+
+ case RXRPC_PACKET_TYPE_ABORT:
+ abort_code = call->abort_code;
+ pkt->abort_code = htonl(abort_code);
+ iov[0].iov_len += sizeof(pkt->abort_code);
+ len += sizeof(pkt->abort_code);
+ ioc = 1;
+ break;
+
+ default:
+ BUG();
+ ret = -ENOANO;
+ goto out;
+ }
+
+ serial = atomic_inc_return(&conn->serial);
+ pkt->whdr.serial = htonl(serial);
+ switch (type) {
+ case RXRPC_PACKET_TYPE_ACK:
+ trace_rxrpc_tx_ack(call, serial,
+ ntohl(pkt->ack.firstPacket),
+ ntohl(pkt->ack.serial),
+ pkt->ack.reason, pkt->ack.nAcks);
+ break;
+ }
+
+ if (ping) {
+ call->ackr_ping = serial;
+ smp_wmb();
+ /* We need to stick a time in before we send the packet in case
+ * the reply gets back before kernel_sendmsg() completes - but
+ * asking UDP to send the packet can take a relatively long
+ * time, so we update the time after, on the assumption that
+ * the packet transmission is more likely to happen towards the
+ * end of the kernel_sendmsg() call.
+ */
+ call->ackr_ping_time = ktime_get_real();
+ set_bit(RXRPC_CALL_PINGING, &call->flags);
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
+ }
+ ret = kernel_sendmsg(conn->params.local->socket,
+ &msg, iov, ioc, len);
+ if (ping)
+ call->ackr_ping_time = ktime_get_real();
+
+ if (type == RXRPC_PACKET_TYPE_ACK &&
+ call->state < RXRPC_CALL_COMPLETE) {
+ if (ret < 0) {
+ clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ rxrpc_propose_ACK(call, pkt->ack.reason,
+ ntohs(pkt->ack.maxSkew),
+ ntohl(pkt->ack.serial),
+ true, true,
+ rxrpc_propose_ack_retry_tx);
+ } else {
+ spin_lock_bh(&call->lock);
+ if (after(hard_ack, call->ackr_consumed))
+ call->ackr_consumed = hard_ack;
+ if (after(top, call->ackr_seen))
+ call->ackr_seen = top;
+ spin_unlock_bh(&call->lock);
+ }
+ }
+
+out:
+ rxrpc_put_connection(conn);
+ kfree(pkt);
+ return ret;
+}
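
The double timestamping around kernel_sendmsg() above deserves a note: the stamp is taken before the send so a fast reply still finds a time in place, then overwritten afterwards on the theory that actual transmission happens late in the call. A sketch of the pattern, with assumed helper functions (now_ns, udp_send are not real APIs):

    #include <stddef.h>
    #include <stdint.h>

    extern uint64_t now_ns(void);                     /* assumed clock */
    extern int udp_send(const void *buf, size_t len); /* assumed transport */

    static uint64_t ping_sent_at;

    /* Stamp before sending (the reply may race the send), then refine. */
    static int send_ping(const void *buf, size_t len)
    {
            int ret;

            ping_sent_at = now_ns();
            ret = udp_send(buf, len);
            ping_sent_at = now_ns();
            return ret;
    }
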
+
/*
* send a packet through the transport endpoint
*/
-int rxrpc_send_data_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
+int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
- struct kvec iov[1];
+ struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_wire_header whdr;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct msghdr msg;
+ struct kvec iov[2];
+ rxrpc_serial_t serial;
+ size_t len;
int ret, opt;
_enter(",{%d}", skb->len);
- iov[0].iov_base = skb->head;
- iov[0].iov_len = skb->len;
+ /* Each transmission of a Tx packet needs a new serial number */
+ serial = atomic_inc_return(&conn->serial);
- msg.msg_name = &conn->params.peer->srx.transport;
- msg.msg_namelen = conn->params.peer->srx.transport_len;
+ whdr.epoch = htonl(conn->proto.epoch);
+ whdr.cid = htonl(call->cid);
+ whdr.callNumber = htonl(call->call_id);
+ whdr.seq = htonl(sp->hdr.seq);
+ whdr.serial = htonl(serial);
+ whdr.type = RXRPC_PACKET_TYPE_DATA;
+ whdr.flags = sp->hdr.flags;
+ whdr.userStatus = 0;
+ whdr.securityIndex = call->security_ix;
+ whdr._rsvd = htons(sp->hdr._rsvd);
+ whdr.serviceId = htons(call->service_id);
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
+ iov[1].iov_base = skb->head;
+ iov[1].iov_len = skb->len;
+ len = iov[0].iov_len + iov[1].iov_len;
+
+ msg.msg_name = &call->peer->srx.transport;
+ msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
+ /* If our RTT cache needs working on, request an ACK. Also request
+ * ACKs if a DATA packet appears to have been lost.
+ */
+ if (call->cong_mode == RXRPC_CALL_FAST_RETRANSMIT ||
+ (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
+ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
+ ktime_get_real()))
+ whdr.flags |= RXRPC_REQUEST_ACK;
+
+ if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
+ static int lose;
+ if ((lose++ & 7) == 7) {
+ trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
+ whdr.flags, true);
+ rxrpc_lose_skb(skb, rxrpc_skb_tx_lost);
+ _leave(" = 0 [lose]");
+ return 0;
+ }
+ }
+
+ _proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);
+
/* send the packet with the don't fragment bit set if we currently
* think it's small enough */
- if (skb->len - sizeof(struct rxrpc_wire_header) < conn->params.peer->maxdata) {
- down_read(&conn->params.local->defrag_sem);
- /* send the packet by UDP
- * - returns -EMSGSIZE if UDP would have to fragment the packet
- * to go out of the interface
- * - in which case, we'll have processed the ICMP error
- * message and update the peer record
- */
- ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
- iov[0].iov_len);
+ if (iov[1].iov_len >= call->peer->maxdata)
+ goto send_fragmentable;
+
+ down_read(&conn->params.local->defrag_sem);
+ /* send the packet by UDP
+ * - returns -EMSGSIZE if UDP would have to fragment the packet
+ * to go out of the interface
+ * - in which case, we'll have processed the ICMP error
+ * message and updated the peer record
+ */
+ ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
- up_read(&conn->params.local->defrag_sem);
- if (ret == -EMSGSIZE)
- goto send_fragmentable;
+ up_read(&conn->params.local->defrag_sem);
+ if (ret == -EMSGSIZE)
+ goto send_fragmentable;
- _leave(" = %d [%u]", ret, conn->params.peer->maxdata);
- return ret;
+done:
+ trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, false);
+ if (ret >= 0) {
+ ktime_t now = ktime_get_real();
+ skb->tstamp = now;
+ smp_wmb();
+ sp->hdr.serial = serial;
+ if (whdr.flags & RXRPC_REQUEST_ACK) {
+ call->peer->rtt_last_req = now;
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+ }
}
+ _leave(" = %d [%u]", ret, call->peer->maxdata);
+ return ret;
send_fragmentable:
/* attempt to send this message with fragmentation enabled */
@@ -73,8 +346,8 @@ send_fragmentable:
SOL_IP, IP_MTU_DISCOVER,
(char *)&opt, sizeof(opt));
if (ret == 0) {
- ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 1,
- iov[0].iov_len);
+ ret = kernel_sendmsg(conn->params.local->socket, &msg,
+ iov, 2, len);
opt = IP_PMTUDISC_DO;
kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -82,9 +355,82 @@ send_fragmentable:
(char *)&opt, sizeof(opt));
}
break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ opt = IPV6_PMTUDISC_DONT;
+ ret = kernel_setsockopt(conn->params.local->socket,
+ SOL_IPV6, IPV6_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
+ if (ret == 0) {
+ ret = kernel_sendmsg(conn->params.local->socket, &msg,
+ iov, 2, len);
+
+ opt = IPV6_PMTUDISC_DO;
+ kernel_setsockopt(conn->params.local->socket,
+ SOL_IPV6, IPV6_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
+ }
+ break;
+#endif
}
up_write(&conn->params.local->defrag_sem);
- _leave(" = %d [frag %u]", ret, conn->params.peer->maxdata);
- return ret;
+ goto done;
+}
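
The REQUEST_ACK logic above folds three triggers into one test: fast-retransmit mode, an under-populated RTT cache (probe on odd sequence numbers), and a once-a-second refresh probe. Extracted as a stand-alone predicate, with the 3-sample and 1000ms thresholds taken from the code above and the remaining names purely illustrative:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_FAST_RETRANSMIT 1	/* illustrative congestion-mode value */

static bool should_request_ack(int cong_mode, unsigned int rtt_samples,
			       uint32_t seq, int64_t last_probe_ms,
			       int64_t now_ms)
{
	if (cong_mode == DEMO_FAST_RETRANSMIT)
		return true;			/* a loss is suspected */
	if (rtt_samples < 3 && (seq & 1))
		return true;			/* RTT cache needs filling */
	return last_probe_ms + 1000 <= now_ms;	/* periodic refresh */
}
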
+
+/*
+ * reject packets through the local endpoint
+ */
+void rxrpc_reject_packets(struct rxrpc_local *local)
+{
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_skb_priv *sp;
+ struct rxrpc_wire_header whdr;
+ struct sk_buff *skb;
+ struct msghdr msg;
+ struct kvec iov[2];
+ size_t size;
+ __be32 code;
+
+ _enter("%d", local->debug_id);
+
+ iov[0].iov_base = &whdr;
+ iov[0].iov_len = sizeof(whdr);
+ iov[1].iov_base = &code;
+ iov[1].iov_len = sizeof(code);
+ size = sizeof(whdr) + sizeof(code);
+
+ msg.msg_name = &srx.transport;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ memset(&whdr, 0, sizeof(whdr));
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
+
+ while ((skb = skb_dequeue(&local->reject_queue))) {
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ sp = rxrpc_skb(skb);
+
+ if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
+ msg.msg_namelen = srx.transport_len;
+
+ code = htonl(skb->priority);
+
+ whdr.epoch = htonl(sp->hdr.epoch);
+ whdr.cid = htonl(sp->hdr.cid);
+ whdr.callNumber = htonl(sp->hdr.callNumber);
+ whdr.serviceId = htons(sp->hdr.serviceId);
+ whdr.flags = sp->hdr.flags;
+ whdr.flags ^= RXRPC_CLIENT_INITIATED;
+ whdr.flags &= RXRPC_CLIENT_INITIATED;
+
+ kernel_sendmsg(local->socket, &msg, iov, 2, size);
+ }
+
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ }
+
+ _leave("");
}
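
The flag manipulation in the loop above is a two-step trick: the XOR inverts the direction bit and the AND discards every other flag, so an ABORT built from a client-initiated packet goes back out marked server-initiated (and vice versa). In isolation, assuming the wire value 0x01 for the flag:

#include <assert.h>
#include <stdint.h>

#define RXRPC_CLIENT_INITIATED 0x01	/* direction bit (assumed wire value) */

int main(void)
{
	uint8_t flags = 0x23;	/* arbitrary inbound flags, direction bit set */

	flags ^= RXRPC_CLIENT_INITIATED;	/* invert the direction bit */
	flags &= RXRPC_CLIENT_INITIATED;	/* drop everything else */

	assert(flags == 0);	/* client-initiated in => server-initiated out */
	return 0;
}
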
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 27b9ecad007e..bf13b8470c9a 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -66,6 +66,32 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
}
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ srx.transport.sin6.sin6_port = serr->port;
+ srx.transport_len = sizeof(struct sockaddr_in6);
+ switch (serr->ee.ee_origin) {
+ case SO_EE_ORIGIN_ICMP6:
+ _net("Rx ICMP6");
+ memcpy(&srx.transport.sin6.sin6_addr,
+ skb_network_header(skb) + serr->addr_offset,
+ sizeof(struct in6_addr));
+ break;
+ case SO_EE_ORIGIN_ICMP:
+ _net("Rx ICMP on v6 sock");
+ memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12,
+ skb_network_header(skb) + serr->addr_offset,
+ sizeof(struct in_addr));
+ break;
+ default:
+ memcpy(&srx.transport.sin6.sin6_addr,
+ &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ break;
+ }
+ break;
+#endif
+
default:
BUG();
}
@@ -129,22 +155,21 @@ void rxrpc_error_report(struct sock *sk)
_leave("UDP socket errqueue empty");
return;
}
+ rxrpc_new_skb(skb, rxrpc_skb_rx_received);
serr = SKB_EXT_ERR(skb);
if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
_leave("UDP empty message");
- kfree_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
return;
}
- rxrpc_new_skb(skb);
-
rcu_read_lock();
peer = rxrpc_lookup_peer_icmp_rcu(local, skb);
if (peer && !rxrpc_get_peer_maybe(peer))
peer = NULL;
if (!peer) {
rcu_read_unlock();
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
_leave(" [no peer]");
return;
}
@@ -154,7 +179,7 @@ void rxrpc_error_report(struct sock *sk)
serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
rxrpc_adjust_mtu(peer, serr);
rcu_read_unlock();
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
rxrpc_put_peer(peer);
_leave(" [MTU update]");
return;
@@ -162,7 +187,7 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_store_error(peer, serr);
rcu_read_unlock();
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
/* The ref we obtained is passed off to the work item */
rxrpc_queue_work(&peer->error_distributor);
@@ -249,7 +274,6 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
container_of(work, struct rxrpc_peer, error_distributor);
struct rxrpc_call *call;
enum rxrpc_call_completion compl;
- bool queue;
int error;
_enter("");
@@ -272,15 +296,8 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
hlist_del_init(&call->error_link);
rxrpc_see_call(call);
- queue = false;
- write_lock(&call->state_lock);
- if (__rxrpc_set_call_completion(call, compl, 0, error)) {
- set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
- queue = true;
- }
- write_unlock(&call->state_lock);
- if (queue)
- rxrpc_queue_call(call);
+ if (rxrpc_set_call_completion(call, compl, 0, error))
+ rxrpc_notify_socket(call);
}
spin_unlock_bh(&peer->lock);
@@ -288,3 +305,44 @@ void rxrpc_peer_error_distributor(struct work_struct *work)
rxrpc_put_peer(peer);
_leave("");
}
+
+/*
+ * Add RTT information to cache. This is called in softirq mode and has
+ * exclusive access to the peer RTT data.
+ */
+void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ ktime_t send_time, ktime_t resp_time)
+{
+ struct rxrpc_peer *peer = call->peer;
+ s64 rtt;
+ u64 sum = peer->rtt_sum, avg;
+ u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;
+
+ rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
+ if (rtt < 0)
+ return;
+
+ /* Replace the oldest datum in the RTT buffer */
+ sum -= peer->rtt_cache[cursor];
+ sum += rtt;
+ peer->rtt_cache[cursor] = rtt;
+ peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
+ peer->rtt_sum = sum;
+ if (usage < RXRPC_RTT_CACHE_SIZE) {
+ usage++;
+ peer->rtt_usage = usage;
+ }
+
+ /* Now recalculate the average */
+ if (usage == RXRPC_RTT_CACHE_SIZE) {
+ avg = sum / RXRPC_RTT_CACHE_SIZE;
+ } else {
+ avg = sum;
+ do_div(avg, usage);
+ }
+
+ peer->rtt = avg;
+ trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
+ usage, avg);
+}
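
rxrpc_peer_add_rtt() keeps the average O(1) by maintaining a running sum over a ring of samples: subtract the evicted datum, add the new one, divide by the number of samples actually held. The cursor wrap relies on RXRPC_RTT_CACHE_SIZE being a power of two. A self-contained sketch of the same scheme:

#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 8	/* must be a power of two for the cursor mask */

struct rtt_avg {
	uint64_t cache[CACHE_SIZE];
	uint64_t sum;
	unsigned int cursor, usage;
};

static uint64_t rtt_add(struct rtt_avg *a, uint64_t rtt_ns)
{
	/* Replace the oldest sample, keeping the sum incremental. */
	a->sum -= a->cache[a->cursor];
	a->sum += rtt_ns;
	a->cache[a->cursor] = rtt_ns;
	a->cursor = (a->cursor + 1) & (CACHE_SIZE - 1);
	if (a->usage < CACHE_SIZE)
		a->usage++;
	return a->sum / a->usage;	/* average over samples held so far */
}

int main(void)
{
	struct rtt_avg a = { { 0 } };

	printf("%llu\n", (unsigned long long)rtt_add(&a, 1000));	/* 1000 */
	printf("%llu\n", (unsigned long long)rtt_add(&a, 3000));	/* 2000 */
	return 0;
}
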
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index aebc73ac16dc..941b724d523b 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -16,12 +16,14 @@
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
+#include <linux/in6.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
+#include <net/ip6_route.h>
#include "ar-internal.h"
static DEFINE_HASHTABLE(rxrpc_peer_hash, 10);
@@ -50,6 +52,13 @@ static unsigned long rxrpc_peer_hash_key(struct rxrpc_local *local,
size = sizeof(srx->transport.sin.sin_addr);
p = (u16 *)&srx->transport.sin.sin_addr;
break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ hash_key += (u16 __force)srx->transport.sin6.sin6_port;
+ size = sizeof(srx->transport.sin6.sin6_addr);
+ p = (u16 *)&srx->transport.sin6.sin6_addr;
+ break;
+#endif
default:
WARN(1, "AF_RXRPC: Unsupported transport address family\n");
return 0;
@@ -93,6 +102,14 @@ static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
memcmp(&peer->srx.transport.sin.sin_addr,
&srx->transport.sin.sin_addr,
sizeof(struct in_addr));
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ return ((u16 __force)peer->srx.transport.sin6.sin6_port -
+ (u16 __force)srx->transport.sin6.sin6_port) ?:
+ memcmp(&peer->srx.transport.sin6.sin6_addr,
+ &srx->transport.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+#endif
default:
BUG();
}
@@ -130,17 +147,7 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
if (peer) {
- switch (srx->transport.family) {
- case AF_INET:
- _net("PEER %d {%d,%u,%pI4+%hu}",
- peer->debug_id,
- peer->srx.transport_type,
- peer->srx.transport.family,
- &peer->srx.transport.sin.sin_addr,
- ntohs(peer->srx.transport.sin.sin_port));
- break;
- }
-
+ _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
}
return peer;
@@ -152,22 +159,53 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
*/
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
+ struct dst_entry *dst;
struct rtable *rt;
- struct flowi4 fl4;
+ struct flowi fl;
+ struct flowi4 *fl4 = &fl.u.ip4;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ struct flowi6 *fl6 = &fl.u.ip6;
+#endif
peer->if_mtu = 1500;
- rt = ip_route_output_ports(&init_net, &fl4, NULL,
- peer->srx.transport.sin.sin_addr.s_addr, 0,
- htons(7000), htons(7001),
- IPPROTO_UDP, 0, 0);
- if (IS_ERR(rt)) {
- _leave(" [route err %ld]", PTR_ERR(rt));
- return;
+ memset(&fl, 0, sizeof(fl));
+ switch (peer->srx.transport.family) {
+ case AF_INET:
+ rt = ip_route_output_ports(
+ &init_net, fl4, NULL,
+ peer->srx.transport.sin.sin_addr.s_addr, 0,
+ htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
+ if (IS_ERR(rt)) {
+ _leave(" [route err %ld]", PTR_ERR(rt));
+ return;
+ }
+ dst = &rt->dst;
+ break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ fl6->flowi6_iif = LOOPBACK_IFINDEX;
+ fl6->flowi6_scope = RT_SCOPE_UNIVERSE;
+ fl6->flowi6_proto = IPPROTO_UDP;
+ memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
+ sizeof(struct in6_addr));
+ fl6->fl6_dport = htons(7001);
+ fl6->fl6_sport = htons(7000);
+ dst = ip6_route_output(&init_net, NULL, fl6);
+ if (IS_ERR(dst)) {
+ _leave(" [route err %ld]", PTR_ERR(dst));
+ return;
+ }
+ break;
+#endif
+
+ default:
+ BUG();
}
- peer->if_mtu = dst_mtu(&rt->dst);
- dst_release(&rt->dst);
+ peer->if_mtu = dst_mtu(dst);
+ dst_release(dst);
_leave(" [if_mtu %u]", peer->if_mtu);
}
@@ -199,6 +237,41 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
}
/*
+ * Initialise peer record.
+ */
+static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+{
+ peer->hash_key = hash_key;
+ rxrpc_assess_MTU_size(peer);
+ peer->mtu = peer->if_mtu;
+ peer->rtt_last_req = ktime_get_real();
+
+ switch (peer->srx.transport.family) {
+ case AF_INET:
+ peer->hdrsize = sizeof(struct iphdr);
+ break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ peer->hdrsize = sizeof(struct ipv6hdr);
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ switch (peer->srx.transport_type) {
+ case SOCK_DGRAM:
+ peer->hdrsize += sizeof(struct udphdr);
+ break;
+ default:
+ BUG();
+ }
+
+ peer->hdrsize += sizeof(struct rxrpc_wire_header);
+ peer->maxdata = peer->mtu - peer->hdrsize;
+}
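
The hdrsize/maxdata computation is plain subtraction from the interface MTU. With the 28-byte rxrpc wire header, a 1500-byte MTU leaves the payload sizes below (a worked example, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, udp = 8, wire = 28; /* rxrpc_wire_header */

	printf("IPv4 maxdata: %u\n", mtu - 20 - udp - wire);	/* 1444 */
	printf("IPv6 maxdata: %u\n", mtu - 40 - udp - wire);	/* 1424 */
	return 0;
}
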
+
+/*
* Set up a new peer.
*/
static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
@@ -212,31 +285,40 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
peer = rxrpc_alloc_peer(local, gfp);
if (peer) {
- peer->hash_key = hash_key;
memcpy(&peer->srx, srx, sizeof(*srx));
+ rxrpc_init_peer(peer, hash_key);
+ }
- rxrpc_assess_MTU_size(peer);
- peer->mtu = peer->if_mtu;
-
- if (srx->transport.family == AF_INET) {
- peer->hdrsize = sizeof(struct iphdr);
- switch (srx->transport_type) {
- case SOCK_DGRAM:
- peer->hdrsize += sizeof(struct udphdr);
- break;
- default:
- BUG();
- break;
- }
- } else {
- BUG();
- }
+ _leave(" = %p", peer);
+ return peer;
+}
+
+/*
+ * Set up a new incoming peer. The address is prestored in the preallocated
+ * peer.
+ */
+struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
+ struct rxrpc_peer *prealloc)
+{
+ struct rxrpc_peer *peer;
+ unsigned long hash_key;
+
+ hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
+ prealloc->local = local;
+ rxrpc_init_peer(prealloc, hash_key);
- peer->hdrsize += sizeof(struct rxrpc_wire_header);
- peer->maxdata = peer->mtu - peer->hdrsize;
+ spin_lock(&rxrpc_peer_hash_lock);
+
+ /* Need to check that we aren't racing with someone else */
+ peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ peer = prealloc;
+ hash_add_rcu(rxrpc_peer_hash, &peer->hash_link, hash_key);
}
- _leave(" = %p", peer);
+ spin_unlock(&rxrpc_peer_hash_lock);
return peer;
}
@@ -249,11 +331,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
struct rxrpc_peer *peer, *candidate;
unsigned long hash_key = rxrpc_peer_hash_key(local, srx);
- _enter("{%d,%d,%pI4+%hu}",
- srx->transport_type,
- srx->transport_len,
- &srx->transport.sin.sin_addr,
- ntohs(srx->transport.sin.sin_port));
+ _enter("{%pISp}", &srx->transport);
/* search the peer list first */
rcu_read_lock();
@@ -272,7 +350,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
return NULL;
}
- spin_lock(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxrpc_peer_hash_lock);
/* Need to check that we aren't racing with someone else */
peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
@@ -282,7 +360,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
hash_add_rcu(rxrpc_peer_hash,
&candidate->hash_link, hash_key);
- spin_unlock(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxrpc_peer_hash_lock);
if (peer)
kfree(candidate);
@@ -290,11 +368,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
peer = candidate;
}
- _net("PEER %d {%d,%pI4+%hu}",
- peer->debug_id,
- peer->srx.transport_type,
- &peer->srx.transport.sin.sin_addr,
- ntohs(peer->srx.transport.sin.sin_port));
+ _net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
return peer;
@@ -307,9 +381,9 @@ void __rxrpc_put_peer(struct rxrpc_peer *peer)
{
ASSERT(hlist_empty(&peer->error_targets));
- spin_lock(&rxrpc_peer_hash_lock);
+ spin_lock_bh(&rxrpc_peer_hash_lock);
hash_del_rcu(&peer->hash_link);
- spin_unlock(&rxrpc_peer_hash_lock);
+ spin_unlock_bh(&rxrpc_peer_hash_lock);
kfree_rcu(peer, rcu);
}
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 82c64055449d..65cd980767fa 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -17,6 +17,7 @@
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
[RXRPC_CONN_UNUSED] = "Unused ",
[RXRPC_CONN_CLIENT] = "Client ",
+ [RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc",
[RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ",
[RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ",
[RXRPC_CONN_SERVICE] = "SvSecure",
@@ -29,6 +30,7 @@ static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
*/
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
{
+ rcu_read_lock();
read_lock(&rxrpc_call_lock);
return seq_list_start_head(&rxrpc_calls, *_pos);
}
@@ -41,6 +43,7 @@ static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
{
read_unlock(&rxrpc_call_lock);
+ rcu_read_unlock();
}
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
@@ -49,11 +52,12 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_sock *rx;
struct rxrpc_peer *peer;
struct rxrpc_call *call;
- char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+ char lbuff[50], rbuff[50];
if (v == &rxrpc_calls) {
seq_puts(seq,
- "Proto Local Remote "
+ "Proto Local "
+ " Remote "
" SvID ConnID CallID End Use State Abort "
" UserID\n");
return 0;
@@ -61,13 +65,11 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call = list_entry(v, struct rxrpc_call, link);
- rx = READ_ONCE(call->socket);
+ rx = rcu_dereference(call->socket);
if (rx) {
local = READ_ONCE(rx->local);
if (local)
- sprintf(lbuff, "%pI4:%u",
- &local->srx.transport.sin.sin_addr,
- ntohs(local->srx.transport.sin.sin_port));
+ sprintf(lbuff, "%pISpc", &local->srx.transport);
else
strcpy(lbuff, "no_local");
} else {
@@ -76,14 +78,12 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
peer = call->peer;
if (peer)
- sprintf(rbuff, "%pI4:%u",
- &peer->srx.transport.sin.sin_addr,
- ntohs(peer->srx.transport.sin.sin_port));
+ sprintf(rbuff, "%pISpc", &peer->srx.transport);
else
strcpy(rbuff, "no_connection");
seq_printf(seq,
- "UDP %-22.22s %-22.22s %4x %08x %08x %s %3u"
+ "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
" %-8.8s %08x %lx\n",
lbuff,
rbuff,
@@ -142,11 +142,12 @@ static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
struct rxrpc_connection *conn;
- char lbuff[4 + 4 + 4 + 4 + 5 + 1], rbuff[4 + 4 + 4 + 4 + 5 + 1];
+ char lbuff[50], rbuff[50];
if (v == &rxrpc_connection_proc_list) {
seq_puts(seq,
- "Proto Local Remote "
+ "Proto Local "
+ " Remote "
" SvID ConnID End Use State Key "
" Serial ISerial\n"
);
@@ -154,17 +155,18 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
}
conn = list_entry(v, struct rxrpc_connection, proc_link);
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
+ strcpy(lbuff, "no_local");
+ strcpy(rbuff, "no_connection");
+ goto print;
+ }
- sprintf(lbuff, "%pI4:%u",
- &conn->params.local->srx.transport.sin.sin_addr,
- ntohs(conn->params.local->srx.transport.sin.sin_port));
-
- sprintf(rbuff, "%pI4:%u",
- &conn->params.peer->srx.transport.sin.sin_addr,
- ntohs(conn->params.peer->srx.transport.sin.sin_port));
+ sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);
+ sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
+print:
seq_printf(seq,
- "UDP %-22.22s %-22.22s %4x %08x %s %3u"
+ "UDP %-47.47s %-47.47s %4x %08x %s %3u"
" %s %08x %08x %08x\n",
lbuff,
rbuff,
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 0ab7b334bab1..038ae62ddb4d 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -19,427 +19,545 @@
#include "ar-internal.h"
/*
- * removal a call's user ID from the socket tree to make the user ID available
- * again and so that it won't be seen again in association with that call
+ * Post a call for attention by the socket or kernel service. Further
+ * notifications are suppressed by putting recvmsg_link on a dummy queue.
*/
-void rxrpc_remove_user_ID(struct rxrpc_sock *rx, struct rxrpc_call *call)
+void rxrpc_notify_socket(struct rxrpc_call *call)
{
- _debug("RELEASE CALL %d", call->debug_id);
+ struct rxrpc_sock *rx;
+ struct sock *sk;
- if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
- write_lock_bh(&rx->call_lock);
- rb_erase(&call->sock_node, &call->socket->calls);
- clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
- write_unlock_bh(&rx->call_lock);
+ _enter("%d", call->debug_id);
+
+ if (!list_empty(&call->recvmsg_link))
+ return;
+
+ rcu_read_lock();
+
+ rx = rcu_dereference(call->socket);
+ sk = &rx->sk;
+ if (rx && sk->sk_state < RXRPC_CLOSE) {
+ if (call->notify_rx) {
+ call->notify_rx(sk, call, call->user_call_ID);
+ } else {
+ write_lock_bh(&rx->recvmsg_lock);
+ if (list_empty(&call->recvmsg_link)) {
+ rxrpc_get_call(call, rxrpc_call_got);
+ list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
+ }
+ write_unlock_bh(&rx->recvmsg_lock);
+
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ _debug("call %ps", sk->sk_data_ready);
+ sk->sk_data_ready(sk);
+ }
+ }
}
- read_lock_bh(&call->state_lock);
- if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
- rxrpc_queue_call(call);
- read_unlock_bh(&call->state_lock);
+ rcu_read_unlock();
+ _leave("");
}
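
rxrpc_notify_socket() stays idempotent by treating "already on recvmsg_q" as "already notified": a cheap unlocked list_empty() test filters the common case, then the test is repeated under recvmsg_lock before the enqueue. The same double-checked pattern in miniature, using self-linked nodes like the kernel's list_head:

#include <pthread.h>
#include <stdbool.h>

struct node { struct node *next, *prev; };

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_init(struct node *n) { n->next = n->prev = n; }
static bool on_queue(const struct node *n) { return n->next != n; }

/* q is the queue head; both it and n must start self-linked. */
static void notify(struct node *n, struct node *q)
{
	if (on_queue(n))
		return;			/* cheap unlocked pre-check */
	pthread_mutex_lock(&q_lock);
	if (!on_queue(n)) {		/* re-check under the lock */
		n->prev = q->prev;	/* tail-insert n before q */
		n->next = q;
		q->prev->next = n;
		q->prev = n;
	}
	pthread_mutex_unlock(&q_lock);
}
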
/*
- * receive a message from an RxRPC socket
- * - we need to be careful about two or more threads calling recvmsg
- * simultaneously
+ * Pass a call terminating message to userspace.
*/
-int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
- int flags)
+static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
{
- struct rxrpc_skb_priv *sp;
- struct rxrpc_call *call = NULL, *continue_call = NULL;
- struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
- struct sk_buff *skb;
- long timeo;
- int copy, ret, ullen, offset, copied = 0;
- u32 abort_code;
+ u32 tmp = 0;
+ int ret;
- DEFINE_WAIT(wait);
+ switch (call->completion) {
+ case RXRPC_CALL_SUCCEEDED:
+ ret = 0;
+ if (rxrpc_is_service_call(call))
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &tmp);
+ break;
+ case RXRPC_CALL_REMOTELY_ABORTED:
+ tmp = call->abort_code;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
+ break;
+ case RXRPC_CALL_LOCALLY_ABORTED:
+ tmp = call->abort_code;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &tmp);
+ break;
+ case RXRPC_CALL_NETWORK_ERROR:
+ tmp = call->error;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &tmp);
+ break;
+ case RXRPC_CALL_LOCAL_ERROR:
+ tmp = call->error;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
+ break;
+ default:
+ pr_err("Invalid terminal call state %u\n", call->state);
+ BUG();
+ break;
+ }
- _enter(",,,%zu,%d", len, flags);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
+ call->rx_pkt_offset, call->rx_pkt_len, ret);
+ return ret;
+}
- if (flags & (MSG_OOB | MSG_TRUNC))
- return -EOPNOTSUPP;
+/*
+ * Pass back notification of a new call. The call is added to the
+ * to-be-accepted list. This means that the next call to be accepted might not
+ * be the last call seen awaiting acceptance, but unless we leave this on the
+ * front of the queue and block all other messages until someone gives us a
+ * user_ID for it, there's not a lot we can do.
+ */
+static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct msghdr *msg, int flags)
+{
+ int tmp = 0, ret;
- ullen = msg->msg_flags & MSG_CMSG_COMPAT ? 4 : sizeof(unsigned long);
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp);
- timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
- msg->msg_flags |= MSG_MORE;
+ if (ret == 0 && !(flags & MSG_PEEK)) {
+ _debug("to be accepted");
+ write_lock_bh(&rx->recvmsg_lock);
+ list_del_init(&call->recvmsg_link);
+ write_unlock_bh(&rx->recvmsg_lock);
- lock_sock(&rx->sk);
+ rxrpc_get_call(call, rxrpc_call_got);
+ write_lock(&rx->call_lock);
+ list_add_tail(&call->accept_link, &rx->to_be_accepted);
+ write_unlock(&rx->call_lock);
+ }
- for (;;) {
- /* return immediately if a client socket has no outstanding
- * calls */
- if (RB_EMPTY_ROOT(&rx->calls)) {
- if (copied)
- goto out;
- if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
- release_sock(&rx->sk);
- if (continue_call)
- rxrpc_put_call(continue_call);
- return -ENODATA;
- }
- }
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
+ return ret;
+}
- /* get the next message on the Rx queue */
- skb = skb_peek(&rx->sk.sk_receive_queue);
- if (!skb) {
- /* nothing remains on the queue */
- if (copied &&
- (flags & MSG_PEEK || timeo == 0))
- goto out;
+/*
+ * End the packet reception phase.
+ */
+static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
+{
+ _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
- /* wait for a message to turn up */
- release_sock(&rx->sk);
- prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
- TASK_INTERRUPTIBLE);
- ret = sock_error(&rx->sk);
- if (ret)
- goto wait_error;
-
- if (skb_queue_empty(&rx->sk.sk_receive_queue)) {
- if (signal_pending(current))
- goto wait_interrupted;
- timeo = schedule_timeout(timeo);
- }
- finish_wait(sk_sleep(&rx->sk), &wait);
- lock_sock(&rx->sk);
- continue;
- }
+ trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
+ ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
- peek_next_packet:
- rxrpc_see_skb(skb);
- sp = rxrpc_skb(skb);
- call = sp->call;
- ASSERT(call != NULL);
- rxrpc_see_call(call);
-
- _debug("next pkt %s", rxrpc_pkts[sp->hdr.type]);
-
- /* make sure we wait for the state to be updated in this call */
- spin_lock_bh(&call->lock);
- spin_unlock_bh(&call->lock);
-
- if (test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
- _debug("packet from released call");
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
- continue;
- }
+ if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
+ rxrpc_propose_ack_terminal_ack);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ }
- /* determine whether to continue last data receive */
- if (continue_call) {
- _debug("maybe cont");
- if (call != continue_call ||
- skb->mark != RXRPC_SKB_MARK_DATA) {
- release_sock(&rx->sk);
- rxrpc_put_call(continue_call);
- _leave(" = %d [noncont]", copied);
- return copied;
- }
- }
+ write_lock_bh(&call->state_lock);
- rxrpc_get_call(call);
+ switch (call->state) {
+ case RXRPC_CALL_CLIENT_RECV_REPLY:
+ __rxrpc_call_completed(call);
+ break;
- /* copy the peer address and timestamp */
- if (!continue_call) {
- if (msg->msg_name) {
- size_t len =
- sizeof(call->conn->params.peer->srx);
- memcpy(msg->msg_name,
- &call->conn->params.peer->srx, len);
- msg->msg_namelen = len;
- }
- sock_recv_timestamp(msg, &rx->sk, skb);
- }
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
+ call->tx_phase = true;
+ call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
+ break;
+ default:
+ break;
+ }
- /* receive the message */
- if (skb->mark != RXRPC_SKB_MARK_DATA)
- goto receive_non_data_message;
+ write_unlock_bh(&call->state_lock);
+}
- _debug("recvmsg DATA #%u { %d, %d }",
- sp->hdr.seq, skb->len, sp->offset);
+/*
+ * Discard a packet we've used up and advance the Rx window by one.
+ */
+static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ rxrpc_serial_t serial;
+ rxrpc_seq_t hard_ack, top;
+ u8 flags;
+ int ix;
- if (!continue_call) {
- /* only set the control data once per recvmsg() */
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
- ullen, &call->user_call_ID);
- if (ret < 0)
- goto copy_error;
- ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
- }
+ _enter("%d", call->debug_id);
- ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
- ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
- call->rx_data_recv = sp->hdr.seq;
+ hard_ack = call->rx_hard_ack;
+ top = smp_load_acquire(&call->rx_top);
+ ASSERT(before(hard_ack, top));
- ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
+ hard_ack++;
+ ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
+ skb = call->rxtx_buffer[ix];
+ rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
+ sp = rxrpc_skb(skb);
+ flags = sp->hdr.flags;
+ serial = sp->hdr.serial;
+ if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
+ serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;
+
+ call->rxtx_buffer[ix] = NULL;
+ call->rxtx_annotations[ix] = 0;
+ /* Barrier against rxrpc_input_data(). */
+ smp_store_release(&call->rx_hard_ack, hard_ack);
+
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+
+ _debug("%u,%u,%02x", hard_ack, top, flags);
+ trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
+ if (flags & RXRPC_LAST_PACKET) {
+ rxrpc_end_rx_phase(call, serial);
+ } else {
+ /* Check to see if there's an ACK that needs sending. */
+ if (after_eq(hard_ack, call->ackr_consumed + 2) ||
+ after_eq(top, call->ackr_seen + 2) ||
+ (hard_ack == top && after(hard_ack, call->ackr_consumed)))
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
+ true, false,
+ rxrpc_propose_ack_rotate_rx);
+ if (call->ackr_reason)
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+ }
+}
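
rxrpc_rotate_rx_window() is the consumer half of a single-producer/single-consumer ring: rxrpc_input_data() publishes rx_top with a store-release, the consumer advances rx_hard_ack the same way, and each side reads the other's index with a load-acquire. The index protocol reduced to C11 atomics (a sketch; slot contents and sequence windowing are simplified):

#include <stdatomic.h>

#define RING_SIZE 64	/* power of two */
#define RING_MASK (RING_SIZE - 1)

struct ring {
	void *slot[RING_SIZE];
	_Atomic unsigned int top;	/* last filled seq, producer-owned */
	_Atomic unsigned int hard_ack;	/* last consumed seq, consumer-owned */
};

/* Consumer: discard one used-up slot and advance the window edge. */
static void *ring_rotate(struct ring *r)
{
	unsigned int hard_ack =
		atomic_load_explicit(&r->hard_ack, memory_order_relaxed);
	unsigned int top =
		atomic_load_explicit(&r->top, memory_order_acquire);
	void *entry;

	if (hard_ack == top)
		return 0;	/* window empty */
	hard_ack++;
	entry = r->slot[hard_ack & RING_MASK];
	r->slot[hard_ack & RING_MASK] = 0;
	/* Publish the new edge so the producer may reuse the slot. */
	atomic_store_explicit(&r->hard_ack, hard_ack, memory_order_release);
	return entry;
}
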
- offset = sp->offset;
- copy = skb->len - offset;
- if (copy > len - copied)
- copy = len - copied;
+/*
+ * Decrypt and verify a (sub)packet. The packet's length may be changed due to
+ * padding, but if this is the case, the packet length will be resident in the
+ * socket buffer. Note that we can't modify the master skb info as the skb may
+ * be the home to multiple subpackets.
+ */
+static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ u8 annotation,
+ unsigned int offset, unsigned int len)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ rxrpc_seq_t seq = sp->hdr.seq;
+ u16 cksum = sp->hdr.cksum;
+
+ _enter("");
+
+ /* For all but the head jumbo subpacket, the security checksum is in a
+ * jumbo header immediately prior to the data.
+ */
+ if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) {
+ __be16 tmp;
+ if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
+ BUG();
+ cksum = ntohs(tmp);
+ seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1;
+ }
- ret = skb_copy_datagram_msg(skb, offset, msg, copy);
+ return call->conn->security->verify_packet(call, skb, offset, len,
+ seq, cksum);
+}
+/*
+ * Locate the data within a packet. This is complicated by:
+ *
+ * (1) An skb may contain a jumbo packet - so we have to find the appropriate
+ * subpacket.
+ *
+ * (2) The (sub)packets may be encrypted and, if so, the encrypted portion
+ * contains an extra header which includes the true length of the data,
+ * excluding any encrypted padding.
+ */
+static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
+ u8 *_annotation,
+ unsigned int *_offset, unsigned int *_len)
+{
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned int offset = *_offset;
+ unsigned int len = *_len;
+ int ret;
+ u8 annotation = *_annotation;
+
+ /* Locate the subpacket */
+ offset = sp->offset;
+ len = skb->len - sp->offset;
+ if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
+ offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
+ RXRPC_JUMBO_SUBPKTLEN);
+ len = (annotation & RXRPC_RX_ANNO_JLAST) ?
+ skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
+ }
+
+ if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
+ ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
if (ret < 0)
- goto copy_error;
+ return ret;
+ *_annotation |= RXRPC_RX_ANNO_VERIFIED;
+ }
+
+ *_offset = offset;
+ *_len = len;
+ call->conn->security->locate_data(call, skb, _offset, _len);
+ return 0;
+}
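
For jumbo packets the subpacket location is pure arithmetic on the 1-based index held in the annotation byte: every subpacket but the last occupies a fixed stride. As a sketch, assuming the usual 1416-byte stride (1412 data bytes plus a 4-byte jumbo header):

#include <stdbool.h>

#define JUMBO_SUBPKTLEN 1416	/* assumed RXRPC_JUMBO_SUBPKTLEN value */

static void locate_subpacket(unsigned int skb_len, unsigned int data_start,
			     unsigned int index, bool is_last,
			     unsigned int *offset, unsigned int *len)
{
	/* index is 1-based, as in the RXRPC_RX_ANNO_JUMBO annotation */
	*offset = data_start + (index - 1) * JUMBO_SUBPKTLEN;
	*len = is_last ? skb_len - *offset : JUMBO_SUBPKTLEN;
}
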
- /* handle piecemeal consumption of data packets */
- _debug("copied %d+%d", copy, copied);
+/*
+ * Deliver messages to a call. This keeps processing packets until the buffer
+ * is filled and we find either more DATA (returns 0) or the end of the DATA
+ * (returns 1). If more packets are required, it returns -EAGAIN.
+ */
+static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
+ struct msghdr *msg, struct iov_iter *iter,
+ size_t len, int flags, size_t *_offset)
+{
+ struct rxrpc_skb_priv *sp;
+ struct sk_buff *skb;
+ rxrpc_seq_t hard_ack, top, seq;
+ size_t remain;
+ bool last;
+ unsigned int rx_pkt_offset, rx_pkt_len;
+ int ix, copy, ret = -EAGAIN, ret2;
- offset += copy;
- copied += copy;
+ rx_pkt_offset = call->rx_pkt_offset;
+ rx_pkt_len = call->rx_pkt_len;
- if (!(flags & MSG_PEEK))
- sp->offset = offset;
+ if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
+ seq = call->rx_hard_ack;
+ ret = 1;
+ goto done;
+ }
- if (sp->offset < skb->len) {
- _debug("buffer full");
- ASSERTCMP(copied, ==, len);
+ /* Barriers against rxrpc_input_data(). */
+ hard_ack = call->rx_hard_ack;
+ top = smp_load_acquire(&call->rx_top);
+ for (seq = hard_ack + 1; before_eq(seq, top); seq++) {
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ skb = call->rxtx_buffer[ix];
+ if (!skb) {
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
break;
}
+ smp_rmb();
+ rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
+ sp = rxrpc_skb(skb);
- /* we transferred the whole data packet */
if (!(flags & MSG_PEEK))
- rxrpc_kernel_data_consumed(call, skb);
-
- if (sp->hdr.flags & RXRPC_LAST_PACKET) {
- _debug("last");
- if (rxrpc_conn_is_client(call->conn)) {
- /* last byte of reply received */
- ret = copied;
- goto terminal_message;
+ trace_rxrpc_receive(call, rxrpc_receive_front,
+ sp->hdr.serial, seq);
+
+ if (msg)
+ sock_recv_timestamp(msg, sock->sk, skb);
+
+ if (rx_pkt_offset == 0) {
+ ret2 = rxrpc_locate_data(call, skb,
+ &call->rxtx_annotations[ix],
+ &rx_pkt_offset, &rx_pkt_len);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
+ rx_pkt_offset, rx_pkt_len, ret2);
+ if (ret2 < 0) {
+ ret = ret2;
+ goto out;
}
+ } else {
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
+ }
- /* last bit of request received */
- if (!(flags & MSG_PEEK)) {
- _debug("eat packet");
- if (skb_dequeue(&rx->sk.sk_receive_queue) !=
- skb)
- BUG();
- rxrpc_free_skb(skb);
+ /* We have to handle short, empty and used-up DATA packets. */
+ remain = len - *_offset;
+ copy = rx_pkt_len;
+ if (copy > remain)
+ copy = remain;
+ if (copy > 0) {
+ ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
+ copy);
+ if (ret2 < 0) {
+ ret = ret2;
+ goto out;
}
- msg->msg_flags &= ~MSG_MORE;
- break;
- }
- /* move on to the next data message */
- _debug("next");
- if (!continue_call)
- continue_call = sp->call;
- else
- rxrpc_put_call(call);
- call = NULL;
-
- if (flags & MSG_PEEK) {
- _debug("peek next");
- skb = skb->next;
- if (skb == (struct sk_buff *) &rx->sk.sk_receive_queue)
- break;
- goto peek_next_packet;
+ /* handle piecemeal consumption of data packets */
+ rx_pkt_offset += copy;
+ rx_pkt_len -= copy;
+ *_offset += copy;
}
- _debug("eat packet");
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
- }
-
- /* end of non-terminal data packet reception for the moment */
- _debug("end rcv data");
-out:
- release_sock(&rx->sk);
- if (call)
- rxrpc_put_call(call);
- if (continue_call)
- rxrpc_put_call(continue_call);
- _leave(" = %d [data]", copied);
- return copied;
-
- /* handle non-DATA messages such as aborts, incoming connections and
- * final ACKs */
-receive_non_data_message:
- _debug("non-data");
-
- if (skb->mark == RXRPC_SKB_MARK_NEW_CALL) {
- _debug("RECV NEW CALL");
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &abort_code);
- if (ret < 0)
- goto copy_error;
- if (!(flags & MSG_PEEK)) {
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
+ if (rx_pkt_len > 0) {
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
+ rx_pkt_offset, rx_pkt_len, 0);
+ ASSERTCMP(*_offset, ==, len);
+ ret = 0;
+ break;
}
- goto out;
- }
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
- ullen, &call->user_call_ID);
- if (ret < 0)
- goto copy_error;
- ASSERT(test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
+ /* The whole packet has been transferred. */
+ last = sp->hdr.flags & RXRPC_LAST_PACKET;
+ if (!(flags & MSG_PEEK))
+ rxrpc_rotate_rx_window(call);
+ rx_pkt_offset = 0;
+ rx_pkt_len = 0;
- switch (skb->mark) {
- case RXRPC_SKB_MARK_DATA:
- BUG();
- case RXRPC_SKB_MARK_FINAL_ACK:
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ACK, 0, &abort_code);
- break;
- case RXRPC_SKB_MARK_BUSY:
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_BUSY, 0, &abort_code);
- break;
- case RXRPC_SKB_MARK_REMOTE_ABORT:
- abort_code = call->abort_code;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
- break;
- case RXRPC_SKB_MARK_LOCAL_ABORT:
- abort_code = call->abort_code;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_ABORT, 4, &abort_code);
- if (call->error) {
- abort_code = call->error;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
- &abort_code);
+ if (last) {
+ ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
+ ret = 1;
+ goto out;
}
- break;
- case RXRPC_SKB_MARK_NET_ERROR:
- _debug("RECV NET ERROR %d", sp->error);
- abort_code = sp->error;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NET_ERROR, 4, &abort_code);
- break;
- case RXRPC_SKB_MARK_LOCAL_ERROR:
- _debug("RECV LOCAL ERROR %d", sp->error);
- abort_code = sp->error;
- ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4,
- &abort_code);
- break;
- default:
- pr_err("Unknown packet mark %u\n", skb->mark);
- BUG();
- break;
}
- if (ret < 0)
- goto copy_error;
-
-terminal_message:
- _debug("terminal");
- msg->msg_flags &= ~MSG_MORE;
- msg->msg_flags |= MSG_EOR;
-
+out:
if (!(flags & MSG_PEEK)) {
- _net("free terminal skb %p", skb);
- if (skb_dequeue(&rx->sk.sk_receive_queue) != skb)
- BUG();
- rxrpc_free_skb(skb);
- rxrpc_remove_user_ID(rx, call);
+ call->rx_pkt_offset = rx_pkt_offset;
+ call->rx_pkt_len = rx_pkt_len;
}
-
- release_sock(&rx->sk);
- rxrpc_put_call(call);
- if (continue_call)
- rxrpc_put_call(continue_call);
- _leave(" = %d", ret);
+done:
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
+ rx_pkt_offset, rx_pkt_len, ret);
return ret;
-
-copy_error:
- _debug("copy error");
- release_sock(&rx->sk);
- rxrpc_put_call(call);
- if (continue_call)
- rxrpc_put_call(continue_call);
- _leave(" = %d", ret);
- return ret;
-
-wait_interrupted:
- ret = sock_intr_errno(timeo);
-wait_error:
- finish_wait(sk_sleep(&rx->sk), &wait);
- if (continue_call)
- rxrpc_put_call(continue_call);
- if (copied)
- copied = ret;
- _leave(" = %d [waitfail %d]", copied, ret);
- return copied;
-
}
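
The 0 / 1 / -EAGAIN convention documented on rxrpc_recvmsg_data() is designed for a simple caller loop; kernel users see it through rxrpc_kernel_recv_data() below. A sketch of such a loop, where every demo_* name is an assumed stand-in rather than a real API:

#include <errno.h>
#include <stddef.h>

struct demo_call;					/* assumed */
int demo_recv_data(struct demo_call *, char *, size_t, size_t *); /* assumed */
void demo_wait_for_packets(struct demo_call *);		/* assumed */

static int drain_call(struct demo_call *call, char *buf, size_t size)
{
	size_t offset = 0;
	int ret;

	for (;;) {
		ret = demo_recv_data(call, buf, size, &offset);
		if (ret == 1)
			return (int)offset;	/* end of DATA reached */
		if (ret == 0)
			return (int)offset;	/* buffer full; call again */
		if (ret != -EAGAIN)
			return ret;		/* hard error */
		demo_wait_for_packets(call);	/* more packets required */
	}
}
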
/*
- * Deliver messages to a call. This keeps processing packets until the buffer
- * is filled and we find either more DATA (returns 0) or the end of the DATA
- * (returns 1). If more packets are required, it returns -EAGAIN.
- *
- * TODO: Note that this is hacked in at the moment and will be replaced.
+ * Receive a message from an RxRPC socket
+ * - we need to be careful about two or more threads calling recvmsg
+ * simultaneously
*/
-static int temp_deliver_data(struct socket *sock, struct rxrpc_call *call,
- struct iov_iter *iter, size_t size,
- size_t *_offset)
+int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ int flags)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
- size_t remain;
- int ret, copy;
+ struct rxrpc_call *call;
+ struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
+ struct list_head *l;
+ size_t copied = 0;
+ long timeo;
+ int ret;
- _enter("%d", call->debug_id);
+ DEFINE_WAIT(wait);
-next:
- local_bh_disable();
- skb = skb_dequeue(&call->knlrecv_queue);
- local_bh_enable();
- if (!skb) {
- if (test_bit(RXRPC_CALL_RX_NO_MORE, &call->flags))
- return 1;
- _leave(" = -EAGAIN [empty]");
- return -EAGAIN;
+ trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0, 0, 0, 0);
+
+ if (flags & (MSG_OOB | MSG_TRUNC))
+ return -EOPNOTSUPP;
+
+ timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
+
+try_again:
+ lock_sock(&rx->sk);
+
+ /* Return immediately if a client socket has no outstanding calls */
+ if (RB_EMPTY_ROOT(&rx->calls) &&
+ list_empty(&rx->recvmsg_q) &&
+ rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
+ release_sock(&rx->sk);
+ return -ENODATA;
}
- sp = rxrpc_skb(skb);
- _debug("dequeued %p %u/%zu", skb, sp->offset, size);
-
- switch (skb->mark) {
- case RXRPC_SKB_MARK_DATA:
- remain = size - *_offset;
- if (remain > 0) {
- copy = skb->len - sp->offset;
- if (copy > remain)
- copy = remain;
- ret = skb_copy_datagram_iter(skb, sp->offset, iter,
- copy);
- if (ret < 0)
- goto requeue_and_leave;
+ if (list_empty(&rx->recvmsg_q)) {
+ ret = -EWOULDBLOCK;
+ if (timeo == 0) {
+ call = NULL;
+ goto error_no_call;
+ }
- /* handle piecemeal consumption of data packets */
- sp->offset += copy;
- *_offset += copy;
+ release_sock(&rx->sk);
+
+ /* Wait for something to happen */
+ prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
+ TASK_INTERRUPTIBLE);
+ ret = sock_error(&rx->sk);
+ if (ret)
+ goto wait_error;
+
+ if (list_empty(&rx->recvmsg_q)) {
+ if (signal_pending(current))
+ goto wait_interrupted;
+ trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait,
+ 0, 0, 0, 0);
+ timeo = schedule_timeout(timeo);
}
+ finish_wait(sk_sleep(&rx->sk), &wait);
+ goto try_again;
+ }
- if (sp->offset < skb->len)
- goto partially_used_skb;
+ /* Find the next call and dequeue it if we're not just peeking. If we
+ * do dequeue it, that comes with a ref that we will need to release.
+ */
+ write_lock_bh(&rx->recvmsg_lock);
+ l = rx->recvmsg_q.next;
+ call = list_entry(l, struct rxrpc_call, recvmsg_link);
+ if (!(flags & MSG_PEEK))
+ list_del_init(&call->recvmsg_link);
+ else
+ rxrpc_get_call(call, rxrpc_call_got);
+ write_unlock_bh(&rx->recvmsg_lock);
+
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
+
+ if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
+ BUG();
- /* We consumed the whole packet */
- ASSERTCMP(sp->offset, ==, skb->len);
- if (sp->hdr.flags & RXRPC_LAST_PACKET)
- set_bit(RXRPC_CALL_RX_NO_MORE, &call->flags);
- rxrpc_kernel_data_consumed(call, skb);
- rxrpc_free_skb(skb);
- goto next;
+ if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
+ if (flags & MSG_CMSG_COMPAT) {
+ unsigned int id32 = call->user_call_ID;
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
+ sizeof(unsigned int), &id32);
+ } else {
+ ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
+ sizeof(unsigned long),
+ &call->user_call_ID);
+ }
+ if (ret < 0)
+ goto error;
+ }
+
+ if (msg->msg_name) {
+ size_t len = sizeof(call->conn->params.peer->srx);
+ memcpy(msg->msg_name, &call->conn->params.peer->srx, len);
+ msg->msg_namelen = len;
+ }
+
+ switch (call->state) {
+ case RXRPC_CALL_SERVER_ACCEPTING:
+ ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
+ break;
+ case RXRPC_CALL_CLIENT_RECV_REPLY:
+ case RXRPC_CALL_SERVER_RECV_REQUEST:
+ case RXRPC_CALL_SERVER_ACK_REQUEST:
+ ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
+ flags, &copied);
+ if (ret == -EAGAIN)
+ ret = 0;
+
+ if (after(call->rx_top, call->rx_hard_ack) &&
+ call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
+ rxrpc_notify_socket(call);
+ break;
default:
- rxrpc_free_skb(skb);
- goto next;
+ ret = 0;
+ break;
+ }
+
+ if (ret < 0)
+ goto error;
+
+ if (call->state == RXRPC_CALL_COMPLETE) {
+ ret = rxrpc_recvmsg_term(call, msg);
+ if (ret < 0)
+ goto error;
+ if (!(flags & MSG_PEEK))
+ rxrpc_release_call(rx, call);
+ msg->msg_flags |= MSG_EOR;
+ ret = 1;
}
-partially_used_skb:
- ASSERTCMP(*_offset, ==, size);
- ret = 0;
-requeue_and_leave:
- skb_queue_head(&call->knlrecv_queue, skb);
+ if (ret == 0)
+ msg->msg_flags |= MSG_MORE;
+ else
+ msg->msg_flags &= ~MSG_MORE;
+ ret = copied;
+
+error:
+ rxrpc_put_call(call, rxrpc_call_put);
+error_no_call:
+ release_sock(&rx->sk);
+ trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
return ret;
+
+wait_interrupted:
+ ret = sock_intr_errno(timeo);
+wait_error:
+ finish_wait(sk_sleep(&rx->sk), &wait);
+ call = NULL;
+ goto error_no_call;
}
/**
@@ -474,8 +592,9 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
struct kvec iov;
int ret;
- _enter("{%d,%s},%zu,%d",
- call->debug_id, rxrpc_call_states[call->state], size, want_more);
+ _enter("{%d,%s},%zu/%zu,%d",
+ call->debug_id, rxrpc_call_states[call->state],
+ *_offset, size, want_more);
ASSERTCMP(*_offset, <=, size);
ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
@@ -490,7 +609,8 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
case RXRPC_CALL_CLIENT_RECV_REPLY:
case RXRPC_CALL_SERVER_RECV_REQUEST:
case RXRPC_CALL_SERVER_ACK_REQUEST:
- ret = temp_deliver_data(sock, call, &iter, size, _offset);
+ ret = rxrpc_recvmsg_data(sock, call, NULL, &iter, size, 0,
+ _offset);
if (ret < 0)
goto out;
@@ -515,7 +635,6 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
goto call_complete;
default:
- *_offset = 0;
ret = -EINPROGRESS;
goto out;
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 89f475febfd7..88d080a1a3de 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -80,12 +80,10 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
case RXRPC_SECURITY_AUTH:
conn->size_align = 8;
conn->security_size = sizeof(struct rxkad_level1_hdr);
- conn->header_size += sizeof(struct rxkad_level1_hdr);
break;
case RXRPC_SECURITY_ENCRYPT:
conn->size_align = 8;
conn->security_size = sizeof(struct rxkad_level2_hdr);
- conn->header_size += sizeof(struct rxkad_level2_hdr);
break;
default:
ret = -EKEYREJECTED;
@@ -161,7 +159,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
_enter("");
- check = sp->hdr.seq ^ sp->hdr.callNumber;
+ check = sp->hdr.seq ^ call->call_id;
data_size |= (u32)check << 16;
hdr.data_size = htonl(data_size);
@@ -205,7 +203,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
_enter("");
- check = sp->hdr.seq ^ sp->hdr.callNumber;
+ check = sp->hdr.seq ^ call->call_id;
rxkhdr.data_size = htonl(data_size | (u32)check << 16);
rxkhdr.checksum = 0;
@@ -277,7 +275,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
/* calculate the security checksum */
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= sp->hdr.seq & 0x3fffffff;
- call->crypto_buf[0] = htonl(sp->hdr.callNumber);
+ call->crypto_buf[0] = htonl(call->call_id);
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
@@ -316,12 +314,11 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
/*
* decrypt partial encryption on a packet (level 1 security)
*/
-static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq)
{
struct rxkad_level1_hdr sechdr;
- struct rxrpc_skb_priv *sp;
SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg[16];
@@ -332,15 +329,20 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
_enter("");
- sp = rxrpc_skb(skb);
+ if (len < 8) {
+ rxrpc_abort_call("V1H", call, seq, RXKADSEALEDINCON, EPROTO);
+ goto protocol_error;
+ }
- /* we want to decrypt the skbuff in-place */
+ /* Decrypt the skbuff in-place. TODO: We really want to decrypt
+ * directly into the target buffer.
+ */
nsg = skb_cow_data(skb, 0, &trailer);
if (nsg < 0 || nsg > 16)
goto nomem;
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, 8);
+ skb_to_sgvec(skb, sg, offset, 8);
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -351,35 +353,35 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
- /* remove the decrypted packet length */
- if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
- goto datalen_error;
- if (!skb_pull(skb, sizeof(sechdr)))
- BUG();
+ /* Extract the decrypted packet length */
+ if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
+ rxrpc_abort_call("XV1", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
+ offset += sizeof(sechdr);
+ len -= sizeof(sechdr);
buf = ntohl(sechdr.data_size);
data_size = buf & 0xffff;
check = buf >> 16;
- check ^= sp->hdr.seq ^ sp->hdr.callNumber;
+ check ^= seq ^ call->call_id;
check &= 0xffff;
if (check != 0) {
- *_abort_code = RXKADSEALEDINCON;
+ rxrpc_abort_call("V1C", call, seq, RXKADSEALEDINCON, EPROTO);
goto protocol_error;
}
- /* shorten the packet to remove the padding */
- if (data_size > skb->len)
- goto datalen_error;
- else if (data_size < skb->len)
- skb->len = data_size;
+ if (data_size > len) {
+ rxrpc_abort_call("V1L", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
_leave(" = 0 [dlen=%x]", data_size);
return 0;
-datalen_error:
- *_abort_code = RXKADDATALEN;
protocol_error:
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
_leave(" = -EPROTO");
return -EPROTO;
@@ -391,13 +393,12 @@ nomem:
/*
* wholly decrypt a packet (level 2 security)
*/
-static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq)
{
const struct rxrpc_key_token *token;
struct rxkad_level2_hdr sechdr;
- struct rxrpc_skb_priv *sp;
SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist _sg[4], *sg;
@@ -408,9 +409,14 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
_enter(",{%d}", skb->len);
- sp = rxrpc_skb(skb);
+ if (len < 8) {
+ rxrpc_abort_call("V2H", call, seq, RXKADSEALEDINCON, EPROTO);
+ goto protocol_error;
+ }
- /* we want to decrypt the skbuff in-place */
+ /* Decrypt the skbuff in-place. TODO: We really want to decrypt
+ * directly into the target buffer.
+ */
nsg = skb_cow_data(skb, 0, &trailer);
if (nsg < 0)
goto nomem;
@@ -423,7 +429,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
}
sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ skb_to_sgvec(skb, sg, offset, len);
/* decrypt from the session key */
token = call->conn->params.key->payload.data[0];
@@ -431,41 +437,41 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
skcipher_request_set_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, skb->len, iv.x);
+ skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (sg != _sg)
kfree(sg);
- /* remove the decrypted packet length */
- if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
- goto datalen_error;
- if (!skb_pull(skb, sizeof(sechdr)))
- BUG();
+ /* Extract the decrypted packet length */
+ if (skb_copy_bits(skb, offset, &sechdr, sizeof(sechdr)) < 0) {
+ rxrpc_abort_call("XV2", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
+ offset += sizeof(sechdr);
+ len -= sizeof(sechdr);
buf = ntohl(sechdr.data_size);
data_size = buf & 0xffff;
check = buf >> 16;
- check ^= sp->hdr.seq ^ sp->hdr.callNumber;
+ check ^= seq ^ call->call_id;
check &= 0xffff;
if (check != 0) {
- *_abort_code = RXKADSEALEDINCON;
+ rxrpc_abort_call("V2C", call, seq, RXKADSEALEDINCON, EPROTO);
goto protocol_error;
}
- /* shorten the packet to remove the padding */
- if (data_size > skb->len)
- goto datalen_error;
- else if (data_size < skb->len)
- skb->len = data_size;
+ if (data_size > len) {
+ rxrpc_abort_call("V2L", call, seq, RXKADDATALEN, EPROTO);
+ goto protocol_error;
+ }
_leave(" = 0 [dlen=%x]", data_size);
return 0;
-datalen_error:
- *_abort_code = RXKADDATALEN;
protocol_error:
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
_leave(" = -EPROTO");
return -EPROTO;
@@ -475,40 +481,31 @@ nomem:
}
/*
- * verify the security on a received packet
+ * Verify the security on a received packet or subpacket (if part of a
+ * jumbo packet).
*/
-static int rxkad_verify_packet(struct rxrpc_call *call,
- struct sk_buff *skb,
- u32 *_abort_code)
+static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int offset, unsigned int len,
+ rxrpc_seq_t seq, u16 expected_cksum)
{
SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
- struct rxrpc_skb_priv *sp;
struct rxrpc_crypt iv;
struct scatterlist sg;
u16 cksum;
u32 x, y;
- int ret;
-
- sp = rxrpc_skb(skb);
_enter("{%d{%x}},{#%u}",
- call->debug_id, key_serial(call->conn->params.key), sp->hdr.seq);
+ call->debug_id, key_serial(call->conn->params.key), seq);
if (!call->conn->cipher)
return 0;
- if (sp->hdr.securityIndex != RXRPC_SECURITY_RXKAD) {
- *_abort_code = RXKADINCONSISTENCY;
- _leave(" = -EPROTO [not rxkad]");
- return -EPROTO;
- }
-
/* continue encrypting from where we left off */
memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
/* validate the security checksum */
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
- x |= sp->hdr.seq & 0x3fffffff;
+ x |= seq & 0x3fffffff;
call->crypto_buf[0] = htonl(call->call_id);
call->crypto_buf[1] = htonl(x);
@@ -524,29 +521,69 @@ static int rxkad_verify_packet(struct rxrpc_call *call,
if (cksum == 0)
cksum = 1; /* zero checksums are not permitted */
- if (sp->hdr.cksum != cksum) {
- *_abort_code = RXKADSEALEDINCON;
+ if (cksum != expected_cksum) {
+ rxrpc_abort_call("VCK", call, seq, RXKADSEALEDINCON, EPROTO);
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
_leave(" = -EPROTO [csum failed]");
return -EPROTO;
}
switch (call->conn->params.security_level) {
case RXRPC_SECURITY_PLAIN:
- ret = 0;
- break;
+ return 0;
case RXRPC_SECURITY_AUTH:
- ret = rxkad_verify_packet_auth(call, skb, _abort_code);
- break;
+ return rxkad_verify_packet_1(call, skb, offset, len, seq);
case RXRPC_SECURITY_ENCRYPT:
- ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
- break;
+ return rxkad_verify_packet_2(call, skb, offset, len, seq);
default:
- ret = -ENOANO;
- break;
+ return -ENOANO;
}
+}
- _leave(" = %d", ret);
- return ret;
+/*
+ * Locate the data contained in a packet that was partially encrypted.
+ */
+static void rxkad_locate_data_1(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+ struct rxkad_level1_hdr sechdr;
+
+ if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
+ BUG();
+ *_offset += sizeof(sechdr);
+ *_len = ntohl(sechdr.data_size) & 0xffff;
+}
+
+/*
+ * Locate the data contained in a packet that was completely encrypted.
+ */
+static void rxkad_locate_data_2(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+ struct rxkad_level2_hdr sechdr;
+
+ if (skb_copy_bits(skb, *_offset, &sechdr, sizeof(sechdr)) < 0)
+ BUG();
+ *_offset += sizeof(sechdr);
+ *_len = ntohl(sechdr.data_size) & 0xffff;
+}
+
+/*
+ * Locate the data contained in an already decrypted packet.
+ */
+static void rxkad_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
+ unsigned int *_offset, unsigned int *_len)
+{
+ switch (call->conn->params.security_level) {
+ case RXRPC_SECURITY_AUTH:
+ rxkad_locate_data_1(call, skb, _offset, _len);
+ return;
+ case RXRPC_SECURITY_ENCRYPT:
+ rxkad_locate_data_2(call, skb, _offset, _len);
+ return;
+ default:
+ return;
+ }
}
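
Both rxkad security levels pack the true payload length and a 16-bit consistency check into one 32-bit header word: data_size | check << 16, where check = seq ^ call_id. Encoding and decoding round-trip as follows (values illustrative):

#include <assert.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t seq = 5, call_id = 0x1234, data_size = 1000;
	uint16_t check = (seq ^ call_id) & 0xffff;

	/* Encode as it sits in the security header on the wire... */
	uint32_t wire = htonl(data_size | (uint32_t)check << 16);

	/* ...and decode/verify as rxkad_verify_packet_[12]() do. */
	uint32_t buf = ntohl(wire);

	assert((buf & 0xffff) == data_size);
	assert((((buf >> 16) ^ seq ^ call_id) & 0xffff) == 0);
	return 0;
}
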
/*
@@ -716,7 +753,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
struct rxkad_challenge challenge;
struct rxkad_response resp
__attribute__((aligned(8))); /* must be aligned for crypto */
- struct rxrpc_skb_priv *sp;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
u32 version, nonce, min_level, abort_code;
int ret;
@@ -734,8 +771,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
}
abort_code = RXKADPACKETSHORT;
- sp = rxrpc_skb(skb);
- if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
+ if (skb_copy_bits(skb, sp->offset, &challenge, sizeof(challenge)) < 0)
goto protocol_error;
version = ntohl(challenge.version);
@@ -981,7 +1017,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
{
struct rxkad_response response
__attribute__((aligned(8))); /* must be aligned for crypto */
- struct rxrpc_skb_priv *sp;
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt session_key;
time_t expiry;
void *ticket;
@@ -992,7 +1028,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
abort_code = RXKADPACKETSHORT;
- if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0)
+ if (skb_copy_bits(skb, sp->offset, &response, sizeof(response)) < 0)
goto protocol_error;
if (!pskb_pull(skb, sizeof(response)))
BUG();
@@ -1000,7 +1036,6 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
version = ntohl(response.version);
ticket_len = ntohl(response.ticket_len);
kvno = ntohl(response.kvno);
- sp = rxrpc_skb(skb);
_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
sp->hdr.serial, version, kvno, ticket_len);
@@ -1022,7 +1057,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
return -ENOMEM;
abort_code = RXKADPACKETSHORT;
- if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
+ if (skb_copy_bits(skb, sp->offset, ticket, ticket_len) < 0)
goto protocol_error_free;
ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
@@ -1147,6 +1182,7 @@ const struct rxrpc_security rxkad = {
.prime_packet_security = rxkad_prime_packet_security,
.secure_packet = rxkad_secure_packet,
.verify_packet = rxkad_verify_packet,
+ .locate_data = rxkad_locate_data,
.issue_challenge = rxkad_issue_challenge,
.respond_to_challenge = rxkad_respond_to_challenge,
.verify_response = rxkad_verify_response,
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 814d285ff802..82d8134e9287 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -130,20 +130,20 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
}
/* find the service */
- read_lock_bh(&local->services_lock);
- list_for_each_entry(rx, &local->services, listen_link) {
+ read_lock(&local->services_lock);
+ hlist_for_each_entry(rx, &local->services, listen_link) {
if (rx->srx.srx_service == conn->params.service_id)
goto found_service;
}
/* the service appears to have died */
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
_leave(" = -ENOENT");
return -ENOENT;
found_service:
if (!rx->securities) {
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
_leave(" = -ENOKEY");
return -ENOKEY;
}
@@ -152,13 +152,13 @@ found_service:
kref = keyring_search(make_key_ref(rx->securities, 1UL),
&key_type_rxrpc_s, kdesc);
if (IS_ERR(kref)) {
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
_leave(" = %ld [search]", PTR_ERR(kref));
return PTR_ERR(kref);
}
key = key_ref_to_ptr(kref);
- read_unlock_bh(&local->services_lock);
+ read_unlock(&local->services_lock);
conn->server_key = key;
conn->security = sec;
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 7376794a0308..1f8040d82395 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -15,7 +15,6 @@
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
-#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
@@ -38,24 +37,28 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
DECLARE_WAITQUEUE(myself, current);
int ret;
- _enter(",{%d},%ld",
- CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
- call->acks_winsz),
- *timeo);
+ _enter(",{%u,%u,%u}",
+ call->tx_hard_ack, call->tx_top, call->tx_winsize);
add_wait_queue(&call->waitq, &myself);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
ret = 0;
- if (CIRC_SPACE(call->acks_head, ACCESS_ONCE(call->acks_tail),
- call->acks_winsz) > 0)
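+ /* There is room to transmit if the unacked span is below both the
+ * Tx window and the congestion window.
+ */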
+ if (call->tx_top - call->tx_hard_ack <
+ min_t(unsigned int, call->tx_winsize,
+ call->cong_cwnd + call->cong_extra))
break;
+ if (call->state >= RXRPC_CALL_COMPLETE) {
+ ret = -call->error;
+ break;
+ }
if (signal_pending(current)) {
ret = sock_intr_errno(*timeo);
break;
}
+ trace_rxrpc_transmit(call, rxrpc_transmit_wait);
release_sock(&rx->sk);
*timeo = schedule_timeout(*timeo);
lock_sock(&rx->sk);
@@ -68,36 +71,55 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
}
/*
- * attempt to schedule an instant Tx resend
+ * Schedule an instant Tx resend.
*/
-static inline void rxrpc_instant_resend(struct rxrpc_call *call)
+static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
- read_lock_bh(&call->state_lock);
- if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
+ spin_lock_bh(&call->lock);
+
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
+ if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_queue_call(call);
}
- read_unlock_bh(&call->state_lock);
+
+ spin_unlock_bh(&call->lock);
}
/*
- * queue a packet for transmission, set the resend timer and attempt
- * to send the packet immediately
+ * Queue a DATA packet for transmission, set the resend timeout and send the
+ * packet immediately.
*/
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
bool last)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- int ret;
+ rxrpc_seq_t seq = sp->hdr.seq;
+ int ret, ix;
+ u8 annotation = RXRPC_TX_ANNO_UNACK;
- _net("queue skb %p [%d]", skb, call->acks_head);
+ _net("queue skb %p [%d]", skb, seq);
- ASSERT(call->acks_window != NULL);
- call->acks_window[call->acks_head] = (unsigned long) skb;
+ ASSERTCMP(seq, ==, call->tx_top + 1);
+
+ if (last)
+ annotation |= RXRPC_TX_ANNO_LAST;
+
+ /* We have to set the timestamp before queueing as the retransmit
+ * algorithm can see the packet as soon as we queue it.
+ */
+ skb->tstamp = ktime_get_real();
+
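+ /* The Tx queue is a power-of-two ring; mask the sequence number to find the slot. */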
+ ix = seq & RXRPC_RXTX_BUFF_MASK;
+ rxrpc_get_skb(skb, rxrpc_skb_tx_got);
+ call->rxtx_annotations[ix] = annotation;
smp_wmb();
- call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);
+ call->rxtx_buffer[ix] = skb;
+ call->tx_top = seq;
+ if (last)
+ trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
+ else
+ trace_rxrpc_transmit(call, rxrpc_transmit_queue);
if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
_debug("________awaiting reply/ACK__________");
@@ -119,60 +141,26 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
write_unlock_bh(&call->state_lock);
}
- _proto("Tx DATA %%%u { #%u }", sp->hdr.serial, sp->hdr.seq);
-
- sp->need_resend = false;
- sp->resend_at = jiffies + rxrpc_resend_timeout;
- if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
- _debug("run timer");
- call->resend_timer.expires = sp->resend_at;
- add_timer(&call->resend_timer);
- }
-
- /* attempt to cancel the rx-ACK timer, deferring reply transmission if
- * we're ACK'ing the request phase of an incoming call */
- ret = -EAGAIN;
- if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
- /* the packet may be freed by rxrpc_process_call() before this
- * returns */
- if (rxrpc_is_client_call(call))
- rxrpc_expose_client_call(call);
- ret = rxrpc_send_data_packet(call->conn, skb);
- _net("sent skb %p", skb);
- } else {
- _debug("failed to delete ACK timer");
- }
+ if (seq == 1 && rxrpc_is_client_call(call))
+ rxrpc_expose_client_call(call);
+ ret = rxrpc_send_data_packet(call, skb);
if (ret < 0) {
_debug("need instant resend %d", ret);
- sp->need_resend = true;
- rxrpc_instant_resend(call);
- }
+ rxrpc_instant_resend(call, ix);
+ } else {
+ unsigned long resend_at;
- _leave("");
-}
+ resend_at = jiffies + msecs_to_jiffies(rxrpc_resend_timeout);
-/*
- * Convert a host-endian header into a network-endian header.
- */
-static void rxrpc_insert_header(struct sk_buff *skb)
-{
- struct rxrpc_wire_header whdr;
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
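+ /* Only bring the resend timer forward; a later expiry is left alone. */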
+ if (time_before(resend_at, call->resend_at)) {
+ call->resend_at = resend_at;
+ rxrpc_set_timer(call, rxrpc_timer_set_for_send);
+ }
+ }
- whdr.epoch = htonl(sp->hdr.epoch);
- whdr.cid = htonl(sp->hdr.cid);
- whdr.callNumber = htonl(sp->hdr.callNumber);
- whdr.seq = htonl(sp->hdr.seq);
- whdr.serial = htonl(sp->hdr.serial);
- whdr.type = sp->hdr.type;
- whdr.flags = sp->hdr.flags;
- whdr.userStatus = sp->hdr.userStatus;
- whdr.securityIndex = sp->hdr.securityIndex;
- whdr._rsvd = htons(sp->hdr._rsvd);
- whdr.serviceId = htons(sp->hdr.serviceId);
-
- memcpy(skb->head, &whdr, sizeof(whdr));
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
+ _leave("");
}
/*
@@ -203,18 +191,22 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
skb = call->tx_pending;
call->tx_pending = NULL;
- rxrpc_see_skb(skb);
+ rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
copied = 0;
do {
+ /* Check to see if there's a ping ACK to reply to. */
+ if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
+
if (!skb) {
size_t size, chunk, max, space;
_debug("alloc");
- if (CIRC_SPACE(call->acks_head,
- ACCESS_ONCE(call->acks_tail),
- call->acks_winsz) <= 0) {
+ if (call->tx_top - call->tx_hard_ack >=
+ min_t(unsigned int, call->tx_winsize,
+ call->cong_cwnd + call->cong_extra)) {
ret = -EAGAIN;
if (msg->msg_flags & MSG_DONTWAIT)
goto maybe_error;
@@ -224,7 +216,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
goto maybe_error;
}
- max = call->conn->params.peer->maxdata;
+ max = RXRPC_JUMBO_DATALEN;
max -= call->conn->security_size;
max &= ~(call->conn->size_align - 1UL);
@@ -235,7 +227,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
space = chunk + call->conn->size_align;
space &= ~(call->conn->size_align - 1UL);
- size = space + call->conn->header_size;
+ size = space + call->conn->security_size;
_debug("SIZE: %zu/%zu/%zu", chunk, space, size);
@@ -245,15 +237,15 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
if (!skb)
goto maybe_error;
- rxrpc_new_skb(skb);
+ rxrpc_new_skb(skb, rxrpc_skb_tx_new);
_debug("ALLOC SEND %p", skb);
ASSERTCMP(skb->mark, ==, 0);
- _debug("HS: %u", call->conn->header_size);
- skb_reserve(skb, call->conn->header_size);
- skb->len += call->conn->header_size;
+ _debug("HS: %u", call->conn->security_size);
+ skb_reserve(skb, call->conn->security_size);
+ skb->len += call->conn->security_size;
sp = rxrpc_skb(skb);
sp->remain = chunk;
@@ -313,36 +305,23 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
memset(skb_put(skb, pad), 0, pad);
}
- seq = atomic_inc_return(&call->sequence);
+ seq = call->tx_top + 1;
- sp->hdr.epoch = conn->proto.epoch;
- sp->hdr.cid = call->cid;
- sp->hdr.callNumber = call->call_id;
sp->hdr.seq = seq;
- sp->hdr.serial = atomic_inc_return(&conn->serial);
- sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
- sp->hdr.userStatus = 0;
- sp->hdr.securityIndex = conn->security_ix;
sp->hdr._rsvd = 0;
- sp->hdr.serviceId = call->service_id;
+ sp->hdr.flags = conn->out_clientflag;
- sp->hdr.flags = conn->out_clientflag;
if (msg_data_left(msg) == 0 && !more)
sp->hdr.flags |= RXRPC_LAST_PACKET;
- else if (CIRC_SPACE(call->acks_head,
- ACCESS_ONCE(call->acks_tail),
- call->acks_winsz) > 1)
+ else if (call->tx_top - call->tx_hard_ack <
+ call->tx_winsize)
sp->hdr.flags |= RXRPC_MORE_PACKETS;
- if (more && seq & 1)
- sp->hdr.flags |= RXRPC_REQUEST_ACK;
ret = conn->security->secure_packet(
- call, skb, skb->mark,
- skb->head + sizeof(struct rxrpc_wire_header));
+ call, skb, skb->mark, skb->head);
if (ret < 0)
goto out;
- rxrpc_insert_header(skb);
rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
skb = NULL;
}
@@ -356,9 +335,9 @@ out:
return ret;
call_terminated:
- rxrpc_free_skb(skb);
+ rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
_leave(" = %d", -call->error);
- return ret;
+ return -call->error;
maybe_error:
if (copied)
@@ -452,28 +431,6 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
}
/*
- * abort a call, sending an ABORT packet to the peer
- */
-static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
-{
- if (call->state >= RXRPC_CALL_COMPLETE)
- return;
-
- write_lock_bh(&call->state_lock);
-
- if (__rxrpc_abort_call(call, abort_code, ECONNABORTED)) {
- del_timer_sync(&call->resend_timer);
- del_timer_sync(&call->ack_timer);
- clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_EV_ACK, &call->events);
- clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- rxrpc_queue_call(call);
- }
-
- write_unlock_bh(&call->state_lock);
-}
-
-/*
* Create a new client call for sendmsg().
*/
static struct rxrpc_call *
@@ -534,7 +491,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
call = rxrpc_accept_call(rx, user_call_ID, NULL);
if (IS_ERR(call))
return PTR_ERR(call);
- rxrpc_put_call(call);
+ rxrpc_put_call(call, rxrpc_call_put);
return 0;
}
@@ -548,7 +505,6 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
return PTR_ERR(call);
}
- rxrpc_see_call(call);
_debug("CALL %d USR %lx ST %d on CONN %p",
call->debug_id, call->user_call_ID, call->state, call->conn);
@@ -556,8 +512,10 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
/* it's too late for this call */
ret = -ESHUTDOWN;
} else if (cmd == RXRPC_CMD_SEND_ABORT) {
- rxrpc_send_abort(call, abort_code);
ret = 0;
+ if (rxrpc_abort_call("CMD", call, 0, abort_code, ECONNABORTED))
+ ret = rxrpc_send_call_packet(call,
+ RXRPC_PACKET_TYPE_ABORT);
} else if (cmd != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else if (rxrpc_is_client_call(call) &&
@@ -573,7 +531,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = rxrpc_send_data(rx, call, msg, len);
}
- rxrpc_put_call(call);
+ rxrpc_put_call(call, rxrpc_call_put);
_leave(" = %d", ret);
return ret;
}
@@ -626,20 +584,20 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data);
* @sock: The socket the call is on
* @call: The call to be aborted
* @abort_code: The abort code to stick into the ABORT packet
+ * @error: Local error value
+ * @why: 3-char string indicating why.
*
* Allow a kernel service to abort a call, if it's still in an abortable state.
*/
void rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
- u32 abort_code)
+ u32 abort_code, int error, const char *why)
{
- _enter("{%d},%d", call->debug_id, abort_code);
+ _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
lock_sock(sock->sk);
- _debug("CALL %d USR %lx ST %d on CONN %p",
- call->debug_id, call->user_call_ID, call->state, call->conn);
-
- rxrpc_send_abort(call, abort_code);
+ if (rxrpc_abort_call(why, call, 0, abort_code, error))
+ rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ABORT);
release_sock(sock->sk);
_leave("");
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 9752f8b1fdd0..5154cbf7e540 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -18,198 +18,77 @@
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-/*
- * set up for the ACK at the end of the receive phase when we discard the final
- * receive phase data packet
- * - called with softirqs disabled
- */
-static void rxrpc_request_final_ACK(struct rxrpc_call *call)
-{
- /* the call may be aborted before we have a chance to ACK it */
- write_lock(&call->state_lock);
-
- switch (call->state) {
- case RXRPC_CALL_CLIENT_RECV_REPLY:
- call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
- _debug("request final ACK");
-
- /* get an extra ref on the call for the final-ACK generator to
- * release */
- rxrpc_get_call(call);
- set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
- if (try_to_del_timer_sync(&call->ack_timer) >= 0)
- rxrpc_queue_call(call);
- break;
-
- case RXRPC_CALL_SERVER_RECV_REQUEST:
- call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
- default:
- break;
- }
-
- write_unlock(&call->state_lock);
-}
-
-/*
- * drop the bottom ACK off of the call ACK window and advance the window
- */
-static void rxrpc_hard_ACK_data(struct rxrpc_call *call, struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- int loop;
- u32 seq;
-
- spin_lock_bh(&call->lock);
-
- _debug("hard ACK #%u", sp->hdr.seq);
-
- for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
- call->ackr_window[loop] >>= 1;
- call->ackr_window[loop] |=
- call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
- }
-
- seq = sp->hdr.seq;
- ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
- call->rx_data_eaten = seq;
-
- if (call->ackr_win_top < UINT_MAX)
- call->ackr_win_top++;
-
- ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
- call->rx_data_post, >=, call->rx_data_recv);
- ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
- call->rx_data_recv, >=, call->rx_data_eaten);
-
- if (sp->hdr.flags & RXRPC_LAST_PACKET) {
- rxrpc_request_final_ACK(call);
- } else if (atomic_dec_and_test(&call->ackr_not_idle) &&
- test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
- /* We previously soft-ACK'd some received packets that have now
- * been consumed, so send a hard-ACK if no more packets are
- * immediately forthcoming to allow the transmitter to free up
- * its Tx bufferage.
- */
- _debug("send Rx idle ACK");
- __rxrpc_propose_ACK(call, RXRPC_ACK_IDLE,
- skb->priority, sp->hdr.serial, false);
- }
-
- spin_unlock_bh(&call->lock);
-}
-
-/**
- * rxrpc_kernel_data_consumed - Record consumption of data message
- * @call: The call to which the message pertains.
- * @skb: Message holding data
- *
- * Record the consumption of a data message and generate an ACK if appropriate.
- * The call state is shifted if this was the final packet. The caller must be
- * in process context with no spinlocks held.
- *
- * TODO: Actually generate the ACK here rather than punting this to the
- * workqueue.
- */
-void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
- _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq);
-
- ASSERTCMP(sp->call, ==, call);
- ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA);
-
- /* TODO: Fix the sequence number tracking */
- ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv);
- ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1);
- ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten);
-
- call->rx_data_recv = sp->hdr.seq;
- rxrpc_hard_ACK_data(call, skb);
-}
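+/* Trace ops at or above rxrpc_skb_tx_cleaned are Tx ops; pick the matching skb counter. */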
+#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
/*
- * Destroy a packet that has an RxRPC control buffer
+ * Note the allocation or reception of a socket buffer.
*/
-void rxrpc_packet_destructor(struct sk_buff *skb)
-{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_call *call = sp->call;
-
- _enter("%p{%p}", skb, call);
-
- if (call) {
- rxrpc_put_call_for_skb(call, skb);
- sp->call = NULL;
- }
-
- if (skb->sk)
- sock_rfree(skb);
- _leave("");
-}
-
-/**
- * rxrpc_kernel_free_skb - Free an RxRPC socket buffer
- * @skb: The socket buffer to be freed
- *
- * Let RxRPC free its own socket buffer, permitting it to maintain debug
- * accounting.
- */
-void rxrpc_kernel_free_skb(struct sk_buff *skb)
-{
- rxrpc_free_skb(skb);
-}
-EXPORT_SYMBOL(rxrpc_kernel_free_skb);
-
-/*
- * Note the existence of a new-to-us socket buffer (allocated or dequeued).
- */
-void rxrpc_new_skb(struct sk_buff *skb)
+void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(&rxrpc_n_skbs);
- trace_rxrpc_skb(skb, 0, atomic_read(&skb->users), n, here);
+ int n = atomic_inc_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}
/*
* Note the re-emergence of a socket buffer from a queue or buffer.
*/
-void rxrpc_see_skb(struct sk_buff *skb)
+void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
if (skb) {
- int n = atomic_read(&rxrpc_n_skbs);
- trace_rxrpc_skb(skb, 1, atomic_read(&skb->users), n, here);
+ int n = atomic_read(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
}
}
/*
* Note the addition of a ref on a socket buffer.
*/
-void rxrpc_get_skb(struct sk_buff *skb)
+void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
- int n = atomic_inc_return(&rxrpc_n_skbs);
- trace_rxrpc_skb(skb, 2, atomic_read(&skb->users), n, here);
+ int n = atomic_inc_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
skb_get(skb);
}
/*
* Note the destruction of a socket buffer.
*/
-void rxrpc_free_skb(struct sk_buff *skb)
+void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
if (skb) {
int n;
CHECK_SLAB_OKAY(&skb->users);
- n = atomic_dec_return(&rxrpc_n_skbs);
- trace_rxrpc_skb(skb, 3, atomic_read(&skb->users), n, here);
+ n = atomic_dec_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
kfree_skb(skb);
}
}
/*
+ * Note the injected loss of a socket buffer.
+ */
+void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
+{
+ const void *here = __builtin_return_address(0);
+ if (skb) {
+ int n;
+ CHECK_SLAB_OKAY(&skb->users);
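+ /* An injected Tx loss is only traced; the skb remains owned by
+ * the Tx ring. Anything else is accounted and freed as normal.
+ */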
+ if (op == rxrpc_skb_tx_lost) {
+ n = atomic_read(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ } else {
+ n = atomic_dec_return(select_skb_count(op));
+ trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ kfree_skb(skb);
+ }
+ }
+}
+
+/*
* Clear a queue of socket buffers.
*/
void rxrpc_purge_queue(struct sk_buff_head *list)
@@ -217,8 +96,9 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
const void *here = __builtin_return_address(0);
struct sk_buff *skb;
while ((skb = skb_dequeue((list))) != NULL) {
- int n = atomic_dec_return(&rxrpc_n_skbs);
- trace_rxrpc_skb(skb, 4, atomic_read(&skb->users), n, here);
+ int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
+ trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
+ atomic_read(&skb->users), n, here);
kfree_skb(skb);
}
}
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index dc380af8a81e..13d1df03ebac 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -20,7 +20,7 @@ static const unsigned int one = 1;
static const unsigned int four = 4;
static const unsigned int thirtytwo = 32;
static const unsigned int n_65535 = 65535;
-static const unsigned int n_max_acks = RXRPC_MAXACKS;
+static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
/*
* RxRPC operating parameters.
@@ -59,7 +59,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.data = &rxrpc_resend_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
+ .proc_handler = proc_dointvec,
.extra1 = (void *)&one,
},
{
@@ -88,14 +88,6 @@ static struct ctl_table rxrpc_sysctl_table[] = {
.proc_handler = proc_dointvec_jiffies,
.extra1 = (void *)&one,
},
- {
- .procname = "dead_call_expiry",
- .data = &rxrpc_dead_call_expiry,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- .extra1 = (void *)&one,
- },
/* Non-time values */
{
diff --git a/net/rxrpc/utils.c b/net/rxrpc/utils.c
index b88914d53ca5..ff7af71c4b49 100644
--- a/net/rxrpc/utils.c
+++ b/net/rxrpc/utils.c
@@ -30,6 +30,7 @@ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
srx->transport.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
return 0;
+#ifdef CONFIG_AF_RXRPC_IPV6
case ETH_P_IPV6:
srx->transport_type = SOCK_DGRAM;
srx->transport_len = sizeof(srx->transport.sin6);
@@ -37,6 +38,7 @@ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *srx, struct sk_buff *skb)
srx->transport.sin6.sin6_port = udp_hdr(skb)->source;
srx->transport.sin6.sin6_addr = ipv6_hdr(skb)->saddr;
return 0;
+#endif
default:
pr_warn_ratelimited("AF_RXRPC: Unknown eth protocol %u\n",
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index ccf931b3b94c..87956a768d1b 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -749,6 +749,17 @@ config NET_ACT_CONNMARK
To compile this code as a module, choose M here: the
module will be called act_connmark.
+config NET_ACT_SKBMOD
+ tristate "skb data modification action"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to allow modification of skb data.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_skbmod.
+
config NET_ACT_IFE
tristate "Inter-FE action based on IETF ForCES InterFE LFB"
depends on NET_CLS_ACT
@@ -761,6 +772,17 @@ config NET_ACT_IFE
To compile this code as a module, choose M here: the
module will be called act_ife.
+config NET_ACT_TUNNEL_KEY
+ tristate "IP tunnel metadata manipulation"
+ depends on NET_CLS_ACT
+ ---help---
+ Say Y here to set/release IP tunnel metadata.
+
+ If unsure, say N.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_tunnel_key.
+
config NET_IFE_SKBMARK
tristate "Support to encoding decoding skb mark on IFE action"
depends on NET_ACT_IFE
@@ -771,6 +793,11 @@ config NET_IFE_SKBPRIO
depends on NET_ACT_IFE
---help---
+config NET_IFE_SKBTCINDEX
+ tristate "Support to encoding decoding skb tcindex on IFE action"
+ depends on NET_ACT_IFE
+ ---help---
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index ae088a5a9d95..4bdda3634e0b 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -19,9 +19,12 @@ obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o
obj-$(CONFIG_NET_ACT_BPF) += act_bpf.o
obj-$(CONFIG_NET_ACT_CONNMARK) += act_connmark.o
+obj-$(CONFIG_NET_ACT_SKBMOD) += act_skbmod.o
obj-$(CONFIG_NET_ACT_IFE) += act_ife.o
obj-$(CONFIG_NET_IFE_SKBMARK) += act_meta_mark.o
obj-$(CONFIG_NET_IFE_SKBPRIO) += act_meta_skbprio.o
+obj-$(CONFIG_NET_IFE_SKBTCINDEX) += act_meta_skbtcindex.o
+obj-$(CONFIG_NET_ACT_TUNNEL_KEY)+= act_tunnel_key.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d09d0687594b..c9102172ce3b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -592,9 +592,19 @@ err_out:
return ERR_PTR(err);
}
-int tcf_action_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *name, int ovr,
- int bind, struct list_head *actions)
+static void cleanup_a(struct list_head *actions, int ovr)
+{
+ struct tc_action *a;
+
+ if (!ovr)
+ return;
+
+ list_for_each_entry(a, actions, list)
+ a->tcfa_refcnt--;
+}
+
+int tcf_action_init(struct net *net, struct nlattr *nla, struct nlattr *est,
+ char *name, int ovr, int bind, struct list_head *actions)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *act;
@@ -612,8 +622,15 @@ int tcf_action_init(struct net *net, struct nlattr *nla,
goto err;
}
act->order = i;
+ if (ovr)
+ act->tcfa_refcnt++;
list_add_tail(&act->list, actions);
}
+
+ /* Remove the temp refcnt which was necessary to protect against
+ * destroying an existing action which was being replaced
+ */
+ cleanup_a(actions, ovr);
return 0;
err:
@@ -883,6 +900,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
+ if (event == RTM_GETACTION)
+ act->tcfa_refcnt++;
list_add_tail(&act->list, &actions);
}
@@ -923,9 +942,8 @@ tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
return err;
}
-static int
-tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
- u32 portid, int ovr)
+static int tcf_action_add(struct net *net, struct nlattr *nla,
+ struct nlmsghdr *n, u32 portid, int ovr)
{
int ret = 0;
LIST_HEAD(actions);
@@ -988,8 +1006,7 @@ replay:
return ret;
}
-static struct nlattr *
-find_dump_kind(const struct nlmsghdr *n)
+static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
{
struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
@@ -1016,8 +1033,7 @@ find_dump_kind(const struct nlmsghdr *n)
return kind;
}
-static int
-tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
+static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index bfa870731e74..1d3960033f61 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -39,13 +39,10 @@ static struct tc_action_ops act_bpf_ops;
static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
struct tcf_result *res)
{
+ bool at_ingress = skb_at_tc_ingress(skb);
struct tcf_bpf *prog = to_bpf(act);
struct bpf_prog *filter;
int action, filter_res;
- bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
-
- if (unlikely(!skb_mac_header_was_set(skb)))
- return TC_ACT_UNSPEC;
tcf_lastuse_update(&prog->tcf_tm);
bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b5dbf633a863..e0defcef376d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -116,8 +116,8 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
return (void *)(skb_network_header(skb) + ihl);
}
-static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct icmphdr *icmph;
@@ -152,8 +152,8 @@ static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct icmp6hdr *icmp6h;
const struct ipv6hdr *ip6h;
@@ -174,8 +174,8 @@ static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct tcphdr *tcph;
const struct iphdr *iph;
@@ -195,8 +195,8 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl)
+static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl)
{
struct tcphdr *tcph;
const struct ipv6hdr *ip6h;
@@ -217,8 +217,8 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
return 1;
}
-static int tcf_csum_ipv4_udp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl, int udplite)
+static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl, int udplite)
{
struct udphdr *udph;
const struct iphdr *iph;
@@ -270,8 +270,8 @@ ignore_obscure_skb:
return 1;
}
-static int tcf_csum_ipv6_udp(struct sk_buff *skb,
- unsigned int ihl, unsigned int ipl, int udplite)
+static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
+ unsigned int ipl, int udplite)
{
struct udphdr *udph;
const struct ipv6hdr *ip6h;
@@ -380,8 +380,8 @@ fail:
return 0;
}
-static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
- unsigned int ixhl, unsigned int *pl)
+static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
+ unsigned int *pl)
{
int off, len, optlen;
unsigned char *xh = (void *)ip6xh;
@@ -494,8 +494,8 @@ fail:
return 0;
}
-static int tcf_csum(struct sk_buff *skb,
- const struct tc_action *a, struct tcf_result *res)
+static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
{
struct tcf_csum *p = to_tcf_csum(a);
int action;
@@ -531,8 +531,8 @@ drop:
return TC_ACT_SHOT;
}
-static int tcf_csum_dump(struct sk_buff *skb,
- struct tc_action *a, int bind, int ref)
+static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_csum *p = to_tcf_csum(a);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e24a4093d6f6..e0aa30f83c6c 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -156,7 +156,8 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
int action = READ_ONCE(gact->tcf_action);
struct tcf_t *tm = &gact->tcf_tm;
- _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes, packets);
+ _bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), bytes,
+ packets);
if (action == TC_ACT_SHOT)
this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index e87cd81315e1..ccf7b4b655fe 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -63,6 +63,23 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
}
EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
+{
+ u16 edata = 0;
+
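+ /* Prefer the value configured on the action; fall back to the value derived from the skb. */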
+ if (mi->metaval)
+ edata = *(u16 *)mi->metaval;
+ else if (metaval)
+ edata = metaval;
+
+ if (!edata) /* will not encode */
+ return 0;
+
+ edata = htons(edata);
+ return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
+}
+EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
+
int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
if (mi->metaval)
@@ -81,6 +98,15 @@ int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
+{
+ if (metaval || mi->metaval)
+ return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ife_check_meta_u16);
+
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
u32 edata = metaval;
diff --git a/net/sched/act_meta_skbtcindex.c b/net/sched/act_meta_skbtcindex.c
new file mode 100644
index 000000000000..3b35774ce890
--- /dev/null
+++ b/net/sched/act_meta_skbtcindex.c
@@ -0,0 +1,79 @@
+/*
+ * net/sched/act_meta_skbtcindex.c IFE skb->tc_index metadata module
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright Jamal Hadi Salim (2016)
+ *
+*/
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <uapi/linux/tc_act/tc_ife.h>
+#include <net/tc_act/tc_ife.h>
+
+static int skbtcindex_encode(struct sk_buff *skb, void *skbdata,
+ struct tcf_meta_info *e)
+{
+ u32 ifetc_index = skb->tc_index;
+
+ return ife_encode_meta_u16(ifetc_index, skbdata, e);
+}
+
+static int skbtcindex_decode(struct sk_buff *skb, void *data, u16 len)
+{
+ u16 ifetc_index = *(u16 *)data;
+
+ skb->tc_index = ntohs(ifetc_index);
+ return 0;
+}
+
+static int skbtcindex_check(struct sk_buff *skb, struct tcf_meta_info *e)
+{
+ return ife_check_meta_u16(skb->tc_index, e);
+}
+
+static struct tcf_meta_ops ife_skbtcindex_ops = {
+ .metaid = IFE_META_TCINDEX,
+ .metatype = NLA_U16,
+ .name = "tc_index",
+ .synopsis = "skb tc_index 16 bit metadata",
+ .check_presence = skbtcindex_check,
+ .encode = skbtcindex_encode,
+ .decode = skbtcindex_decode,
+ .get = ife_get_meta_u16,
+ .alloc = ife_alloc_meta_u16,
+ .release = ife_release_meta_gen,
+ .validate = ife_validate_meta_u16,
+ .owner = THIS_MODULE,
+};
+
+static int __init ifetc_index_init_module(void)
+{
+ return register_ife_op(&ife_skbtcindex_ops);
+}
+
+static void __exit ifetc_index_cleanup_module(void)
+{
+ unregister_ife_op(&ife_skbtcindex_ops);
+}
+
+module_init(ifetc_index_init_module);
+module_exit(ifetc_index_cleanup_module);
+
+MODULE_AUTHOR("Jamal Hadi Salim(2016)");
+MODULE_DESCRIPTION("Inter-FE skb tc_index metadata module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IFE_META(IFE_META_SKBTCINDEX);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6038c85d92f5..667dc382df82 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -204,7 +204,15 @@ out:
return retval;
}
-static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
+ u64 lastuse)
+{
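+ /* Fold externally gathered (e.g. hardware offload) counters into the action's stats. */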
+ tcf_lastuse_update(&a->tcfa_tm);
+ _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+}
+
+static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mirred *m = to_mirred(a);
@@ -280,6 +288,7 @@ static struct tc_action_ops act_mirred_ops = {
.type = TCA_ACT_MIRRED,
.owner = THIS_MODULE,
.act = tcf_mirred,
+ .stats_update = tcf_stats_update,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_release,
.init = tcf_mirred_init,
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8a3be1d99775..d1bd248fe146 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -249,6 +249,8 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
police->tcfp_t_c = now;
police->tcfp_toks = toks;
police->tcfp_ptoks = ptoks;
+ if (police->tcfp_result == TC_ACT_SHOT)
+ police->tcf_qstats.drops++;
spin_unlock(&police->tcf_lock);
return police->tcfp_result;
}
@@ -261,8 +263,8 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
return police->tcf_action;
}
-static int
-tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = to_police(a);
@@ -347,14 +349,12 @@ static struct pernet_operations police_net_ops = {
.size = sizeof(struct tc_action_net),
};
-static int __init
-police_init_module(void)
+static int __init police_init_module(void)
{
return tcf_register_action(&act_police_ops, &police_net_ops);
}
-static void __exit
-police_cleanup_module(void)
+static void __exit police_cleanup_module(void)
{
tcf_unregister_action(&act_police_ops, &police_net_ops);
}
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
new file mode 100644
index 000000000000..e7d96381c908
--- /dev/null
+++ b/net/sched/act_skbmod.c
@@ -0,0 +1,301 @@
+/*
+ * net/sched/act_skbmod.c skb data modifier
+ *
+ * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+#include <linux/tc_act/tc_skbmod.h>
+#include <net/tc_act/tc_skbmod.h>
+
+#define SKBMOD_TAB_MASK 15
+
+static int skbmod_net_id;
+static struct tc_action_ops act_skbmod_ops;
+
+#define MAX_EDIT_LEN ETH_HLEN
+static int tcf_skbmod_run(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_skbmod *d = to_skbmod(a);
+ int action;
+ struct tcf_skbmod_params *p;
+ u64 flags;
+ int err;
+
+ tcf_lastuse_update(&d->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
+
+ /* XXX: if you are going to edit more fields beyond ethernet header
+ * (example when you add IP header replacement or vlan swap)
+ * then MAX_EDIT_LEN needs to change appropriately
+ */
+ err = skb_ensure_writable(skb, MAX_EDIT_LEN);
+ if (unlikely(err)) { /* best policy is to drop on the floor */
+ qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+ return TC_ACT_SHOT;
+ }
+
+ rcu_read_lock();
+ action = READ_ONCE(d->tcf_action);
+ if (unlikely(action == TC_ACT_SHOT)) {
+ qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
+ rcu_read_unlock();
+ return action;
+ }
+
+ p = rcu_dereference(d->skbmod_p);
+ flags = p->flags;
+ if (flags & SKBMOD_F_DMAC)
+ ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
+ if (flags & SKBMOD_F_SMAC)
+ ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
+ if (flags & SKBMOD_F_ETYPE)
+ eth_hdr(skb)->h_proto = p->eth_type;
+ rcu_read_unlock();
+
+ if (flags & SKBMOD_F_SWAPMAC) {
+ u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
+ /* XXX: I am sure we can come up with more efficient swapping */
+ ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
+ ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
+ ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
+ }
+
+ return action;
+}
+
+static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
+ [TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) },
+ [TCA_SKBMOD_DMAC] = { .len = ETH_ALEN },
+ [TCA_SKBMOD_SMAC] = { .len = ETH_ALEN },
+ [TCA_SKBMOD_ETYPE] = { .type = NLA_U16 },
+};
+
+static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a,
+ int ovr, int bind)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+ struct nlattr *tb[TCA_SKBMOD_MAX + 1];
+ struct tcf_skbmod_params *p, *p_old;
+ struct tc_skbmod *parm;
+ struct tcf_skbmod *d;
+ bool exists = false;
+ u8 *daddr = NULL;
+ u8 *saddr = NULL;
+ u16 eth_type = 0;
+ u32 lflags = 0;
+ int ret = 0, err;
+
+ if (!nla)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_SKBMOD_MAX, nla, skbmod_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_SKBMOD_PARMS])
+ return -EINVAL;
+
+ if (tb[TCA_SKBMOD_DMAC]) {
+ daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
+ lflags |= SKBMOD_F_DMAC;
+ }
+
+ if (tb[TCA_SKBMOD_SMAC]) {
+ saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
+ lflags |= SKBMOD_F_SMAC;
+ }
+
+ if (tb[TCA_SKBMOD_ETYPE]) {
+ eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
+ lflags |= SKBMOD_F_ETYPE;
+ }
+
+ parm = nla_data(tb[TCA_SKBMOD_PARMS]);
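+ /* SWAPMAC is exclusive: it overrides any DMAC/SMAC/ETYPE edits selected above. */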
+ if (parm->flags & SKBMOD_F_SWAPMAC)
+ lflags = SKBMOD_F_SWAPMAC;
+
+ exists = tcf_hash_check(tn, parm->index, a, bind);
+ if (exists && bind)
+ return 0;
+
+ if (!lflags)
+ return -EINVAL;
+
+ if (!exists) {
+ ret = tcf_hash_create(tn, parm->index, est, a,
+ &act_skbmod_ops, bind, true);
+ if (ret)
+ return ret;
+
+ ret = ACT_P_CREATED;
+ } else {
+ tcf_hash_release(*a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+
+ d = to_skbmod(*a);
+
+ ASSERT_RTNL();
+ p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
+ if (unlikely(!p)) {
+ if (ovr)
+ tcf_hash_release(*a, bind);
+ return -ENOMEM;
+ }
+
+ p->flags = lflags;
+ d->tcf_action = parm->action;
+
+ p_old = rtnl_dereference(d->skbmod_p);
+
+ if (ovr)
+ spin_lock_bh(&d->tcf_lock);
+
+ if (lflags & SKBMOD_F_DMAC)
+ ether_addr_copy(p->eth_dst, daddr);
+ if (lflags & SKBMOD_F_SMAC)
+ ether_addr_copy(p->eth_src, saddr);
+ if (lflags & SKBMOD_F_ETYPE)
+ p->eth_type = htons(eth_type);
+
+ rcu_assign_pointer(d->skbmod_p, p);
+ if (ovr)
+ spin_unlock_bh(&d->tcf_lock);
+
+ if (p_old)
+ kfree_rcu(p_old, rcu);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(tn, *a);
+ return ret;
+}
+
+static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_skbmod *d = to_skbmod(a);
+ struct tcf_skbmod_params *p;
+
+ p = rcu_dereference_protected(d->skbmod_p, 1);
+ /* p may be unset if init failed after the action was created. */
+ if (p)
+ kfree_rcu(p, rcu);
+}
+
+static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ struct tcf_skbmod *d = to_skbmod(a);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_skbmod_params *p = rtnl_dereference(d->skbmod_p);
+ struct tc_skbmod opt = {
+ .index = d->tcf_index,
+ .refcnt = d->tcf_refcnt - ref,
+ .bindcnt = d->tcf_bindcnt - bind,
+ .action = d->tcf_action,
+ };
+ struct tcf_t t;
+
+ opt.flags = p->flags;
+ if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+ if ((p->flags & SKBMOD_F_DMAC) &&
+ nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
+ goto nla_put_failure;
+ if ((p->flags & SKBMOD_F_SMAC) &&
+ nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
+ goto nla_put_failure;
+ if ((p->flags & SKBMOD_F_ETYPE) &&
+ nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
+ goto nla_put_failure;
+
+ tcf_tm_dump(&t, &d->tcf_tm);
+ if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
+ goto nla_put_failure;
+
+ return skb->len;
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_skbmod_ops = {
+ .kind = "skbmod",
+ .type = TCA_ACT_SKBMOD,
+ .owner = THIS_MODULE,
+ .act = tcf_skbmod_run,
+ .dump = tcf_skbmod_dump,
+ .init = tcf_skbmod_init,
+ .cleanup = tcf_skbmod_cleanup,
+ .walk = tcf_skbmod_walker,
+ .lookup = tcf_skbmod_search,
+ .size = sizeof(struct tcf_skbmod),
+};
+
+static __net_init int skbmod_init_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ return tc_action_net_init(tn, &act_skbmod_ops, SKBMOD_TAB_MASK);
+}
+
+static void __net_exit skbmod_exit_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, skbmod_net_id);
+
+ tc_action_net_exit(tn);
+}
+
+static struct pernet_operations skbmod_net_ops = {
+ .init = skbmod_init_net,
+ .exit = skbmod_exit_net,
+ .id = &skbmod_net_id,
+ .size = sizeof(struct tc_action_net),
+};
+
+MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
+MODULE_DESCRIPTION("SKB data mod-ing");
+MODULE_LICENSE("GPL");
+
+static int __init skbmod_init_module(void)
+{
+ return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
+}
+
+static void __exit skbmod_cleanup_module(void)
+{
+ tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
+}
+
+module_init(skbmod_init_module);
+module_exit(skbmod_cleanup_module);
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
new file mode 100644
index 000000000000..af47bdf2f483
--- /dev/null
+++ b/net/sched/act_tunnel_key.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2016, Amir Vadai <amir@vadai.me>
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/dst.h>
+#include <net/dst_metadata.h>
+
+#include <linux/tc_act/tc_tunnel_key.h>
+#include <net/tc_act/tc_tunnel_key.h>
+
+#define TUNNEL_KEY_TAB_MASK 15
+
+static int tunnel_key_net_id;
+static struct tc_action_ops act_tunnel_key_ops;
+
+static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+ int action;
+
+ rcu_read_lock();
+
+ params = rcu_dereference(t->params);
+
+ tcf_lastuse_update(&t->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
+ action = params->action;
+
+ switch (params->tcft_action) {
+ case TCA_TUNNEL_KEY_ACT_RELEASE:
+ skb_dst_drop(skb);
+ break;
+ case TCA_TUNNEL_KEY_ACT_SET:
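+ /* Replace any existing dst with a reference on the prepared tunnel metadata. */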
+ skb_dst_drop(skb);
+ skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
+ break;
+ default:
+ WARN_ONCE(1, "Bad tunnel_key action %d.\n",
+ params->tcft_action);
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return action;
+}
+
+static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
+ [TCA_TUNNEL_KEY_PARMS] = { .len = sizeof(struct tc_tunnel_key) },
+ [TCA_TUNNEL_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
+ [TCA_TUNNEL_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
+ [TCA_TUNNEL_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
+ [TCA_TUNNEL_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
+ [TCA_TUNNEL_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
+};
+
+static int tunnel_key_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a,
+ int ovr, int bind)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+ struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
+ struct tcf_tunnel_key_params *params_old;
+ struct tcf_tunnel_key_params *params_new;
+ struct metadata_dst *metadata = NULL;
+ struct tc_tunnel_key *parm;
+ struct tcf_tunnel_key *t;
+ bool exists = false;
+ __be64 key_id;
+ int ret = 0;
+ int err;
+
+ if (!nla)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_TUNNEL_KEY_PARMS])
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
+ exists = tcf_hash_check(tn, parm->index, a, bind);
+ if (exists && bind)
+ return 0;
+
+ switch (parm->t_action) {
+ case TCA_TUNNEL_KEY_ACT_RELEASE:
+ break;
+ case TCA_TUNNEL_KEY_ACT_SET:
+ if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));
+
+ if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
+ tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
+ __be32 saddr;
+ __be32 daddr;
+
+ saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
+ daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);
+
+ metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
+ TUNNEL_KEY, key_id, 0);
+ } else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
+ tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+
+ saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
+ daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);
+
+ metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, 0,
+ TUNNEL_KEY, key_id, 0);
+ }
+
+ if (!metadata) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
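+ /* Mark the metadata for use on the transmit side of the tunnel. */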
+ metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
+ break;
+ default:
+ goto err_out;
+ }
+
+ if (!exists) {
+ ret = tcf_hash_create(tn, parm->index, est, a,
+ &act_tunnel_key_ops, bind, true);
+ if (ret)
+ return ret;
+
+ ret = ACT_P_CREATED;
+ } else {
+ tcf_hash_release(*a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+
+ t = to_tunnel_key(*a);
+
+ ASSERT_RTNL();
+ params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+ if (unlikely(!params_new)) {
+ if (ret == ACT_P_CREATED)
+ tcf_hash_release(*a, bind);
+ return -ENOMEM;
+ }
+
+ params_old = rtnl_dereference(t->params);
+
+ params_new->action = parm->action;
+ params_new->tcft_action = parm->t_action;
+ params_new->tcft_enc_metadata = metadata;
+
+ rcu_assign_pointer(t->params, params_new);
+
+ if (params_old)
+ kfree_rcu(params_old, rcu);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(tn, *a);
+
+ return ret;
+
+err_out:
+ if (exists)
+ tcf_hash_release(*a, bind);
+ return ret;
+}
+
+static void tunnel_key_release(struct tc_action *a, int bind)
+{
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params, 1);
+
+ /* params may be unset if init failed after the action was created. */
+ if (params) {
+ if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
+ dst_release(&params->tcft_enc_metadata->dst);
+
+ kfree_rcu(params, rcu);
+ }
+}
+
+static int tunnel_key_dump_addresses(struct sk_buff *skb,
+ const struct ip_tunnel_info *info)
+{
+ unsigned short family = ip_tunnel_info_af(info);
+
+ if (family == AF_INET) {
+ __be32 saddr = info->key.u.ipv4.src;
+ __be32 daddr = info->key.u.ipv4.dst;
+
+ if (!nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_SRC, saddr) &&
+ !nla_put_in_addr(skb, TCA_TUNNEL_KEY_ENC_IPV4_DST, daddr))
+ return 0;
+ }
+
+ if (family == AF_INET6) {
+ const struct in6_addr *saddr6 = &info->key.u.ipv6.src;
+ const struct in6_addr *daddr6 = &info->key.u.ipv6.dst;
+
+ if (!nla_put_in6_addr(skb,
+ TCA_TUNNEL_KEY_ENC_IPV6_SRC, saddr6) &&
+ !nla_put_in6_addr(skb,
+ TCA_TUNNEL_KEY_ENC_IPV6_DST, daddr6))
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_tunnel_key *t = to_tunnel_key(a);
+ struct tcf_tunnel_key_params *params;
+ struct tc_tunnel_key opt = {
+ .index = t->tcf_index,
+ .refcnt = t->tcf_refcnt - ref,
+ .bindcnt = t->tcf_bindcnt - bind,
+ };
+ struct tcf_t tm;
+
+ params = rtnl_dereference(t->params);
+
+ opt.t_action = params->tcft_action;
+ opt.action = params->action;
+
+ if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) {
+ struct ip_tunnel_key *key =
+ &params->tcft_enc_metadata->u.tun_info.key;
+ __be32 key_id = tunnel_id_to_key32(key->tun_id);
+
+ if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
+ tunnel_key_dump_addresses(skb,
+ &params->tcft_enc_metadata->u.tun_info))
+ goto nla_put_failure;
+ }
+
+ tcf_tm_dump(&tm, &t->tcf_tm);
+ if (nla_put_64bit(skb, TCA_TUNNEL_KEY_TM, sizeof(tm),
+ &tm, TCA_TUNNEL_KEY_PAD))
+ goto nla_put_failure;
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static int tunnel_key_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_tunnel_key_ops = {
+ .kind = "tunnel_key",
+ .type = TCA_ACT_TUNNEL_KEY,
+ .owner = THIS_MODULE,
+ .act = tunnel_key_act,
+ .dump = tunnel_key_dump,
+ .init = tunnel_key_init,
+ .cleanup = tunnel_key_release,
+ .walk = tunnel_key_walker,
+ .lookup = tunnel_key_search,
+ .size = sizeof(struct tcf_tunnel_key),
+};
+
+static __net_init int tunnel_key_init_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ return tc_action_net_init(tn, &act_tunnel_key_ops, TUNNEL_KEY_TAB_MASK);
+}
+
+static void __net_exit tunnel_key_exit_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
+
+ tc_action_net_exit(tn);
+}
+
+static struct pernet_operations tunnel_key_net_ops = {
+ .init = tunnel_key_init_net,
+ .exit = tunnel_key_exit_net,
+ .id = &tunnel_key_net_id,
+ .size = sizeof(struct tc_action_net),
+};
+
+static int __init tunnel_key_init_module(void)
+{
+ return tcf_register_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
+}
+
+static void __exit tunnel_key_cleanup_module(void)
+{
+ tcf_unregister_action(&act_tunnel_key_ops, &tunnel_key_net_ops);
+}
+
+module_init(tunnel_key_init_module);
+module_exit(tunnel_key_cleanup_module);
+
+MODULE_AUTHOR("Amir Vadai <amir@vadai.me>");
+MODULE_DESCRIPTION("ip tunnel manipulation actions");
+MODULE_LICENSE("GPL v2");
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 59a8d3150ae2..a95c00b119da 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -30,6 +30,7 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
struct tcf_vlan *v = to_vlan(a);
int action;
int err;
+ u16 tci;
spin_lock(&v->tcf_lock);
tcf_lastuse_update(&v->tcf_tm);
@@ -48,6 +49,30 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
if (err)
goto drop;
break;
+ case TCA_VLAN_ACT_MODIFY:
+ /* No-op if no vlan tag (either hw-accel or in-payload) */
+ if (!skb_vlan_tagged(skb))
+ goto unlock;
+ /* extract existing tag (and guarantee no hw-accel tag) */
+ if (skb_vlan_tag_present(skb)) {
+ tci = skb_vlan_tag_get(skb);
+ skb->vlan_tci = 0;
+ } else {
+ /* in-payload vlan tag, pop it */
+ err = __skb_vlan_pop(skb, &tci);
+ if (err)
+ goto drop;
+ }
+ /* replace the vid */
+ tci = (tci & ~VLAN_VID_MASK) | v->tcfv_push_vid;
+ /* replace prio bits, if tcfv_push_prio specified */
+ if (v->tcfv_push_prio) {
+ tci &= ~VLAN_PRIO_MASK;
+ tci |= v->tcfv_push_prio << VLAN_PRIO_SHIFT;
+ }
+ /* put updated tci as hwaccel tag */
+ __vlan_hwaccel_put_tag(skb, v->tcfv_push_proto, tci);
+ break;
default:
BUG();
}
@@ -102,6 +127,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
case TCA_VLAN_ACT_POP:
break;
case TCA_VLAN_ACT_PUSH:
+ case TCA_VLAN_ACT_MODIFY:
if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
if (exists)
tcf_hash_release(*a, bind);
@@ -185,7 +211,8 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
- if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
+ if ((v->tcfv_action == TCA_VLAN_ACT_PUSH ||
+ v->tcfv_action == TCA_VLAN_ACT_MODIFY) &&
(nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
v->tcfv_push_proto) ||
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a7c5645373af..11da7da0b7c4 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -344,13 +344,15 @@ replay:
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);
- tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+ tfilter_notify(net, skb, n, tp, fh,
+ RTM_DELTFILTER);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
}
goto errout;
case RTM_GETTFILTER:
- err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
+ err = tfilter_notify(net, skb, n, tp, fh,
+ RTM_NEWTFILTER);
goto errout;
default:
err = -EINVAL;
@@ -448,7 +450,8 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
struct net *net = sock_net(a->skb->sk);
return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
- a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
+ a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWTFILTER);
}
/* called with RTNL */
@@ -552,7 +555,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
EXPORT_SYMBOL(tcf_exts_destroy);
int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
- struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
+ struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
{
@@ -560,8 +563,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
if (exts->police && tb[exts->police]) {
act = tcf_action_init_1(net, tb[exts->police], rate_tlv,
- "police", ovr,
- TCA_ACT_BIND);
+ "police", ovr, TCA_ACT_BIND);
if (IS_ERR(act))
return PTR_ERR(act);
@@ -573,8 +575,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
int err, i = 0;
err = tcf_action_init(net, tb[exts->action], rate_tlv,
- NULL, ovr,
- TCA_ACT_BIND, &actions);
+ NULL, ovr, TCA_ACT_BIND,
+ &actions);
if (err)
return err;
list_for_each_entry(act, &actions, list)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 4742f415ee5b..bb1d5a487081 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -27,6 +27,8 @@ MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");
#define CLS_BPF_NAME_LEN 256
+#define CLS_BPF_SUPPORTED_GEN_FLAGS \
+ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)
struct cls_bpf_head {
struct list_head plist;
@@ -39,6 +41,8 @@ struct cls_bpf_prog {
struct list_head link;
struct tcf_result res;
bool exts_integrated;
+ bool offloaded;
+ u32 gen_flags;
struct tcf_exts exts;
u32 handle;
union {
@@ -54,8 +58,10 @@ struct cls_bpf_prog {
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
[TCA_BPF_CLASSID] = { .type = NLA_U32 },
[TCA_BPF_FLAGS] = { .type = NLA_U32 },
+ [TCA_BPF_FLAGS_GEN] = { .type = NLA_U32 },
[TCA_BPF_FD] = { .type = NLA_U32 },
- [TCA_BPF_NAME] = { .type = NLA_NUL_STRING, .len = CLS_BPF_NAME_LEN },
+ [TCA_BPF_NAME] = { .type = NLA_NUL_STRING,
+ .len = CLS_BPF_NAME_LEN },
[TCA_BPF_OPS_LEN] = { .type = NLA_U16 },
[TCA_BPF_OPS] = { .type = NLA_BINARY,
.len = sizeof(struct sock_filter) * BPF_MAXINSNS },
@@ -83,9 +89,6 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_bpf_prog *prog;
int ret = -1;
- if (unlikely(!skb_mac_header_was_set(skb)))
- return -1;
-
/* Needed here for accessing maps. */
rcu_read_lock();
list_for_each_entry_rcu(prog, &head->plist, link) {
@@ -93,7 +96,9 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
qdisc_skb_cb(skb)->tc_classid = prog->res.classid;
- if (at_ingress) {
+ if (tc_skip_sw(prog->gen_flags)) {
+ filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
+ } else if (at_ingress) {
/* It is safe to push/pull even if skb_shared() */
__skb_push(skb, skb->mac_len);
bpf_compute_data_end(skb);
@@ -140,6 +145,91 @@ static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
return !prog->bpf_ops;
}
+static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+ enum tc_clsbpf_command cmd)
+{
+ struct net_device *dev = tp->q->dev_queue->dev;
+ struct tc_cls_bpf_offload bpf_offload = {};
+ struct tc_to_netdev offload;
+
+ offload.type = TC_SETUP_CLSBPF;
+ offload.cls_bpf = &bpf_offload;
+
+ bpf_offload.command = cmd;
+ bpf_offload.exts = &prog->exts;
+ bpf_offload.prog = prog->filter;
+ bpf_offload.name = prog->bpf_name;
+ bpf_offload.exts_integrated = prog->exts_integrated;
+ bpf_offload.gen_flags = prog->gen_flags;
+
+ return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+ tp->protocol, &offload);
+}
+
+static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
+ struct cls_bpf_prog *oldprog)
+{
+ struct net_device *dev = tp->q->dev_queue->dev;
+ struct cls_bpf_prog *obj = prog;
+ enum tc_clsbpf_command cmd;
+ bool skip_sw;
+ int ret;
+
+ skip_sw = tc_skip_sw(prog->gen_flags) ||
+ (oldprog && tc_skip_sw(oldprog->gen_flags));
+
+ if (oldprog && oldprog->offloaded) {
+ if (tc_should_offload(dev, tp, prog->gen_flags)) {
+ cmd = TC_CLSBPF_REPLACE;
+ } else if (!tc_skip_sw(prog->gen_flags)) {
+ obj = oldprog;
+ cmd = TC_CLSBPF_DESTROY;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ if (!tc_should_offload(dev, tp, prog->gen_flags))
+ return skip_sw ? -EINVAL : 0;
+ cmd = TC_CLSBPF_ADD;
+ }
+
+ ret = cls_bpf_offload_cmd(tp, obj, cmd);
+ if (ret)
+ return skip_sw ? ret : 0;
+
+ obj->offloaded = true;
+ if (oldprog)
+ oldprog->offloaded = false;
+
+ return 0;
+}
+
+static void cls_bpf_stop_offload(struct tcf_proto *tp,
+ struct cls_bpf_prog *prog)
+{
+ int err;
+
+ if (!prog->offloaded)
+ return;
+
+ err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
+ if (err) {
+ pr_err("Stopping hardware offload failed: %d\n", err);
+ return;
+ }
+
+ prog->offloaded = false;
+}
+
+static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
+ struct cls_bpf_prog *prog)
+{
+ if (!prog->offloaded)
+ return;
+
+ cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
+}
+
static int cls_bpf_init(struct tcf_proto *tp)
{
struct cls_bpf_head *head;
@@ -179,6 +269,7 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;
+ cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
@@ -195,6 +286,7 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
return false;
list_for_each_entry_safe(prog, tmp, &head->plist, link) {
+ cls_bpf_stop_offload(tp, prog);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
call_rcu(&prog->rcu, __cls_bpf_delete_prog);
@@ -304,6 +396,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
{
bool is_bpf, is_ebpf, have_exts = false;
struct tcf_exts exts;
+ u32 gen_flags = 0;
int ret;
is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
@@ -328,8 +421,17 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
}
+ if (tb[TCA_BPF_FLAGS_GEN]) {
+ gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
+ if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
+ !tc_flags_valid(gen_flags)) {
+ ret = -EINVAL;
+ goto errout;
+ }
+ }
prog->exts_integrated = have_exts;
+ prog->gen_flags = gen_flags;
ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
cls_bpf_prog_from_efd(tb, prog, tp);
@@ -412,10 +514,17 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
goto errout;
}
- ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
+ ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
+ ovr);
if (ret < 0)
goto errout;
+ ret = cls_bpf_offload(tp, prog, oldprog);
+ if (ret) {
+ cls_bpf_delete_prog(tp, prog);
+ return ret;
+ }
+
if (oldprog) {
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
@@ -477,6 +586,8 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
tm->tcm_handle = prog->handle;
+ cls_bpf_offload_update_stats(tp, prog);
+
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
@@ -499,6 +610,9 @@ static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
goto nla_put_failure;
+ if (prog->gen_flags &&
+ nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
+ goto nla_put_failure;
nla_nest_end(skb, nest);
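
The offload helpers added above turn the generic SKIP_HW/SKIP_SW flags into at most one ndo_setup_tc() command per replace. A sketch of that decision, under the simplifying assumption that skip_sw already folds in both the old and the new program's flags (clsbpf_pick_cmd is a hypothetical helper):

/* ret > 0: issue *cmd to the driver; ret == 0: software only; ret < 0: error. */
static int clsbpf_pick_cmd(bool old_offloaded, bool can_offload,
			   bool skip_sw, enum tc_clsbpf_command *cmd)
{
	if (old_offloaded) {
		if (can_offload) {
			*cmd = TC_CLSBPF_REPLACE;	/* swap programs in HW */
			return 1;
		}
		if (skip_sw)
			return -EINVAL;		/* HW-only, but HW went away */
		*cmd = TC_CLSBPF_DESTROY;	/* drop old HW prog, run in SW */
		return 1;
	}
	if (!can_offload)
		return skip_sw ? -EINVAL : 0;	/* SW fallback unless forbidden */
	*cmd = TC_CLSBPF_ADD;			/* first-time offload */
	return 1;
}

When the command itself fails, cls_bpf_offload() likewise swallows the error unless skip_sw is set, since a software copy of the program still exists.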
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 2c1ae549edbf..e39672394c7b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -29,7 +29,7 @@
#include <net/route.h>
#include <net/flow_dissector.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
@@ -87,12 +87,14 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}
-static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
return flow->basic.ip_proto;
}
-static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto_src(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
if (flow->ports.ports)
return ntohs(flow->ports.src);
@@ -100,7 +102,8 @@ static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys
return addr_fold(skb->sk);
}
-static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_proto_dst(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
if (flow->ports.ports)
return ntohs(flow->ports.dst);
@@ -125,14 +128,14 @@ static u32 flow_get_mark(const struct sk_buff *skb)
static u32 flow_get_nfct(const struct sk_buff *skb)
{
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
return addr_fold(skb->nfct);
#else
return 0;
#endif
}
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member) \
({ \
enum ip_conntrack_info ctinfo; \
@@ -149,7 +152,8 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
})
#endif
-static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_src(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
@@ -161,7 +165,8 @@ fallback:
return flow_get_src(skb, flow);
}
-static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
switch (tc_skb_protocol(skb)) {
case htons(ETH_P_IP):
@@ -173,14 +178,16 @@ fallback:
return flow_get_dst(skb, flow);
}
-static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
return ntohs(CTTUPLE(skb, src.u.all));
fallback:
return flow_get_proto_src(skb, flow);
}
-static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
+ const struct flow_keys *flow)
{
return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
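
The conntrack guards in this file are also modernized: IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> is true for both built-in (=y) and modular (=m) options, so it replaces the two-clause defined() test verbatim:

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* old spelling */
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* equivalent, preferred spelling */
#endif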
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index cf9ad5b50889..2af09c872a1a 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -23,18 +23,26 @@
#include <net/ip.h>
#include <net/flow_dissector.h>
+#include <net/dst.h>
+#include <net/dst_metadata.h>
+
struct fl_flow_key {
int indev_ifindex;
struct flow_dissector_key_control control;
+ struct flow_dissector_key_control enc_control;
struct flow_dissector_key_basic basic;
struct flow_dissector_key_eth_addrs eth;
struct flow_dissector_key_vlan vlan;
- struct flow_dissector_key_addrs ipaddrs;
union {
struct flow_dissector_key_ipv4_addrs ipv4;
struct flow_dissector_key_ipv6_addrs ipv6;
};
struct flow_dissector_key_ports tp;
+ struct flow_dissector_key_keyid enc_key_id;
+ union {
+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
+ };
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -124,11 +132,31 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_fl_filter *f;
struct fl_flow_key skb_key;
struct fl_flow_key skb_mkey;
+ struct ip_tunnel_info *info;
if (!atomic_read(&head->ht.nelems))
return -1;
fl_clear_masked_range(&skb_key, &head->mask);
+
+ info = skb_tunnel_info(skb);
+ if (info) {
+ struct ip_tunnel_key *key = &info->key;
+
+ switch (ip_tunnel_info_af(info)) {
+ case AF_INET:
+ skb_key.enc_ipv4.src = key->u.ipv4.src;
+ skb_key.enc_ipv4.dst = key->u.ipv4.dst;
+ break;
+ case AF_INET6:
+ skb_key.enc_ipv6.src = key->u.ipv6.src;
+ skb_key.enc_ipv6.dst = key->u.ipv6.dst;
+ break;
+ }
+
+ skb_key.enc_key_id.keyid = tunnel_id_to_key32(key->tun_id);
+ }
+
skb_key.indev_ifindex = skb->skb_iif;
/* skb_flow_dissect() does not set n_proto in case of an unknown protocol,
* so do it here instead.
@@ -213,7 +241,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
tc.type = TC_SETUP_CLSFLOWER;
tc.cls_flower = &offload;
- err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc);
+ err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+ &tc);
if (tc_skip_sw(flags))
return err;
@@ -297,7 +326,19 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
-
+ [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
+ [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
+ [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -395,20 +436,54 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
if (key->basic.ip_proto == IPPROTO_TCP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
sizeof(key->tp.src));
fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
sizeof(key->tp.dst));
} else if (key->basic.ip_proto == IPPROTO_UDP) {
fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
sizeof(key->tp.src));
fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
sizeof(key->tp.dst));
}
+ if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
+ tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
+ key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ fl_set_key_val(tb, &key->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC,
+ &mask->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
+ sizeof(key->enc_ipv4.src));
+ fl_set_key_val(tb, &key->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST,
+ &mask->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
+ sizeof(key->enc_ipv4.dst));
+ }
+
+ if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
+ tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
+ key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ fl_set_key_val(tb, &key->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC,
+ &mask->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
+ sizeof(key->enc_ipv6.src));
+ fl_set_key_val(tb, &key->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST,
+ &mask->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
+ sizeof(key->enc_ipv6.dst));
+ }
+
+ fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
+ &mask->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
+ sizeof(key->enc_key_id.keyid));
+
return 0;
}
@@ -806,21 +881,48 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (key->basic.ip_proto == IPPROTO_TCP &&
(fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
sizeof(key->tp.src)) ||
fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
sizeof(key->tp.dst))))
goto nla_put_failure;
else if (key->basic.ip_proto == IPPROTO_UDP &&
(fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
- &mask->tp.src, TCA_FLOWER_UNSPEC,
+ &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
sizeof(key->tp.src)) ||
fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
- &mask->tp.dst, TCA_FLOWER_UNSPEC,
+ &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
sizeof(key->tp.dst))))
goto nla_put_failure;
+ if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
+ (fl_dump_key_val(skb, &key->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
+ sizeof(key->enc_ipv4.src)) ||
+ fl_dump_key_val(skb, &key->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
+ sizeof(key->enc_ipv4.dst))))
+ goto nla_put_failure;
+ else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
+ (fl_dump_key_val(skb, &key->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
+ sizeof(key->enc_ipv6.src)) ||
+ fl_dump_key_val(skb, &key->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST,
+ &mask->enc_ipv6.dst,
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
+ sizeof(key->enc_ipv6.dst))))
+ goto nla_put_failure;
+
+ if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
+ &mask->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
+ sizeof(key->enc_key_id)))
+ goto nla_put_failure;
+
nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
if (tcf_exts_dump(skb, &f->exts))
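
Every flower key above arrives as a value/mask attribute pair. A simplified sketch of what fl_set_key_val() does with such a pair (close to, but not verbatim, the in-tree helper): an absent value means the key is not matched at all, an absent mask means an exact match:

static void set_key_val(struct nlattr **tb, void *val, int val_type,
			void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;				/* key not requested */
	memcpy(val, nla_data(tb[val_type]), len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);	/* no mask attr: exact match */
	else
		memcpy(mask, nla_data(tb[mask_type]), len);
}

This is why adding TCA_FLOWER_KEY_TCP_SRC_MASK and friends above stays backward compatible: old userspace that omits the mask keeps the previous exact-match behaviour.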
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index cc0bda945800..9dc63d54e167 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -57,7 +57,7 @@ static u32 fw_hash(u32 handle)
}
static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res)
+ struct tcf_result *res)
{
struct fw_head *head = rcu_dereference_bh(tp->root);
struct fw_filter *f;
@@ -188,7 +188,8 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
static int
fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
- struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
+ struct nlattr **tb, struct nlattr **tca, unsigned long base,
+ bool ovr)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct tcf_exts e;
@@ -237,9 +238,8 @@ errout:
static int fw_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
- u32 handle,
- struct nlattr **tca,
- unsigned long *arg, bool ovr)
+ u32 handle, struct nlattr **tca, unsigned long *arg,
+ bool ovr)
{
struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *) *arg;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c91e65d81a48..455fc8f83d0a 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -268,8 +268,7 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static void
-route4_delete_filter(struct rcu_head *head)
+static void route4_delete_filter(struct rcu_head *head)
{
struct route4_filter *f = container_of(head, struct route4_filter, rcu);
@@ -474,10 +473,8 @@ errout:
}
static int route4_change(struct net *net, struct sk_buff *in_skb,
- struct tcf_proto *tp, unsigned long base,
- u32 handle,
- struct nlattr **tca,
- unsigned long *arg, bool ovr)
+ struct tcf_proto *tp, unsigned long base, u32 handle,
+ struct nlattr **tca, unsigned long *arg, bool ovr)
{
struct route4_head *head = rtnl_dereference(tp->root);
struct route4_filter __rcu **fp;
@@ -562,7 +559,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
return 0;
errout:
- tcf_exts_destroy(&f->exts);
+ if (f)
+ tcf_exts_destroy(&f->exts);
kfree(f);
return err;
}
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index d9500709831f..96144bdf30db 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -50,14 +50,13 @@ struct tcindex_data {
struct rcu_head rcu;
};
-static inline int
-tcindex_filter_is_set(struct tcindex_filter_result *r)
+static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
{
return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}
-static struct tcindex_filter_result *
-tcindex_lookup(struct tcindex_data *p, u16 key)
+static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
+ u16 key)
{
if (p->perfect) {
struct tcindex_filter_result *f = p->perfect + key;
@@ -144,7 +143,8 @@ static void tcindex_destroy_rexts(struct rcu_head *head)
static void tcindex_destroy_fexts(struct rcu_head *head)
{
- struct tcindex_filter *f = container_of(head, struct tcindex_filter, rcu);
+ struct tcindex_filter *f = container_of(head, struct tcindex_filter,
+ rcu);
tcf_exts_destroy(&f->result.exts);
kfree(f);
@@ -550,7 +550,7 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index a29263a9d8c1..ae83c3aec308 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -104,7 +104,8 @@ static inline unsigned int u32_hash_fold(__be32 key,
return h;
}
-static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
+static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
{
struct {
struct tc_u_knode *knode;
@@ -256,8 +257,7 @@ deadloop:
return -1;
}
-static struct tc_u_hnode *
-u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
+static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
@@ -270,8 +270,7 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht;
}
-static struct tc_u_knode *
-u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
+static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
unsigned int sel;
struct tc_u_knode *n = NULL;
@@ -360,8 +359,7 @@ static int u32_init(struct tcf_proto *tp)
return 0;
}
-static int u32_destroy_key(struct tcf_proto *tp,
- struct tc_u_knode *n,
+static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
bool free_pf)
{
tcf_exts_destroy(&n->exts);
@@ -448,9 +446,8 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
}
}
-static int u32_replace_hw_hnode(struct tcf_proto *tp,
- struct tc_u_hnode *h,
- u32 flags)
+static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
+ u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_u32_offload u32_offload = {0};
@@ -496,9 +493,8 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h)
}
}
-static int u32_replace_hw_knode(struct tcf_proto *tp,
- struct tc_u_knode *n,
- u32 flags)
+static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
+ u32 flags)
{
struct net_device *dev = tp->q->dev_queue->dev;
struct tc_cls_u32_offload u32_offload = {0};
@@ -763,8 +759,7 @@ errout:
return err;
}
-static void u32_replace_knode(struct tcf_proto *tp,
- struct tc_u_common *tp_c,
+static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
struct tc_u_knode *n)
{
struct tc_u_knode __rcu **ins;
@@ -845,8 +840,7 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
- struct nlattr **tca,
- unsigned long *arg, bool ovr)
+ struct nlattr **tca, unsigned long *arg, bool ovr)
{
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
@@ -1088,7 +1082,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
- struct sk_buff *skb, struct tcmsg *t)
+ struct sk_buff *skb, struct tcmsg *t)
{
struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct tc_u_hnode *ht_up, *ht_down;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index d677b3484d81..206dc24add3a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -389,7 +389,8 @@ static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
static struct qdisc_rate_table *qdisc_rtab_list;
-struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab)
{
struct qdisc_rate_table *rtab;
@@ -541,7 +542,8 @@ nla_put_failure:
return -1;
}
-void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab)
{
int pkt_len, slot;
@@ -888,10 +890,10 @@ static struct lock_class_key qdisc_rx_lock;
Parameters are passed via opt.
*/
-static struct Qdisc *
-qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
- struct Qdisc *p, u32 parent, u32 handle,
- struct nlattr **tca, int *errp)
+static struct Qdisc *qdisc_create(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ struct Qdisc *p, u32 parent, u32 handle,
+ struct nlattr **tca, int *errp)
{
int err;
struct nlattr *kind = tca[TCA_KIND];
@@ -1073,7 +1075,8 @@ struct check_loop_arg {
int depth;
};
-static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
+static int check_loop_fn(struct Qdisc *q, unsigned long cl,
+ struct qdisc_walker *w);
static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
@@ -1450,7 +1453,8 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
} else {
if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWQDISC) <= 0)
goto done;
q_idx++;
}
@@ -1471,7 +1475,8 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
}
if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWQDISC) <= 0)
goto done;
q_idx++;
}
@@ -1505,7 +1510,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, true) < 0)
+ if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+ true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
@@ -1640,7 +1646,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
if (cops->delete)
err = cops->delete(q, cl);
if (err == 0)
- tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
+ tclass_notify(net, skb, n, q, cl,
+ RTM_DELTCLASS);
goto out;
case RTM_GETTCLASS:
err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
@@ -1738,12 +1745,14 @@ struct qdisc_dump_args {
struct netlink_callback *cb;
};
-static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
+static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
+ struct qdisc_walker *arg)
{
struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
- a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
+ a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ RTM_NEWTCLASS);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
@@ -1976,10 +1985,12 @@ static int __init pktsched_init(void)
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
- rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
+ NULL);
rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
- rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
+ NULL);
return 0;
}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 4002df3c7d9f..5bfa79ee657c 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -69,7 +69,7 @@ struct codel_sched_data {
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
struct Qdisc *sch = ctx;
- struct sk_buff *skb = __skb_dequeue(&sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
if (skb)
sch->qstats.backlog -= qdisc_pkt_len(skb);
@@ -172,7 +172,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = __skb_dequeue(&sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index baeed6a78d28..1e37247656f8 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -31,7 +31,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
- if (likely(skb_queue_len(&sch->q) < sch->limit))
+ if (likely(sch->q.qlen < sch->limit))
return qdisc_enqueue_tail(skb, sch);
return qdisc_drop(skb, sch, to_free);
@@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
unsigned int prev_backlog;
- if (likely(skb_queue_len(&sch->q) < sch->limit))
+ if (likely(sch->q.qlen < sch->limit))
return qdisc_enqueue_tail(skb, sch);
prev_backlog = sch->qstats.backlog;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index e5458b99e09c..18e752439f6f 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -86,6 +86,7 @@ struct fq_sched_data {
struct rb_root delayed; /* for rate limited flows */
u64 time_next_delayed_flow;
+ unsigned long unthrottle_latency_ns;
struct fq_flow internal; /* for non classified or high prio packets */
u32 quantum;
@@ -94,6 +95,7 @@ struct fq_sched_data {
u32 flow_max_rate; /* optional max rate per flow */
u32 flow_plimit; /* max packets per flow */
u32 orphan_mask; /* mask for orphaned skb */
+ u32 low_rate_threshold;
struct rb_root *fq_root;
u8 rate_enable;
u8 fq_trees_log;
@@ -407,11 +409,19 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
+ unsigned long sample;
struct rb_node *p;
if (q->time_next_delayed_flow > now)
return;
+ /* Update unthrottle latency EWMA.
+ * This is cheap and can help diagnose timer/latency problems.
+ */
+ sample = (unsigned long)(now - q->time_next_delayed_flow);
+ q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
+ q->unthrottle_latency_ns += sample >> 3;
+
q->time_next_delayed_flow = ~0ULL;
while ((p = rb_first(&q->delayed)) != NULL) {
struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
@@ -433,7 +443,7 @@ static struct sk_buff *fq_dequeue(struct Qdisc *sch)
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
- u32 rate;
+ u32 rate, plen;
skb = fq_dequeue_head(sch, &q->internal);
if (skb)
@@ -482,7 +492,7 @@ begin:
prefetch(&skb->end);
f->credit -= qdisc_pkt_len(skb);
- if (f->credit > 0 || !q->rate_enable)
+ if (!q->rate_enable)
goto out;
/* Do not pace locally generated ack packets */
@@ -493,8 +503,15 @@ begin:
if (skb->sk)
rate = min(skb->sk->sk_pacing_rate, rate);
+ if (rate <= q->low_rate_threshold) {
+ f->credit = 0;
+ plen = qdisc_pkt_len(skb);
+ } else {
+ plen = max(qdisc_pkt_len(skb), q->quantum);
+ if (f->credit > 0)
+ goto out;
+ }
if (rate != ~0U) {
- u32 plen = max(qdisc_pkt_len(skb), q->quantum);
u64 len = (u64)plen * NSEC_PER_SEC;
if (likely(rate))
@@ -507,7 +524,12 @@ begin:
len = NSEC_PER_SEC;
q->stat_pkts_too_long++;
}
-
+ /* Account for scheduler/timer drift.
+ * f->time_next_packet was set when prior packet was sent,
+ * and current time (@now) can be too late by tens of us.
+ */
+ if (f->time_next_packet)
+ len -= min(len/2, now - f->time_next_packet);
f->time_next_packet = now + len;
}
out:
@@ -662,6 +684,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
+ [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt)
@@ -716,6 +739,10 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_FQ_FLOW_MAX_RATE])
q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+ if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
+ q->low_rate_threshold =
+ nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);
+
if (tb[TCA_FQ_RATE_ENABLE]) {
u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
@@ -774,6 +801,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
q->flow_refill_delay = msecs_to_jiffies(40);
q->flow_max_rate = ~0U;
+ q->time_next_delayed_flow = ~0ULL;
q->rate_enable = 1;
q->new_flows.first = NULL;
q->old_flows.first = NULL;
@@ -781,6 +809,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
q->fq_root = NULL;
q->fq_trees_log = ilog2(1024);
q->orphan_mask = 1024 - 1;
+ q->low_rate_threshold = 550000 / 8;
qdisc_watchdog_init(&q->watchdog, sch);
if (opt)
@@ -811,6 +840,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
jiffies_to_usecs(q->flow_refill_delay)) ||
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
+ nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
+ q->low_rate_threshold) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
@@ -823,20 +854,24 @@ nla_put_failure:
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_get_ns();
- struct tc_fq_qd_stats st = {
- .gc_flows = q->stat_gc_flows,
- .highprio_packets = q->stat_internal_packets,
- .tcp_retrans = q->stat_tcp_retrans,
- .throttled = q->stat_throttled,
- .flows_plimit = q->stat_flows_plimit,
- .pkts_too_long = q->stat_pkts_too_long,
- .allocation_errors = q->stat_allocation_errors,
- .flows = q->flows,
- .inactive_flows = q->inactive_flows,
- .throttled_flows = q->throttled_flows,
- .time_next_delayed_flow = q->time_next_delayed_flow - now,
- };
+ struct tc_fq_qd_stats st;
+
+ sch_tree_lock(sch);
+
+ st.gc_flows = q->stat_gc_flows;
+ st.highprio_packets = q->stat_internal_packets;
+ st.tcp_retrans = q->stat_tcp_retrans;
+ st.throttled = q->stat_throttled;
+ st.flows_plimit = q->stat_flows_plimit;
+ st.pkts_too_long = q->stat_pkts_too_long;
+ st.allocation_errors = q->stat_allocation_errors;
+ st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+ st.flows = q->flows;
+ st.inactive_flows = q->inactive_flows;
+ st.throttled_flows = q->throttled_flows;
+ st.unthrottle_latency_ns = min_t(unsigned long,
+ q->unthrottle_latency_ns, ~0U);
+ sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
}
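
The unthrottle_latency_ns estimator added above is an exponentially weighted moving average with weight 1/8, maintained with two shifts instead of a division. A minimal sketch plus a worked step:

/* avg <- avg - avg/8 + sample/8 == 7/8 * avg + 1/8 * sample */
static unsigned long ewma8(unsigned long avg, unsigned long sample)
{
	avg -= avg >> 3;
	avg += sample >> 3;
	return avg;
}
/* e.g. avg = 8000 ns, sample = 16000 ns -> 8000 - 1000 + 2000 = 9000 ns */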
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 0d21b567ff27..6cfb6e9038c2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -466,7 +466,7 @@ static const u8 prio2band[TC_PRIO_MAX + 1] = {
*/
struct pfifo_fast_priv {
u32 bitmap;
- struct sk_buff_head q[PFIFO_FAST_BANDS];
+ struct qdisc_skb_head q[PFIFO_FAST_BANDS];
};
/*
@@ -477,7 +477,7 @@ struct pfifo_fast_priv {
*/
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
-static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
int band)
{
return priv->q + band;
@@ -486,10 +486,10 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
- if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+ if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
- struct sk_buff_head *list = band2list(priv, band);
+ struct qdisc_skb_head *list = band2list(priv, band);
priv->bitmap |= (1 << band);
qdisc->q.qlen++;
@@ -505,11 +505,16 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
int band = bitmap2band[priv->bitmap];
if (likely(band >= 0)) {
- struct sk_buff_head *list = band2list(priv, band);
- struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+ struct qdisc_skb_head *qh = band2list(priv, band);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(qdisc, skb);
+ qdisc_bstats_update(qdisc, skb);
+ }
qdisc->q.qlen--;
- if (skb_queue_empty(list))
+ if (qh->qlen == 0)
priv->bitmap &= ~(1 << band);
return skb;
@@ -524,9 +529,9 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
int band = bitmap2band[priv->bitmap];
if (band >= 0) {
- struct sk_buff_head *list = band2list(priv, band);
+ struct qdisc_skb_head *qh = band2list(priv, band);
- return skb_peek(list);
+ return qh->head;
}
return NULL;
@@ -564,7 +569,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- __skb_queue_head_init(band2list(priv, prio));
+ qdisc_skb_head_init(band2list(priv, prio));
/* Can by-pass the queue discipline */
qdisc->flags |= TCQ_F_CAN_BYPASS;
@@ -612,7 +617,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
}
- skb_queue_head_init(&sch->q);
+ qdisc_skb_head_init(&sch->q);
+ spin_lock_init(&sch->q.lock);
spin_lock_init(&sch->busylock);
lockdep_set_class(&sch->busylock,
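
From here on the qdisc-private queue is a qdisc_skb_head: a singly linked list with head/tail pointers and an explicit qlen, one pointer smaller per skb than sk_buff_head. A sketch of head dequeue against the assumed layout (the in-tree helper may differ in detail):

/* Assumed: struct qdisc_skb_head { struct sk_buff *head, *tail; __u32 qlen; spinlock_t lock; }; */
static struct sk_buff *skb_head_dequeue(struct qdisc_skb_head *qh)
{
	struct sk_buff *skb = qh->head;

	if (!skb)
		return NULL;
	qh->head = skb->next;		/* unlink from the front */
	if (!qh->head)
		qh->tail = NULL;	/* list became empty */
	skb->next = NULL;
	qh->qlen--;
	return skb;
}

Tail enqueue is the mirror image; htb_enqueue_tail() further down in this patch spells it out.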
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 53dbfa187870..c798d0de8a9d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -162,7 +162,7 @@ struct htb_sched {
struct work_struct work;
/* non shaped skbs; let them go directly thru */
- struct sk_buff_head direct_queue;
+ struct qdisc_skb_head direct_queue;
long direct_pkts;
struct qdisc_watchdog watchdog;
@@ -570,6 +570,22 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
list_del_init(&cl->un.leaf.drop_list);
}
+static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+ struct qdisc_skb_head *qh)
+{
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
+}
+
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -580,7 +596,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (cl == HTB_DIRECT) {
/* enqueue to helper queue */
if (q->direct_queue.qlen < q->direct_qlen) {
- __skb_queue_tail(&q->direct_queue, skb);
+ htb_enqueue_tail(skb, sch, &q->direct_queue);
q->direct_pkts++;
} else {
return qdisc_drop(skb, sch, to_free);
@@ -888,7 +904,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
unsigned long start_at;
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
- skb = __skb_dequeue(&q->direct_queue);
+ skb = __qdisc_dequeue_head(&q->direct_queue);
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
@@ -1019,7 +1035,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
INIT_WORK(&q->work, htb_work_func);
- __skb_queue_head_init(&q->direct_queue);
+ qdisc_skb_head_init(&q->direct_queue);
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aaaf02175338..9f7b380cf0a3 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -413,6 +413,16 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
return segs;
}
+static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
+{
+ skb->next = qh->head;
+
+ if (!qh->head)
+ qh->tail = skb;
+ qh->head = skb;
+ qh->qlen++;
+}
+
/*
* Insert one skb into qdisc.
* Note: parent depends on return value to account for queue length.
@@ -502,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<(prandom_u32() % 8);
}
- if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+ if (unlikely(sch->q.qlen >= sch->limit))
return qdisc_drop(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
@@ -522,8 +532,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (q->rate) {
struct sk_buff *last;
- if (!skb_queue_empty(&sch->q))
- last = skb_peek_tail(&sch->q);
+ if (sch->q.qlen)
+ last = sch->q.tail;
else
last = netem_rb_to_skb(rb_last(&q->t_root));
if (last) {
@@ -552,7 +562,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
cb->time_to_send = psched_get_time();
q->counter = 0;
- __skb_queue_head(&sch->q, skb);
+ netem_enqueue_skb_head(&sch->q, skb);
sch->qstats.requeues++;
}
@@ -587,7 +597,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
struct rb_node *p;
tfifo_dequeue:
- skb = __skb_dequeue(&sch->q);
+ skb = __qdisc_dequeue_head(&sch->q);
if (skb) {
qdisc_qstats_backlog_dec(sch, skb);
deliver:
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a570b0bb254c..5c3a99d6aa82 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -231,7 +231,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
/* Drop excess packets if new limit is lower */
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = __skb_dequeue(&sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
@@ -511,7 +511,7 @@ static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
- skb = __qdisc_dequeue_head(sch, &sch->q);
+ skb = qdisc_dequeue_head(sch);
if (!skb)
return NULL;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1c23060c41a6..f10d3397f917 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
transports) {
if (t->pmtu_pending && t->dst) {
sctp_transport_update_pmtu(sk, t,
- WORD_TRUNC(dst_mtu(t->dst)));
+ SCTP_TRUNC4(dst_mtu(t->dst)));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
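
This rename sweep (WORD_TRUNC to SCTP_TRUNC4, WORD_ROUND to SCTP_PAD4) runs through the rest of the SCTP changes; both macros round to the 4-byte alignment that SCTP chunk lengths require. They reduce to (assumed, matching the in-tree definitions):

#define SCTP_PAD4(s)	(((s) + 3) & ~3)	/* round up:   13 -> 16, 16 -> 16 */
#define SCTP_TRUNC4(s)	((s) & ~3)		/* round down: 13 -> 12, 16 -> 16 */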
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 912eb1685a5d..f99d4855d3de 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -48,7 +48,7 @@ static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
/* id 2 is reserved as well */
.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2,
},
-#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
+#if IS_ENABLED(CONFIG_CRYPTO_SHA256)
{
.hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
.hmac_name = "hmac(sha256)",
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index a55e54738b81..8afe2e90d003 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -70,6 +70,19 @@ static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
return msg;
}
+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+ struct sctp_chunk *chunk;
+
+ /* This doesn't have to be a _safe variant because
+ * sctp_chunk_free() only drops the refs.
+ */
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_chunk_free(chunk);
+
+ sctp_datamsg_put(msg);
+}
+
/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
@@ -182,9 +195,10 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
/* This is the biggest possible DATA chunk that can fit into
* the packet
*/
- max_data = (asoc->pathmtu -
- sctp_sk(asoc->base.sk)->pf->af->net_header_len -
- sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3;
+ max_data = asoc->pathmtu -
+ sctp_sk(asoc->base.sk)->pf->af->net_header_len -
+ sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+ max_data = SCTP_TRUNC4(max_data);
max = asoc->frag_point;
/* If the peer requested that we authenticate DATA chunks
@@ -195,8 +209,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
if (hmac_desc)
- max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
- hmac_desc->hmac_len);
+ max_data -= SCTP_PAD4(sizeof(sctp_auth_chunk_t) +
+ hmac_desc->hmac_len);
}
/* Now, check if we need to reduce our max */
@@ -216,7 +230,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
asoc->outqueue.out_qlen == 0 &&
list_empty(&asoc->outqueue.retransmit) &&
msg_len > max)
- max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));
+ max_data -= SCTP_PAD4(sizeof(sctp_sack_chunk_t));
/* Encourage Cookie-ECHO bundling. */
if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 69444d32ecda..a2ea1d1cc06a 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -605,7 +605,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
/* PMTU discovery (RFC1191) */
if (ICMP_FRAG_NEEDED == code) {
sctp_icmp_frag_needed(sk, asoc, transport,
- WORD_TRUNC(info));
+ SCTP_TRUNC4(info));
goto out_unlock;
} else {
if (ICMP_PROT_UNREACH == code) {
@@ -673,7 +673,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
break;
- ch_end = offset + WORD_ROUND(ntohs(ch->length));
+ ch_end = offset + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb->len)
break;
@@ -796,27 +796,34 @@ struct sctp_hash_cmp_arg {
static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
const void *ptr)
{
+ struct sctp_transport *t = (struct sctp_transport *)ptr;
const struct sctp_hash_cmp_arg *x = arg->key;
- const struct sctp_transport *t = ptr;
- struct sctp_association *asoc = t->asoc;
- const struct net *net = x->net;
+ struct sctp_association *asoc;
+ int err = 1;
if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
- return 1;
- if (!net_eq(sock_net(asoc->base.sk), net))
- return 1;
+ return err;
+ if (!sctp_transport_hold(t))
+ return err;
+
+ asoc = t->asoc;
+ if (!net_eq(sock_net(asoc->base.sk), x->net))
+ goto out;
if (x->ep) {
if (x->ep != asoc->ep)
- return 1;
+ goto out;
} else {
if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
- return 1;
+ goto out;
if (!sctp_bind_addr_match(&asoc->base.bind_addr,
x->laddr, sctp_sk(asoc->base.sk)))
- return 1;
+ goto out;
}
- return 0;
+ err = 0;
+out:
+ sctp_transport_put(t);
+ return err;
}
static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
@@ -1121,7 +1128,7 @@ static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
break;
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
break;
@@ -1190,7 +1197,7 @@ static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
* that the chunk length doesn't cause overflow. Otherwise, we'll
* walk off the end.
*/
- if (WORD_ROUND(ntohs(ch->length)) > skb->len)
+ if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
return NULL;
/* If this is INIT/INIT-ACK look inside the chunk too. */
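
sctp_hash_cmp() above now holds a transport reference for the duration of the compare, so a concurrent teardown cannot free t->asoc underneath the rhashtable walk; every mismatch exits through the out: label to drop that reference. The shape of the pattern, with hypothetical names:

/* obj_hold() is refcount_inc_not_zero()-style: false means already dying. */
static int guarded_cmp(struct obj *o, const struct key *k)
{
	int err = 1;			/* rhashtable compare: nonzero = no match */

	if (!obj_hold(o))
		return err;		/* object is being freed: never match */
	if (obj_matches(o, k))
		err = 0;
	obj_put(o);			/* drop the temporary reference */
	return err;
}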
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 6437aa97cfd7..f731de3e8428 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -213,7 +213,7 @@ new_skb:
}
chunk->chunk_hdr = ch;
- chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ chunk->chunk_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1f1682b9a6a8..2a5c1896d18f 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -180,7 +180,6 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
int one_packet, gfp_t gfp)
{
sctp_xmit_t retval;
- int error = 0;
pr_debug("%s: packet:%p size:%Zu chunk:%p size:%d\n", __func__,
packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
@@ -188,6 +187,8 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
case SCTP_XMIT_PMTU_FULL:
if (!packet->has_cookie_echo) {
+ int error = 0;
+
error = sctp_packet_transmit(packet, gfp);
if (error < 0)
chunk->skb->sk->sk_err = -error;
@@ -296,7 +297,7 @@ static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
struct sctp_chunk *chunk)
{
sctp_xmit_t retval = SCTP_XMIT_OK;
- __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
+ __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
/* Check to see if this chunk will fit into the packet */
retval = sctp_packet_will_fit(packet, chunk, chunk_len);
@@ -441,14 +442,14 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* time. Application may notice this error.
*/
pr_err_once("Trying to GSO but underlying device doesn't support it.");
- goto nomem;
+ goto err;
}
} else {
pkt_size = packet->size;
}
head = alloc_skb(pkt_size + MAX_HEADER, gfp);
if (!head)
- goto nomem;
+ goto err;
if (gso) {
NAPI_GRO_CB(head)->last = head;
skb_shinfo(head)->gso_type = sk->sk_gso_type;
@@ -469,8 +470,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
}
dst = dst_clone(tp->dst);
- if (!dst)
- goto no_route;
+ if (!dst) {
+ if (asoc)
+ IP_INC_STATS(sock_net(asoc->base.sk),
+ IPSTATS_MIB_OUTNOROUTES);
+ goto nodst;
+ }
skb_dst_set(head, dst);
/* Build the SCTP header. */
@@ -503,7 +508,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (gso) {
pkt_size = packet->overhead;
list_for_each_entry(chunk, &packet->chunk_list, list) {
- int padded = WORD_ROUND(chunk->skb->len);
+ int padded = SCTP_PAD4(chunk->skb->len);
if (pkt_size + padded > tp->pathmtu)
break;
@@ -533,7 +538,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* included in the chunk length field. The sender should
* never pad with more than 3 bytes.
*
- * [This whole comment explains WORD_ROUND() below.]
+ * [This whole comment explains SCTP_PAD4() below.]
*/
pkt_size -= packet->overhead;
@@ -555,7 +560,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
has_data = 1;
}
- padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
+ padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
if (padding)
memset(skb_put(chunk->skb, padding), 0, padding);
@@ -582,7 +587,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
* acknowledged or have failed.
* Re-queue auth chunks if needed.
*/
- pkt_size -= WORD_ROUND(chunk->skb->len);
+ pkt_size -= SCTP_PAD4(chunk->skb->len);
if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
sctp_chunk_free(chunk);
@@ -621,8 +626,10 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
if (!gso)
break;
- if (skb_gro_receive(&head, nskb))
+ if (skb_gro_receive(&head, nskb)) {
+ kfree_skb(nskb);
goto nomem;
+ }
nskb = NULL;
if (WARN_ON_ONCE(skb_shinfo(head)->gso_segs >=
sk->sk_gso_max_segs))
@@ -716,18 +723,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
}
head->ignore_df = packet->ipfragok;
tp->af_specific->sctp_xmit(head, tp);
+ goto out;
-out:
- sctp_packet_reset(packet);
- return err;
-no_route:
- kfree_skb(head);
- if (nskb != head)
- kfree_skb(nskb);
-
- if (asoc)
- IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+nomem:
+ if (packet->auth && list_empty(&packet->auth->list))
+ sctp_chunk_free(packet->auth);
+nodst:
/* FIXME: Returning the 'err' will affect all the associations
* associated with a socket, although only one of the paths of the
* association is unreachable.
@@ -736,22 +738,18 @@ no_route:
* required.
*/
/* err = -EHOSTUNREACH; */
-err:
- /* Control chunks are unreliable so just drop them. DATA chunks
- * will get resent or dropped later.
- */
+ kfree_skb(head);
+err:
list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
list_del_init(&chunk->list);
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
- goto out;
-nomem:
- if (packet->auth && list_empty(&packet->auth->list))
- sctp_chunk_free(packet->auth);
- err = -ENOMEM;
- goto err;
+
+out:
+ sctp_packet_reset(packet);
+ return err;
}
/********************************************************************
@@ -878,7 +876,7 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
struct sctp_chunk *chunk,
u16 chunk_len)
{
- size_t psize, pmtu;
+ size_t psize, pmtu, maxsize;
sctp_xmit_t retval = SCTP_XMIT_OK;
psize = packet->size;
@@ -906,6 +904,17 @@ static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
goto out;
}
+ /* Similarly, if this chunk was built before a PMTU
+ * reduction, we have to fragment it at IP level now. So
+ * if the packet already contains something, we need to
+ * flush.
+ */
+ maxsize = pmtu - packet->overhead;
+ if (packet->auth)
+ maxsize -= SCTP_PAD4(packet->auth->skb->len);
+ if (chunk_len > maxsize)
+ retval = SCTP_XMIT_PMTU_FULL;
+
/* It is also okay to fragment if the chunk we are
* adding is a control chunk, but only if current packet
* is not a GSO one otherwise it causes fragmentation of
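
The transmit error handling above is restructured into a fall-through label ladder: nomem releases a pending auth chunk, nodst frees the already-allocated head skb, err drops the unreliable control chunks, and out resets the packet, so each failure point jumps to exactly the cleanup it needs. The pattern in miniature, with hypothetical names:

static int transmit_sketch(void)
{
	void *a, *b;
	int err = -ENOMEM;

	a = alloc_a();
	if (!a)
		goto err;		/* nothing to undo yet */
	b = alloc_b();
	if (!b)
		goto free_a;
	err = submit(a, b);
	if (err)
		goto free_b;
	return 0;

free_b:
	free_b_obj(b);			/* falls through */
free_a:
	free_a_obj(a);
err:
	return err;
}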
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 72e54a416af6..3ec6da8bbb53 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -68,7 +68,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
@@ -285,10 +285,9 @@ void sctp_outq_free(struct sctp_outq *q)
}
/* Put a new chunk in an sctp_outq. */
-int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
+void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
struct net *net = sock_net(q->asoc->base.sk);
- int error = 0;
pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
chunk && chunk->chunk_hdr ?
@@ -299,54 +298,26 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
* immediately.
*/
if (sctp_chunk_is_data(chunk)) {
- /* Is it OK to queue data chunks? */
- /* From 9. Termination of Association
- *
- * When either endpoint performs a shutdown, the
- * association on each peer will stop accepting new
- * data from its user and only deliver data in queue
- * at the time of sending or receiving the SHUTDOWN
- * chunk.
- */
- switch (q->asoc->state) {
- case SCTP_STATE_CLOSED:
- case SCTP_STATE_SHUTDOWN_PENDING:
- case SCTP_STATE_SHUTDOWN_SENT:
- case SCTP_STATE_SHUTDOWN_RECEIVED:
- case SCTP_STATE_SHUTDOWN_ACK_SENT:
- /* Cannot send after transport endpoint shutdown */
- error = -ESHUTDOWN;
- break;
-
- default:
- pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
- __func__, q, chunk, chunk && chunk->chunk_hdr ?
- sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
- "illegal chunk");
-
- sctp_chunk_hold(chunk);
- sctp_outq_tail_data(q, chunk);
- if (chunk->asoc->prsctp_enable &&
- SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
- chunk->asoc->sent_cnt_removable++;
- if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
- else
- SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
- break;
- }
+ pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
+ __func__, q, chunk, chunk && chunk->chunk_hdr ?
+ sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+ "illegal chunk");
+
+ sctp_outq_tail_data(q, chunk);
+ if (chunk->asoc->prsctp_enable &&
+ SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
+ chunk->asoc->sent_cnt_removable++;
+ if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+ SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
+ else
+ SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
} else {
list_add_tail(&chunk->list, &q->control_chunk_list);
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
}
- if (error < 0)
- return error;
-
if (!q->cork)
- error = sctp_outq_flush(q, 0, gfp);
-
- return error;
+ sctp_outq_flush(q, 0, gfp);
}
/* Insert a chunk into the sorted list based on the TSNs. The retransmit list
@@ -559,7 +530,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_reason_t reason)
{
struct net *net = sock_net(q->asoc->base.sk);
- int error = 0;
switch (reason) {
case SCTP_RTXR_T3_RTX:
@@ -603,10 +573,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
* will be flushed at the end.
*/
if (reason != SCTP_RTXR_FAST_RTX)
- error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
-
- if (error)
- q->asoc->base.sk->sk_err = -error;
+ sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
}
/*
@@ -778,12 +745,12 @@ redo:
}
/* Uncork the outqueue: flush whatever was queued while corked. */
-int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
+void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
{
if (q->cork)
q->cork = 0;
- return sctp_outq_flush(q, 0, gfp);
+ sctp_outq_flush(q, 0, gfp);
}
@@ -796,7 +763,7 @@ int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
+static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
{
struct sctp_packet *packet;
struct sctp_packet singleton;
@@ -919,8 +886,10 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
sctp_packet_config(&singleton, vtag, 0);
sctp_packet_append_chunk(&singleton, chunk);
error = sctp_packet_transmit(&singleton, gfp);
- if (error < 0)
- return error;
+ if (error < 0) {
+ asoc->base.sk->sk_err = -error;
+ return;
+ }
break;
case SCTP_CID_ABORT:
@@ -1018,6 +987,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
retran:
error = sctp_outq_flush_rtx(q, packet,
rtx_timeout, &start_timer);
+ if (error < 0)
+ asoc->base.sk->sk_err = -error;
if (start_timer) {
sctp_transport_reset_t3_rtx(transport);
@@ -1192,14 +1163,15 @@ sctp_flush_out:
struct sctp_transport,
send_ready);
packet = &t->packet;
- if (!sctp_packet_empty(packet))
+ if (!sctp_packet_empty(packet)) {
error = sctp_packet_transmit(packet, gfp);
+ if (error < 0)
+ asoc->base.sk->sk_err = -error;
+ }
/* Clear the burst limited state, if any */
sctp_transport_burst_reset(t);
}
-
- return error;
}
/* Update unack_data based on the incoming SACK chunk */
@@ -1747,7 +1719,7 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
int i;
sctp_sack_variable_t *frags;
- __u16 gap;
+ __u16 tsn_offset, blocks;
__u32 ctsn = ntohl(sack->cum_tsn_ack);
if (TSN_lte(tsn, ctsn))
@@ -1766,10 +1738,11 @@ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
*/
frags = sack->variable;
- gap = tsn - ctsn;
- for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
- if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
- TSN_lte(gap, ntohs(frags[i].gab.end)))
+ blocks = ntohs(sack->num_gap_ack_blocks);
+ tsn_offset = tsn - ctsn;
+ for (i = 0; i < blocks; ++i) {
+ if (tsn_offset >= ntohs(frags[i].gab.start) &&
+ tsn_offset <= ntohs(frags[i].gab.end))
goto pass;
}
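The rewritten gap-ack walk compares plain 16-bit offsets from the cumulative TSN ack point rather than applying TSN serial-number arithmetic to values that are already offsets; a standalone sketch of the check (helper name and flattened arrays are hypothetical):

/* Is tsn covered by one of the SACK's gap-ack blocks? Offsets are
 * relative to the cumulative TSN ack (ctsn), so ordinary unsigned
 * comparison is correct; TSN_lte() on offsets could mis-handle a
 * serial-number wrap that offsets by construction never have.
 */
static int tsn_in_gap_blocks(__u32 tsn, __u32 ctsn, const __u16 *start,
			     const __u16 *end, int blocks)
{
	__u16 tsn_offset = tsn - ctsn;
	int i;

	for (i = 0; i < blocks; i++)
		if (tsn_offset >= start[i] && tsn_offset <= end[i])
			return 1;
	return 0;
}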
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index f3508aa75815..807158e32f5f 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -106,7 +106,8 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
const struct inet_diag_req_v2 *req,
struct user_namespace *user_ns,
int portid, u32 seq, u16 nlmsg_flags,
- const struct nlmsghdr *unlh)
+ const struct nlmsghdr *unlh,
+ bool net_admin)
{
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct list_head *addr_list;
@@ -133,7 +134,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
r->idiag_retrans = 0;
}
- if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns))
+ if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
goto errout;
if (ext & (1 << (INET_DIAG_SKMEMINFO - 1))) {
@@ -203,6 +204,7 @@ struct sctp_comm_param {
struct netlink_callback *cb;
const struct inet_diag_req_v2 *r;
const struct nlmsghdr *nlh;
+ bool net_admin;
};
static size_t inet_assoc_attr_size(struct sctp_association *asoc)
@@ -219,6 +221,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ nla_total_size(addrlen * asoc->peer.transport_count)
+ nla_total_size(addrlen * addrcnt)
+ nla_total_size(sizeof(struct inet_diag_meminfo))
@@ -256,7 +259,8 @@ static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
err = inet_sctp_diag_fill(sk, assoc, rep, req,
sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh);
+ nlh->nlmsg_seq, 0, nlh,
+ commp->net_admin);
release_sock(sk);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
@@ -310,7 +314,8 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI, cb->nlh) < 0) {
+ NLM_F_MULTI, cb->nlh,
+ commp->net_admin) < 0) {
cb->args[3] = 1;
err = 2;
goto release;
@@ -320,7 +325,8 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
if (inet_sctp_diag_fill(sk, assoc, skb, r,
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0, cb->nlh) < 0) {
+ cb->nlh->nlmsg_seq, 0, cb->nlh,
+ commp->net_admin) < 0) {
err = 2;
goto release;
}
@@ -375,7 +381,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
sk_user_ns(NETLINK_CB(cb->skb).sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- cb->nlh) < 0) {
+ cb->nlh, commp->net_admin) < 0) {
err = 2;
goto out;
}
@@ -412,6 +418,7 @@ static int sctp_diag_dump_one(struct sk_buff *in_skb,
.skb = in_skb,
.r = req,
.nlh = nlh,
+ .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
};
if (req->sdiag_family == AF_INET) {
@@ -447,6 +454,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
.skb = skb,
.cb = cb,
.r = r,
+ .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN),
};
/* eps hashtable dumps
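The net_admin flag is computed once per request via netlink_net_capable() and threaded through to the fill function; schematically, a capability-gated attribute fill looks like this (hypothetical helper, mirroring the pattern rather than quoting inet_diag):

/* Emit the socket mark only when the requester held CAP_NET_ADMIN
 * in the netlink socket's network namespace.
 */
static int fill_mark(struct sk_buff *skb, u32 mark, bool net_admin)
{
	if (!net_admin)
		return 0;	/* silently omit the attribute */
	return nla_put_u32(skb, INET_DIAG_MARK, mark);
}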
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 8c77b87a8565..79dd66079dd7 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -253,7 +253,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
num_types = sp->pf->supported_addrs(sp, types);
chunksize = sizeof(init) + addrs_len;
- chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types));
+ chunksize += SCTP_PAD4(SCTP_SAT_LEN(num_types));
chunksize += sizeof(ecap_param);
if (asoc->prsctp_enable)
@@ -283,14 +283,14 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* Add HMACS parameter length if any were defined */
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
+ chunksize += SCTP_PAD4(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
/* Add CHUNKS parameter length */
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += WORD_ROUND(ntohs(auth_chunks->length));
+ chunksize += SCTP_PAD4(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -300,8 +300,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
/* If we have any extensions to report, account for that */
if (num_ext)
- chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
- num_ext);
+ chunksize += SCTP_PAD4(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
@@ -443,13 +443,13 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs;
if (auth_hmacs->length)
- chunksize += WORD_ROUND(ntohs(auth_hmacs->length));
+ chunksize += SCTP_PAD4(ntohs(auth_hmacs->length));
else
auth_hmacs = NULL;
auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks;
if (auth_chunks->length)
- chunksize += WORD_ROUND(ntohs(auth_chunks->length));
+ chunksize += SCTP_PAD4(ntohs(auth_chunks->length));
else
auth_chunks = NULL;
@@ -458,8 +458,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
}
if (num_ext)
- chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) +
- num_ext);
+ chunksize += SCTP_PAD4(sizeof(sctp_supported_ext_param_t) +
+ num_ext);
/* Now allocate and fill out the chunk. */
retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp);
@@ -1390,7 +1390,7 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
struct sock *sk;
/* No need to allocate LL here, as this is only a chunk. */
- skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), gfp);
+ skb = alloc_skb(SCTP_PAD4(sizeof(sctp_chunkhdr_t) + paylen), gfp);
if (!skb)
goto nodata;
@@ -1482,7 +1482,7 @@ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data)
void *target;
void *padding;
int chunklen = ntohs(chunk->chunk_hdr->length);
- int padlen = WORD_ROUND(chunklen) - chunklen;
+ int padlen = SCTP_PAD4(chunklen) - chunklen;
padding = skb_put(chunk->skb, padlen);
target = skb_put(chunk->skb, len);
@@ -1900,7 +1900,7 @@ static int sctp_process_missing_param(const struct sctp_association *asoc,
struct __sctp_missing report;
__u16 len;
- len = WORD_ROUND(sizeof(report));
+ len = SCTP_PAD4(sizeof(report));
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
@@ -2098,9 +2098,9 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
if (*errp) {
if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- WORD_ROUND(ntohs(param.p->length))))
+ SCTP_PAD4(ntohs(param.p->length))))
sctp_addto_chunk_fixed(*errp,
- WORD_ROUND(ntohs(param.p->length)),
+ SCTP_PAD4(ntohs(param.p->length)),
param.v);
} else {
/* If there is no memory for generating the ERROR
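The WORD_ROUND/WORD_TRUNC to SCTP_PAD4/SCTP_TRUNC4 rename is purely mechanical; the helpers presumably remain the classic 4-byte alignment idioms:

/* Round a length up to the next multiple of 4, or truncate it down. */
#define SCTP_PAD4(s)	(((s) + 3) & ~3)
#define SCTP_TRUNC4(s)	((s) & ~3)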
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 12d45193357c..c345bf153bed 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1020,19 +1020,13 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
* This way the whole message is queued up and bundling is
* encouraged for small fragments.
*/
-static int sctp_cmd_send_msg(struct sctp_association *asoc,
- struct sctp_datamsg *msg, gfp_t gfp)
+static void sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg, gfp_t gfp)
{
struct sctp_chunk *chunk;
- int error = 0;
-
- list_for_each_entry(chunk, &msg->chunks, frag_list) {
- error = sctp_outq_tail(&asoc->outqueue, chunk, gfp);
- if (error)
- break;
- }
- return error;
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_outq_tail(&asoc->outqueue, chunk, gfp);
}
@@ -1427,8 +1421,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
local_cork = 1;
}
/* Send a chunk to our peer. */
- error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk,
- gfp);
+ sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
break;
case SCTP_CMD_SEND_PKT:
@@ -1682,7 +1675,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_FORCE_PRIM_RETRAN:
t = asoc->peer.retran_path;
asoc->peer.retran_path = asoc->peer.primary_path;
- error = sctp_outq_uncork(&asoc->outqueue, gfp);
+ sctp_outq_uncork(&asoc->outqueue, gfp);
local_cork = 0;
asoc->peer.retran_path = t;
break;
@@ -1709,7 +1702,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_outq_cork(&asoc->outqueue);
local_cork = 1;
}
- error = sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
+ sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
@@ -1739,9 +1732,9 @@ out:
*/
if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
if (chunk->end_of_packet || chunk->singleton)
- error = sctp_outq_uncork(&asoc->outqueue, gfp);
+ sctp_outq_uncork(&asoc->outqueue, gfp);
} else if (local_cork)
- error = sctp_outq_uncork(&asoc->outqueue, gfp);
+ sctp_outq_uncork(&asoc->outqueue, gfp);
if (sp->data_ready_signalled)
sp->data_ready_signalled = 0;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index d88bb2b0b699..026e3bca4a94 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3454,7 +3454,7 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
}
/* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
@@ -4185,7 +4185,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
- WORD_ROUND(ntohs(hdr->length)),
+ SCTP_PAD4(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
@@ -4203,7 +4203,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
- WORD_ROUND(ntohs(hdr->length)),
+ SCTP_PAD4(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9fc417a8b476..6cdc61c21438 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1958,6 +1958,8 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
/* Now send the (possibly) fragmented message. */
list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
+ sctp_chunk_hold(chunk);
+
/* Do accounting for the write space. */
sctp_set_owner_w(chunk);
@@ -1970,13 +1972,15 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
* breaks.
*/
err = sctp_primitive_SEND(net, asoc, datamsg);
- sctp_datamsg_put(datamsg);
/* Did the lower layer accept the chunk? */
- if (err)
+ if (err) {
+ sctp_datamsg_free(datamsg);
goto out_free;
+ }
pr_debug("%s: we sent primitively\n", __func__);
+ sctp_datamsg_put(datamsg);
err = msg_len;
if (unlikely(wait_connect)) {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 81b86678be4d..ce54dce13ddb 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -233,7 +233,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
}
if (transport->dst) {
- transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));
+ transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
} else
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
@@ -287,7 +287,7 @@ void sctp_transport_route(struct sctp_transport *transport,
return;
}
if (transport->dst) {
- transport->pathmtu = WORD_TRUNC(dst_mtu(transport->dst));
+ transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
/* Initialize sk->sk_rcv_saddr, if the transport is the
* association's active path for getsockname().
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index d85b803da11d..bea00058ce35 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -383,7 +383,7 @@ sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
ch = (sctp_errhdr_t *)(chunk->skb->data);
cause = ch->cause;
- elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t);
+ elen = SCTP_PAD4(ntohs(ch->length)) - sizeof(sctp_errhdr_t);
/* Pull off the ERROR header. */
skb_pull(chunk->skb, sizeof(sctp_errhdr_t));
@@ -688,7 +688,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
* MUST ignore the padding bytes.
*/
len = ntohs(chunk->chunk_hdr->length);
- padding = WORD_ROUND(len) - len;
+ padding = SCTP_PAD4(len) - len;
/* Fixup cloned skb with just this chunks data. */
skb_trim(skb, chunk->chunk_end - padding - skb->data);
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 877e55066f89..84d0fdaf7de9 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -140,11 +140,8 @@ int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
* we can go ahead and clear out the lobby in one shot
*/
if (!skb_queue_empty(&sp->pd_lobby)) {
- struct list_head *list;
skb_queue_splice_tail_init(&sp->pd_lobby,
&sk->sk_receive_queue);
- list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
- INIT_LIST_HEAD(list);
return 1;
}
} else {
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1d281816f2bf..d8582028b346 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -569,9 +569,10 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
struct rsc *found;
memset(&rsci, 0, sizeof(rsci));
- rsci.handle.data = handle->data;
- rsci.handle.len = handle->len;
+ if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
+ return NULL;
found = rsc_lookup(cd, &rsci);
+ rsc_free(&rsci);
if (!found)
return NULL;
if (cache_check(cd, &found->h, NULL))
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7f79fb7dc6a0..66f23b376fa0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -453,7 +453,7 @@ static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
struct rpc_xprt_switch *xps;
if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
- WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
xps = args->bc_xprt->xpt_bc_xps;
xprt_switch_get(xps);
} else {
@@ -520,7 +520,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
char servername[48];
if (args->bc_xprt) {
- WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+ WARN_ON_ONCE(!(args->protocol & XPRT_TRANSPORT_BC));
xprt = args->bc_xprt->xpt_bc_xprt;
if (xprt) {
xprt_get(xprt);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 536d0be3f61b..799cce6cbe45 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -51,6 +51,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/svc_rdma.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */
@@ -923,7 +924,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
}
INIT_LIST_HEAD(&buf->rb_recv_bufs);
- for (i = 0; i < buf->rb_max_requests; i++) {
+ for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
struct rpcrdma_rep *rep;
rep = rpcrdma_create_rep(r_xprt);
@@ -1018,6 +1019,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
rep = rpcrdma_buffer_get_rep_locked(buf);
rpcrdma_destroy_rep(ia, rep);
}
+ buf->rb_send_count = 0;
spin_lock(&buf->rb_reqslock);
while (!list_empty(&buf->rb_allreqs)) {
@@ -1032,6 +1034,7 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
spin_lock(&buf->rb_reqslock);
}
spin_unlock(&buf->rb_reqslock);
+ buf->rb_recv_count = 0;
rpcrdma_destroy_mrs(buf);
}
@@ -1074,8 +1077,27 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
spin_unlock(&buf->rb_mwlock);
}
+static struct rpcrdma_rep *
+rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
+{
+ /* If an RPC previously completed without a reply (say, a
+ * credential problem or a soft timeout occurred) then hold off
+ * on supplying more Receive buffers until the number of new
+ * pending RPCs catches up to the number of posted Receives.
+ */
+ if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
+ return NULL;
+
+ if (unlikely(list_empty(&buffers->rb_recv_bufs)))
+ return NULL;
+ buffers->rb_recv_count++;
+ return rpcrdma_buffer_get_rep_locked(buffers);
+}
+
/*
* Get a set of request/reply buffers.
+ *
+ * Reply buffer (if available) is attached to send buffer upon return.
*/
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
@@ -1085,21 +1107,15 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
spin_lock(&buffers->rb_lock);
if (list_empty(&buffers->rb_send_bufs))
goto out_reqbuf;
+ buffers->rb_send_count++;
req = rpcrdma_buffer_get_req_locked(buffers);
- if (list_empty(&buffers->rb_recv_bufs))
- goto out_repbuf;
- req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
+ req->rl_reply = rpcrdma_buffer_get_rep(buffers);
spin_unlock(&buffers->rb_lock);
return req;
out_reqbuf:
spin_unlock(&buffers->rb_lock);
- pr_warn("rpcrdma: out of request buffers (%p)\n", buffers);
- return NULL;
-out_repbuf:
- list_add(&req->rl_free, &buffers->rb_send_bufs);
- spin_unlock(&buffers->rb_lock);
- pr_warn("rpcrdma: out of reply buffers (%p)\n", buffers);
+ pr_warn("RPC: %s: out of request buffers\n", __func__);
return NULL;
}
@@ -1117,9 +1133,12 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
req->rl_reply = NULL;
spin_lock(&buffers->rb_lock);
+ buffers->rb_send_count--;
list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
- if (rep)
+ if (rep) {
+ buffers->rb_recv_count--;
list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
+ }
spin_unlock(&buffers->rb_lock);
}
@@ -1133,8 +1152,7 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
struct rpcrdma_buffer *buffers = req->rl_buffer;
spin_lock(&buffers->rb_lock);
- if (!list_empty(&buffers->rb_recv_bufs))
- req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
+ req->rl_reply = rpcrdma_buffer_get_rep(buffers);
spin_unlock(&buffers->rb_lock);
}
@@ -1148,6 +1166,7 @@ rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;
spin_lock(&buffers->rb_lock);
+ buffers->rb_recv_count--;
list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
spin_unlock(&buffers->rb_lock);
}
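The two counters enforce a simple invariant: a reply buffer is handed out only while the number of active sends keeps pace with the receives already posted, so an RPC that died without a reply cannot slowly drain the receive pool. A reduced sketch of the decision (hypothetical helper):

/* May a reply buffer be attached to this request? */
static bool may_supply_rep(int send_count, int recv_count, bool reps_free)
{
	if (send_count < recv_count)	/* an earlier RPC never got a reply */
		return false;		/* let the counts catch up first */
	return reps_free;		/* then only if rb_recv_bufs is non-empty */
}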
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 670fad57153a..a71b0f5897d8 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -321,6 +321,7 @@ struct rpcrdma_buffer {
char *rb_pool;
spinlock_t rb_lock; /* protect buf lists */
+ int rb_send_count, rb_recv_count;
struct list_head rb_send_bufs;
struct list_head rb_recv_bufs;
u32 rb_max_requests;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8ede3bc52481..bf168838a029 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1074,7 +1074,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
skb = skb_recv_datagram(sk, 0, 1, &err);
if (skb != NULL) {
xs_udp_data_read_skb(&transport->xprt, sk, skb);
- skb_free_datagram(sk, skb);
+ skb_free_datagram_locked(sk, skb);
continue;
}
if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 6b626a64b517..a04fe9be1c60 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -62,6 +62,8 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
/**
* named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
@@ -141,9 +143,9 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
- ITEM_SIZE;
- uint msg_rem = msg_dsz;
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */
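The corrected sizing first reserves INT_H_SIZE bytes for the header that named_prepare_buf() prepends, then rounds the remainder down to a whole number of items; as a sketch (helper name hypothetical):

/* Usable payload per publication message: header room first,
 * then a whole number of distr_item entries.
 */
static u32 named_payload(u32 mtu)
{
	return ((mtu - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;
}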
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index dd274687a53d..d80cd3f7503f 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -665,7 +665,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) {
pr_err("Invalid UDP bearer configuration");
- return -EINVAL;
+ err = -EINVAL;
+ goto err;
}
err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_LOCAL], &local,
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f1dffe84f0d5..8309687a56b0 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -661,11 +661,11 @@ static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
- if (mutex_lock_interruptible(&u->readlock))
+ if (mutex_lock_interruptible(&u->iolock))
return -EINTR;
sk->sk_peek_off = val;
- mutex_unlock(&u->readlock);
+ mutex_unlock(&u->iolock);
return 0;
}
@@ -779,7 +779,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
spin_lock_init(&u->lock);
atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
- mutex_init(&u->readlock); /* single task reading lock */
+ mutex_init(&u->iolock); /* single task reading lock */
+ mutex_init(&u->bindlock); /* single task binding lock */
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
unix_insert_socket(unix_sockets_unbound(sk), sk);
@@ -848,7 +849,7 @@ static int unix_autobind(struct socket *sock)
int err;
unsigned int retries = 0;
- err = mutex_lock_interruptible(&u->readlock);
+ err = mutex_lock_interruptible(&u->bindlock);
if (err)
return err;
@@ -895,7 +896,7 @@ retry:
spin_unlock(&unix_table_lock);
err = 0;
-out: mutex_unlock(&u->readlock);
+out: mutex_unlock(&u->bindlock);
return err;
}
@@ -954,20 +955,32 @@ fail:
return NULL;
}
-static int unix_mknod(struct dentry *dentry, const struct path *path, umode_t mode,
- struct path *res)
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
{
- int err;
+ struct dentry *dentry;
+ struct path path;
+ int err = 0;
+ /*
+ * Get the parent directory and calculate the hash for the
+ * last component.
+ */
+ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+ err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ return err;
- err = security_path_mknod(path, dentry, mode, 0);
+ /*
+ * All right, let's create it.
+ */
+ err = security_path_mknod(&path, dentry, mode, 0);
if (!err) {
- err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+ err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
if (!err) {
- res->mnt = mntget(path->mnt);
+ res->mnt = mntget(path.mnt);
res->dentry = dget(dentry);
}
}
-
+ done_path_create(&path, dentry);
return err;
}
@@ -978,12 +991,10 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
- int err, name_err;
+ int err;
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
- struct path path;
- struct dentry *dentry;
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
@@ -999,34 +1010,14 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
- name_err = 0;
- dentry = NULL;
- if (sun_path[0]) {
- /* Get the parent directory, calculate the hash for last
- * component.
- */
- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
-
- if (IS_ERR(dentry)) {
- /* delay report until after 'already bound' check */
- name_err = PTR_ERR(dentry);
- dentry = NULL;
- }
- }
-
- err = mutex_lock_interruptible(&u->readlock);
+ err = mutex_lock_interruptible(&u->bindlock);
if (err)
- goto out_path;
+ goto out;
err = -EINVAL;
if (u->addr)
goto out_up;
- if (name_err) {
- err = name_err == -EEXIST ? -EADDRINUSE : name_err;
- goto out_up;
- }
-
err = -ENOMEM;
addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
if (!addr)
@@ -1037,11 +1028,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
- if (dentry) {
- struct path u_path;
+ if (sun_path[0]) {
+ struct path path;
umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
- err = unix_mknod(dentry, &path, mode, &u_path);
+ err = unix_mknod(sun_path, mode, &path);
if (err) {
if (err == -EEXIST)
err = -EADDRINUSE;
@@ -1049,9 +1040,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
- hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
- u->path = u_path;
+ u->path = path;
list = &unix_socket_table[hash];
} else {
spin_lock(&unix_table_lock);
@@ -1073,11 +1064,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
out_unlock:
spin_unlock(&unix_table_lock);
out_up:
- mutex_unlock(&u->readlock);
-out_path:
- if (dentry)
- done_path_create(&path, dentry);
-
+ mutex_unlock(&u->bindlock);
out:
return err;
}
@@ -1969,17 +1956,17 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
if (false) {
alloc_skb:
unix_state_unlock(other);
- mutex_unlock(&unix_sk(other)->readlock);
+ mutex_unlock(&unix_sk(other)->iolock);
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
&err, 0);
if (!newskb)
goto err;
}
- /* we must acquire readlock as we modify already present
+ /* we must acquire iolock as we modify already present
* skbs in the sk_receive_queue and mess with skb->len
*/
- err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+ err = mutex_lock_interruptible(&unix_sk(other)->iolock);
if (err) {
err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
goto err;
@@ -2046,7 +2033,7 @@ alloc_skb:
}
unix_state_unlock(other);
- mutex_unlock(&unix_sk(other)->readlock);
+ mutex_unlock(&unix_sk(other)->iolock);
other->sk_data_ready(other);
scm_destroy(&scm);
@@ -2055,7 +2042,7 @@ alloc_skb:
err_state_unlock:
unix_state_unlock(other);
err_unlock:
- mutex_unlock(&unix_sk(other)->readlock);
+ mutex_unlock(&unix_sk(other)->iolock);
err:
kfree_skb(newskb);
if (send_sigpipe && !(flags & MSG_NOSIGNAL))
@@ -2123,7 +2110,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
- mutex_lock(&u->readlock);
+ mutex_lock(&u->iolock);
skip = sk_peek_offset(sk, flags);
skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
@@ -2131,14 +2118,14 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
if (skb)
break;
- mutex_unlock(&u->readlock);
+ mutex_unlock(&u->iolock);
if (err != -EAGAIN)
break;
} while (timeo &&
!__skb_wait_for_more_packets(sk, &err, &timeo, last));
- if (!skb) { /* implies readlock unlocked */
+ if (!skb) { /* implies iolock unlocked */
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
@@ -2203,7 +2190,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
out_free:
skb_free_datagram(sk, skb);
- mutex_unlock(&u->readlock);
+ mutex_unlock(&u->iolock);
out:
return err;
}
@@ -2298,7 +2285,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
- mutex_lock(&u->readlock);
+ mutex_lock(&u->iolock);
if (flags & MSG_PEEK)
skip = sk_peek_offset(sk, flags);
@@ -2340,7 +2327,7 @@ again:
break;
}
- mutex_unlock(&u->readlock);
+ mutex_unlock(&u->iolock);
timeo = unix_stream_data_wait(sk, timeo, last,
last_len);
@@ -2351,7 +2338,7 @@ again:
goto out;
}
- mutex_lock(&u->readlock);
+ mutex_lock(&u->iolock);
goto redo;
unlock:
unix_state_unlock(sk);
@@ -2454,7 +2441,7 @@ unlock:
}
} while (size);
- mutex_unlock(&u->readlock);
+ mutex_unlock(&u->iolock);
if (state->msg)
scm_recv(sock, state->msg, &scm, flags);
else
@@ -2495,9 +2482,9 @@ static ssize_t skb_unix_socket_splice(struct sock *sk,
int ret;
struct unix_sock *u = unix_sk(sk);
- mutex_unlock(&u->readlock);
+ mutex_unlock(&u->iolock);
ret = splice_to_pipe(pipe, spd);
- mutex_lock(&u->readlock);
+ mutex_lock(&u->iolock);
return ret;
}
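The readlock split above leaves two independent mutexes: iolock serializes the receive/peek-offset I/O paths, while bindlock serializes autobind against explicit bind, so bind() no longer contends with a sleeping reader. Reduced to its essentials (hypothetical cut-down struct; the real unix_sock has many more fields):

struct u_sock_locks {
	struct mutex iolock;	/* read/recv/splice and peek offset */
	struct mutex bindlock;	/* unix_bind() vs. unix_autobind() */
};

static void u_locks_init(struct u_sock_locks *u)
{
	mutex_init(&u->iolock);
	mutex_init(&u->bindlock);
}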
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 2029b49a1df3..4911cd997b9a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1252,7 +1252,7 @@ static int __init cfg80211_init(void)
if (err)
goto out_fail_reg;
- cfg80211_wq = create_singlethread_workqueue("cfg80211");
+ cfg80211_wq = alloc_ordered_workqueue("cfg80211", WQ_MEM_RECLAIM);
if (!cfg80211_wq) {
err = -ENOMEM;
goto out_fail_wq;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index eee91443924d..5555e3c13ae9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -249,9 +249,9 @@ struct cfg80211_event {
};
struct cfg80211_cached_keys {
- struct key_params params[6];
- u8 data[6][WLAN_MAX_KEY_LEN];
- int def, defmgmt;
+ struct key_params params[4];
+ u8 data[4][WLAN_KEY_LEN_WEP104];
+ int def;
};
enum cfg80211_chan_mode {
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 4a4dda53bdf1..eafdfa5798ae 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -114,6 +114,9 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
}
}
+ if (WARN_ON(connkeys && connkeys->def < 0))
+ return -EINVAL;
+
if (WARN_ON(wdev->connect_keys))
kzfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
@@ -284,18 +287,16 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
if (!netif_running(wdev->netdev))
return 0;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys)
wdev->wext.keys->def = wdev->wext.default_key;
- wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
- }
wdev->wext.ibss.privacy = wdev->wext.default_key != -1;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys && wdev->wext.keys->def != -1) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 4; i++)
ck->params[i].key = ck->data[i];
}
err = __cfg80211_join_ibss(rdev, wdev->netdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index c284d883c349..d6abb0704db5 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -222,7 +222,7 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
if (auth_type == NL80211_AUTHTYPE_SHARED_KEY)
- if (!key || !key_len || key_idx < 0 || key_idx > 4)
+ if (!key || !key_len || key_idx < 0 || key_idx > 3)
return -EINVAL;
if (wdev->current_bss &&
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 499785778983..fd111e2b559d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -848,13 +848,21 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
struct nlattr *key;
struct cfg80211_cached_keys *result;
int rem, err, def = 0;
+ bool have_key = false;
+
+ nla_for_each_nested(key, keys, rem) {
+ have_key = true;
+ break;
+ }
+
+ if (!have_key)
+ return NULL;
result = kzalloc(sizeof(*result), GFP_KERNEL);
if (!result)
return ERR_PTR(-ENOMEM);
result->def = -1;
- result->defmgmt = -1;
nla_for_each_nested(key, keys, rem) {
memset(&parse, 0, sizeof(parse));
@@ -866,7 +874,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
err = -EINVAL;
if (!parse.p.key)
goto error;
- if (parse.idx < 0 || parse.idx > 4)
+ if (parse.idx < 0 || parse.idx > 3)
goto error;
if (parse.def) {
if (def)
@@ -881,16 +889,24 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
parse.idx, false, NULL);
if (err)
goto error;
+ if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+ err = -EINVAL;
+ goto error;
+ }
result->params[parse.idx].cipher = parse.p.cipher;
result->params[parse.idx].key_len = parse.p.key_len;
result->params[parse.idx].key = result->data[parse.idx];
memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
- if (parse.p.cipher == WLAN_CIPHER_SUITE_WEP40 ||
- parse.p.cipher == WLAN_CIPHER_SUITE_WEP104) {
- if (no_ht)
- *no_ht = true;
- }
+ /* must be WEP key if we got here */
+ if (no_ht)
+ *no_ht = true;
+ }
+
+ if (result->def < 0) {
+ err = -EINVAL;
+ goto error;
}
return result;
@@ -2525,10 +2541,35 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
int if_idx = 0;
int wp_start = cb->args[0];
int if_start = cb->args[1];
+ int filter_wiphy = -1;
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
rtnl_lock();
+ if (!cb->args[2]) {
+ struct nl80211_dump_wiphy_state state = {
+ .filter_wiphy = -1,
+ };
+ int ret;
+
+ ret = nl80211_dump_wiphy_parse(skb, cb, &state);
+ if (ret)
+ return ret;
+
+ filter_wiphy = state.filter_wiphy;
+
+ /*
+ * If filtering, set cb->args[2] to the wiphy index + 1, since
+ * 0 is the default value that signals parsing is still needed.
+ */
+ if (filter_wiphy >= 0)
+ cb->args[2] = filter_wiphy + 1;
+ else
+ cb->args[2] = -1;
+ } else if (cb->args[2] > 0) {
+ filter_wiphy = cb->args[2] - 1;
+ }
+
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
continue;
@@ -2536,6 +2577,10 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
wp_idx++;
continue;
}
+
+ if (filter_wiphy >= 0 && filter_wiphy != rdev->wiphy_idx)
+ continue;
+
if_idx = 0;
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
@@ -6969,7 +7014,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
params.n_counter_offsets_presp = len / sizeof(u16);
if (rdev->wiphy.max_num_csa_counters &&
- (params.n_counter_offsets_beacon >
+ (params.n_counter_offsets_presp >
rdev->wiphy.max_num_csa_counters))
return -EINVAL;
@@ -7359,7 +7404,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
(key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
key.p.key_len != WLAN_KEY_LEN_WEP104))
return -EINVAL;
- if (key.idx > 4)
+ if (key.idx > 3)
return -EINVAL;
} else {
key.p.key_len = 0;
@@ -7977,6 +8022,8 @@ __cfg80211_alloc_vendor_skb(struct cfg80211_registered_device *rdev,
}
data = nla_nest_start(skb, attr);
+ if (!data)
+ goto nla_put_failure;
((void **)skb->cb)[0] = rdev;
((void **)skb->cb)[1] = hdr;
@@ -9406,18 +9453,27 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (!freqs)
return -ENOBUFS;
- for (i = 0; i < req->n_channels; i++)
- nla_put_u32(msg, i, req->channels[i]->center_freq);
+ for (i = 0; i < req->n_channels; i++) {
+ if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+ return -ENOBUFS;
+ }
nla_nest_end(msg, freqs);
if (req->n_match_sets) {
matches = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_MATCH);
+ if (!matches)
+ return -ENOBUFS;
+
for (i = 0; i < req->n_match_sets; i++) {
match = nla_nest_start(msg, i);
- nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
- req->match_sets[i].ssid.ssid_len,
- req->match_sets[i].ssid.ssid);
+ if (!match)
+ return -ENOBUFS;
+
+ if (nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
+ req->match_sets[i].ssid.ssid_len,
+ req->match_sets[i].ssid.ssid))
+ return -ENOBUFS;
nla_nest_end(msg, match);
}
nla_nest_end(msg, matches);
@@ -9429,6 +9485,9 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
for (i = 0; i < req->n_scan_plans; i++) {
scan_plan = nla_nest_start(msg, i + 1);
+ if (!scan_plan)
+ return -ENOBUFS;
+
if (!scan_plan ||
nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
req->scan_plans[i].interval) ||
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0358e12be54b..b5bd58d0f731 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -352,52 +352,48 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
__cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
}
-const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
+const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
+ const u8 *match, int match_len,
+ int match_offset)
{
- while (len > 2 && ies[0] != eid) {
+ /* match_offset can't be smaller than 2, unless match_len is
+ * zero, in which case match_offset must be zero as well.
+ */
+ if (WARN_ON((match_len && match_offset < 2) ||
+ (!match_len && match_offset)))
+ return NULL;
+
+ while (len >= 2 && len >= ies[1] + 2) {
+ if ((ies[0] == eid) &&
+ (ies[1] + 2 >= match_offset + match_len) &&
+ !memcmp(ies + match_offset, match, match_len))
+ return ies;
+
len -= ies[1] + 2;
ies += ies[1] + 2;
}
- if (len < 2)
- return NULL;
- if (len < 2 + ies[1])
- return NULL;
- return ies;
+
+ return NULL;
}
-EXPORT_SYMBOL(cfg80211_find_ie);
+EXPORT_SYMBOL(cfg80211_find_ie_match);
const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, int len)
{
- struct ieee80211_vendor_ie *ie;
- const u8 *pos = ies, *end = ies + len;
- int ie_oui;
+ const u8 *ie;
+ u8 match[] = { oui >> 16, oui >> 8, oui, oui_type };
+ int match_len = (oui_type < 0) ? 3 : sizeof(match);
if (WARN_ON(oui_type > 0xff))
return NULL;
- while (pos < end) {
- pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
- end - pos);
- if (!pos)
- return NULL;
-
- ie = (struct ieee80211_vendor_ie *)pos;
-
- /* make sure we can access ie->len */
- BUILD_BUG_ON(offsetof(struct ieee80211_vendor_ie, len) != 1);
+ ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
+ match, match_len, 2);
- if (ie->len < sizeof(*ie))
- goto cont;
+ if (ie && (ie[1] < 4))
+ return NULL;
- ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
- if (ie_oui == oui &&
- (oui_type < 0 || ie->oui_type == oui_type))
- return pos;
-cont:
- pos += 2 + ie->len;
- }
- return NULL;
+ return ie;
}
EXPORT_SYMBOL(cfg80211_find_vendor_ie);
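With the matcher in place, the old entry point presumably becomes a thin inline wrapper in cfg80211.h, so the loop above is the single IE walker:

static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
{
	/* no match bytes: return the first IE with this element ID */
	return cfg80211_find_ie_match(eid, ies, len, NULL, 0, 0);
}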
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index add6824c44fd..c08a3b57dca1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1043,6 +1043,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
connect->crypto.ciphers_pairwise[0] = cipher;
}
}
+ } else {
+ if (WARN_ON(connkeys))
+ return -EINVAL;
}
wdev->connect_keys = connkeys;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index e46469bc130f..0082f4b01795 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -57,7 +57,7 @@ static ssize_t addresses_show(struct device *dev,
return sprintf(buf, "%pM\n", wiphy->perm_addr);
for (i = 0; i < wiphy->n_addresses; i++)
- buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);
+ buf += sprintf(buf, "%pM\n", wiphy->addresses[i].addr);
return buf - start;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 0675f513e7b9..9e6e2aaa7766 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -218,7 +218,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr)
{
- if (key_idx > 5)
+ if (key_idx < 0 || key_idx > 5)
return -EINVAL;
if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -249,7 +249,13 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
/* Disallow BIP (group-only) cipher as pairwise cipher */
if (pairwise)
return -EINVAL;
+ if (key_idx < 4)
+ return -EINVAL;
break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (key_idx > 3)
+ return -EINVAL;
default:
break;
}
@@ -906,7 +912,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
if (!wdev->connect_keys)
return;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < 4; i++) {
if (!wdev->connect_keys->params[i].cipher)
continue;
if (rdev_add_key(rdev, dev, i, false, NULL,
@@ -919,9 +925,6 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
netdev_err(dev, "failed to set defkey %d\n", i);
continue;
}
- if (wdev->connect_keys->defmgmt == i)
- if (rdev_set_default_mgmt_key(rdev, dev, i))
- netdev_err(dev, "failed to set mgtdef %d\n", i);
}
kzfree(wdev->connect_keys);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 9f27221c8913..7b97d43b27e1 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -408,10 +408,10 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (!wdev->wext.keys) {
wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!wdev->wext.keys)
return -ENOMEM;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 4; i++)
wdev->wext.keys->params[i].key =
wdev->wext.keys->data[i];
}
@@ -460,7 +460,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
if (err == -ENOENT)
err = 0;
if (!err) {
- if (!addr) {
+ if (!addr && idx < 4) {
memset(wdev->wext.keys->data[idx], 0,
sizeof(wdev->wext.keys->data[idx]));
wdev->wext.keys->params[idx].key_len = 0;
@@ -487,6 +487,9 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
err = 0;
if (wdev->current_bss)
err = rdev_add_key(rdev, dev, idx, pairwise, addr, params);
+ else if (params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ params->cipher != WLAN_CIPHER_SUITE_WEP104)
+ return -EINVAL;
if (err)
return err;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index dbb2738e356a..6250b1cfcde5 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -958,29 +958,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
return private(dev, iwr, cmd, info, handler);
}
/* Old driver API : call driver ioctl handler */
- if (dev->netdev_ops->ndo_do_ioctl) {
-#ifdef CONFIG_COMPAT
- if (info->flags & IW_REQUEST_FLAG_COMPAT) {
- int ret = 0;
- struct iwreq iwr_lcl;
- struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
-
- memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
- iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
- iwr_lcl.u.data.length = iwp_compat->length;
- iwr_lcl.u.data.flags = iwp_compat->flags;
-
- ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
-
- iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
- iwp_compat->length = iwr_lcl.u.data.length;
- iwp_compat->flags = iwr_lcl.u.data.flags;
-
- return ret;
- } else
-#endif
- return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
- }
+ if (dev->netdev_ops->ndo_do_ioctl)
+ return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
return -EOPNOTSUPP;
}
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index a4e8af3321d2..88f1f6931ab8 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -35,7 +35,6 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
if (wdev->wext.keys) {
wdev->wext.keys->def = wdev->wext.default_key;
- wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
if (wdev->wext.default_key != -1)
wdev->wext.connect.privacy = true;
}
@@ -43,11 +42,11 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
if (!wdev->wext.connect.ssid_len)
return 0;
- if (wdev->wext.keys) {
+ if (wdev->wext.keys && wdev->wext.keys->def != -1) {
ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
if (!ck)
return -ENOMEM;
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 4; i++)
ck->params[i].key = ck->data[i];
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index a750f330b8dd..f83b74d3e2ac 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1500,12 +1500,8 @@ out_fac_release:
goto out_dtefac_release;
if (dtefacs.calling_len > X25_MAX_AE_LEN)
goto out_dtefac_release;
- if (dtefacs.calling_ae == NULL)
- goto out_dtefac_release;
if (dtefacs.called_len > X25_MAX_AE_LEN)
goto out_dtefac_release;
- if (dtefacs.called_ae == NULL)
- goto out_dtefac_release;
x25->dte_facilities = dtefacs;
rc = 0;
out_dtefac_release:
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 250e567ba3d6..44ac85fe2bc9 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -17,7 +17,7 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <net/xfrm.h>
-#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
+#if IS_ENABLED(CONFIG_INET_ESP) || IS_ENABLED(CONFIG_INET6_ESP)
#include <net/esp.h>
#endif
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 1c4ad477ce93..6e3f0254d8a1 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -207,15 +207,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
family = XFRM_SPI_SKB_CB(skb)->family;
/* if tunnel is present override skb->mark value with tunnel i_key */
- if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
- switch (family) {
- case AF_INET:
+ switch (family) {
+ case AF_INET:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
- break;
- case AF_INET6:
+ break;
+ case AF_INET6:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
- break;
- }
+ break;
}
/* Allocate new secpath or COW existing one. */
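tunnel.ip4 and tunnel.ip6 share a union, so the address family must be inspected before either member is dereferenced; testing ip4 alone, as the old code did, could interpret an IPv6 tunnel pointer's bits as an IPv4 one. A sketch of the corrected access pattern (helper name hypothetical, types reduced for illustration):

static u32 tunnel_ikey(const struct xfrm_tunnel_skb_cb *cb, int family)
{
	switch (family) {
	case AF_INET:
		return cb->tunnel.ip4 ?
		       be32_to_cpu(cb->tunnel.ip4->parms.i_key) : 0;
	case AF_INET6:
		return cb->tunnel.ip6 ?
		       be32_to_cpu(cb->tunnel.ip6->parms.i_key) : 0;
	}
	return 0;
}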
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b5e665b3cfb0..fd6986634e6f 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -49,6 +49,7 @@ static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
__read_mostly;
static struct kmem_cache *xfrm_dst_cache __read_mostly;
+static __read_mostly seqcount_t xfrm_policy_hash_generation;
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
@@ -59,6 +60,11 @@ static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
+static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
+{
+ return atomic_inc_not_zero(&policy->refcnt);
+}
+
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
@@ -385,9 +391,11 @@ static struct hlist_head *policy_hash_bysel(struct net *net,
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __sel_hash(sel, family, hmask, dbits, sbits);
- return (hash == hmask + 1 ?
- &net->xfrm.policy_inexact[dir] :
- net->xfrm.policy_bydst[dir].table + hash);
+ if (hash == hmask + 1)
+ return &net->xfrm.policy_inexact[dir];
+
+ return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static struct hlist_head *policy_hash_direct(struct net *net,
@@ -403,7 +411,8 @@ static struct hlist_head *policy_hash_direct(struct net *net,
__get_hash_thresh(net, family, dir, &dbits, &sbits);
hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
- return net->xfrm.policy_bydst[dir].table + hash;
+ return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
static void xfrm_dst_hash_transfer(struct net *net,
@@ -426,14 +435,14 @@ redo:
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
pol->family, nhashmask, dbits, sbits);
if (!entry0) {
- hlist_del(&pol->bydst);
- hlist_add_head(&pol->bydst, ndsttable+h);
+ hlist_del_rcu(&pol->bydst);
+ hlist_add_head_rcu(&pol->bydst, ndsttable + h);
h0 = h;
} else {
if (h != h0)
continue;
- hlist_del(&pol->bydst);
- hlist_add_behind(&pol->bydst, entry0);
+ hlist_del_rcu(&pol->bydst);
+ hlist_add_behind_rcu(&pol->bydst, entry0);
}
entry0 = &pol->bydst;
}
@@ -468,22 +477,32 @@ static void xfrm_bydst_resize(struct net *net, int dir)
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
unsigned int nhashmask = xfrm_new_hash_mask(hmask);
unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
- struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
struct hlist_head *ndst = xfrm_hash_alloc(nsize);
+ struct hlist_head *odst;
int i;
if (!ndst)
return;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ write_seqcount_begin(&xfrm_policy_hash_generation);
+
+ odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
+ lockdep_is_held(&net->xfrm.xfrm_policy_lock));
+
for (i = hmask; i >= 0; i--)
xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
- net->xfrm.policy_bydst[dir].table = ndst;
+ rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
net->xfrm.policy_bydst[dir].hmask = nhashmask;
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ write_seqcount_end(&xfrm_policy_hash_generation);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+
+ synchronize_rcu();
xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
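Readers now sample xfrm_policy_hash_generation and retry instead of taking the lock, so the writer must publish the new table with RCU and keep the old one alive until readers are done; the protocol above, boiled down (omitting the actual rehash, helper name hypothetical):

static void resize_publish(struct net *net, int dir,
			   struct hlist_head *ndst, unsigned int nhashmask)
{
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);
	/* ... transfer chains from the old table into ndst ... */
	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;
	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	synchronize_rcu();	/* now the old table may be freed */
}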
@@ -500,7 +519,7 @@ static void xfrm_byidx_resize(struct net *net, int total)
if (!nidx)
return;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
@@ -508,7 +527,7 @@ static void xfrm_byidx_resize(struct net *net, int total)
net->xfrm.policy_byidx = nidx;
net->xfrm.policy_idx_hmask = nhashmask;
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
@@ -541,7 +560,6 @@ static inline int xfrm_byidx_should_resize(struct net *net, int total)
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
- read_lock_bh(&net->xfrm.xfrm_policy_lock);
si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
@@ -550,7 +568,6 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
si->spdhcnt = net->xfrm.policy_idx_hmask;
si->spdhmcnt = xfrm_policy_hashmax;
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
@@ -600,7 +617,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
rbits6 = net->xfrm.policy_hthresh.rbits6;
} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
/* reset the bydst and inexact table in all directions */
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
@@ -626,6 +643,10 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+ /* skip socket policies */
+ continue;
+ }
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
policy->family,
@@ -642,7 +663,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
hlist_add_head(&policy->bydst, chain);
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
mutex_unlock(&hash_resize_mutex);
}
@@ -753,7 +774,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct hlist_head *chain;
struct hlist_node *newpos;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
delpol = NULL;
newpos = NULL;
@@ -764,7 +785,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return -EEXIST;
}
delpol = pol;
@@ -800,7 +821,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
policy->curlft.use_time = 0;
if (!mod_timer(&policy->timer, jiffies + HZ))
xfrm_pol_hold(policy);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (delpol)
xfrm_policy_kill(delpol);
@@ -820,7 +841,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
struct hlist_head *chain;
*err = 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, sel, sel->family, dir);
ret = NULL;
hlist_for_each_entry(pol, chain, bydst) {
@@ -833,7 +854,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
@@ -842,7 +863,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
break;
}
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
@@ -861,7 +882,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
return NULL;
*err = 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
hlist_for_each_entry(pol, chain, byidx) {
@@ -872,7 +893,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
*err = security_xfrm_policy_delete(
pol->security);
if (*err) {
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return pol;
}
__xfrm_policy_unlink(pol, dir);
@@ -881,7 +902,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
break;
}
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
@@ -939,7 +960,7 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
int dir, err = 0, cnt = 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
err = xfrm_policy_flush_secctx_check(net, type, task_valid);
if (err)
@@ -955,14 +976,14 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again1;
}
@@ -974,13 +995,13 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (pol->type != type)
continue;
__xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
cnt++;
xfrm_audit_policy_delete(pol, 1, task_valid);
xfrm_policy_kill(pol);
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
goto again2;
}
}
@@ -989,7 +1010,7 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (!cnt)
err = -ESRCH;
out:
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
@@ -1009,7 +1030,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
if (list_empty(&walk->walk.all) && walk->seq != 0)
return 0;
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
if (list_empty(&walk->walk.all))
x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
else
@@ -1037,7 +1058,7 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
}
list_del_init(&walk->walk.all);
out:
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
@@ -1056,9 +1077,9 @@ void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
if (list_empty(&walk->walk.all))
return;
- write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
list_del(&walk->walk.all);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
@@ -1096,17 +1117,24 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
struct xfrm_policy *pol, *ret;
const xfrm_address_t *daddr, *saddr;
struct hlist_head *chain;
- u32 priority = ~0U;
+ unsigned int sequence;
+ u32 priority;
daddr = xfrm_flowi_daddr(fl, family);
saddr = xfrm_flowi_saddr(fl, family);
if (unlikely(!daddr || !saddr))
return NULL;
- read_lock_bh(&net->xfrm.xfrm_policy_lock);
- chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ rcu_read_lock();
+ retry:
+ do {
+ sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
+ chain = policy_hash_direct(net, daddr, saddr, family, dir);
+ } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
+
+ priority = ~0U;
ret = NULL;
- hlist_for_each_entry(pol, chain, bydst) {
+ hlist_for_each_entry_rcu(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir);
if (err) {
if (err == -ESRCH)
@@ -1122,7 +1150,7 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
}
chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, chain, bydst) {
+ hlist_for_each_entry_rcu(pol, chain, bydst) {
if ((pol->priority >= priority) && ret)
break;
@@ -1140,9 +1168,13 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
}
- xfrm_pol_hold(ret);
+ if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
+ goto retry;
+
+ if (ret && !xfrm_pol_hold_rcu(ret))
+ goto retry;
fail:
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1219,10 +1251,9 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
const struct flowi *fl)
{
struct xfrm_policy *pol;
- struct net *net = sock_net(sk);
rcu_read_lock();
- read_lock_bh(&net->xfrm.xfrm_policy_lock);
+ again:
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
bool match = xfrm_selector_match(&pol->selector, fl,
@@ -1237,8 +1268,8 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
err = security_xfrm_policy_lookup(pol->security,
fl->flowi_secid,
policy_to_flow_dir(dir));
- if (!err)
- xfrm_pol_hold(pol);
+ if (!err && !xfrm_pol_hold_rcu(pol))
+ goto again;
else if (err == -ESRCH)
pol = NULL;
else
@@ -1247,7 +1278,6 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
pol = NULL;
}
out:
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
rcu_read_unlock();
return pol;
}
@@ -1271,7 +1301,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
- hlist_del(&pol->bydst);
+ hlist_del_rcu(&pol->bydst);
hlist_del(&pol->byidx);
}
@@ -1295,9 +1325,9 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
struct net *net = xp_net(pol);
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = __xfrm_policy_unlink(pol, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (pol) {
xfrm_policy_kill(pol);
return 0;
@@ -1316,7 +1346,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
return -EINVAL;
#endif
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
old_pol = rcu_dereference_protected(sk->sk_policy[dir],
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
if (pol) {
@@ -1334,7 +1364,7 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
*/
xfrm_sk_policy_unlink(old_pol, dir);
}
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (old_pol) {
xfrm_policy_kill(old_pol);
@@ -1364,9 +1394,9 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
newp->type = old->type;
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
- write_lock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_sk_policy_link(newp, dir);
- write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
xfrm_pol_put(newp);
}
return newp;
@@ -3048,7 +3078,7 @@ static int __net_init xfrm_net_init(struct net *net)
/* Initialize the per-net locks here */
spin_lock_init(&net->xfrm.xfrm_state_lock);
- rwlock_init(&net->xfrm.xfrm_policy_lock);
+ spin_lock_init(&net->xfrm.xfrm_policy_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
return 0;
@@ -3082,6 +3112,7 @@ static struct pernet_operations __net_initdata xfrm_net_ops = {
void __init xfrm_init(void)
{
register_pernet_subsys(&xfrm_net_ops);
+ seqcount_init(&xfrm_policy_hash_generation);
xfrm_input_init();
}
@@ -3179,7 +3210,7 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
struct hlist_head *chain;
u32 priority = ~0U;
- read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
hlist_for_each_entry(pol, chain, bydst) {
if (xfrm_migrate_selector_match(sel, &pol->selector) &&
@@ -3203,7 +3234,7 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
xfrm_pol_hold(ret);
- read_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
return ret;
}
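
The policy-side conversion above swaps the per-net rwlock for RCU on the read side: lookups sample the xfrm_policy_hash_generation seqcount, walk the chains with hlist_for_each_entry_rcu(), and start over if a concurrent rebuild bumped the generation or the candidate's refcount had already dropped to zero. A minimal sketch of the idiom, with hypothetical table and entry types rather than the xfrm ones:

    #include <linux/atomic.h>
    #include <linux/rculist.h>
    #include <linux/seqlock.h>
    #include <linux/types.h>

    struct entry {
        struct hlist_node node;
        atomic_t refcnt;
        u32 key;
    };

    static seqcount_t tbl_generation = SEQCNT_ZERO(tbl_generation);

    /* hypothetical: picks the hash chain for @key in table @t */
    struct table;
    struct hlist_head *table_chain(struct table *t, u32 key);

    static struct entry *lookup(struct table *t, u32 key)
    {
        struct hlist_head *chain;
        struct entry *e;
        unsigned int seq;

        rcu_read_lock();
    retry:
        do {
            seq = read_seqcount_begin(&tbl_generation);
            chain = table_chain(t, key);
        } while (read_seqcount_retry(&tbl_generation, seq));

        hlist_for_each_entry_rcu(e, chain, node)
            if (e->key == key)
                goto found;
        e = NULL;
    found:
        if (read_seqcount_retry(&tbl_generation, seq))
            goto retry;     /* table was rebuilt under us */
        if (e && !atomic_inc_not_zero(&e->refcnt))
            goto retry;     /* entry is dying; look again */
        rcu_read_unlock();
        return e;
    }

Writers still serialize on the (now plain spinlock) xfrm_policy_lock and bracket any rehash with write_seqcount_begin()/write_seqcount_end() so readers can detect and retry it.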
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 4fd725a0c500..cdc2e2e71bff 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -558,7 +558,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
x->repl->notify(x, XFRM_REPLAY_UPDATE);
}
-static struct xfrm_replay xfrm_replay_legacy = {
+static const struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
.recheck = xfrm_replay_check,
@@ -566,7 +566,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
.overflow = xfrm_replay_overflow,
};
-static struct xfrm_replay xfrm_replay_bmp = {
+static const struct xfrm_replay xfrm_replay_bmp = {
.advance = xfrm_replay_advance_bmp,
.check = xfrm_replay_check_bmp,
.recheck = xfrm_replay_check_bmp,
@@ -574,7 +574,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
.overflow = xfrm_replay_overflow_bmp,
};
-static struct xfrm_replay xfrm_replay_esn = {
+static const struct xfrm_replay xfrm_replay_esn = {
.advance = xfrm_replay_advance_esn,
.check = xfrm_replay_check_esn,
.recheck = xfrm_replay_recheck_esn,
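
Constifying the three replay ops tables is a small hardening win: every member is assigned at build time and the tables are only ever read through x->repl, so the compiler can place them in .rodata. In miniature, with a hypothetical ops type:

    #include <linux/types.h>

    struct ops {
        void (*advance)(u32 seq);
    };

    static void advance_impl(u32 seq)
    {
    }

    /* const => the table lives in .rodata, so a stray write faults
     * instead of silently retargeting every user of the ops */
    static const struct ops my_ops = {
        .advance = advance_impl,
    };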
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9895a8c56d8c..419bf5d463bd 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -28,6 +28,11 @@
#include "xfrm_hash.h"
+#define xfrm_state_deref_prot(table, net) \
+ rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+
+static void xfrm_state_gc_task(struct work_struct *work);
+
/* Each xfrm_state may be linked to two tables:
1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
@@ -36,6 +41,15 @@
*/
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
+static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
+
+static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
+static HLIST_HEAD(xfrm_state_gc_list);
+
+static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
+{
+ return atomic_inc_not_zero(&x->refcnt);
+}
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
@@ -76,18 +90,18 @@ static void xfrm_hash_transfer(struct hlist_head *list,
h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family,
nhashmask);
- hlist_add_head(&x->bydst, ndsttable+h);
+ hlist_add_head_rcu(&x->bydst, ndsttable + h);
h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
x->props.family,
nhashmask);
- hlist_add_head(&x->bysrc, nsrctable+h);
+ hlist_add_head_rcu(&x->bysrc, nsrctable + h);
if (x->id.spi) {
h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
x->id.proto, x->props.family,
nhashmask);
- hlist_add_head(&x->byspi, nspitable+h);
+ hlist_add_head_rcu(&x->byspi, nspitable + h);
}
}
}
@@ -122,25 +136,29 @@ static void xfrm_hash_resize(struct work_struct *work)
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ write_seqcount_begin(&xfrm_state_hash_generation);
nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
+ odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
for (i = net->xfrm.state_hmask; i >= 0; i--)
- xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
- nhashmask);
+ xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
- odst = net->xfrm.state_bydst;
- osrc = net->xfrm.state_bysrc;
- ospi = net->xfrm.state_byspi;
+ osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
+ ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
ohashmask = net->xfrm.state_hmask;
- net->xfrm.state_bydst = ndst;
- net->xfrm.state_bysrc = nsrc;
- net->xfrm.state_byspi = nspi;
+ rcu_assign_pointer(net->xfrm.state_bydst, ndst);
+ rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
+ rcu_assign_pointer(net->xfrm.state_byspi, nspi);
net->xfrm.state_hmask = nhashmask;
+ write_seqcount_end(&xfrm_state_hash_generation);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
osize = (ohashmask + 1) * sizeof(struct hlist_head);
+
+ synchronize_rcu();
+
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
@@ -332,6 +350,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
tasklet_hrtimer_cancel(&x->mtimer);
del_timer_sync(&x->rtimer);
+ kfree(x->aead);
kfree(x->aalg);
kfree(x->ealg);
kfree(x->calg);
@@ -355,15 +374,16 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
static void xfrm_state_gc_task(struct work_struct *work)
{
- struct net *net = container_of(work, struct net, xfrm.state_gc_work);
struct xfrm_state *x;
struct hlist_node *tmp;
struct hlist_head gc_list;
spin_lock_bh(&xfrm_state_gc_lock);
- hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
+ hlist_move_list(&xfrm_state_gc_list, &gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
+ synchronize_rcu();
+
hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
xfrm_state_gc_destroy(x);
}
@@ -500,14 +520,12 @@ EXPORT_SYMBOL(xfrm_state_alloc);
void __xfrm_state_destroy(struct xfrm_state *x)
{
- struct net *net = xs_net(x);
-
WARN_ON(x->km.state != XFRM_STATE_DEAD);
spin_lock_bh(&xfrm_state_gc_lock);
- hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
+ hlist_add_head(&x->gclist, &xfrm_state_gc_list);
spin_unlock_bh(&xfrm_state_gc_lock);
- schedule_work(&net->xfrm.state_gc_work);
+ schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);
@@ -520,10 +538,10 @@ int __xfrm_state_delete(struct xfrm_state *x)
x->km.state = XFRM_STATE_DEAD;
spin_lock(&net->xfrm.xfrm_state_lock);
list_del(&x->km.all);
- hlist_del(&x->bydst);
- hlist_del(&x->bysrc);
+ hlist_del_rcu(&x->bydst);
+ hlist_del_rcu(&x->bysrc);
if (x->id.spi)
- hlist_del(&x->byspi);
+ hlist_del_rcu(&x->byspi);
net->xfrm.state_num--;
spin_unlock(&net->xfrm.xfrm_state_lock);
@@ -659,7 +677,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
- hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
if (x->props.family != family ||
x->id.spi != spi ||
x->id.proto != proto ||
@@ -668,7 +686,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
if ((mark & x->mark.m) != x->mark.v)
continue;
- xfrm_state_hold(x);
+ if (!xfrm_state_hold_rcu(x))
+ continue;
return x;
}
@@ -683,7 +702,7 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
- hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
if (x->props.family != family ||
x->id.proto != proto ||
!xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@ -692,7 +711,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
if ((mark & x->mark.m) != x->mark.v)
continue;
- xfrm_state_hold(x);
+ if (!xfrm_state_hold_rcu(x))
+ continue;
return x;
}
@@ -775,13 +795,16 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
unsigned short encap_family = tmpl->encap_family;
+ unsigned int sequence;
struct km_event c;
to_put = NULL;
- spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+
+ rcu_read_lock();
h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
- hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -797,7 +820,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
goto found;
h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
- hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
+ hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
@@ -850,19 +873,21 @@ found:
}
if (km_query(x, tmpl, pol) == 0) {
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
- hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, encap_family);
- hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
- hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
} else {
x->km.state = XFRM_STATE_DEAD;
to_put = x;
@@ -871,13 +896,26 @@ found:
}
}
out:
- if (x)
- xfrm_state_hold(x);
- else
+ if (x) {
+ if (!xfrm_state_hold_rcu(x)) {
+ *err = -EAGAIN;
+ x = NULL;
+ }
+ } else {
*err = acquire_in_progress ? -EAGAIN : error;
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ }
+ rcu_read_unlock();
if (to_put)
xfrm_state_put(to_put);
+
+ if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+ *err = -EAGAIN;
+ if (x) {
+ xfrm_state_put(x);
+ x = NULL;
+ }
+ }
+
return x;
}
@@ -945,16 +983,16 @@ static void __xfrm_state_insert(struct xfrm_state *x)
h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
x->props.reqid, x->props.family);
- hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
- hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
if (x->id.spi) {
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
x->props.family);
- hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
@@ -1063,9 +1101,9 @@ static struct xfrm_state *__find_acq_core(struct net *net,
xfrm_state_hold(x);
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
list_add(&x->km.all, &net->xfrm.state_all);
- hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, family);
- hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
net->xfrm.state_num++;
@@ -1394,9 +1432,9 @@ xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32
{
struct xfrm_state *x;
- spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ rcu_read_lock();
x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
- spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ rcu_read_unlock();
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
@@ -1581,7 +1619,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
if (x->id.spi) {
spin_lock_bh(&net->xfrm.xfrm_state_lock);
h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
- hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
err = 0;
@@ -2099,8 +2137,6 @@ int __net_init xfrm_state_init(struct net *net)
net->xfrm.state_num = 0;
INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
- INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
- INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
spin_lock_init(&net->xfrm.xfrm_state_lock);
return 0;
@@ -2118,7 +2154,7 @@ void xfrm_state_fini(struct net *net)
flush_work(&net->xfrm.state_hash_work);
xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
- flush_work(&net->xfrm.state_gc_work);
+ flush_work(&xfrm_state_gc_work);
WARN_ON(!list_empty(&net->xfrm.state_all));
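
On the state side the same RCU conversion lands, plus one structural change: the gc list and work item move from per-net fields to file-scope globals, and the worker now calls synchronize_rcu() after splicing the list but before freeing, so any lockless lookup that raced with __xfrm_state_delete() has drained before the memory goes away. A sketch of that destruction path, with hypothetical names:

    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct entry {
        struct hlist_node gclist;
    };

    static HLIST_HEAD(gc_list);
    static DEFINE_SPINLOCK(gc_lock);

    static void gc_task(struct work_struct *work)
    {
        struct hlist_head graveyard;
        struct hlist_node *tmp;
        struct entry *e;

        /* detach everything queued so far */
        spin_lock_bh(&gc_lock);
        hlist_move_list(&gc_list, &graveyard);
        spin_unlock_bh(&gc_lock);

        /* wait for every rcu_read_lock() walker to drain */
        synchronize_rcu();

        hlist_for_each_entry_safe(e, tmp, &graveyard, gclist)
            kfree(e);
    }

    static DECLARE_WORK(gc_work, gc_task);

Making the list global rather than per-net also lets the exit path wait on one well-known work item, which is what the flush_work(&xfrm_state_gc_work) change above does.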
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05a6e3d9c258..35a7e794ad04 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -17,13 +17,13 @@ static struct ctl_table xfrm_table[] = {
.procname = "xfrm_aevent_etime",
.maxlen = sizeof(u32),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_douintvec
},
{
.procname = "xfrm_aevent_rseqth",
.maxlen = sizeof(u32),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_douintvec
},
{
.procname = "xfrm_larval_drop",
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d516845e16e3..08892091cfe3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -581,9 +581,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
if (err)
goto error;
- if (attrs[XFRMA_SEC_CTX] &&
- security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
- goto error;
+ if (attrs[XFRMA_SEC_CTX]) {
+ err = security_xfrm_state_alloc(x,
+ nla_data(attrs[XFRMA_SEC_CTX]));
+ if (err)
+ goto error;
+ }
if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
attrs[XFRMA_REPLAY_ESN_VAL])))
@@ -896,7 +899,8 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
struct sock *sk = cb->skb->sk;
struct net *net = sock_net(sk);
- xfrm_state_walk_done(walk, net);
+ if (cb->args[0])
+ xfrm_state_walk_done(walk, net);
return 0;
}
@@ -921,8 +925,6 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
u8 proto = 0;
int err;
- cb->args[0] = 1;
-
err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
xfrma_policy);
if (err < 0)
@@ -939,6 +941,7 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
proto = nla_get_u8(attrs[XFRMA_PROTO]);
xfrm_state_walk_init(walk, proto, filter);
+ cb->args[0] = 1;
}
(void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2051,9 +2054,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (up->hard) {
xfrm_policy_delete(xp, p->dir);
xfrm_audit_policy_delete(xp, 1, true);
- } else {
- // reset the timers here?
- WARN(1, "Don't know what to do with soft policy expire\n");
}
km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
@@ -2117,7 +2117,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
err = verify_newpolicy_info(&ua->policy);
if (err)
- goto bad_policy;
+ goto free_state;
/* build an XP */
xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
@@ -2149,8 +2149,6 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
-bad_policy:
- WARN(1, "BAD policy passed\n");
free_state:
kfree(x);
nomem:
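
Two of the xfrm_user fixes above close one ordering bug: xfrm_dump_sa() used to set cb->args[0] = 1 before parsing could fail, so xfrm_dump_sa_done() would later call xfrm_state_walk_done() on a walker that was never initialized. Moving the assignment after xfrm_state_walk_init(), and gating the done callback on the flag, makes setup and teardown symmetric. The pattern in general form, with hypothetical walker helpers:

    #include <linux/netlink.h>
    #include <linux/skbuff.h>

    /* hypothetical stand-ins for xfrm_state_walk_{init,done} */
    int my_walk_init(struct netlink_callback *cb);
    int my_walk(struct sk_buff *skb, struct netlink_callback *cb);
    void my_walk_done(struct netlink_callback *cb);

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
        if (!cb->args[0]) {
            int err = my_walk_init(cb);

            if (err < 0)
                return err;     /* ->done must see args[0] == 0 */
            cb->args[0] = 1;    /* init complete; teardown is safe */
        }
        return my_walk(skb, cb);
    }

    static int my_dump_done(struct netlink_callback *cb)
    {
        if (cb->args[0])
            my_walk_done(cb);
        return 0;
    }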
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 7a15289da6cc..3303bb85593b 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2016 VMware
+ * Copyright (c) 2016 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -8,12 +9,15 @@
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
#include <uapi/linux/in.h>
#include <uapi/linux/tcp.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/pkt_cls.h>
+#include <net/ipv6.h>
#include "bpf_helpers.h"
+#define _htonl __builtin_bswap32
#define ERROR(ret) do {\
char fmt[] = "ERROR line:%d ret:%d\n";\
bpf_trace_printk(fmt, sizeof(fmt), __LINE__, ret); \
@@ -188,4 +192,190 @@ int _geneve_get_tunnel(struct __sk_buff *skb)
return TC_ACT_OK;
}
+SEC("ipip_set_tunnel")
+int _ipip_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.tunnel_ttl = 64;
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ } else {
+ if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
+ return TC_ACT_SHOT;
+
+ if (tcp->dest == htons(5200))
+ key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
+ else if (tcp->dest == htons(5201))
+ key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */
+ else
+ return TC_ACT_SHOT;
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ipip_get_tunnel")
+int _ipip_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip 0x%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), key.remote_ipv4);
+ return TC_ACT_OK;
+}
+
+SEC("ipip6_set_tunnel")
+int _ipip6_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct iphdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.remote_ipv6[0] = _htonl(0x2401db00);
+ key.tunnel_ttl = 64;
+
+ if (iph->protocol == IPPROTO_ICMP) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else {
+ if (iph->protocol != IPPROTO_TCP || iph->ihl != 5) {
+ ERROR(iph->protocol);
+ return TC_ACT_SHOT;
+ }
+
+ if (tcp->dest == htons(5200)) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else if (tcp->dest == htons(5201)) {
+ key.remote_ipv6[3] = _htonl(2);
+ } else {
+ ERROR(tcp->dest);
+ return TC_ACT_SHOT;
+ }
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ipip6_get_tunnel")
+int _ipip6_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip6 %x::%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+ _htonl(key.remote_ipv6[3]));
+ return TC_ACT_OK;
+}
+
+SEC("ip6ip6_set_tunnel")
+int _ip6ip6_set_tunnel(struct __sk_buff *skb)
+{
+ struct bpf_tunnel_key key = {};
+ void *data = (void *)(long)skb->data;
+ struct ipv6hdr *iph = data;
+ struct tcphdr *tcp = data + sizeof(*iph);
+ void *data_end = (void *)(long)skb->data_end;
+ int ret;
+
+ /* single length check */
+ if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
+ ERROR(1);
+ return TC_ACT_SHOT;
+ }
+
+ key.remote_ipv6[0] = _htonl(0x2401db00);
+ key.tunnel_ttl = 64;
+
+ if (iph->nexthdr == NEXTHDR_ICMP) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else {
+ if (iph->nexthdr != NEXTHDR_TCP) {
+ ERROR(iph->nexthdr);
+ return TC_ACT_SHOT;
+ }
+
+ if (tcp->dest == htons(5200)) {
+ key.remote_ipv6[3] = _htonl(1);
+ } else if (tcp->dest == htons(5201)) {
+ key.remote_ipv6[3] = _htonl(2);
+ } else {
+ ERROR(tcp->dest);
+ return TC_ACT_SHOT;
+ }
+ }
+
+ ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ return TC_ACT_OK;
+}
+
+SEC("ip6ip6_get_tunnel")
+int _ip6ip6_get_tunnel(struct __sk_buff *skb)
+{
+ int ret;
+ struct bpf_tunnel_key key;
+ char fmt[] = "remote ip6 %x::%x\n";
+
+ ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_IPV6);
+ if (ret < 0) {
+ ERROR(ret);
+ return TC_ACT_SHOT;
+ }
+
+ bpf_trace_printk(fmt, sizeof(fmt), _htonl(key.remote_ipv6[0]),
+ _htonl(key.remote_ipv6[3]));
+ return TC_ACT_OK;
+}
+
+
char _license[] SEC("license") = "GPL";
diff --git a/samples/bpf/test_ipip.sh b/samples/bpf/test_ipip.sh
new file mode 100755
index 000000000000..196925403ab4
--- /dev/null
+++ b/samples/bpf/test_ipip.sh
@@ -0,0 +1,178 @@
+#!/bin/bash
+
+function config_device {
+ ip netns add at_ns0
+ ip netns add at_ns1
+ ip netns add at_ns2
+ ip link add veth0 type veth peer name veth0b
+ ip link add veth1 type veth peer name veth1b
+ ip link add veth2 type veth peer name veth2b
+ ip link set veth0b up
+ ip link set veth1b up
+ ip link set veth2b up
+ ip link set dev veth0b mtu 1500
+ ip link set dev veth1b mtu 1500
+ ip link set dev veth2b mtu 1500
+ ip link set veth0 netns at_ns0
+ ip link set veth1 netns at_ns1
+ ip link set veth2 netns at_ns2
+ ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
+ ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
+ ip netns exec at_ns0 ip link set dev veth0 up
+ ip netns exec at_ns1 ip addr add 172.16.1.101/24 dev veth1
+ ip netns exec at_ns1 ip addr add 2401:db00::2/64 dev veth1 nodad
+ ip netns exec at_ns1 ip link set dev veth1 up
+ ip netns exec at_ns2 ip addr add 172.16.1.200/24 dev veth2
+ ip netns exec at_ns2 ip addr add 2401:db00::3/64 dev veth2 nodad
+ ip netns exec at_ns2 ip link set dev veth2 up
+ ip link add br0 type bridge
+ ip link set br0 up
+ ip link set dev br0 mtu 1500
+ ip link set veth0b master br0
+ ip link set veth1b master br0
+ ip link set veth2b master br0
+}
+
+function add_ipip_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ipip local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ipip local 172.16.1.101 remote 172.16.1.200
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ ip netns exec at_ns2 ip link add dev $DEV type ipip external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ipip6_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::1/64 remote 2401:db00::3/64
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::2/64 remote 2401:db00::3/64
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ipip6 external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
+}
+
+function add_ip6ip6_tunnel {
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::1/64 remote 2401:db00::3/64
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 2601:646::1/64
+ ip netns exec at_ns1 \
+ ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::2/64 remote 2401:db00::3/64
+ ip netns exec at_ns1 ip link set dev $DEV_NS up
+ # same inner IP address in at_ns0 and at_ns1
+ ip netns exec at_ns1 ip addr add dev $DEV_NS 2601:646::1/64
+
+ ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ip6ip6 external
+ ip netns exec at_ns2 ip link set dev $DEV up
+ ip netns exec at_ns2 ip addr add dev $DEV 2601:646::2/64
+}
+
+function attach_bpf {
+ DEV=$1
+ SET_TUNNEL=$2
+ GET_TUNNEL=$3
+ ip netns exec at_ns2 tc qdisc add dev $DEV clsact
+ ip netns exec at_ns2 tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
+ ip netns exec at_ns2 tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
+}
+
+function test_ipip {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ipip_tunnel
+ attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns2 ping -c 1 10.1.1.100
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+ cleanup
+}
+
+# IPv4 over IPv6 tunnel
+function test_ipip6 {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ipip6_tunnel
+ attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel
+
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns2 ping -c 1 10.1.1.100
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
+ cleanup
+}
+
+# IPv6 over IPv6 tunnel
+function test_ip6ip6 {
+ DEV_NS=ipip_std
+ DEV=ipip_bpf
+ config_device
+# tcpdump -nei br0 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+
+ add_ip6ip6_tunnel
+ attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel
+
+ ip netns exec at_ns0 ping -6 -c 1 2601:646::2
+ ip netns exec at_ns2 ping -6 -c 1 2601:646::1
+ ip netns exec at_ns0 iperf -6sD -p 5200 > /dev/null
+ ip netns exec at_ns1 iperf -6sD -p 5201 > /dev/null
+ sleep 0.2
+ # tcp check _same_ IP over different tunnels
+ ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5200
+ ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5201
+ cleanup
+}
+
+function cleanup {
+ set +ex
+ pkill iperf
+ ip netns delete at_ns0
+ ip netns delete at_ns1
+ ip netns delete at_ns2
+ ip link del veth0
+ ip link del veth1
+ ip link del veth2
+ ip link del br0
+ pkill tcpdump
+ pkill cat
+ set -ex
+}
+
+cleanup
+echo "Testing IP tunnels..."
+test_ipip
+test_ipip6
+test_ip6ip6
+echo "*** PASS ***"
diff --git a/samples/bpf/test_tunnel_bpf.sh b/samples/bpf/test_tunnel_bpf.sh
index 4956589a83ae..1ff634f187b7 100755
--- a/samples/bpf/test_tunnel_bpf.sh
+++ b/samples/bpf/test_tunnel_bpf.sh
@@ -9,15 +9,13 @@
# local 172.16.1.200 remote 172.16.1.100
# veth1 IP: 172.16.1.200, tunnel dev <type>11
-set -e
-
function config_device {
ip netns add at_ns0
ip link add veth0 type veth peer name veth1
ip link set veth0 netns at_ns0
ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
ip netns exec at_ns0 ip link set dev veth0 up
- ip link set dev veth1 up
+ ip link set dev veth1 up mtu 1500
ip addr add dev veth1 172.16.1.200/24
}
@@ -67,6 +65,19 @@ function add_geneve_tunnel {
ip addr add dev $DEV 10.1.1.200/24
}
+function add_ipip_tunnel {
+ # in namespace
+ ip netns exec at_ns0 \
+ ip link add dev $DEV_NS type $TYPE local 172.16.1.100 remote 172.16.1.200
+ ip netns exec at_ns0 ip link set dev $DEV_NS up
+ ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
+
+ # out of namespace
+ ip link add dev $DEV type $TYPE external
+ ip link set dev $DEV up
+ ip addr add dev $DEV 10.1.1.200/24
+}
+
function attach_bpf {
DEV=$1
SET_TUNNEL=$2
@@ -85,6 +96,7 @@ function test_gre {
attach_bpf $DEV gre_set_tunnel gre_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
}
function test_vxlan {
@@ -96,6 +108,7 @@ function test_vxlan {
attach_bpf $DEV vxlan_set_tunnel vxlan_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
}
function test_geneve {
@@ -107,21 +120,48 @@ function test_geneve {
attach_bpf $DEV geneve_set_tunnel geneve_get_tunnel
ping -c 1 10.1.1.100
ip netns exec at_ns0 ping -c 1 10.1.1.200
+ cleanup
+}
+
+function test_ipip {
+ TYPE=ipip
+ DEV_NS=ipip00
+ DEV=ipip11
+ config_device
+ tcpdump -nei veth1 &
+ cat /sys/kernel/debug/tracing/trace_pipe &
+ add_ipip_tunnel
+ ethtool -K veth1 gso off gro off rx off tx off
+ ip link set dev veth1 mtu 1500
+ attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
+ ping -c 1 10.1.1.100
+ ip netns exec at_ns0 ping -c 1 10.1.1.200
+ ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
+ sleep 0.2
+ iperf -c 10.1.1.100 -n 5k -p 5200
+ cleanup
}
function cleanup {
+ set +ex
+ pkill iperf
ip netns delete at_ns0
ip link del veth1
- ip link del $DEV
+ ip link del ipip11
+ ip link del gretap11
+ ip link del geneve11
+ pkill tcpdump
+ pkill cat
+ set -ex
}
+cleanup
echo "Testing GRE tunnel..."
test_gre
-cleanup
echo "Testing VXLAN tunnel..."
test_vxlan
-cleanup
echo "Testing GENEVE tunnel..."
test_geneve
-cleanup
-echo "Success"
+echo "Testing IPIP tunnel..."
+test_ipip
+echo "*** PASS ***"
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 78c6f131d94f..ac590d4b7f02 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -291,6 +291,29 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "invalid argument register",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 !read_ok",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "non-invalid argument register",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"check valid spill/fill",
.insns = {
/* spill R1(ctx) into stack */
@@ -1210,6 +1233,54 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "raw_stack: skb_load_bytes, negative len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, -8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "raw_stack: skb_load_bytes, negative len 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, ~0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "raw_stack: skb_load_bytes, zero len",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid stack type R3",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"raw_stack: skb_load_bytes, no init",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 4),
@@ -1511,7 +1582,105 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
},
{
- "direct packet access: test4",
+ "direct packet access: test4 (write)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test5 (pkt_end >= reg, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test6 (pkt_end >= reg, bad access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test7 (pkt_end >= reg, both accesses)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid access to packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test8 (double test, variant 1)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test9 (double test, variant 2)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
@@ -1519,12 +1688,35 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test10 (write invalid)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .errstr = "cannot write",
+ .errstr = "invalid access to packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@@ -1634,6 +1826,343 @@ static struct bpf_test tests[] = {
.errstr = "invalid access to packet",
.prog_type = BPF_PROG_TYPE_XDP,
},
+ {
+ "helper access to packet: test6, cls valid packet_ptr range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {5},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test7, cls unchecked packet_ptr",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {1},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test8, cls variable add",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
+ BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {11},
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test9, cls packet_ptr with bad range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {7},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test10, cls packet_ptr with too short range",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup = {6},
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test11, cls unsuitable helper 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 42),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test12, cls unsuitable helper 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 4),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "helper access to the packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test13, cls helper ok",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test14, cls helper fail sub",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "type=inv expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test15, cls helper fail range 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test16, cls helper fail range 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, -9),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test17, cls helper fail range 3",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, ~0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test18, cls helper fail range zero",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test19, pkt end as input",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R1 type=pkt_end expected=fp",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "helper access to packet: test20, wrong reg",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
+ BPF_MOV64_IMM(BPF_REG_2, 4),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_4, 0),
+ BPF_MOV64_IMM(BPF_REG_5, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
};
static int probe_filter_length(struct bpf_insn *fp)
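
Most of the new verifier cases spell out, as raw instructions, the bounds-check discipline that C programs compiled for BPF must follow: derive a pointer from skb->data, compare it against skb->data_end, and make sure that check dominates every load or store (test7, for instance, rejects a layout where only one of two accesses sits behind the branch). The accepted shape in C looks roughly like this sketch, using the same headers as tcbpf2_kern.c above:

    #include <uapi/linux/if_ether.h>
    #include <uapi/linux/pkt_cls.h>
    #include "bpf_helpers.h"

    SEC("classifier")
    int parse(struct __sk_buff *skb)
    {
        void *data = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        struct ethhdr *eth = data;

        /* the verifier insists this test precedes any access to *eth */
        if (data + sizeof(*eth) > data_end)
            return TC_ACT_SHOT;

        return eth->h_proto ? TC_ACT_OK : TC_ACT_SHOT;
    }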
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 4de3cc42fc50..206a6b346a8d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3570,15 +3570,6 @@ sub process {
}
}
-# check for uses of DEFINE_PCI_DEVICE_TABLE
- if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
- if (WARN("DEFINE_PCI_DEVICE_TABLE",
- "Prefer struct pci_device_id over deprecated DEFINE_PCI_DEVICE_TABLE\n" . $herecurr) &&
- $fix) {
- $fixed[$fixlinenr] =~ s/\b(?:static\s+|)DEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=\s*/static const struct pci_device_id $1\[\] = /;
- }
- }
-
# check for new typedefs, only function parameters and sparse annotations
# make sense.
if ($line =~ /\btypedef\s/ &&
diff --git a/scripts/faddr2line b/scripts/faddr2line
new file mode 100755
index 000000000000..450b33257339
--- /dev/null
+++ b/scripts/faddr2line
@@ -0,0 +1,177 @@
+#!/bin/bash
+#
+# Translate stack dump function offsets.
+#
+# addr2line doesn't work with KASLR addresses. This works similarly to
+# addr2line, but instead takes the 'func+0x123' format as input:
+#
+# $ ./scripts/faddr2line ~/k/vmlinux meminfo_proc_show+0x5/0x568
+# meminfo_proc_show+0x5/0x568:
+# meminfo_proc_show at fs/proc/meminfo.c:27
+#
+# If the address is part of an inlined function, the full inline call chain is
+# printed:
+#
+# $ ./scripts/faddr2line ~/k/vmlinux native_write_msr+0x6/0x27
+# native_write_msr+0x6/0x27:
+# arch_static_branch at arch/x86/include/asm/msr.h:121
+# (inlined by) static_key_false at include/linux/jump_label.h:125
+# (inlined by) native_write_msr at arch/x86/include/asm/msr.h:125
+#
+# The function size after the '/' in the input is optional, but recommended.
+# It's used to help disambiguate any duplicate symbol names, which can occur
+# rarely. If the size is omitted for a duplicate symbol then it's possible for
+# multiple code sites to be printed:
+#
+# $ ./scripts/faddr2line ~/k/vmlinux raw_ioctl+0x5
+# raw_ioctl+0x5/0x20:
+# raw_ioctl at drivers/char/raw.c:122
+#
+# raw_ioctl+0x5/0xb1:
+# raw_ioctl at net/ipv4/raw.c:876
+#
+# Multiple addresses can be specified on a single command line:
+#
+# $ ./scripts/faddr2line ~/k/vmlinux type_show+0x10/45 free_reserved_area+0x90
+# type_show+0x10/0x2d:
+# type_show at drivers/video/backlight/backlight.c:213
+#
+# free_reserved_area+0x90/0x123:
+# free_reserved_area at mm/page_alloc.c:6429 (discriminator 2)
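+#
+# The func+offset/size strings can be pasted straight from a stack dump;
+# e.g. a trace line such as (address and values illustrative):
+#
+#   [<ffffffff8105f7bc>] dump_stack+0x63/0x89
+#
+# would be queried as 'dump_stack+0x63/0x89'.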
+
+
+set -o errexit
+set -o nounset
+
+usage() {
+ echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
+ exit 1
+}
+
+warn() {
+ echo "$1" >&2
+}
+
+die() {
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+# These checks run at script load time, so die() must already be defined.
+command -v awk >/dev/null 2>&1 || die "awk isn't installed"
+command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
+command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+
+# Try to figure out the source directory prefix so we can remove it from the
+# addr2line output. HACK ALERT: This assumes that start_kernel() is in
+# init/main.c! This only works for vmlinux. Otherwise it falls back to
+# printing the absolute path.
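+#
+# For example (paths illustrative): if addr2line resolves start_kernel to
+# /home/user/linux/init/main.c:508, everything before 'init/main.c' becomes
+# the prefix, i.e. DIR_PREFIX=/home/user/linux/.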
+find_dir_prefix() {
+ local objfile=$1
+
+ local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+ [[ -z $start_kernel_addr ]] && return
+
+ local file_line=$(addr2line -e $objfile $start_kernel_addr)
+ [[ -z $file_line ]] && return
+
+ local prefix=${file_line%init/main.c:*}
+ if [[ -z $prefix ]] || [[ $prefix = $file_line ]]; then
+ return
+ fi
+
+ DIR_PREFIX=$prefix
+ return 0
+}
+
+__faddr2line() {
+ local objfile=$1
+ local func_addr=$2
+ local dir_prefix=$3
+ local print_warnings=$4
+
+ local func=${func_addr%+*}
+ local offset=${func_addr#*+}
+ offset=${offset%/*}
+ local size=
+ [[ $func_addr =~ "/" ]] && size=${func_addr#*/}
+
+ if [[ -z $func ]] || [[ -z $offset ]] || [[ $func = $func_addr ]]; then
+ warn "bad func+offset $func_addr"
+ DONE=1
+ return
+ fi
+
+ # Go through each of the object's symbols which match the func name.
+ # In rare cases there might be duplicates.
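+ # A readelf -sW symbol line looks roughly like (values illustrative):
+ #   47: ffffffff81000000   291 FUNC  GLOBAL DEFAULT   1 start_kernel
+ # so after word-splitting, fields[1] is the base address, fields[2] the
+ # size and fields[3] the type.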
+ while read symbol; do
+ local fields=($symbol)
+ local sym_base=0x${fields[1]}
+ local sym_size=${fields[2]}
+ local sym_type=${fields[3]}
+
+ # calculate the address
+ local addr=$(($sym_base + $offset))
+ if [[ -z $addr ]] || [[ $addr = 0 ]]; then
+ warn "bad address: $sym_base + $offset"
+ DONE=1
+ return
+ fi
+ local hexaddr=0x$(printf %x $addr)
+
+ # weed out non-function symbols
+ if [[ $sym_type != "FUNC" ]]; then
+ [[ $print_warnings = 1 ]] &&
+ echo "skipping $func address at $hexaddr due to non-function symbol"
+ continue
+ fi
+
+ # if the user provided a size, make sure it matches the symbol's size
+ if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then
+ [[ $print_warnings = 1 ]] &&
+ echo "skipping $func address at $hexaddr due to size mismatch ($size != $sym_size)"
+ continue
+ fi
+
+ # make sure the provided offset is within the symbol's range
+ if [[ $offset -gt $sym_size ]]; then
+ [[ $print_warnings = 1 ]] &&
+ echo "skipping $func address at $hexaddr due to size mismatch ($offset > $sym_size)"
+ continue
+ fi
+
+ # separate multiple entries with a blank line
+ [[ $FIRST = 0 ]] && echo
+ FIRST=0
+
+ local hexsize=0x$(printf %x $sym_size)
+ echo "$func+$offset/$hexsize:"
+ addr2line -fpie $objfile $hexaddr | sed "s; $dir_prefix\(\./\)*; ;"
+ DONE=1
+
+ done < <(readelf -sW $objfile | awk -v f=$func '$8 == f {print}')
+}
+
+[[ $# -lt 2 ]] && usage
+
+objfile=$1
+[[ ! -f $objfile ]] && die "can't find objfile $objfile"
+shift
+
+DIR_PREFIX=supercalifragilisticexpialidocious
+find_dir_prefix $objfile
+
+FIRST=1
+while [[ $# -gt 0 ]]; do
+ func_addr=$1
+ shift
+
+ # print any matches found
+ DONE=0
+ __faddr2line $objfile $func_addr $DIR_PREFIX 0
+
+ # if no match was found, print warnings
+ if [[ $DONE = 0 ]]; then
+ __faddr2line $objfile $func_addr $DIR_PREFIX 1
+ warn "no match for $func_addr"
+ fi
+done
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index e1c09e2f9be7..8ea9fd2b6573 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -332,7 +332,9 @@ if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
(cd $objtree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrobjfiles"
fi
(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
-(cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+if grep -q '^CONFIG_GCC_PLUGINS=y' $KCONFIG_CONFIG ; then
+ (cd $objtree; find scripts/gcc-plugins -name \*.so -o -name gcc-common.h) >> "$objtree/debian/hdrobjfiles"
+fi
destdir=$kernel_headers_dir/usr/src/linux-headers-$version
mkdir -p "$destdir"
(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
diff --git a/scripts/tags.sh b/scripts/tags.sh
index ed7eef24ef89..b3775a9604ea 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -206,7 +206,6 @@ regex_c=(
'/\<DEFINE_PER_CPU_SHARED_ALIGNED([^,]*, *\([[:alnum:]_]*\)/\1/v/'
'/\<DECLARE_WAIT_QUEUE_HEAD(\([[:alnum:]_]*\)/\1/v/'
'/\<DECLARE_\(TASKLET\|WORK\|DELAYED_WORK\)(\([[:alnum:]_]*\)/\2/v/'
- '/\<DEFINE_PCI_DEVICE_TABLE(\([[:alnum:]_]*\)/\1/v/'
'/\(^\s\)OFFSET(\([[:alnum:]_]*\)/\2/v/'
'/\(^\s\)DEFINE(\([[:alnum:]_]*\)/\2/v/'
'/\<DEFINE_HASHTABLE(\([[:alnum:]_]*\)/\1/v/'
diff --git a/security/Kconfig b/security/Kconfig
index da10d9b573a4..118f4549404e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
or are part of the kernel text. This kills entire classes
of heap overflow exploits and similar kernel memory exposures.
+config HARDENED_USERCOPY_PAGESPAN
+ bool "Refuse to copy allocations that span multiple pages"
+ depends on HARDENED_USERCOPY
+ depends on EXPERT
+ help
+ When a multi-page allocation is done without __GFP_COMP,
+ hardened usercopy will reject attempts to copy it. There are,
+ however, several cases of this in the kernel that have not all
+ been removed. This config is intended to be used only while
+ trying to find such users.
+
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 795437b10082..b450a27588c8 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
return -EBUSY;
}
list_add_tail(&rmidi->list, &snd_rawmidi_devices);
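+ /* do not hold register_mutex across the device registration below */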
+ mutex_unlock(&register_mutex);
err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
rmidi->card, rmidi->device,
&snd_rawmidi_f_ops, rmidi, &rmidi->dev);
if (err < 0) {
rmidi_err(rmidi, "unable to register\n");
+ mutex_lock(&register_mutex);
list_del(&rmidi->list);
mutex_unlock(&register_mutex);
return err;
@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
if (rmidi->ops && rmidi->ops->dev_register &&
(err = rmidi->ops->dev_register(rmidi)) < 0) {
snd_unregister_device(&rmidi->dev);
+ mutex_lock(&register_mutex);
list_del(&rmidi->list);
mutex_unlock(&register_mutex);
return err;
@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
}
}
#endif /* CONFIG_SND_OSSEMUL */
- mutex_unlock(&register_mutex);
sprintf(name, "midi%d", rmidi->device);
entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
if (entry) {
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 9a6157ea6881..fc144f43faa6 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -35,6 +35,9 @@
#include <sound/initval.h>
#include <linux/kmod.h>
+/* internal flags */
+#define SNDRV_TIMER_IFLG_PAUSED 0x00010000
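+/* set on pause by snd_timer_stop1() so that snd_timer_continue() only
+ * resumes a timer that was actually paused */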
+
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#else
@@ -294,8 +297,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
get_device(&timer->card->card_dev);
timeri->slave_class = tid->dev_sclass;
timeri->slave_id = slave_id;
- if (list_empty(&timer->open_list_head) && timer->hw.open)
- timer->hw.open(timer);
+
+ if (list_empty(&timer->open_list_head) && timer->hw.open) {
+ int err = timer->hw.open(timer);
+ if (err) {
+ kfree(timeri->owner);
+ kfree(timeri);
+
+ if (timer->card)
+ put_device(&timer->card->card_dev);
+ module_put(timer->module);
+ mutex_unlock(&register_mutex);
+ return err;
+ }
+ }
+
list_add_tail(&timeri->open_list, &timer->open_list_head);
snd_timer_check_master(timeri);
mutex_unlock(&register_mutex);
@@ -526,6 +542,10 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ if (stop)
+ timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
+ else
+ timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_CONTINUE);
unlock:
@@ -587,6 +607,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
+ /* timer can continue only after pause */
+ if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+ return -EINVAL;
+
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
@@ -813,6 +837,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
timer->tmr_subdevice = tid->subdevice;
if (id)
strlcpy(timer->id, id, sizeof(timer->id));
+ timer->sticks = 1;
INIT_LIST_HEAD(&timer->device_list);
INIT_LIST_HEAD(&timer->open_list_head);
INIT_LIST_HEAD(&timer->active_list_head);
@@ -1817,6 +1842,9 @@ static int snd_timer_user_continue(struct file *file)
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
+ /* start the timer instead of continuing it if it was not paused before */
+ if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+ return snd_timer_user_start(file);
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
@@ -1958,6 +1986,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
tu->qused--;
spin_unlock_irq(&tu->qlock);
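+ /* take ioctl_lock so the queue buffers cannot be switched or freed
+ * while the element is copied to user space */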
+ mutex_lock(&tu->ioctl_lock);
if (tu->tread) {
if (copy_to_user(buffer, &tu->tqueue[qhead],
sizeof(struct snd_timer_tread)))
@@ -1967,6 +1996,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
sizeof(struct snd_timer_read)))
err = -EFAULT;
}
+ mutex_unlock(&tu->ioctl_lock);
spin_lock_irq(&tu->qlock);
if (err < 0)
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 03ed35237e2b..d73c12b8753d 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -108,7 +108,6 @@ struct snd_efw {
u8 *resp_buf;
u8 *pull_ptr;
u8 *push_ptr;
- unsigned int resp_queues;
};
int snd_efw_transaction_cmd(struct fw_unit *unit,
diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 33df8655fe81..2e1d9a23920c 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
{
unsigned int length, till_end, type;
struct snd_efw_transaction *t;
+ u8 *pull_ptr;
long count = 0;
if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
buf += sizeof(type);
/* write into buffer as many responses as possible */
- while (efw->resp_queues > 0) {
- t = (struct snd_efw_transaction *)(efw->pull_ptr);
+ spin_lock_irq(&efw->lock);
+
+ /*
+ * If another task reaches here while this task is accessing user
+ * space, it picks up the current position in the buffer and can read
+ * the same series of responses.
+ */
+ pull_ptr = efw->pull_ptr;
+
+ while (efw->push_ptr != pull_ptr) {
+ t = (struct snd_efw_transaction *)(pull_ptr);
length = be32_to_cpu(t->length) * sizeof(__be32);
/* confirm enough space for this response */
@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
/* copy from ring buffer to user buffer */
while (length > 0) {
till_end = snd_efw_resp_buf_size -
- (unsigned int)(efw->pull_ptr - efw->resp_buf);
+ (unsigned int)(pull_ptr - efw->resp_buf);
till_end = min_t(unsigned int, length, till_end);
- if (copy_to_user(buf, efw->pull_ptr, till_end))
+ spin_unlock_irq(&efw->lock);
+
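+ /* copy_to_user() may fault and sleep, so it must not run under
+ * the spinlock */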
+ if (copy_to_user(buf, pull_ptr, till_end))
return -EFAULT;
- efw->pull_ptr += till_end;
- if (efw->pull_ptr >= efw->resp_buf +
- snd_efw_resp_buf_size)
- efw->pull_ptr -= snd_efw_resp_buf_size;
+ spin_lock_irq(&efw->lock);
+
+ pull_ptr += till_end;
+ if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
+ pull_ptr -= snd_efw_resp_buf_size;
length -= till_end;
buf += till_end;
count += till_end;
remained -= till_end;
}
-
- efw->resp_queues--;
}
+ /*
+ * All tasks can read from the buffer nearly simultaneously, but each
+ * task's final position differs with the length of the buffer it was
+ * given. Here, for simplicity, the buffer position is set by whichever
+ * task ran last. A listening application should therefore read from
+ * the buffer with a single thread; otherwise each task can see a
+ * different sequence of responses depending on its buffer length.
+ */
+ efw->pull_ptr = pull_ptr;
+
+ spin_unlock_irq(&efw->lock);
+
return count;
}
@@ -76,14 +99,17 @@ static long
hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
loff_t *offset)
{
- union snd_firewire_event event;
+ union snd_firewire_event event = {
+ .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+ };
- memset(&event, 0, sizeof(event));
+ spin_lock_irq(&efw->lock);
- event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
event.lock_status.status = (efw->dev_lock_count > 0);
efw->dev_lock_changed = false;
+ spin_unlock_irq(&efw->lock);
+
count = min_t(long, count, sizeof(event.lock_status));
if (copy_to_user(buf, &event, count))
@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
{
struct snd_efw *efw = hwdep->private_data;
DEFINE_WAIT(wait);
+ bool dev_lock_changed;
+ bool queued;
spin_lock_irq(&efw->lock);
- while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
+ dev_lock_changed = efw->dev_lock_changed;
+ queued = efw->push_ptr != efw->pull_ptr;
+
+ while (!dev_lock_changed && !queued) {
prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irq(&efw->lock);
schedule();
@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
if (signal_pending(current))
return -ERESTARTSYS;
spin_lock_irq(&efw->lock);
+ dev_lock_changed = efw->dev_lock_changed;
+ queued = efw->push_ptr != efw->pull_ptr;
}
- if (efw->dev_lock_changed)
+ spin_unlock_irq(&efw->lock);
+
+ if (dev_lock_changed)
count = hwdep_read_locked(efw, buf, count, offset);
- else if (efw->resp_queues > 0)
+ else if (queued)
count = hwdep_read_resp_buf(efw, buf, count, offset);
- spin_unlock_irq(&efw->lock);
-
return count;
}
@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
poll_wait(file, &efw->hwdep_wait, wait);
spin_lock_irq(&efw->lock);
- if (efw->dev_lock_changed || (efw->resp_queues > 0))
+ if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
events = POLLIN | POLLRDNORM;
else
events = 0;
diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
index 0639dcb13f7d..beb0a0ffee57 100644
--- a/sound/firewire/fireworks/fireworks_proc.c
+++ b/sound/firewire/fireworks/fireworks_proc.c
@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
else
consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
- snd_iprintf(buffer, "%d %d/%d\n",
- efw->resp_queues, consumed, snd_efw_resp_buf_size);
+ snd_iprintf(buffer, "%d/%d\n",
+ consumed, snd_efw_resp_buf_size);
}
static void
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index f550808d1784..36a08ba51ec7 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
size_t capacity, till_end;
struct snd_efw_transaction *t;
- spin_lock_irq(&efw->lock);
-
t = (struct snd_efw_transaction *)data;
length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
+ spin_lock_irq(&efw->lock);
+
if (efw->push_ptr < efw->pull_ptr)
capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
else
@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
}
/* for hwdep */
- efw->resp_queues++;
wake_up(&efw->hwdep_wait);
*rcode = RCODE_COMPLETE;
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 131267c3a042..106406cbfaa3 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -16,31 +16,14 @@
#include "tascam.h"
-static long hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
- long count)
-{
- union snd_firewire_event event;
-
- memset(&event, 0, sizeof(event));
-
- event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
- event.lock_status.status = (tscm->dev_lock_count > 0);
- tscm->dev_lock_changed = false;
-
- count = min_t(long, count, sizeof(event.lock_status));
-
- if (copy_to_user(buf, &event, count))
- return -EFAULT;
-
- return count;
-}
-
static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
loff_t *offset)
{
struct snd_tscm *tscm = hwdep->private_data;
DEFINE_WAIT(wait);
- union snd_firewire_event event;
+ union snd_firewire_event event = {
+ .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+ };
spin_lock_irq(&tscm->lock);
@@ -54,10 +37,16 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
spin_lock_irq(&tscm->lock);
}
- memset(&event, 0, sizeof(event));
- count = hwdep_read_locked(tscm, buf, count);
+ event.lock_status.status = (tscm->dev_lock_count > 0);
+ tscm->dev_lock_changed = false;
+
spin_unlock_irq(&tscm->lock);
+ count = min_t(long, count, sizeof(event.lock_status));
+
+ if (copy_to_user(buf, &event, count))
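+ /* the copy runs after the lock is dropped, since copy_to_user()
+ * may sleep */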
+ return -EFAULT;
+
return count;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7100f05e651a..575cefd8cc4a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4855,6 +4855,7 @@ enum {
ALC221_FIXUP_HP_FRONT_MIC,
ALC292_FIXUP_TPT460,
ALC298_FIXUP_SPK_VOLUME,
+ ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5516,6 +5517,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
},
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x90170151 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5560,6 +5570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -5895,6 +5906,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60170},
{0x14, 0x90170120},
{0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell Inspiron 5468", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60180},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC256_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 6cf1f3597455..152292e5ee2b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1141,6 +1141,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 0e8a1f7a292d..f39c0e9c0d5c 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -348,7 +348,7 @@ int main(int argc, char **argv)
int notrigger = 0;
char *dummy;
- struct iio_channel_info *channels;
+ struct iio_channel_info *channels = NULL;
register_cleanup();
@@ -456,7 +456,7 @@ int main(int argc, char **argv)
if (notrigger) {
printf("trigger-less mode selected\n");
- } if (trig_num >= 0) {
+ } else if (trig_num >= 0) {
char *trig_dev_name;
ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
if (ret < 0) {
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index d9836c5eb694..11c8d9bc762e 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -3266,6 +3266,9 @@ int main(int argc, char *argv[])
}
}
+ /* If we exit via err(), this kills all the threads, restores tty. */
+ atexit(cleanup_devices);
+
/* We always have a console device, and it's always device 1. */
setup_console();
@@ -3369,9 +3372,6 @@ int main(int argc, char *argv[])
/* Ensure that we terminate if a device-servicing child dies. */
signal(SIGCHLD, kill_launcher);
- /* If we exit via err(), this kills all the threads, restores tty. */
- atexit(cleanup_devices);
-
/* If requested, chroot to a directory */
if (chroot_path) {
if (chroot(chroot_path) != 0)