-rw-r--r--.clang-format12
-rw-r--r--.mailmap5
-rw-r--r--Documentation/admin-guide/dynamic-debug-howto.rst1
-rw-r--r--Documentation/bpf/ringbuf.rst5
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt7
-rw-r--r--Documentation/devicetree/bindings/net/can/microchip,mcp25xxfd.yaml79
-rw-r--r--Documentation/devicetree/bindings/net/dsa/mt7530.txt13
-rw-r--r--Documentation/devicetree/bindings/net/marvell,prestera.txt34
-rw-r--r--Documentation/devicetree/bindings/ptp/ptp-qoriq.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt16
-rw-r--r--Documentation/driver-api/dma-buf.rst2
-rw-r--r--Documentation/features/debug/debug-vm-pgtable/arch-support.txt2
-rw-r--r--Documentation/networking/device_drivers/ethernet/amazon/ena.rst25
-rw-r--r--Documentation/networking/ethtool-netlink.rst14
-rw-r--r--Documentation/networking/kapi.rst9
-rw-r--r--Documentation/networking/statistics.rst57
-rw-r--r--Documentation/virt/kvm/api.rst42
-rw-r--r--MAINTAINERS73
-rw-r--r--Makefile10
-rw-r--r--arch/arc/boot/dts/hsdk.dts6
-rw-r--r--arch/arc/include/asm/pgalloc.h4
-rw-r--r--arch/arc/kernel/perf_event.c14
-rw-r--r--arch/arc/kernel/troubleshoot.c77
-rw-r--r--arch/arc/mm/init.c27
-rw-r--r--arch/arc/plat-eznps/include/plat/ctop.h1
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_icp.dts2
-rw-r--r--arch/arm/boot/dts/bcm-hr2.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-logicpd.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-prtwd2.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/imx7d-zii-rmu2.dts2
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi8
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi29
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi2
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5.dtsi20
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi2
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi2
-rw-r--r--arch/arm/configs/integrator_defconfig16
-rw-r--r--arch/arm/mach-omap2/omap-iommu.c2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts50
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts63
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi12
-rw-r--r--arch/arm64/configs/defconfig12
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h14
-rw-r--r--arch/arm64/include/asm/kvm_host.h2
-rw-r--r--arch/arm64/kernel/cpu_errata.c8
-rw-r--r--arch/arm64/kernel/paravirt.c26
-rw-r--r--arch/arm64/kvm/arm.c3
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h2
-rw-r--r--arch/arm64/kvm/mmu.c12
-rw-r--r--arch/arm64/kvm/pvtime.c29
-rw-r--r--arch/arm64/kvm/trace_arm.h16
-rw-r--r--arch/arm64/kvm/trace_handle_exit.h6
-rw-r--r--arch/arm64/net/bpf_jit_comp.c43
-rw-r--r--arch/ia64/include/asm/acpi.h2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/sni/a20r.c9
-rw-r--r--arch/openrisc/include/asm/uaccess.h33
-rw-r--r--arch/openrisc/kernel/setup.c10
-rw-r--r--arch/openrisc/mm/cache.c2
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/configs/pasemi_defconfig1
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h10
-rw-r--r--arch/powerpc/kernel/dma-iommu.c3
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32.lds.S1
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S3
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c15
-rw-r--r--arch/powerpc/mm/init_64.c11
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c2
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/boot/dts/kendryte/k210.dtsi6
-rw-r--r--arch/riscv/include/asm/clint.h26
-rw-r--r--arch/riscv/include/asm/ftrace.h7
-rw-r--r--arch/riscv/include/asm/timex.h27
-rw-r--r--arch/riscv/kernel/ftrace.c19
-rw-r--r--arch/riscv/mm/init.c7
-rw-r--r--arch/s390/include/asm/ccwdev.h9
-rw-r--r--arch/s390/include/asm/chsc.h7
-rw-r--r--arch/s390/include/asm/css_chars.h4
-rw-r--r--arch/s390/kernel/entry.h1
-rw-r--r--arch/s390/kernel/idle.c5
-rw-r--r--arch/s390/kernel/pgm_check.S2
-rw-r--r--arch/s390/kernel/setup.c6
-rw-r--r--arch/s390/mm/fault.c20
-rw-r--r--arch/s390/net/bpf_jit_comp.c61
-rw-r--r--arch/s390/pci/pci.c4
-rw-r--r--arch/s390/pci/pci_event.c2
-rw-r--r--arch/sh/include/asm/smp.h1
-rw-r--r--arch/sh/kernel/entry-common.S1
-rw-r--r--arch/sh/kernel/ptrace_32.c15
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/configs/i386_defconfig2
-rw-r--r--arch/x86/configs/x86_64_defconfig1
-rw-r--r--arch/x86/entry/common.c29
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/entry-common.h12
-rw-r--r--arch/x86/include/asm/frame.h19
-rw-r--r--arch/x86/include/asm/nospec-branch.h16
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/kernel/kvm.c4
-rw-r--r--arch/x86/kernel/process.c3
-rw-r--r--arch/x86/kernel/traps.c65
-rw-r--r--arch/x86/kvm/emulate.c22
-rw-r--r--arch/x86/kvm/mmu/mmu.c2
-rw-r--r--arch/x86/kvm/svm/nested.c7
-rw-r--r--arch/x86/kvm/svm/sev.c1
-rw-r--r--arch/x86/kvm/svm/svm.c36
-rw-r--r--arch/x86/kvm/vmx/nested.c10
-rw-r--r--arch/x86/kvm/vmx/vmx.c5
-rw-r--r--arch/x86/kvm/vmx/vmx.h1
-rw-r--r--arch/x86/kvm/x86.c5
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/mm/fault.c78
-rw-r--r--arch/x86/mm/numa_emulation.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c265
-rw-r--r--block/bfq-iosched.c12
-rw-r--r--block/bio.c4
-rw-r--r--block/blk-mq-sched.h2
-rw-r--r--block/partitions/core.c2
-rw-r--r--drivers/acpi/processor_idle.c108
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/auxdisplay/arm-charlcd.c2
-rw-r--r--drivers/base/core.c8
-rw-r--r--drivers/base/firmware_loader/firmware.h2
-rw-r--r--drivers/base/firmware_loader/main.c17
-rw-r--r--drivers/block/rbd.c12
-rw-r--r--drivers/clk/bcm/Kconfig1
-rw-r--r--drivers/clk/davinci/pll.c2
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c7
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c2
-rw-r--r--drivers/clk/versatile/clk-impd1.c4
-rw-r--r--drivers/clocksource/timer-clint.c17
-rw-r--r--drivers/connector/connector.c7
-rw-r--r--drivers/counter/microchip-tcb-capture.c4
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c15
-rw-r--r--drivers/cpuidle/cpuidle.c15
-rw-r--r--drivers/dax/device.c2
-rw-r--r--drivers/dax/super.c16
-rw-r--r--drivers/dma-buf/dma-buf.c6
-rw-r--r--drivers/dma-buf/dma-fence-chain.c1
-rw-r--r--drivers/edac/ghes_edac.c5
-rw-r--r--drivers/firmware/efi/efibc.c2
-rw-r--r--drivers/firmware/efi/embedded-firmware.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c10
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c48
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c314
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c14
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c15
-rw-r--r--drivers/gpu/drm/i915/i915_request.c25
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c10
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c20
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c19
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c26
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h11
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c15
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c9
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/hv/channel_mgmt.c7
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c35
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c26
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c6
-rw-r--r--drivers/i2c/busses/i2c-mxs.c10
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c8
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c15
-rw-r--r--drivers/iio/accel/kxsd9.c16
-rw-r--r--drivers/iio/accel/mma7455_core.c16
-rw-r--r--drivers/iio/accel/mma8452.c11
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c11
-rw-r--r--drivers/iio/adc/max1118.c10
-rw-r--r--drivers/iio/adc/mcp3422.c16
-rw-r--r--drivers/iio/adc/meson_saradc.c2
-rw-r--r--drivers/iio/adc/ti-adc081c.c11
-rw-r--r--drivers/iio/adc/ti-adc084s021.c10
-rw-r--r--drivers/iio/adc/ti-ads1015.c10
-rw-r--r--drivers/iio/chemical/ccs811.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c5
-rw-r--r--drivers/iio/light/ltr501.c15
-rw-r--r--drivers/iio/light/max44000.c12
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/proximity/mb1232.c17
-rw-r--r--drivers/infiniband/core/cq.c4
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c43
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c26
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c93
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h41
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c9
-rw-r--r--drivers/input/mouse/trackpoint.c10
-rw-r--r--drivers/input/mouse/trackpoint.h10
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h16
-rw-r--r--drivers/interconnect/core.c10
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c27
-rw-r--r--drivers/iommu/amd/Kconfig2
-rw-r--r--drivers/iommu/amd/init.c21
-rw-r--r--drivers/iommu/amd/iommu.c32
-rw-r--r--drivers/iommu/amd/iommu_v2.c7
-rw-r--r--drivers/iommu/intel/iommu.c114
-rw-r--r--drivers/iommu/intel/irq_remapping.c10
-rw-r--r--drivers/md/dm-table.c10
-rw-r--r--drivers/md/dm.c5
-rw-r--r--drivers/misc/eeprom/at24.c11
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c2
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h32
-rw-r--r--drivers/mmc/core/sdio_ops.c39
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/mmc_spi.c86
-rw-r--r--drivers/mmc/host/sdhci-acpi.c31
-rw-r--r--drivers/mmc/host/sdhci-msm.c18
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c10
-rw-r--r--drivers/mtd/spi-nor/core.c57
-rw-r--r--drivers/mtd/spi-nor/core.h10
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/can/Kconfig4
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/c_can/c_can.c9
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/cc770/cc770.h2
-rw-r--r--drivers/net/can/dev.c45
-rw-r--r--drivers/net/can/flexcan.c535
-rw-r--r--drivers/net/can/grcan.c4
-rw-r--r--drivers/net/can/m_can/Kconfig2
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c29
-rw-r--r--drivers/net/can/pch_can.c67
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c2
-rw-r--r--drivers/net/can/rx-offload.c11
-rw-r--r--drivers/net/can/sja1000/peak_pci.c2
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/softing/Kconfig6
-rw-r--r--drivers/net/can/softing/softing_fw.c8
-rw-r--r--drivers/net/can/softing/softing_main.c8
-rw-r--r--drivers/net/can/softing/softing_platform.h2
-rw-r--r--drivers/net/can/spi/Kconfig4
-rw-r--r--drivers/net/can/spi/Makefile1
-rw-r--r--drivers/net/can/spi/mcp251x.c345
-rw-r--r--drivers/net/can/spi/mcp25xxfd/Kconfig17
-rw-r--r--drivers/net/can/spi/mcp25xxfd/Makefile8
-rw-r--r--drivers/net/can/spi/mcp25xxfd/mcp25xxfd-core.c2911
-rw-r--r--drivers/net/can/spi/mcp25xxfd/mcp25xxfd-crc16.c89
-rw-r--r--drivers/net/can/spi/mcp25xxfd/mcp25xxfd-regmap.c556
-rw-r--r--drivers/net/can/spi/mcp25xxfd/mcp25xxfd.h835
-rw-r--r--drivers/net/can/ti_hecc.c29
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/gs_usb.c4
-rw-r--r--drivers/net/can/usb/mcba_usb.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c166
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/ucan.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c4
-rw-r--r--drivers/net/can/xilinx_can.c2
-rw-r--r--drivers/net/dsa/Kconfig6
-rw-r--r--drivers/net/dsa/b53/b53_common.c19
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c13
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c20
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c29
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c13
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h3
-rw-r--r--drivers/net/dsa/mt7530.c1191
-rw-r--r--drivers/net/dsa/mt7530.h259
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c290
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c532
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.h21
-rw-r--r--drivers/net/dsa/ocelot/Kconfig22
-rw-r--r--drivers/net/dsa/ocelot/Makefile6
-rw-r--r--drivers/net/dsa/ocelot/felix.c60
-rw-r--r--drivers/net/dsa/ocelot/felix.h7
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c73
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c73
-rw-r--r--drivers/net/dsa/rtl8366.c23
-rw-r--r--drivers/net/dsa/rtl8366rb.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c7
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c6
-rw-r--r--drivers/net/ethernet/alteon/acenic.c9
-rw-r--r--drivers/net/ethernet/alteon/acenic.h3
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h91
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c226
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h33
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_common_defs.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c84
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c33
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c139
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h31
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h31
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c19
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c11
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c11
-rw-r--r--drivers/net/ethernet/broadcom/b44.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c51
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c18
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn68xx_device.c1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c9
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c1
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c10
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c4
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/ael1002.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c92
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/Kconfig2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c64
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c44
-rw-r--r--drivers/net/ethernet/dec/tulip/interrupt.c56
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c65
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c44
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c80
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c80
-rw-r--r--drivers/net/ethernet/dlink/sundance.c21
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c63
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c229
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h44
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c89
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h21
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c79
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h31
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c242
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h26
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c60
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c15
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c25
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c33
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c79
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c19
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c21
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c7
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h20
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c11
-rw-r--r--drivers/net/ethernet/jme.c40
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c21
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c28
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h103
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/Kconfig25
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile7
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h206
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c112
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h23
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_dsa.c104
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_dsa.h35
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c780
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h11
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c1253
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h182
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c663
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c769
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c820
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.h19
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c1277
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.h13
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c3
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c534
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c915
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c632
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c944
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c446
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c911
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h93
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c605
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h101
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c550
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h90
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c377
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c163
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c17
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c24
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c5
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c48
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c26
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c3
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c249
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c63
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c73
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/ni/nixge.c7
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c71
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_fw.c206
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h33
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c46
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h5
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c27
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c10
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c4
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c83
-rw-r--r--drivers/net/ethernet/sfc/ef100.c1
-rw-r--r--drivers/net/ethernet/silan/sc92031.c40
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c270
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c53
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c53
-rw-r--r--drivers/net/ethernet/ti/tlan.c61
-rw-r--r--drivers/net/geneve.c37
-rw-r--r--drivers/net/hyperv/hyperv_net.h7
-rw-r--r--drivers/net/hyperv/netvsc.c124
-rw-r--r--drivers/net/hyperv/netvsc_drv.c35
-rw-r--r--drivers/net/hyperv/rndis_filter.c73
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/ca8210.c1
-rw-r--r--drivers/net/ipa/gsi.c17
-rw-r--r--drivers/net/ipa/gsi.h1
-rw-r--r--drivers/net/ipa/ipa.h16
-rw-r--r--drivers/net/ipa/ipa_clock.c28
-rw-r--r--drivers/net/ipa/ipa_interrupt.c14
-rw-r--r--drivers/net/ipa/ipa_main.c64
-rw-r--r--drivers/net/ipa/ipa_table.c4
-rw-r--r--drivers/net/mdio/Kconfig1
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c109
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/dev.c8
-rw-r--r--drivers/net/netdevsim/ethtool.c64
-rw-r--r--drivers/net/netdevsim/netdev.c1
-rw-r--r--drivers/net/netdevsim/netdevsim.h11
-rw-r--r--drivers/net/pcs/pcs-lynx.c6
-rw-r--r--drivers/net/phy/bcm7xxx.c32
-rw-r--r--drivers/net/phy/dp83822.c13
-rw-r--r--drivers/net/phy/dp83869.c73
-rw-r--r--drivers/net/phy/micrel.c14
-rw-r--r--drivers/net/phy/phy-core.c36
-rw-r--r--drivers/net/phy/phy.c71
-rw-r--r--drivers/net/phy/phy_device.c11
-rw-r--r--drivers/net/phy/realtek.c9
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c1
-rw-r--r--drivers/net/wan/hdlc_fr.c6
-rw-r--r--drivers/net/wan/hdlc_ppp.c17
-rw-r--r--drivers/net/wan/lapbether.c4
-rw-r--r--drivers/net/wan/x25_asy.c3
-rw-r--r--drivers/net/wireguard/noise.c5
-rw-r--r--drivers/net/wireguard/peerlookup.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c65
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c4
-rw-r--r--drivers/nvme/host/Kconfig1
-rw-r--r--drivers/nvme/host/core.c20
-rw-r--r--drivers/nvme/host/fabrics.c12
-rw-r--r--drivers/nvme/host/fc.c1
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/rdma.c1
-rw-r--r--drivers/nvme/host/tcp.c1
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/of/of_mdio.c38
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c47
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/ptp/ptp_qoriq.c20
-rw-r--r--drivers/rapidio/Kconfig2
-rw-r--r--drivers/regulator/core.c179
-rw-r--r--drivers/regulator/cros-ec-regulator.c3
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/pwm-regulator.c2
-rw-r--r--drivers/s390/block/dasd_fba.c9
-rw-r--r--drivers/s390/cio/chsc.c22
-rw-r--r--drivers/s390/cio/chsc.h8
-rw-r--r--drivers/s390/cio/css.c11
-rw-r--r--drivers/s390/cio/css.h4
-rw-r--r--drivers/s390/cio/device_ops.c93
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c8
-rw-r--r--drivers/s390/net/qeth_core.h34
-rw-r--r--drivers/s390/net/qeth_core_main.c111
-rw-r--r--drivers/s390/net/qeth_core_sys.c65
-rw-r--r--drivers/s390/net/qeth_l2.h9
-rw-r--r--drivers/s390/net/qeth_l2_main.c772
-rw-r--r--drivers/s390/net/qeth_l2_sys.c16
-rw-r--r--drivers/s390/net/qeth_l3.h4
-rw-r--r--drivers/s390/net/qeth_l3_main.c88
-rw-r--r--drivers/s390/net/qeth_l3_sys.c64
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_discover.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c6
-rw-r--r--drivers/soundwire/bus.c2
-rw-r--r--drivers/soundwire/stream.c8
-rw-r--r--drivers/spi/spi-cadence-quadspi.c17
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-stm32.c8
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/staging/greybus/audio_helper.c3
-rw-r--r--drivers/staging/greybus/audio_topology.c29
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c5
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c19
-rw-r--r--drivers/target/iscsi/iscsi_target.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/thunderbolt/eeprom.c20
-rw-r--r--drivers/thunderbolt/switch.c1
-rw-r--r--drivers/thunderbolt/tb.h2
-rw-r--r--drivers/thunderbolt/tunnel.c12
-rw-r--r--drivers/tty/serial/8250/8250_pci.c11
-rw-r--r--drivers/tty/serial/serial_core.c44
-rw-r--r--drivers/usb/class/usblp.c5
-rw-r--r--drivers/usb/core/message.c91
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c15
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-hub.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c22
-rw-r--r--drivers/usb/storage/uas.c14
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c26
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c22
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c6
-rw-r--r--drivers/video/console/Kconfig46
-rw-r--r--drivers/video/console/vgacon.c221
-rw-r--r--drivers/video/fbdev/core/bitblit.c11
-rw-r--r--drivers/video/fbdev/core/fbcon.c336
-rw-r--r--drivers/video/fbdev/core/fbcon.h2
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c11
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c11
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c11
-rw-r--r--drivers/video/fbdev/core/tileblit.c2
-rw-r--r--drivers/video/fbdev/vga16fb.c2
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/privcmd.c4
-rw-r--r--drivers/xen/unpopulated-alloc.c183
-rw-r--r--drivers/xen/xenbus/xenbus_client.c6
-rw-r--r--drivers/xen/xlate_mmu.c4
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c19
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/print-tree.c12
-rw-r--r--fs/btrfs/transaction.c1
-rw-r--r--fs/btrfs/volumes.c10
-rw-r--r--fs/cifs/inode.c4
-rw-r--r--fs/debugfs/file.c4
-rw-r--r--fs/ext2/file.c6
-rw-r--r--fs/f2fs/data.c3
-rw-r--r--fs/f2fs/node.c3
-rw-r--r--fs/f2fs/segment.c8
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/fuse/file.c25
-rw-r--r--fs/io_uring.c104
-rw-r--r--fs/nfs/nfs4proc.c11
-rw-r--r--fs/vboxsf/super.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c2
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--include/linux/bpf.h30
-rw-r--r--include/linux/bpf_verifier.h3
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/btf_ids.h8
-rw-r--r--include/linux/can/core.h2
-rw-r--r--include/linux/can/dev.h6
-rw-r--r--include/linux/can/rx-offload.h3
-rw-r--r--include/linux/compiler_attributes.h8
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/dax.h21
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/dsa/8021q.h2
-rw-r--r--include/linux/dynamic_debug.h20
-rw-r--r--include/linux/efi_embedded_fw.h6
-rw-r--r--include/linux/entry-common.h51
-rw-r--r--include/linux/ethtool.h26
-rw-r--r--include/linux/filter.h4
-rw-r--r--include/linux/fs_parser.h2
-rw-r--r--include/linux/fsl/ptp_qoriq.h3
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/i2c-algo-pca.h15
-rw-r--r--include/linux/ieee80211.h156
-rw-r--r--include/linux/if_bridge.h8
-rw-r--r--include/linux/kprobes.h5
-rw-r--r--include/linux/kvm_host.h31
-rw-r--r--include/linux/log2.h2
-rw-r--r--include/linux/mdio.h3
-rw-r--r--include/linux/memremap.h9
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mlx5/device.h3
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h17
-rw-r--r--include/linux/of_mdio.h6
-rw-r--r--include/linux/percpu-rwsem.h8
-rw-r--r--include/linux/phy.h426
-rw-r--r--include/linux/powercap.h11
-rw-r--r--include/linux/prefetch.h8
-rw-r--r--include/linux/qed/qed_if.h1
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/stackleak.h2
-rw-r--r--include/linux/wait.h1
-rw-r--r--include/net/cfg80211.h64
-rw-r--r--include/net/devlink.h32
-rw-r--r--include/net/dsa.h18
-rw-r--r--include/net/flow.h1
-rw-r--r--include/net/genetlink.h8
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/mac80211.h103
-rw-r--r--include/net/mptcp.h6
-rw-r--r--include/net/netlink.h2
-rw-r--r--include/net/netns/nexthop.h2
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/nexthop.h4
-rw-r--r--include/net/sch_generic.h6
-rw-r--r--include/net/sctp/structs.h8
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/switchdev.h1
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/vxlan.h3
-rw-r--r--include/soc/mscc/ocelot.h6
-rw-r--r--include/soc/mscc/ocelot_ptp.h3
-rw-r--r--include/soc/nps/common.h6
-rw-r--r--include/sound/soc.h4
-rw-r--r--include/trace/events/rxrpc.h2
-rw-r--r--include/uapi/linux/bpf.h98
-rw-r--r--include/uapi/linux/devlink.h5
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/ethtool_netlink.h19
-rw-r--r--include/uapi/linux/if_bridge.h17
-rw-r--r--include/uapi/linux/if_link.h31
-rw-r--r--include/uapi/linux/kvm.h6
-rw-r--r--include/uapi/linux/nl80211.h95
-rw-r--r--include/uapi/linux/tipc.h2
-rw-r--r--include/uapi/linux/tipc_netlink.h2
-rw-r--r--include/xen/balloon.h4
-rw-r--r--include/xen/xen.h9
-rw-r--r--init/main.c4
-rw-r--r--ipc/ipc_sysctl.c2
-rw-r--r--kernel/bpf/arraymap.c55
-rw-r--r--kernel/bpf/bpf_inode_storage.c8
-rw-r--r--kernel/bpf/bpf_local_storage.c2
-rw-r--r--kernel/bpf/btf.c15
-rw-r--r--kernel/bpf/core.c18
-rw-r--r--kernel/bpf/hashtab.c15
-rw-r--r--kernel/bpf/inode.c4
-rw-r--r--kernel/bpf/stackmap.c5
-rw-r--r--kernel/bpf/syscall.c87
-rw-r--r--kernel/bpf/task_iter.c15
-rw-r--r--kernel/bpf/verifier.c540
-rw-r--r--kernel/entry/common.c41
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/gcov/Kconfig1
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rw-r--r--kernel/kprobes.c36
-rw-r--r--kernel/locking/lockdep.c35
-rw-r--r--kernel/locking/lockdep_internals.h2
-rw-r--r--kernel/locking/percpu-rwsem.c4
-rw-r--r--kernel/padata.c5
-rw-r--r--kernel/rcu/tasks.h2
-rw-r--r--kernel/seccomp.c24
-rw-r--r--kernel/stackleak.c2
-rw-r--r--kernel/sysctl.c8
-rw-r--r--kernel/trace/bpf_trace.c23
-rw-r--r--kernel/trace/ftrace.c6
-rw-r--r--kernel/trace/trace.c38
-rw-r--r--kernel/trace/trace_events_hist.c1
-rw-r--r--kernel/trace/trace_output.c12
-rw-r--r--kernel/trace/trace_preemptirq.c4
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/dynamic_debug.c82
-rw-r--r--lib/kobject.c9
-rw-r--r--lib/test_firmware.c9
-rw-r--r--lib/test_rhashtable.c2
-rw-r--r--mm/filemap.c160
-rw-r--r--mm/huge_memory.c42
-rw-r--r--mm/hugetlb.c49
-rw-r--r--mm/khugepaged.c2
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memcontrol.c6
-rw-r--r--mm/memory.c37
-rw-r--r--mm/memory_hotplug.c14
-rw-r--r--mm/memremap.c2
-rw-r--r--mm/migrate.c32
-rw-r--r--mm/mlock.c24
-rw-r--r--mm/page_isolation.c8
-rw-r--r--mm/percpu.c2
-rw-r--r--mm/rmap.c9
-rw-r--r--mm/shmem.c10
-rw-r--r--mm/slub.c12
-rw-r--r--mm/swap.c6
-rw-r--r--mm/vmscan.c18
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c145
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h4
-rw-r--r--net/batman-adv/multicast.c60
-rw-r--r--net/batman-adv/multicast.h15
-rw-r--r--net/batman-adv/routing.c4
-rw-r--r--net/batman-adv/soft-interface.c11
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/hci_core.c2
-rw-r--r--net/bridge/br.c5
-rw-r--r--net/bridge/br_forward.c17
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_mdb.c371
-rw-r--r--net/bridge/br_multicast.c680
-rw-r--r--net/bridge/br_private.h49
-rw-r--r--net/bridge/br_vlan.c29
-rw-r--r--net/can/af_can.c4
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/gw.c2
-rw-r--r--net/can/proc.c2
-rw-r--r--net/can/raw.c26
-rw-r--r--net/core/bpf_sk_storage.c40
-rw-r--r--net/core/dev.c83
-rw-r--r--net/core/devlink.c101
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/filter.c68
-rw-r--r--net/core/net-procfs.c15
-rw-r--r--net/core/net_namespace.c22
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/core/sock.c7
-rw-r--r--net/core/sock_map.c284
-rw-r--r--net/dcb/dcbnl.c8
-rw-r--r--net/dsa/dsa.c28
-rw-r--r--net/dsa/dsa2.c19
-rw-r--r--net/dsa/dsa_priv.h66
-rw-r--r--net/dsa/port.c58
-rw-r--r--net/dsa/slave.c174
-rw-r--r--net/dsa/switch.c41
-rw-r--r--net/dsa/tag_8021q.c20
-rw-r--r--net/dsa/tag_brcm.c15
-rw-r--r--net/dsa/tag_ocelot.c13
-rw-r--r--net/dsa/tag_sja1105.c21
-rw-r--r--net/ethtool/common.c2
-rw-r--r--net/ethtool/linkmodes.c2
-rw-r--r--net/ethtool/pause.c63
-rw-r--r--net/ethtool/tunnels.c4
-rw-r--r--net/hsr/hsr_debugfs.c21
-rw-r--r--net/hsr/hsr_netlink.c6
-rw-r--r--net/ipv4/bpf_tcp_ca.c19
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/inet_diag.c20
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel_core.c1
-rw-r--r--net/ipv4/nexthop.c17
-rw-r--r--net/ipv4/route.c14
-rw-r--r--net/ipv4/tcp.c29
-rw-r--r--net/ipv4/tcp_cong.c27
-rw-r--r--net/ipv4/tcp_input.c31
-rw-r--r--net/ipv4/tcp_output.c1
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/ip6_fib.c13
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/l2tp/l2tp_core.c11
-rw-r--r--net/mac80211/airtime.c20
-rw-r--r--net/mac80211/cfg.c96
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/driver-ops.h29
-rw-r--r--net/mac80211/ieee80211_i.h19
-rw-r--r--net/mac80211/iface.c997
-rw-r--r--net/mac80211/key.c15
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mesh_ps.c2
-rw-r--r--net/mac80211/mlme.c59
-rw-r--r--net/mac80211/rx.c14
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/status.c213
-rw-r--r--net/mac80211/trace.h33
-rw-r--r--net/mac80211/tx.c170
-rw-r--r--net/mac80211/util.c7
-rw-r--r--net/mac80211/vht.c12
-rw-r--r--net/mac802154/tx.c8
-rw-r--r--net/mptcp/mib.c9
-rw-r--r--net/mptcp/mib.h9
-rw-r--r--net/mptcp/options.c84
-rw-r--r--net/mptcp/pm.c91
-rw-r--r--net/mptcp/pm_netlink.c334
-rw-r--r--net/mptcp/protocol.c540
-rw-r--r--net/mptcp/protocol.h60
-rw-r--r--net/mptcp/subflow.c107
-rw-r--r--net/netfilter/nf_conntrack_netlink.c22
-rw-r--r--net/netfilter/nf_conntrack_proto.c2
-rw-r--r--net/netfilter/nf_tables_api.c70
-rw-r--r--net/netfilter/nft_meta.c4
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/openvswitch/conntrack.c4
-rw-r--r--net/packet/af_packet.c40
-rw-r--r--net/qrtr/qrtr.c21
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/rds/rdma.c2
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/conn_client.c7
-rw-r--r--net/sched/act_ife.c44
-rw-r--r--net/sched/act_tunnel_key.c1
-rw-r--r--net/sched/cls_flower.c5
-rw-r--r--net/sched/sch_generic.c48
-rw-r--r--net/sched/sch_taprio.c28
-rw-r--r--net/sctp/socket.c9
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/smc/smc_close.c7
-rw-r--r--net/sunrpc/rpcb_clnt.c4
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
-rw-r--r--net/tipc/crypto.c981
-rw-r--r--net/tipc/crypto.h43
-rw-r--r--net/tipc/group.c14
-rw-r--r--net/tipc/link.c8
-rw-r--r--net/tipc/msg.c5
-rw-r--r--net/tipc/msg.h8
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/node.c94
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/socket.c7
-rw-r--r--net/tipc/sysctl.c9
-rw-r--r--net/tipc/topsrv.c1
-rw-r--r--net/tipc/udp_media.c1
-rw-r--r--net/unix/af_unix.c1
-rw-r--r--net/wireless/Kconfig1
-rw-r--r--net/wireless/chan.c130
-rw-r--r--net/wireless/lib80211.c2
-rw-r--r--net/wireless/nl80211.c127
-rw-r--r--net/wireless/reg.c70
-rw-r--r--net/wireless/util.c34
-rw-r--r--net/xdp/xdp_umem.c17
-rw-r--r--net/xdp/xsk.c7
-rw-r--r--net/xdp/xsk.h1
-rw-r--r--net/xdp/xsk_buff_pool.c6
-rw-r--r--net/xdp/xsk_diag.c14
-rw-r--r--net/xdp/xskmap.c5
-rw-r--r--samples/bpf/.gitignore1
-rw-r--r--samples/bpf/sockex3_user.c6
-rw-r--r--samples/bpf/spintest_user.c6
-rw-r--r--samples/bpf/test_map_in_map_kern.c7
-rw-r--r--samples/bpf/tracex5_user.c6
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c2
-rw-r--r--samples/bpf/xdpsock_user.c28
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--scripts/kconfig/lkc.h47
-rw-r--r--scripts/kconfig/lkc_proto.h14
-rw-r--r--scripts/kconfig/qconf.cc13
-rwxr-xr-xscripts/link-vmlinux.sh6
-rwxr-xr-xscripts/tags.sh8
-rw-r--r--security/device_cgroup.c3
-rw-r--r--sound/pci/hda/patch_realtek.c78
-rw-r--r--sound/soc/codecs/max98373-sdw.c4
-rw-r--r--sound/soc/codecs/pcm3168a.c7
-rw-r--r--sound/soc/codecs/rt1308-sdw.c4
-rw-r--r--sound/soc/codecs/rt700-sdw.c4
-rw-r--r--sound/soc/codecs/rt711-sdw.c4
-rw-r--r--sound/soc/codecs/rt715-sdw.c4
-rw-r--r--sound/soc/codecs/tlv320adcx140.c28
-rw-r--r--sound/soc/codecs/wm8994.c10
-rw-r--r--sound/soc/codecs/wm_hubs.c3
-rw-r--r--sound/soc/codecs/wm_hubs.h1
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c11
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c10
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c2
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c7
-rw-r--r--sound/soc/intel/haswell/sst-haswell-dsp.c185
-rw-r--r--sound/soc/meson/axg-toddr.c24
-rw-r--r--sound/soc/qcom/apq8016_sbc.c1
-rw-r--r--sound/soc/qcom/apq8096.c1
-rw-r--r--sound/soc/qcom/common.c6
-rw-r--r--sound/soc/qcom/sdm845.c1
-rw-r--r--sound/soc/qcom/storm.c1
-rw-r--r--sound/soc/soc-core.c13
-rw-r--r--sound/soc/soc-dai.c4
-rw-r--r--sound/soc/soc-pcm.c2
-rw-r--r--sound/soc/ti/ams-delta.c4
-rw-r--r--tools/bpf/Makefile4
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile15
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst37
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst33
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst33
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst37
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-iter.rst27
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-link.rst36
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst46
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst35
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/common_options.rst22
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool22
-rw-r--r--tools/bpf/bpftool/json_writer.c6
-rw-r--r--tools/bpf/bpftool/json_writer.h3
-rw-r--r--tools/bpf/bpftool/main.c33
-rw-r--r--tools/bpf/bpftool/map.c149
-rw-r--r--tools/bpf/bpftool/prog.c203
-rw-r--r--tools/bpf/resolve_btfids/Makefile3
-rw-r--r--tools/include/linux/btf_ids.h8
-rw-r--r--tools/include/uapi/linux/bpf.h98
-rw-r--r--tools/include/uapi/linux/in.h2
-rw-r--r--tools/include/uapi/linux/kvm.h6
-rw-r--r--tools/io_uring/io_uring-bench.c4
-rw-r--r--tools/lib/bpf/Makefile4
-rw-r--r--tools/lib/bpf/bpf.c16
-rw-r--r--tools/lib/bpf/bpf.h8
-rw-r--r--tools/lib/bpf/btf.h18
-rw-r--r--tools/lib/bpf/libbpf.c1358
-rw-r--r--tools/lib/bpf/libbpf.h5
-rw-r--r--tools/lib/bpf/libbpf.map2
-rw-r--r--tools/lib/bpf/libbpf_common.h2
-rw-r--r--tools/lib/bpf/xsk.c1
-rw-r--r--tools/objtool/check.c2
-rw-r--r--tools/perf/bench/sched-messaging.c4
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/core.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/core.json2
-rw-r--r--tools/perf/tests/attr/README1
-rw-r--r--tools/perf/tests/attr/test-record-group229
-rw-r--r--tools/perf/tests/bp_signal.c5
-rw-r--r--tools/perf/tests/parse-metric.c14
-rw-r--r--tools/perf/tests/pmu-events.c5
-rw-r--r--tools/perf/tests/pmu.c1
-rw-r--r--tools/perf/util/bpf-loader.c12
-rw-r--r--tools/perf/util/evlist.c11
-rw-r--r--tools/perf/util/evsel.c10
-rw-r--r--tools/perf/util/metricgroup.c35
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/pmu.c13
-rw-r--r--tools/perf/util/pmu.h2
-rw-r--r--tools/perf/util/record.c34
-rw-r--r--tools/perf/util/stat-shadow.c28
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile10
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c21
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c (renamed from tools/testing/selftests/bpf/test_btf.c)410
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cls_redirect.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/d_path.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c21
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data_init.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/metadata.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subprogs.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c332
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c49
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter.h9
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c15
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c43
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_sockmap.h3
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_file.c10
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c10
-rw-r--r--tools/testing/selftests/bpf/progs/metadata_unused.c15
-rw-r--r--tools/testing/selftests/bpf/progs/metadata_used.c15
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h11
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf_subprogs.c5
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h30
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta_subprogs.c10
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c38
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c41
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c61
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c105
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c103
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c36
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_build.sh21
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_metadata.sh82
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr.c32
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh108
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh14
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile3
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c18
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh4
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh193
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh293
-rw-r--r--tools/testing/selftests/net/nettest.c2
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh47
-rw-r--r--tools/testing/selftests/powerpc/mm/prot_sao.c9
-rw-r--r--tools/testing/selftests/timers/Makefile1
-rw-r--r--tools/testing/selftests/timers/settings1
-rw-r--r--tools/testing/selftests/vm/map_hugetlb.c2
-rw-r--r--virt/kvm/kvm_main.c21
1217 files changed, 39199 insertions, 13697 deletions
diff --git a/.clang-format b/.clang-format
index a0a96088c74f..badfc1ba440a 100644
--- a/.clang-format
+++ b/.clang-format
@@ -111,6 +111,7 @@ ForEachMacros:
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
+ - 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
@@ -136,6 +137,7 @@ ForEachMacros:
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
+ - 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
@@ -234,6 +236,7 @@ ForEachMacros:
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
+ - 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
@@ -256,6 +259,7 @@ ForEachMacros:
- 'for_each_pci_dev'
- 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
+ - 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_cpu'
@@ -265,6 +269,8 @@ ForEachMacros:
- 'for_each_process_thread'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
+ - 'for_each_requested_gpio'
+ - 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_codec_dais_rollback'
@@ -278,12 +284,17 @@ ForEachMacros:
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
+ - 'for_each_sgtable_dma_page'
+ - 'for_each_sgtable_dma_sg'
+ - 'for_each_sgtable_page'
+ - 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
+ - 'for_each_unicast_dest_pgid'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
@@ -464,6 +475,7 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
+ - 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
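For context, each name under ForEachMacros makes clang-format lay the invocation
out like a native for loop instead of a wrapped function call. A minimal sketch
using one of the entries added above; for_each_sgtable_sg is the real macro from
<linux/scatterlist.h>, while the helper around it is hypothetical::

    #include <linux/scatterlist.h>

    /* Because "for_each_sgtable_sg" is listed in ForEachMacros,
     * clang-format indents the statement below it as a loop body. */
    static int count_segments(struct sg_table *sgt)
    {
            struct scatterlist *sg;
            int i, n = 0;

            for_each_sgtable_sg(sgt, sg, i)
                    n++;

            return n;
    }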
diff --git a/.mailmap b/.mailmap
index 332c7833057f..a780211468e4 100644
--- a/.mailmap
+++ b/.mailmap
@@ -169,6 +169,10 @@ Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Kamil Konieczny <k.konieczny@samsung.com> <k.konieczny@partner.samsung.com>
Kay Sievers <kay.sievers@vrfy.org>
+Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
+Kees Cook <keescook@chromium.org> <keescook@google.com>
+Kees Cook <keescook@chromium.org> <kees@outflux.net>
+Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -308,6 +312,7 @@ Tony Luck <tony.luck@intel.com>
TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
+Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst
index e5a8def45f3f..6c04aea8f4cd 100644
--- a/Documentation/admin-guide/dynamic-debug-howto.rst
+++ b/Documentation/admin-guide/dynamic-debug-howto.rst
@@ -156,7 +156,6 @@ against. Possible keywords are:::
``line-range`` cannot contain space, e.g.
"1-30" is valid range but "1 - 30" is not.
- ``module=foo`` combined keyword=value form is interchangably accepted
The meanings of each keyword are:
diff --git a/Documentation/bpf/ringbuf.rst b/Documentation/bpf/ringbuf.rst
index 75f943f0009d..6a615cd62bda 100644
--- a/Documentation/bpf/ringbuf.rst
+++ b/Documentation/bpf/ringbuf.rst
@@ -182,9 +182,6 @@ in the order of reservations, but only after all previous records where
already committed. It is thus possible for slow producers to temporarily hold
off submitted records, that were reserved later.
-Reservation/commit/consumer protocol is verified by litmus tests in
-Documentation/litmus_tests/bpf-rb/_.
-
One interesting implementation bit, that significantly simplifies (and thus
speeds up as well) implementation of both producers and consumers is how data
area is mapped twice contiguously back-to-back in the virtual memory. This
@@ -200,7 +197,7 @@ a self-pacing notifications of new data being availability.
being available after commit only if consumer has already caught up right up to
the record being committed. If not, consumer still has to catch up and thus
will see new data anyways without needing an extra poll notification.
-Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbuf.c_) show that
+Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbufs.c) show that
this allows to achieve a very high throughput without having to resort to
tricks like "notify only every Nth sample", which are necessary with perf
buffer. For extreme cases, when BPF program wants more manual control of
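The double mapping described above can be imitated from userspace; the sketch
below is a hypothetical illustration built on memfd_create, not the kernel's
implementation, and assumes size is a multiple of the page size. It shows why a
record that wraps past the end of the data area can still be read with plain
pointer arithmetic::

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>

    /* Map the same size-byte file twice, back-to-back, so accesses
     * that run past the first copy land at its beginning. */
    static char *double_map(size_t size)
    {
            int fd = memfd_create("rb", 0);
            char *base;

            if (fd < 0 || ftruncate(fd, size) < 0)
                    return NULL;

            base = mmap(NULL, 2 * size, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (base == MAP_FAILED)
                    return NULL;

            /* Error handling of the fixed mappings omitted for brevity. */
            mmap(base, size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0);
            mmap(base + size, size, PROT_READ | PROT_WRITE,
                 MAP_SHARED | MAP_FIXED, fd, 0);
            return base;
    }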
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
index 5a0111d4de58..381f8fb3e865 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
@@ -12,6 +12,9 @@ Required properties:
Optional properties:
- vdd-supply: Regulator that powers the CAN controller.
- xceiver-supply: Regulator that powers the CAN transceiver.
+ - gpio-controller: Indicates this device is a GPIO controller.
+ - #gpio-cells: Should be two. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity.
Example:
can0: can@1 {
@@ -19,7 +22,9 @@ Example:
reg = <1>;
clocks = <&clk24m>;
interrupt-parent = <&gpio4>;
- interrupts = <13 0x2>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
vdd-supply = <&reg5v0>;
xceiver-supply = <&reg5v0>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp25xxfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp25xxfd.yaml
new file mode 100644
index 000000000000..aa2cad14d6d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp25xxfd.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/microchip,mcp25xxfd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+ Microchip MCP2517FD and MCP2518FD stand-alone CAN controller device tree
+ bindings
+
+maintainers:
+ - Marc Kleine-Budde <mkl@pengutronix.de>
+
+properties:
+ compatible:
+ oneOf:
+ - const: microchip,mcp2517fd
+ description: for MCP2517FD
+ - const: microchip,mcp2518fd
+ description: for MCP2518FD
+ - const: microchip,mcp25xxfd
+ description: to autodetect chip variant
+
+ reg:
+ maxItems: 1
+
+ interrupts-extended:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator that powers the CAN controller.
+ maxItems: 1
+
+ xceiver-supply:
+ description: Regulator that powers the CAN transceiver.
+ maxItems: 1
+
+ microchip,rx-int-gpios:
+ description:
+ GPIO phandle of GPIO connected to the INT1 pin of the MCP25XXFD, which
+ signals a pending RX interrupt.
+ maxItems: 1
+
+ spi-max-frequency:
+ description:
+ Must be half or less of "clocks" frequency.
+ maximum: 20000000
+
+required:
+ - compatible
+ - reg
+ - interrupts-extended
+ - clocks
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ can@0 {
+ compatible = "microchip,mcp25xxfd";
+ reg = <0>;
+ clocks = <&can0_osc>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins>;
+ spi-max-frequency = <20000000>;
+ interrupts-extended = <&gpio 13 IRQ_TYPE_LEVEL_LOW>;
+ microchip,rx-int-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg5v0>;
+ xceiver-supply = <&reg5v0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index c5ed5d25f642..560369efad6c 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: may be compatible = "mediatek,mt7530"
or compatible = "mediatek,mt7621"
+ or compatible = "mediatek,mt7531"
- #address-cells: Must be 1.
- #size-cells: Must be 0.
- mediatek,mcm: Boolean; if defined, indicates that either MT7530 is the part
@@ -32,10 +33,14 @@ Required properties for the child nodes within ports container:
- reg: Port address described must be 6 for CPU port and from 0 to 5 for
user ports.
-- phy-mode: String, must be either "trgmii" or "rgmii" for port labeled
- "cpu".
-
-Port 5 of the switch is muxed between:
+- phy-mode: String, the following values are acceptable for port labeled
+ "cpu":
+ If compatible mediatek,mt7530 or mediatek,mt7621 is set,
+ must be either "trgmii" or "rgmii"
+ If compatible mediatek,mt7531 is set,
+ must be either "sgmii", "1000base-x" or "2500base-x"
+
+Port 5 of mt7530 and mt7621 switch is muxed between:
1. GMAC5: GMAC5 can interface with another external MAC or PHY.
2. PHY of port 0 or port 4: PHY interfaces with an external MAC like 2nd GMAC
of the SOC. Used in many setups where port 0/4 becomes the WAN port.
diff --git a/Documentation/devicetree/bindings/net/marvell,prestera.txt b/Documentation/devicetree/bindings/net/marvell,prestera.txt
index 83370ebf5b89..e28938ddfdf5 100644
--- a/Documentation/devicetree/bindings/net/marvell,prestera.txt
+++ b/Documentation/devicetree/bindings/net/marvell,prestera.txt
@@ -45,3 +45,37 @@ dfx-server {
ranges = <0 MBUS_ID(0x08, 0x00) 0 0x100000>;
reg = <MBUS_ID(0x08, 0x00) 0 0x100000>;
};
+
+Marvell Prestera SwitchDev bindings
+-----------------------------------
+Optional properties:
+- compatible: must be "marvell,prestera"
+- base-mac-provider: phandle of the node which provides the base MAC address;
+ this may be a static base MAC address or an nvmem cell provider.
+
+Example:
+
+eeprom_mac_addr: eeprom-mac-addr {
+ compatible = "eeprom,mac-addr-cell";
+ status = "okay";
+
+ nvmem = <&eeprom_at24>;
+};
+
+prestera {
+ compatible = "marvell,prestera";
+ status = "okay";
+
+ base-mac-provider = <&eeprom_mac_addr>;
+};
+
+The current implementation of the Prestera Switchdev PCI interface driver requires
+that BAR2 is assigned to 0xf6000000 as base address from the PCI IO range:
+
+&cp0_pcie0 {
+ ranges = <0x81000000 0x0 0xfb000000 0x0 0xfb000000 0x0 0xf0000
+ 0x82000000 0x0 0xf6000000 0x0 0xf6000000 0x0 0x2000000
+ 0x82000000 0x0 0xf9000000 0x0 0xf9000000 0x0 0x100000>;
+ phys = <&cp0_comphy0 0>;
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
index d48f9eb3636e..743eda754e65 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -18,6 +18,8 @@ Clock Properties:
- fsl,tmr-add Frequency compensation value.
- fsl,tmr-fiper1 Fixed interval period pulse generator.
- fsl,tmr-fiper2 Fixed interval period pulse generator.
+ - fsl,tmr-fiper3 Fixed interval period pulse generator.
+ Supported only on DPAA2 and ENETC hardware.
- fsl,max-adj Maximum frequency adjustment in parts per billion.
- fsl,extts-fifo The presence of this property indicates hardware
support for the external trigger stamp FIFO.
diff --git a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
index f5e518d099f2..62d4ed2d7fd7 100644
--- a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
@@ -23,8 +23,8 @@ Required properties:
- compatible:
Must be one of :
- "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-qspi" : MSPI+BSPI on BRCMSTB SoCs
- "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
+ "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on BRCMSTB SoCs
+ "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi" : Second Instance of MSPI
BRCMSTB SoCs
"brcm,spi-bcm7425-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
BRCMSTB SoCs
@@ -36,8 +36,8 @@ Required properties:
BRCMSTB SoCs
"brcm,spi-bcm7278-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
BRCMSTB SoCs
- "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi" : MSPI+BSPI on Cygnus, NSP
- "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi" : NS2 SoCs
+ "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on Cygnus, NSP
+ "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi" : NS2 SoCs
- reg:
Define the bases and ranges of the associated I/O address spaces.
@@ -86,7 +86,7 @@ BRCMSTB SoC Example:
spi@f03e3400 {
#address-cells = <0x1>;
#size-cells = <0x0>;
- compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-qspi";
+ compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi";
reg = <0xf03e0920 0x4 0xf03e3400 0x188 0xf03e3200 0x50>;
reg-names = "cs_reg", "mspi", "bspi";
interrupts = <0x6 0x5 0x4 0x3 0x2 0x1 0x0>;
@@ -149,7 +149,7 @@ BRCMSTB SoC Example:
#address-cells = <1>;
#size-cells = <0>;
clocks = <&upg_fixed>;
- compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-mspi";
+ compatible = "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi";
reg = <0xf0416000 0x180>;
reg-names = "mspi";
interrupts = <0x14>;
@@ -160,7 +160,7 @@ BRCMSTB SoC Example:
iProc SoC Example:
qspi: spi@18027200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18027200 0x184>,
<0x18027000 0x124>,
<0x1811c408 0x004>,
@@ -191,7 +191,7 @@ iProc SoC Example:
NS2 SoC Example:
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 100bfd227265..13ea0cc0a3fa 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -179,7 +179,7 @@ DMA Fence uABI/Sync File
:internal:
Indefinite DMA Fences
-~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~
At various times &dma_fence with an indefinite time until dma_fence_wait()
finishes have been proposed. Examples include:
diff --git a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
index 53da483c8326..1c49723e7534 100644
--- a/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
+++ b/Documentation/features/debug/debug-vm-pgtable/arch-support.txt
@@ -22,7 +22,7 @@
| nios2: | TODO |
| openrisc: | TODO |
| parisc: | TODO |
- | powerpc: | ok |
+ | powerpc: | TODO |
| riscv: | ok |
| s390: | ok |
| sh: | TODO |
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index 11af6388ea87..3561a8a29fd2 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -39,16 +39,6 @@ debug logs.
Some of the ENA devices support a working mode called Low-latency
Queue (LLQ), which saves several more microseconds.
-Supported PCI vendor ID/device IDs
-==================================
-
-========= =======================
-1d0f:0ec2 ENA PF
-1d0f:1ec2 ENA PF with LLQ support
-1d0f:ec20 ENA VF
-1d0f:ec21 ENA VF with LLQ support
-========= =======================
-
ENA Source Code Directory Structure
===================================
@@ -212,20 +202,11 @@ In adaptive interrupt moderation mode the interrupt delay value is
updated by the driver dynamically and adjusted every NAPI cycle
according to the traffic nature.
-By default ENA driver applies adaptive coalescing on Rx traffic and
-conventional coalescing on Tx traffic.
-
Adaptive coalescing can be switched on/off through ethtool(8)
adaptive_rx on|off parameter.
-The driver chooses interrupt delay value according to the number of
-bytes and packets received between interrupt unmasking and interrupt
-posting. The driver uses interrupt delay table that subdivides the
-range of received bytes/packets into 5 levels and assigns interrupt
-delay value to each level.
-
-The user can enable/disable adaptive moderation, modify the interrupt
-delay table and restore its default values through sysfs.
+More information about Adaptive Interrupt Moderation (DIM) can be found in
+Documentation/networking/net_dim.rst
RX copybreak
============
@@ -274,7 +255,7 @@ RSS
inputs for hash functions.
- The driver configures RSS settings using the AQ SetFeature command
(ENA_ADMIN_RSS_HASH_FUNCTION, ENA_ADMIN_RSS_HASH_INPUT and
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG properties).
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG properties).
- If the NETIF_F_RXHASH flag is set, the 32-bit result of the hash
function delivered in the Rx CQ descriptor is set in the received
SKB.
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index d53bcb31645a..30b98245979f 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -68,6 +68,7 @@ the flags may not apply to requests. Recognized flags are:
================================= ===================================
``ETHTOOL_FLAG_COMPACT_BITSETS`` use compact format bitsets in reply
``ETHTOOL_FLAG_OMIT_REPLY`` omit optional reply (_SET and _ACT)
+ ``ETHTOOL_FLAG_STATS`` include optional device statistics
================================= ===================================
New request flags should follow the general idea that if the flag is not set,
@@ -206,6 +207,7 @@ Userspace to kernel:
``ETHTOOL_MSG_TSINFO_GET`` get timestamping info
``ETHTOOL_MSG_CABLE_TEST_ACT`` action start cable test
``ETHTOOL_MSG_CABLE_TEST_TDR_ACT`` action start raw TDR cable test
+ ``ETHTOOL_MSG_TUNNEL_INFO_GET`` get tunnel offload info
===================================== ================================
Kernel to userspace:
@@ -239,6 +241,7 @@ Kernel to userspace:
``ETHTOOL_MSG_TSINFO_GET_REPLY`` timestamping info
``ETHTOOL_MSG_CABLE_TEST_NTF`` Cable test results
``ETHTOOL_MSG_CABLE_TEST_TDR_NTF`` Cable test TDR results
+ ``ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY`` tunnel offload info
===================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -989,8 +992,18 @@ Kernel response contents:
``ETHTOOL_A_PAUSE_AUTONEG`` bool pause autonegotiation
``ETHTOOL_A_PAUSE_RX`` bool receive pause frames
``ETHTOOL_A_PAUSE_TX`` bool transmit pause frames
+ ``ETHTOOL_A_PAUSE_STATS`` nested pause statistics
===================================== ====== ==========================
+``ETHTOOL_A_PAUSE_STATS`` is reported if ``ETHTOOL_FLAG_STATS`` was set
+in ``ETHTOOL_A_HEADER_FLAGS``.
+It will be empty if the driver did not report any statistics. Drivers fill in
+the statistics in the following structure:
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_pause_stats
+
+Each member has a corresponding attribute defined.
PAUSE_SET
============
@@ -1363,4 +1376,5 @@ are netlink only.
``ETHTOOL_SFECPARAM`` n/a
n/a ''ETHTOOL_MSG_CABLE_TEST_ACT''
n/a ''ETHTOOL_MSG_CABLE_TEST_TDR_ACT''
+ n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET``
=================================== =====================================
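On the driver side, the counters behind ``ETHTOOL_A_PAUSE_STATS`` come from the
new ``get_pause_stats`` ethtool operation. A hedged sketch; the ``foo_*`` names
are hypothetical and only illustrate the contract that unreported counters must
be left untouched (they then stay at ``ETHTOOL_STAT_NOT_SET``)::

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    struct foo_priv {
            u64 tx_pause, rx_pause;   /* maintained elsewhere; hypothetical */
    };

    static void foo_get_pause_stats(struct net_device *dev,
                                    struct ethtool_pause_stats *stats)
    {
            struct foo_priv *priv = netdev_priv(dev);

            /* Write only what the device reports; fields left alone
             * remain ETHTOOL_STAT_NOT_SET and are skipped in the dump. */
            stats->tx_pause_frames = priv->tx_pause;
            stats->rx_pause_frames = priv->rx_pause;
    }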
diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst
index f03ae64be8bc..d198fa5eaacd 100644
--- a/Documentation/networking/kapi.rst
+++ b/Documentation/networking/kapi.rst
@@ -134,6 +134,15 @@ PHY Support
.. kernel-doc:: drivers/net/phy/phy.c
:internal:
+.. kernel-doc:: drivers/net/phy/phy-core.c
+ :export:
+
+.. kernel-doc:: drivers/net/phy/phy-c45.c
+ :export:
+
+.. kernel-doc:: include/linux/phy.h
+ :internal:
+
.. kernel-doc:: drivers/net/phy/phy_device.c
:export:
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index d490b535cd14..8e15bc98830b 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -4,16 +4,23 @@
Interface statistics
====================
+Overview
+========
+
This document is a guide to Linux network interface statistics.
-There are two main sources of interface statistics in Linux:
+There are three main sources of interface statistics in Linux:
- standard interface statistics based on
- :c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`; and
+ :c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`;
+ - protocol-specific statistics; and
- driver-defined statistics available via ethtool.
-There are multiple interfaces to reach the former. Most commonly used
-is the `ip` command from `iproute2`::
+Standard interface statistics
+-----------------------------
+
+There are multiple interfaces to reach the standard statistics.
+Most commonly used is the `ip` command from `iproute2`::
$ ip -s -s link show dev ens4u1u1
6: ens4u1u1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
@@ -34,7 +41,26 @@ If `-s` is specified once the detailed errors won't be shown.
`ip` supports JSON formatting via the `-j` option.
-Ethtool statistics can be dumped using `ethtool -S $ifc`, e.g.::
+Protocol-specific statistics
+----------------------------
+
+Some of the interfaces used for configuring devices are also able
+to report related statistics. For example, the ethtool interface used
+to configure pause frames can report the corresponding hardware counters::
+
+ $ ethtool --include-statistics -a eth0
+ Pause parameters for eth0:
+ Autonegotiate: on
+ RX: on
+ TX: on
+ Statistics:
+ tx_pause_frames: 1
+ rx_pause_frames: 1
+
+Driver-defined statistics
+-------------------------
+
+Driver-defined ethtool statistics can be dumped using `ethtool -S $ifc`, e.g.::
$ ethtool -S ens4u1u1
NIC statistics:
@@ -94,6 +120,17 @@ Identifiers via `ETHTOOL_GSTRINGS` with `string_set` set to `ETH_SS_STATS`,
and values via `ETHTOOL_GSTATS`. User space should use `ETHTOOL_GDRVINFO`
to retrieve the number of statistics (`.n_stats`).
+ethtool-netlink
+---------------
+
+Ethtool netlink is a replacement for the older IOCTL interface.
+
+Protocol-related statistics can be requested in get commands by setting
+the `ETHTOOL_FLAG_STATS` flag in `ETHTOOL_A_HEADER_FLAGS`. Currently
+statistics are supported in the following commands:
+
+ - `ETHTOOL_MSG_PAUSE_GET`
+
debugfs
-------
@@ -130,3 +167,13 @@ user space trying to read them.
Statistics must persist across routine operations like bringing the interface
down and up.
+
+Kernel-internal data structures
+-------------------------------
+
+The following structures are internal to the kernel; their members are
+translated to netlink attributes when dumped. Drivers must not overwrite
+the statistics they don't report with 0.
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_pause_stats
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index eb3a1316f03e..51191b56e61c 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6130,7 +6130,7 @@ HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
-----------------------------------
-:Architecture: x86
+:Architectures: x86
This capability indicates that KVM running on top of Hyper-V hypervisor
enables Direct TLB flush for its guests meaning that TLB flush
@@ -6143,19 +6143,53 @@ in CPUID and only exposes Hyper-V identification. In this case, guest
thinks it's running on Hyper-V and only use Hyper-V hypercalls.
8.22 KVM_CAP_S390_VCPU_RESETS
+-----------------------------
-Architectures: s390
+:Architectures: s390
This capability indicates that the KVM_S390_NORMAL_RESET and
KVM_S390_CLEAR_RESET ioctls are available.
8.23 KVM_CAP_S390_PROTECTED
+---------------------------
-Architecture: s390
-
+:Architectures: s390
This capability indicates that the Ultravisor has been initialized and
KVM can therefore start protected VMs.
This capability governs the KVM_S390_PV_COMMAND ioctl and the
KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
guests when the state change is invalid.
+
+8.24 KVM_CAP_STEAL_TIME
+-----------------------
+
+:Architectures: arm64, x86
+
+This capability indicates that KVM supports steal time accounting.
+When steal time accounting is supported it may be enabled with
+architecture-specific interfaces. This capability and the architecture-
+specific interfaces must be consistent, i.e. if one says the feature
+is supported, then the other should as well and vice versa. For arm64
+see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
+For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".
+
+8.25 KVM_CAP_S390_DIAG318
+-------------------------
+
+:Architectures: s390
+
+This capability enables a guest to set information about its control program
+(i.e. guest kernel type and version). The information is helpful during
+system/firmware service events, providing additional data about the guest
+environments running on the machine.
+
+The information is associated with the DIAGNOSE 0x318 instruction, which sets
+an 8-byte value consisting of a one-byte Control Program Name Code (CPNC) and
+a 7-byte Control Program Version Code (CPVC). The CPNC determines what
+environment the control program is running in (e.g. Linux, z/VM...), and the
+CPVC is used for information specific to the OS (e.g. Linux version, Linux
+distribution...).
+
+If this capability is available, then the CPNC and CPVC can be synchronized
+between KVM and userspace via the sync regs mechanism (KVM_SYNC_DIAG318).
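Userspace can probe the new capability with KVM_CHECK_EXTENSION before relying
on it. A minimal sketch, assuming a kernel whose uapi headers already define
KVM_CAP_STEAL_TIME::

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
            int ret;

            if (kvm < 0)
                    return 1;

            /* Returns > 0 when the capability is present. */
            ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME);
            printf("steal time accounting: %s\n",
                   ret > 0 ? "supported" : "not supported");
            return 0;
    }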
diff --git a/MAINTAINERS b/MAINTAINERS
index c99577961cc4..42c69d2eeece 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1694,7 +1694,6 @@ F: arch/arm/mach-cns3xxx/
ARM/CAVIUM THUNDER NETWORK DRIVER
M: Sunil Goutham <sgoutham@marvell.com>
-M: Robert Richter <rrichter@marvell.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/net/ethernet/cavium/thunder/
@@ -3948,8 +3947,8 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/carl9170
F: drivers/net/wireless/ath/carl9170/
CAVIUM I2C DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
W: http://www.marvell.com
F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
@@ -3964,8 +3963,8 @@ W: http://www.marvell.com
F: drivers/net/ethernet/cavium/liquidio/
CAVIUM MMC DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
W: http://www.marvell.com
F: drivers/mmc/host/cavium*
@@ -3977,9 +3976,9 @@ W: http://www.marvell.com
F: drivers/crypto/cavium/cpt/
CAVIUM THUNDERX2 ARM64 SOC
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
@@ -4258,6 +4257,8 @@ S: Maintained
F: .clang-format
CLANG/LLVM BUILD SUPPORT
+M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
W: https://clangbuiltlinux.github.io/
@@ -4407,12 +4408,6 @@ T: git git://git.infradead.org/users/hch/configfs.git
F: fs/configfs/
F: include/linux/configfs.h
-CONNECTOR
-M: Evgeniy Polyakov <zbr@ioremap.net>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/connector/
-
CONSOLE SUBSYSTEM
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Supported
@@ -6188,7 +6183,7 @@ F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt
F: drivers/edac/aspeed_edac.c
EDAC-BLUEFIELD
-M: Shravan Kumar Ramani <sramani@nvidia.com>
+M: Shravan Kumar Ramani <shravankr@nvidia.com>
S: Supported
F: drivers/edac/bluefield_edac.c
@@ -6200,16 +6195,15 @@ F: drivers/edac/highbank*
EDAC-CAVIUM OCTEON
M: Ralf Baechle <ralf@linux-mips.org>
-M: Robert Richter <rrichter@marvell.com>
L: linux-edac@vger.kernel.org
L: linux-mips@vger.kernel.org
S: Supported
F: drivers/edac/octeon_edac*
EDAC-CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
-S: Supported
+S: Odd Fixes
F: drivers/edac/thunderx_edac*
EDAC-CORE
@@ -6217,7 +6211,7 @@ M: Borislav Petkov <bp@alien8.de>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
M: Tony Luck <tony.luck@intel.com>
R: James Morse <james.morse@arm.com>
-R: Robert Richter <rrichter@marvell.com>
+R: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
@@ -6913,6 +6907,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE DSPI DRIVER
+M: Vladimir Oltean <olteanv@gmail.com>
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+F: drivers/spi/spi-fsl-dspi.c
+F: include/linux/spi/spi-fsl-dspi.h
+
FREESCALE ENETC ETHERNET DRIVERS
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
@@ -8284,7 +8286,7 @@ IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-ia64@vger.kernel.org
-S: Maintained
+S: Odd Fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
F: Documentation/ia64/
F: arch/ia64/
@@ -8333,8 +8335,9 @@ S: Supported
F: drivers/pci/hotplug/rpaphp*
IBM Power SRIOV Virtual NIC Device Driver
-M: Thomas Falcon <tlfalcon@linux.ibm.com>
-M: John Allen <jallen@linux.ibm.com>
+M: Dany Madden <drt@linux.ibm.com>
+M: Lijun Pan <ljp@linux.ibm.com>
+M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*
@@ -8348,7 +8351,7 @@ F: arch/powerpc/platforms/powernv/copy-paste.h
F: arch/powerpc/platforms/powernv/vas*
IBM Power Virtual Ethernet Device Driver
-M: Thomas Falcon <tlfalcon@linux.ibm.com>
+M: Cristobal Forno <cforno12@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
@@ -9255,7 +9258,7 @@ F: drivers/firmware/iscsi_ibft*
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Sagi Grimberg <sagi@grimberg.me>
-M: Max Gurtovoy <maxg@nvidia.com>
+M: Max Gurtovoy <mgurtovoy@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.openfabrics.org
@@ -9804,7 +9807,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tobin C. Harding <me@tobin.cc>
-M: Tycho Andersen <tycho@tycho.ws>
+M: Tycho Andersen <tycho@tycho.pizza>
L: kernel-hardening@lists.openwall.com
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git
@@ -10671,6 +10674,15 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-mcp2221.c
+MCP25XXFD SPI-CAN NETWORK DRIVER
+M: Marc Kleine-Budde <mkl@pengutronix.de>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+R: Thomas Kopp <thomas.kopp@microchip.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/can/microchip,mcp25xxfd.yaml
+F: drivers/net/can/spi/mcp25xxfd/
+
MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
M: Peter Rosin <peda@axentia.se>
L: linux-iio@vger.kernel.org
@@ -11053,6 +11065,7 @@ F: drivers/char/hw_random/mtk-rng.c
MEDIATEK SWITCH DRIVER
M: Sean Wang <sean.wang@mediatek.com>
+M: Landen Chao <Landen.Chao@mediatek.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mt7530.*
@@ -12066,6 +12079,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
F: Documentation/devicetree/bindings/net/
+F: drivers/connector/
F: drivers/net/
F: include/linux/etherdevice.h
F: include/linux/fcdevice.h
@@ -13465,10 +13479,10 @@ F: Documentation/devicetree/bindings/pci/axis,artpec*
F: drivers/pci/controller/dwc/*artpec*
PCIE DRIVER FOR CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
+S: Odd Fixes
F: drivers/pci/controller/pci-thunder-*
PCIE DRIVER FOR HISILICON
@@ -14407,7 +14421,7 @@ M: Rob Clark <robdclark@gmail.com>
L: iommu@lists.linux-foundation.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
-F: drivers/iommu/qcom_iommu.c
+F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
QUALCOMM IPCC MAILBOX DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -15589,6 +15603,7 @@ F: include/uapi/linux/sed*
SECURITY CONTACT
M: Security Officers <security@kernel.org>
S: Supported
+F: Documentation/admin-guide/security-bugs.rst
SECURITY SUBSYSTEM
M: James Morris <jmorris@namei.org>
@@ -17258,8 +17273,8 @@ S: Maintained
F: drivers/net/thunderbolt.c
THUNDERX GPIO DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Maintained
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
F: drivers/gpio/gpio-thunderx.c
TI AM437X VPFE DRIVER
diff --git a/Makefile b/Makefile
index ff5e0731d26d..d387c5a239b7 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -882,10 +882,6 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
LDFLAGS_vmlinux += --gc-sections
endif
-ifdef CONFIG_LIVEPATCH
-KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
-endif
-
ifdef CONFIG_SHADOW_CALL_STACK
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)
@@ -1084,13 +1080,15 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
+ifdef CONFIG_BPF
ifdef CONFIG_DEBUG_INFO_BTF
ifeq ($(has_libelf),1)
resolve_btfids_target := tools/bpf/resolve_btfids FORCE
else
ERROR_RESOLVE_BTFIDS := 1
endif
-endif
+endif # CONFIG_DEBUG_INFO_BTF
+endif # CONFIG_BPF
PHONY += prepare0
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 9acbeba832c0..dcaa44e408ac 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -88,6 +88,8 @@
arcpct: pct {
compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
};
/* TIMER0 with interrupt for clockevent */
@@ -208,7 +210,7 @@
reg = <0x8000 0x2000>;
interrupts = <10>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
@@ -226,7 +228,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@0 {
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>;
};
};
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index b747f2ec2928..6147db925248 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -18,10 +18,10 @@
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
- * to deal with struct page. Thay way in future we can make it allocate
+ * deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
- * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 661fd842ea97..79849f37e782 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, has_interrupts;
+ int i, has_interrupts, irq;
int counter_size; /* in bits */
union cc_name {
@@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.attr_groups = arc_pmu->attr_groups,
};
- if (has_interrupts) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq < 0) {
- pr_err("Cannot get IRQ number for the platform\n");
- return -ENODEV;
- }
+ if (has_interrupts && ((irq = platform_get_irq(pdev, 0)) >= 0)) {
arc_pmu->irq = irq;
@@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
this_cpu_ptr(&arc_pmu_cpu));
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
-
- } else
+ } else {
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ }
/*
* perf parser doesn't really like '-' symbol in events name, so let's
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 28e8bf04b253..a331bb5d8319 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,44 +18,37 @@
#define ARC_PATH_MAX 256
-/*
- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
- * -Prints 3 regs per line and a CR.
- * -To continue, callee regs right after scratch, special handling of CR
- */
-static noinline void print_reg_file(long *reg_rev, int start_num)
+static noinline void print_regs_scratch(struct pt_regs *regs)
{
- unsigned int i;
- char buf[512];
- int n = 0, len = sizeof(buf);
-
- for (i = start_num; i < start_num + 13; i++) {
- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
- i, (unsigned long)*reg_rev);
-
- if (((i + 1) % 3) == 0)
- n += scnprintf(buf + n, len - n, "\n");
-
- /* because pt_regs has regs reversed: r12..r0, r25..r13 */
- if (is_isa_arcv2() && start_num == 0)
- reg_rev++;
- else
- reg_rev--;
- }
-
- if (start_num != 0)
- n += scnprintf(buf + n, len - n, "\n\n");
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
- /* To continue printing callee regs on same line as scratch regs */
- if (start_num == 0)
- pr_info("%s", buf);
- else
- pr_cont("%s\n", buf);
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
}
-static void show_callee_regs(struct callee_regs *cregs)
+static void print_regs_callee(struct callee_regs *regs)
{
- print_reg_file(&(cregs->r13), 13);
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
@@ -175,7 +168,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct callee_regs *cregs;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
@@ -204,25 +197,15 @@ void show_regs(struct pt_regs *regs)
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
- pr_cont(" [%2s%2s%2s%2s]",
+ pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
- pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
- regs->bta, regs->sp, regs->fp, (void *)regs->blink);
- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
- regs->lp_start, regs->lp_end, regs->lp_count);
-
- /* print regs->r0 thru regs->r12
- * Sequential printing was generating horrible code
- */
- print_reg_file(&(regs->r0), 0);
- /* If Callee regs were saved, display them too */
- cregs = (struct callee_regs *)current->thread.callee_reg;
+ print_regs_scratch(regs);
if (cregs)
- show_callee_regs(cregs);
+ print_regs_callee(cregs);
preempt_disable();
}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index f886ac69d8ad..3a35b82a718e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -26,8 +26,8 @@ static unsigned long low_mem_sz;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
-static u64 high_mem_start;
-static u64 high_mem_sz;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
#endif
#ifdef CONFIG_DISCONTIGMEM
@@ -69,6 +69,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
high_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 1);
+ memblock_reserve(base, size);
#endif
}
@@ -157,7 +158,7 @@ void __init setup_arch_memory(void)
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
kmap_init();
@@ -166,22 +167,26 @@ void __init setup_arch_memory(void)
free_area_init(max_zone_pfn);
}
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
+static void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- reset_all_zones_managed_pages();
+ memblock_free(high_mem_start, high_mem_sz);
for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
+}
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
memblock_free_all();
+ highmem_init();
mem_init_print_info(NULL);
}
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index a4a61531c7fb..77712c5ffe84 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -33,7 +33,6 @@
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 8d19925fc09e..6783cf16ff81 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -116,7 +116,6 @@
switch0: ksz8563@0 {
compatible = "microchip,ksz8563";
reg = <0>;
- phy-mode = "mii";
reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>;
spi-max-frequency = <500000>;
@@ -140,6 +139,7 @@
reg = <2>;
label = "cpu";
ethernet = <&macb0>;
+ phy-mode = "mii";
fixed-link {
speed = <100>;
full-duplex;
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index cbebed5f050e..e8df458aad39 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -217,7 +217,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 0346ea621f0f..c846fa3c244d 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -284,7 +284,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 2d9b4dd05830..0016720ce530 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -488,7 +488,7 @@
};
spi@18029200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>,
<0x18029000 0x124>,
<0x1811b408 0x004>,
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
index 7a3d1d3e54a9..8f94364ba484 100644
--- a/arch/arm/boot/dts/imx6q-logicpd.dts
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -13,7 +13,7 @@
backlight: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 20000>;
+ pwms = <&pwm3 0 20000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_lcd>;
diff --git a/arch/arm/boot/dts/imx6q-prtwd2.dts b/arch/arm/boot/dts/imx6q-prtwd2.dts
index dffafbcaa7af..349959d38020 100644
--- a/arch/arm/boot/dts/imx6q-prtwd2.dts
+++ b/arch/arm/boot/dts/imx6q-prtwd2.dts
@@ -30,7 +30,7 @@
};
/* PRTWD2 rev 1 bitbang I2C for Ethernet Switch */
- i2c@4 {
+ i2c {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 7705285d9e3c..4d01c3300b97 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -22,8 +22,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index 0b02c7e60c17..f4dc46207954 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -1026,7 +1026,7 @@
#define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4
-#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0
#define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1
#define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2
#define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1
diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
index e5e20b07f184..7cb6153fc650 100644
--- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts
+++ b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
@@ -58,7 +58,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&fec1_phy>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 367439639da9..b7ea37ad4e55 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -394,7 +394,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLC>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 0 32>;
+ gpio-ranges = <&iomuxc1 0 0 20>;
};
gpio_ptd: gpio@40af0000 {
@@ -408,7 +408,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLD>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 32 32>;
+ gpio-ranges = <&iomuxc1 0 32 12>;
};
gpio_pte: gpio@40b00000 {
@@ -422,7 +422,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLE>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 64 32>;
+ gpio-ranges = <&iomuxc1 0 64 16>;
};
gpio_ptf: gpio@40b10000 {
@@ -436,7 +436,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLF>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 96 32>;
+ gpio-ranges = <&iomuxc1 0 96 20>;
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
index 100396f6c2fe..395e05f10d36 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
@@ -51,6 +51,8 @@
&mcbsp2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
};
&charger {
@@ -102,35 +104,18 @@
regulator-max-microvolt = <3300000>;
};
- lcd0: display@0 {
- compatible = "panel-dpi";
- label = "28";
- status = "okay";
- /* default-on; */
+ lcd0: display {
+ /* This isn't the exact LCD, but the timings meet spec */
+ compatible = "logicpd,type28";
pinctrl-names = "default";
pinctrl-0 = <&lcd_enable_pin>;
- enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
+ backlight = <&bl>;
+ enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
-
- panel-timing {
- clock-frequency = <9000000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <3>;
- hback-porch = <2>;
- hsync-len = <42>;
- vback-porch = <3>;
- vfront-porch = <2>;
- vsync-len = <11>;
- hsync-active = <1>;
- vsync-active = <1>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
};
bl: backlight {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index 381f0e82bb70..b0f6613e6d54 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -81,6 +81,8 @@
};
&mcbsp2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 069af9a19bb6..827373ef1a54 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -182,7 +182,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x1550000 0x0 0x10000>,
- <0x0 0x40000000 0x0 0x40000000>;
+ <0x0 0x40000000 0x0 0x20000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5da9cff7a53c..a82c96258a93 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -488,11 +488,11 @@
};
};
- target-module@5000 {
+ target-module@4000 {
compatible = "ti,sysc-omap2", "ti,sysc";
- reg = <0x5000 0x4>,
- <0x5010 0x4>,
- <0x5014 0x4>;
+ reg = <0x4000 0x4>,
+ <0x4010 0x4>,
+ <0x4014 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
@@ -504,7 +504,7 @@
ti,syss-mask = <1>;
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0 0x5000 0x1000>;
+ ranges = <0 0x4000 0x1000>;
dsi1: encoder@0 {
compatible = "ti,omap5-dsi";
@@ -514,8 +514,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
@@ -545,8 +546,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index fc4abef143a0..0013ec3463c4 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -821,7 +821,7 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xffd01000 0x100>;
+ reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>;
clock-names = "timer";
resets = <&rst L4SYSTIMER1_RESET>;
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 0fe03aa0367f..2259d11af721 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -495,7 +495,7 @@
};
ocotp: ocotp@400a5000 {
- compatible = "fsl,vf610-ocotp";
+ compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>;
};
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index a9755c501bec..b06e537d5149 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,13 +1,11 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V4T=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
@@ -15,19 +13,17 @@ CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
CONFIG_INTEGRATOR_IMPD1=y
CONFIG_ARCH_INTEGRATOR_CP=y
-CONFIG_PCI=y
-CONFIG_PREEMPT=y
CONFIG_AEABI=y
# CONFIG_ATAGS is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
-CONFIG_CMA=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,6 +33,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
+CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
@@ -52,9 +49,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_E100=y
CONFIG_SMC91X=y
+CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
+CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index 54aff33e55e6..bfa5e1b8dba7 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev)
return pwrdm;
clk = of_clk_get(dev->of_node->parent, 0);
- if (!clk) {
+ if (IS_ERR(clk)) {
dev_err(dev, "no fck found\n");
return NULL;
}
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 15f7b0ed3836..39802066232e 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -745,7 +745,7 @@
};
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index a39f0a1723e0..903c0eb61290 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 9de2aa1c573c..a5154f13a18e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -702,7 +702,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>,
- <&clk IMX8MP_CLK_SDMA1_ROOT>;
+ <&clk IMX8MP_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index f70435cf9ad5..561fa792fe5a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -423,7 +423,7 @@
tmu: tmu@30260000 {
compatible = "fsl,imx8mq-tmu";
reg = <0x30260000 0x10000>;
- interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
little-endian;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index d174ad214857..9a11e5c60c26 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -143,6 +143,56 @@
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
+
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <0>;
+ reset-gpios = <&pio 54 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "wan";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan0";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan1";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan2";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan3";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 0b4de627f96e..08ad0ffb24df 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -105,20 +105,71 @@
pinctrl-0 = <&eth_pins>;
status = "okay";
- gmac1: mac@1 {
+ gmac0: mac@0 {
compatible = "mediatek,eth-mac";
- reg = <1>;
- phy-handle = <&phy5>;
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
};
mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- phy5: ethernet-phy@5 {
- reg = <5>;
- phy-mode = "sgmii";
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <0>;
+ reset-gpios = <&pio 54 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
};
+
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 9174ddc76bdc..3ec99f13c259 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -13,6 +13,7 @@
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
/ {
compatible = "xlnx,zynqmp";
@@ -558,6 +559,15 @@
};
};
+ psgtr: phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ #phy-cells = <4>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -601,7 +611,7 @@
power-domains = <&zynqmp_firmware PD_SD_1>;
};
- smmu: smmu@fd800000 {
+ smmu: iommu@fd800000 {
compatible = "arm,mmu-500";
reg = <0x0 0xfd800000 0x0 0x20000>;
status = "disabled";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e0f33826819f..6d04b9577b0b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -724,6 +724,17 @@ CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_RENESAS_USB3=m
CONFIG_USB_TEGRA_XUDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_FUSB302=m
@@ -914,6 +925,7 @@ CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_K3_AM6_SOC=y
CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
+CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
CONFIG_IIO=y
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 49a55be2b9a2..1cc5f5f72d0b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}
+/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
- kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+ return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
+static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
+}
+
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
@@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
+ if (kvm_vcpu_abt_iss1tw(vcpu))
+ return true;
+
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
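With the rename, kvm_vcpu_dabt_iswrite() no longer folds in the stage-1 page-table-walk (S1PTW) case, so every caller must test S1PTW first, as the new comment demands and as kvm_is_write_fault() now does. A condensed sketch of the required ordering, assuming the helpers above:

/* S1PTW faults are hardware AF/DBM updates and count as writes,
 * regardless of what ESR_ELx_WNR says; check them first.
 */
static bool fault_is_write(const struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))		/* S1PTW: always a write */
		return true;
	if (kvm_vcpu_trap_is_iabt(vcpu))	/* instruction aborts never write */
		return false;
	return kvm_vcpu_dabt_iswrite(vcpu);	/* now safe to consult WNR */
}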
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e52c927aade5..905c2b87e05a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
/* Guest PV state */
struct {
- u64 steal;
u64 last_steal;
gpa_t base;
} steal;
@@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index c332d49780dc..560ba69e13c1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -910,8 +910,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.desc = "ARM erratum 1418040",
.capability = ARM64_WORKAROUND_1418040,
ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
- .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
- ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
+ /*
+ * We need to allow affected CPUs to come in late, but
+ * also need the non-affected CPUs to be able to come
+ * in at any point in time. Wonderful.
+ */
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index 295d66490584..c07d7a034941 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -50,16 +50,19 @@ static u64 pv_steal_clock(int cpu)
struct pv_time_stolen_time_region *reg;
reg = per_cpu_ptr(&stolen_time_region, cpu);
- if (!reg->kaddr) {
- pr_warn_once("stolen time enabled but not configured for cpu %d\n",
- cpu);
+
+ /*
+ * paravirt_steal_clock() may be called before the CPU
+ * online notification callback runs. Until the callback
+ * has run we just return zero.
+ */
+ if (!reg->kaddr)
return 0;
- }
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
}
-static int stolen_time_dying_cpu(unsigned int cpu)
+static int stolen_time_cpu_down_prepare(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
@@ -73,7 +76,7 @@ static int stolen_time_dying_cpu(unsigned int cpu)
return 0;
}
-static int init_stolen_time_cpu(unsigned int cpu)
+static int stolen_time_cpu_online(unsigned int cpu)
{
struct pv_time_stolen_time_region *reg;
struct arm_smccc_res res;
@@ -103,19 +106,20 @@ static int init_stolen_time_cpu(unsigned int cpu)
return 0;
}
-static int pv_time_init_stolen_time(void)
+static int __init pv_time_init_stolen_time(void)
{
int ret;
- ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
- "hypervisor/arm/pvtime:starting",
- init_stolen_time_cpu, stolen_time_dying_cpu);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "hypervisor/arm/pvtime:online",
+ stolen_time_cpu_online,
+ stolen_time_cpu_down_prepare);
if (ret < 0)
return ret;
return 0;
}
-static bool has_pv_steal_clock(void)
+static bool __init has_pv_steal_clock(void)
{
struct arm_smccc_res res;
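Switching from the dedicated CPUHP_AP_ARM_KVMPV_STARTING state to CPUHP_AP_ONLINE_DYN moves the callbacks out of the atomic "starting" phase into sleepable context and drops the need for a fixed slot in the hotplug enum; the renamed callbacks say when they run. A minimal sketch of this registration pattern (hypothetical example_* names):

#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* set up this CPU's per-cpu state; may sleep here */
	return 0;
}

static int example_cpu_down_prepare(unsigned int cpu)
{
	/* tear the state down before the CPU goes offline */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* a dynamic state is allocated; ret is the state number */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"example/subsys:online",
				example_cpu_online,
				example_cpu_down_prepare);
	return ret < 0 ? ret : 0;
}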
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 46dc3d75cf13..b588c3b5c2f0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = kvm_arm_pvtime_supported();
+ break;
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 5b6b8fa00f0a..0261308bf944 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -449,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
kvm_vcpu_dabt_isvalid(vcpu) &&
!kvm_vcpu_abt_issea(vcpu) &&
- !kvm_vcpu_dabt_iss1tw(vcpu);
+ !kvm_vcpu_abt_iss1tw(vcpu);
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ba00bcc0c884..3d26b47a1343 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1849,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
VM_BUG_ON(write_fault && exec_fault);
if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -1877,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
vma_pagesize = PAGE_SIZE;
+ vma_shift = PAGE_SHIFT;
}
/*
@@ -1970,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM &&
stage2_is_exec(mmu, fault_ipa, vma_pagesize));
- if (vma_pagesize == PUD_SIZE) {
+ /*
+ * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
+ * all we have is a 2-level page table. Trying to map a PUD in
+ * this case would be fatally wrong.
+ */
+ if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
new_pud = kvm_pud_mkhuge(new_pud);
@@ -2125,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
goto out;
}
- if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+ if (kvm_vcpu_abt_iss1tw(vcpu)) {
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
ret = 1;
goto out_unlock;
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index f7b52ce1557e..920ac43077ad 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,25 +13,22 @@
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- u64 steal;
- __le64 steal_le;
- u64 offset;
- int idx;
u64 base = vcpu->arch.steal.base;
+ u64 last_steal = vcpu->arch.steal.last_steal;
+ u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ u64 steal = 0;
+ int idx;
if (base == GPA_INVALID)
return;
- /* Let's do the local bookkeeping */
- steal = vcpu->arch.steal.steal;
- steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
- vcpu->arch.steal.last_steal = current->sched_info.run_delay;
- vcpu->arch.steal.steal = steal;
-
- steal_le = cpu_to_le64(steal);
idx = srcu_read_lock(&kvm->srcu);
- offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
- kvm_put_guest(kvm, base + offset, steal_le, u64);
+ if (!kvm_get_guest(kvm, base + offset, steal)) {
+ steal = le64_to_cpu(steal);
+ vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+ steal += vcpu->arch.steal.last_steal - last_steal;
+ kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+ }
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
- val = SMCCC_RET_SUCCESS;
+ if (vcpu->arch.steal.base != GPA_INVALID)
+ val = SMCCC_RET_SUCCESS;
break;
}
@@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
* Start counting stolen time from the time the guest requests
* the feature enabled.
*/
- vcpu->arch.steal.steal = 0;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
idx = srcu_read_lock(&kvm->srcu);
@@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
-static bool kvm_arm_pvtime_supported(void)
+bool kvm_arm_pvtime_supported(void)
{
return !!sched_info_on();
}
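The rewrite makes the guest-visible field the accumulator: read the current value from guest memory, add only the run_delay delta since the last sample, and write it back. A userspace toy model of that arithmetic (hypothetical names; guest memory stands in as a plain variable):

#include <stdint.h>
#include <stdio.h>

static uint64_t guest_stolen;	/* the value published to the guest */
static uint64_t last_steal;	/* vcpu->arch.steal.last_steal analogue */

static void update_stolen_time(uint64_t run_delay)
{
	guest_stolen += run_delay - last_steal;	/* only the new delta */
	last_steal = run_delay;
}

int main(void)
{
	update_stolen_time(100);	/* first sample: +100 */
	update_stolen_time(250);	/* 150 more ns stolen */
	printf("%llu\n", (unsigned long long)guest_stolen);	/* 250 */
	return 0;
}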
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4691053c5ee4..ff0444352bba 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
__print_symbolic(__entry->ret, kvm_arm_exception_type),
__entry->esr_ec,
__print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+ TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
@@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->cpsr = cpsr;
),
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+ TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
__entry->end = end;
),
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_age_hva,
@@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
__entry->end = end;
),
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_set_way_flush,
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..8d78acc4fba7 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
__entry->is_wfe = is_wfe;
),
- TP_printk("guest executed wf%c at: 0x%08lx",
+ TP_printk("guest executed wf%c at: 0x%016lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->imm = imm;
),
- TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
@@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
__entry->write_value = write_value;
),
- TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+ TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
);
TRACE_EVENT(kvm_handle_sys_reg,
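Every format change in these two trace headers makes the same point: arm64 addresses are 64 bit, and %08lx pads to only eight digits, so small values print misleadingly short and trace columns stop lining up. A quick userspace demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long pc = 0x8010;	/* a small (early-boot) address */

	printf("0x%08lx\n", pc);	/* 0x00008010 - looks like a 32-bit value */
	printf("0x%016lx\n", pc);	/* 0x0000000000008010 - full 64-bit width */
	return 0;
}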
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f8912e45be7a..ef9f1d5e989d 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -143,14 +143,17 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
}
}
-static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+static inline int bpf2a64_offset(int bpf_insn, int off,
const struct jit_ctx *ctx)
{
- int to = ctx->offset[bpf_to];
- /* -1 to account for the Branch instruction */
- int from = ctx->offset[bpf_from] - 1;
-
- return to - from;
+ /* BPF JMP offset is relative to the next instruction */
+ bpf_insn++;
+ /*
+ * arm64 branch instructions, however, encode the offset
+ * from the branch itself, so we must subtract 1 from the
+ * instruction offset.
+ */
+ return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}
static void jit_fill_hole(void *area, unsigned int size)
@@ -642,7 +645,7 @@ emit_bswap_uxt:
/* JUMP off */
case BPF_JMP | BPF_JA:
- jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm26(jmp_offset);
emit(A64_B(jmp_offset), ctx);
break;
@@ -669,7 +672,7 @@ emit_bswap_uxt:
case BPF_JMP32 | BPF_JSLE | BPF_X:
emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
- jmp_offset = bpf2a64_offset(i + off, i, ctx);
+ jmp_offset = bpf2a64_offset(i, off, ctx);
check_imm19(jmp_offset);
switch (BPF_OP(code)) {
case BPF_JEQ:
@@ -908,10 +911,21 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
const struct bpf_prog *prog = ctx->prog;
int i;
+ /*
+ * - offset[0] - offset of the end of prologue,
+ * start of the 1st instruction.
+ * - offset[1] - offset of the end of 1st instruction,
+ * start of the 2nd instruction
+ * [....]
+ * - offset[3] - offset of the end of 3rd instruction,
+ * start of 4th instruction
+ */
for (i = 0; i < prog->len; i++) {
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
@@ -919,11 +933,16 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
ctx->offset[i] = ctx->idx;
continue;
}
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
if (ret)
return ret;
}
+ /*
+ * offset is allocated with prog->len + 1, so fill in
+ * the last element with the offset after the last
+ * instruction (end of program)
+ */
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
return 0;
}
@@ -1002,7 +1021,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
- ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_off;
@@ -1089,7 +1108,7 @@ skip_init_ctx:
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
- bpf_prog_fill_jited_linfo(prog, ctx.offset);
+ bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
kfree(jit_data);
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index b66ba907019c..87927eb824cc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -74,8 +74,6 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
-#define acpi_unlazy_tlb(x)
-
#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu) \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c95fa3a2484c..8f328298f8cc 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -877,6 +877,7 @@ config SNI_RM
select I8253
select I8259
select ISA
+ select MIPS_L1_CACHE_SHIFT_6
select SWAP_IO_SPACE if CPU_BIG_ENDIAN
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7de85d2253ff..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index b09dc844985a..eeeec18c420a 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -143,7 +143,10 @@ static struct platform_device sc26xx_pdev = {
},
};
-static u32 a20r_ack_hwint(void)
+/*
+ * Trigger chipset to update CPU's CAUSE IP field
+ */
+static u32 a20r_update_cause_ip(void)
{
u32 status = read_c0_status();
@@ -205,12 +208,14 @@ static void a20r_hwint(void)
int irq;
clear_c0_status(IE_IRQ0);
- status = a20r_ack_hwint();
+ status = a20r_update_cause_ip();
cause = read_c0_cause();
irq = ffs(((cause & status) >> 8) & 0xf8);
if (likely(irq > 0))
do_IRQ(SNI_A20R_IRQ_BASE + irq - 1);
+
+ a20r_update_cause_ip();
set_c0_status(IE_IRQ0);
}
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f0390211236b..120f5005461b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -165,19 +165,19 @@ struct __large_struct {
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = (__typeof__(*(ptr))) 0; \
__gu_err; \
})
@@ -191,11 +191,13 @@ do { \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, op) \
+{ \
+ unsigned long __gu_tmp; \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
"2:\n" \
@@ -209,10 +211,14 @@ do { \
" .align 2\n" \
" .long 1b,3b\n" \
".previous" \
- : "=r"(err), "=r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))__gu_tmp; \
+}
#define __get_user_asm2(x, addr, err) \
+{ \
+ unsigned long long __gu_tmp; \
__asm__ __volatile__( \
"1: l.lwz %1,0(%2)\n" \
"2: l.lwz %H1,4(%2)\n" \
@@ -229,8 +235,11 @@ do { \
" .long 1b,4b\n" \
" .long 2b,4b\n" \
".previous" \
- : "=r"(err), "=&r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=&r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))( \
+ (__typeof__((x)-(x)))__gu_tmp); \
+}
/* more complex routines */
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index b18e775f8be3..13c87f1f872b 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -80,6 +80,16 @@ static void __init setup_memory(void)
*/
memblock_reserve(__pa(_stext), _end - _stext);
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Then reserve the initrd, if any */
+ if (initrd_start && (initrd_end > initrd_start)) {
+ unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
+ unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
+
+ memblock_reserve(__pa(aligned_start), aligned_end - aligned_start);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
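The reservation rounds outward to page boundaries so partial pages at either end of the initrd stay covered. A standalone check of the rounding, with plain-C stand-ins for the kernel's macros (both assume a power-of-two alignment):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long initrd_start = 0x8001200UL, initrd_end = 0x8042a00UL;
	unsigned long start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
	unsigned long end = ALIGN(initrd_end, PAGE_SIZE);

	/* prints: reserve [0x8001000, 0x8043000) */
	printf("reserve [%#lx, %#lx)\n", start, end);
	return 0;
}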
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 08f56af387ac..534a52ec5e66 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -16,7 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static void cache_loop(struct page *page, const unsigned int reg)
+static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 65bed1fdeaad..787e829b6f25 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -116,7 +116,6 @@ config PPC
#
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
- select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index af9af03059e4..15ed8d0aa014 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -108,7 +108,6 @@ CONFIG_FB_NVIDIA=y
CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 5e6f92ba3210..66e9a0fd64ff 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -743,7 +743,6 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 55442d45c597..b392384a3b15 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)
extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- if (early_radix_enabled())
- return radix__setup_initial_memory_limit(first_memblock_base,
- first_memblock_size);
+ /*
+ * Hash has stricter restrictions. At this point we don't
+ * know which translation we will pick. Hence go with hash
+ * restrictions.
+ */
return hash__setup_initial_memory_limit(first_memblock_base,
first_memblock_size);
}
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 569fecd7b5b2..9053fc9d20c7 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
if (!tbl)
return 0;
- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+ tbl->it_page_shift - 1);
mask += mask - 1;
return mask;
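The old line had two bugs: "1ULL <" is a comparison rather than a shift, and the size was counted in TCE entries instead of bytes. The fix shifts by the table's page shift as well, so the mask covers the highest byte address the table can map. A worked userspace example (assumes 64-bit long):

#include <stdio.h>

static int fls_long(unsigned long x)	/* last set bit, 1-based */
{
	return x ? 64 - __builtin_clzl(x) : 0;
}

int main(void)
{
	unsigned long it_offset = 0, it_size = 0x10000;	/* 64K TCE entries */
	unsigned int it_page_shift = 16;		/* 64K IOMMU pages */
	unsigned long long mask;

	mask = 1ULL << (fls_long(it_offset + it_size) + it_page_shift - 1);
	mask += mask - 1;	/* top bit plus everything below it */
	printf("required mask = %#llx\n", mask);	/* 0x1ffffffff */
	return 0;
}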
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 87ab1152d5ce..e147bbdc12cd 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+ cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 4c985467a668..5206c2eb2a1d 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -111,7 +111,6 @@ SECTIONS
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.glink .iplt .plt .rela*)
}
}
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 38c317f25141..32ebb3522ea1 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+ cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
# install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 4e3a8d4ee614..256fb9720298 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -30,7 +30,7 @@ SECTIONS
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
- *(.sfpr)
+ *(.sfpr .glink)
} :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
- *(.glink .iplt .plt .rela*)
}
}
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 28c784976bed..d5f0c10d752a 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
}
}
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
-{
- /*
- * We don't currently support the first MEMBLOCK not mapping 0
- * physical on those processors
- */
- BUG_ON(first_memblock_base != 0);
-
- /*
- * Radix mode is not limited by RMA / VRMA addressing.
- */
- ppc64_rma_size = ULONG_MAX;
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 02e127fa5777..8459056cce67 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -433,9 +433,16 @@ void __init mmu_early_init_devtree(void)
if (!(mfmsr() & MSR_HV))
early_check_vec5();
- if (early_radix_enabled())
+ if (early_radix_enabled()) {
radix__early_init_devtree();
- else
+ /*
+ * We have finalized the translation we are going to use by now.
+ * Radix mode is not limited by RMA / VRMA addressing.
+ * Hence don't limit memblock allocations.
+ */
+ ppc64_rma_size = ULONG_MAX;
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ } else
hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f439f0dfea7d..a88a707a608a 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -822,7 +822,7 @@ free_stats:
kfree(stats);
return rc ? rc : seq_buf_used(&s);
}
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index df18372861d8..7766e1289468 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -32,6 +32,7 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select CLONE_BACKWARDS
+ select CLINT_TIMER if !MMU
select COMMON_CLK
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi
index c1df56ccb8d5..d2d0ff645632 100644
--- a/arch/riscv/boot/dts/kendryte/k210.dtsi
+++ b/arch/riscv/boot/dts/kendryte/k210.dtsi
@@ -95,10 +95,12 @@
#clock-cells = <1>;
};
- clint0: interrupt-controller@2000000 {
+ clint0: clint@2000000 {
+ #interrupt-cells = <1>;
compatible = "riscv,clint0";
reg = <0x2000000 0xC000>;
- interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
+ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+ &cpu1_intc 3 &cpu1_intc 7>;
clocks = <&sysctl K210_CLK_ACLK>;
};
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
new file mode 100644
index 000000000000..0789fd37b40a
--- /dev/null
+++ b/arch/riscv/include/asm/clint.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H
+
+#include <linux/types.h>
+#include <asm/mmio.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index ace8a6e2d11d..845002cc2e57 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -66,6 +66,13 @@ do { \
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
#endif
#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index a3fb85d505d4..7f659dda0032 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -10,6 +10,31 @@
typedef unsigned long cycles_t;
+#ifdef CONFIG_RISCV_M_MODE
+
+#include <asm/clint.h>
+
+#ifdef CONFIG_64BIT
+static inline cycles_t get_cycles(void)
+{
+ return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val));
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+#else /* CONFIG_RISCV_M_MODE */
+
static inline cycles_t get_cycles(void)
{
return csr_read(CSR_TIME);
@@ -41,6 +66,8 @@ static inline u64 get_cycles64(void)
}
#endif /* CONFIG_64BIT */
+#endif /* !CONFIG_RISCV_M_MODE */
+
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
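get_cycles_hi() exists so 32-bit M-mode kernels can compose a coherent 64-bit reading of the memory-mapped mtime counter; the file's get_cycles64() helper consumes it with the usual hi/lo/hi retry loop, roughly:

/* re-read the high word until it is stable across the low-word read */
static inline u64 get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = get_cycles_hi();
		lo = get_cycles();
	} while (hi != get_cycles_hi());

	return ((u64)hi << 32) | lo;
}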
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 2ff63d0cbb50..99e12faa5498 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -97,6 +97,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return __ftrace_modify_call(rec->ip, addr, false);
}
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ int out;
+
+ ftrace_arch_code_modify_prepare();
+ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ ftrace_arch_code_modify_post_process();
+
+ return out;
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 787c75f751a5..ca03762a3733 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -226,12 +226,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
ptep = &fixmap_pte[pte_index(addr)];
- if (pgprot_val(prot)) {
+ if (pgprot_val(prot))
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
- } else {
+ else
pte_clear(&init_mm, addr, ptep);
- local_flush_tlb_page(addr);
- }
+ local_flush_tlb_page(addr);
}
static pte_t *__init get_pte_virt(phys_addr_t pa)
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 3cfe1eb89838..c0be5fe1ddba 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -238,7 +238,10 @@ extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int);
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx);
int ccw_device_pnso(struct ccw_device *cdev,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc);
+ struct chsc_pnso_area *pnso_area, u8 oc,
+ struct chsc_pnso_resume_token resume_token, int cnc);
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid);
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid);
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid);
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid);
#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 36ce2d25a5fc..ae4d2549cd67 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -12,6 +12,13 @@
#include <uapi/asm/chsc.h>
/**
+ * Operation codes for CHSC PNSO:
+ * PNSO_OC_NET_BRIDGE_INFO - only addresses that are visible to a bridgeport
+ * PNSO_OC_NET_ADDR_INFO - all addresses
+ */
+#define PNSO_OC_NET_BRIDGE_INFO 0
+#define PNSO_OC_NET_ADDR_INFO 3
+/**
* struct chsc_pnso_naid_l2 - network address information descriptor
* @nit: Network interface token
* @addr_lnid: network address and logical network id (VLAN ID)
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 480bb02ccacd..638137d46c85 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -36,7 +36,9 @@ struct css_general_char {
u64 alt_ssi : 1; /* bit 108 */
u64 : 1;
u64 narf : 1; /* bit 110 */
- u64 : 12;
+ u64 : 5;
+ u64 enarf: 1; /* bit 116 */
+ u64 : 6;
u64 util_str : 1;/* bit 123 */
} __packed;
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index faca269d5f27..a44ddc2f2dec 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -26,6 +26,7 @@ void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
+void do_secure_storage_violation(struct pt_regs *regs);
void addressing_exception(struct pt_regs *regs);
void data_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index c73f50649e7e..f7f1e64e0d98 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -39,14 +39,13 @@ void enabled_wait(void)
local_irq_restore(flags);
/* Account time spent with enabled wait psw loaded as idle time. */
- /* XXX seqcount has tracepoints that require RCU */
- write_seqcount_begin(&idle->seqcount);
+ raw_write_seqcount_begin(&idle->seqcount);
idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
idle->idle_count++;
account_idle_time(cputime_to_nsecs(idle_time));
- write_seqcount_end(&idle->seqcount);
+ raw_write_seqcount_end(&idle->seqcount);
}
NOKPROBE_SYMBOL(enabled_wait);
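The raw_ variants skip the lockdep and tracing hooks (which may use RCU and so must not run on this idle path) but still provide the odd/even sequence protocol, so readers need no change. A hedged sketch of the reader side (hypothetical helper name):

static u64 read_idle_time(struct s390_idle_data *idle)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&idle->seqcount);
		val = idle->idle_time;
	} while (read_seqcount_retry(&idle->seqcount, seq));	/* retry if a write raced */

	return val;
}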
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 2c27907a5ffc..9a92638360ee 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -80,7 +80,7 @@ PGM_CHECK(do_dat_exception) /* 3b */
PGM_CHECK_DEFAULT /* 3c */
PGM_CHECK(do_secure_storage_access) /* 3d */
PGM_CHECK(do_non_secure_storage_access) /* 3e */
-PGM_CHECK_DEFAULT /* 3f */
+PGM_CHECK(do_secure_storage_violation) /* 3f */
PGM_CHECK(monitor_event_exception) /* 40 */
PGM_CHECK_DEFAULT /* 41 */
PGM_CHECK_DEFAULT /* 42 */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e600f6953d7c..c2c1b4e723ea 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -619,7 +619,7 @@ static struct notifier_block kdump_mem_nb = {
/*
* Make sure that the area behind memory_end is protected
*/
-static void reserve_memory_end(void)
+static void __init reserve_memory_end(void)
{
if (memory_end_set)
memblock_reserve(memory_end, ULONG_MAX);
@@ -628,7 +628,7 @@ static void reserve_memory_end(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void reserve_oldmem(void)
+static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
@@ -640,7 +640,7 @@ static void reserve_oldmem(void)
/*
* Make sure that oldmem, where the dump is stored, is protected
*/
-static void remove_oldmem(void)
+static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
if (OLDMEM_BASE)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 4c8c063bce5b..996884dcc9fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -859,6 +859,21 @@ void do_non_secure_storage_access(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+ /*
+ * Either KVM messed up the secure guest mapping or the same
+ * page is mapped into multiple secure guests.
+ *
+ * This exception is only triggered when a guest 2 is running
+ * and can therefore never occur in kernel context.
+ */
+ printk_ratelimited(KERN_WARNING
+ "Secure storage violation in task: %s, pid %d\n",
+ current->comm, current->pid);
+ send_sig(SIGSEGV, current, 0);
+}
+
#else
void do_secure_storage_access(struct pt_regs *regs)
{
@@ -869,4 +884,9 @@ void do_non_secure_storage_access(struct pt_regs *regs)
{
default_trap_handler(regs);
}
+
+void do_secure_storage_violation(struct pt_regs *regs)
+{
+ default_trap_handler(regs);
+}
#endif
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index be4b8532dd3c..0a4182792876 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -50,7 +50,6 @@ struct bpf_jit {
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
- int labels[1]; /* Labels for local jumps */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
REG_SET_SEEN(b3); \
})
-#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
+#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
({ \
- int rel = (jit->labels[label] - jit->prg) >> 1; \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
(op2) | (mask) << 12); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
-#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
+#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
({ \
- int rel = (jit->labels[label] - jit->prg) >> 1; \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
(rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
REG_SET_SEEN(b1); \
@@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
}
- case BPF_JMP | BPF_TAIL_CALL:
+ case BPF_JMP | BPF_TAIL_CALL: {
+ int patch_1_clrj, patch_2_clij, patch_3_brc;
+
/*
* Implicit input:
* B1: pointer to ctx
@@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
/* if ((u32)%b3 >= (u32)%w1) goto out; */
- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
- /* clrj %b3,%w1,0xa,label0 */
- EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
- REG_W1, 0, 0xa);
- } else {
- /* clr %b3,%w1 */
- EMIT2(0x1500, BPF_REG_3, REG_W1);
- /* brcl 0xa,label0 */
- EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
- }
+ /* clrj %b3,%w1,0xa,out */
+ patch_1_clrj = jit->prg;
+ EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
+ jit->prg);
/*
* if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
@@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7080000, REG_W0, 1);
/* laal %w1,%w0,off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
- /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
- MAX_TAIL_CALL_CNT, 0, 0x2);
- } else {
- /* clfi %w1,MAX_TAIL_CALL_CNT */
- EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
- /* brcl 0x2,label0 */
- EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
- }
+ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
+ patch_2_clij = jit->prg;
+ EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
+ 2, jit->prg);
/*
* prog = array->ptrs[index];
@@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* ltg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
- /* brc 0x8,label0 */
- EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
- } else {
- /* brcl 0x8,label0 */
- EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
- }
+ /* brc 0x8,out */
+ patch_3_brc = jit->prg;
+ EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
/*
* Restore registers before calling function
@@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* bc 0xf,tail_call_start(%r1) */
_EMIT4(0x47f01000 + jit->tail_call_start);
/* out: */
- jit->labels[0] = jit->prg;
+ if (jit->prg_buf) {
+ *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
+ (jit->prg - patch_1_clrj) >> 1;
+ *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
+ (jit->prg - patch_2_clij) >> 1;
+ *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
+ (jit->prg - patch_3_brc) >> 1;
+ }
break;
+ }
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
if (last)
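Rather than one shared labels[0] slot resolved across JIT passes, each branch now records its own program offset, and the 16-bit halfword displacement is patched into the image once the "out:" target is known. A userspace model of that back-patching (instruction format and endianness simplified; s390 stores the halfword big-endian):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t prg_buf[64] = { 0 };
	unsigned int prg = 0;

	unsigned int patch_site = prg;	/* a 6-byte branch; bytes 2-3 hold the offset */
	prg += 6;

	prg += 10;			/* ... more emitted instructions ... */

	uint16_t rel = (prg - patch_site) >> 1;	/* displacement in halfwords */
	memcpy(prg_buf + patch_site + 2, &rel, sizeof(rel));

	printf("patched rel = %u halfwords\n", rel);	/* 8 */
	return 0;
}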
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 4b62d6b55024..1804230dd8d8 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -668,6 +668,10 @@ EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
zpci_dma_exit_device(zdev);
+ /*
+ * The zPCI function may already be disabled by the platform; this is
+ * detected in clp_disable_fh(), which then becomes a no-op.
+ */
return clp_disable_fh(zdev);
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 9a3a291cad43..d9ae7456dd4c 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -143,6 +143,8 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
zpci_remove_device(zdev);
}
+ zdev->fh = ccdf->fh;
+ zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
if (!clp_get_state(ccdf->fid, &state) &&
state == ZPCI_FN_STATE_RESERVED) {
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1a0d7cf71c10..100bf241340b 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -8,7 +8,6 @@
#ifdef CONFIG_SMP
-#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/percpu.h>
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index ad963104d22d..91ab2607a1ff 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -370,7 +370,6 @@ syscall_trace_entry:
nop
cmp/eq #-1, r0
bt syscall_exit
- mov.l r0, @(OFF_R0,r15) ! Save return value
! Reload R0-R4 from kernel stack, where the
! parent may have modified them using
! ptrace(POKEUSR). (Note that R0-R2 are
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index b05bf92f9c32..5281685f6ad1 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -455,16 +455,11 @@ long arch_ptrace(struct task_struct *child, long request,
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
- long ret = 0;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- /*
- * Tracing decided this syscall should not happen.
- * We'll return a bogus call number to get an ENOSYS
- * error, but leave the original number in regs->regs[0].
- */
- ret = -1L;
+ tracehook_report_syscall_entry(regs)) {
+ regs->regs[0] = -ENOSYS;
+ return -1;
+ }
if (secure_computing() == -1)
return -1;
@@ -475,7 +470,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
- return ret ?: regs->regs[0];
+ return 0;
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3962f592633d..ff7894f39e0e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -43,6 +43,8 @@ KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+# Disable relocation relaxation in case the link is not PIE.
+KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index d7577fece9eb..78210793d357 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -19,6 +19,7 @@ CONFIG_CGROUP_CPUACCT=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+# CONFIG_64BIT is not set
CONFIG_SMP=y
CONFIG_X86_GENERIC=y
CONFIG_HPET_TIMER=y
@@ -186,7 +187,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index f85600143747..9936528e1939 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -181,7 +181,6 @@ CONFIG_DRM_I915=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_EFI=y
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 48512c7944e7..2f84c7ca74ea 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -60,16 +60,10 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
- unsigned int nr = (unsigned int)regs->orig_ax;
-
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- /*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
- */
- return (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
+ return (unsigned int)regs->orig_ax;
}
/*
@@ -91,15 +85,29 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
unsigned int nr = syscall_32_enter(regs);
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
}
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ /*
+ * This cannot use syscall_enter_from_user_mode() as it has to
+ * fetch EBP before invoking any of the syscall entry work
+ * functions.
+ */
+ syscall_enter_from_user_mode_prepare(regs);
+
instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
@@ -122,6 +130,9 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
return false;
}
+ /* The cast truncates any ptrace-induced syscall nr > 2^32 - 1 */
+ nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
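syscall_enter_from_user_mode() is, in effect, the composition of the two helpers used here: the prepare step establishes entry state and interrupt accounting, and the work step runs ptrace/seccomp and the other entry hooks. Splitting them lets __do_fast_syscall_32() fetch EBP from user memory in between, so the hooks observe a complete register set. A hedged sketch of that relationship (hypothetical _sketch name; the real generic-entry code carries more detail):

static long syscall_enter_from_user_mode_sketch(struct pt_regs *regs, long nr)
{
	syscall_enter_from_user_mode_prepare(regs);	/* state + IRQ setup */
	/* ...a caller may safely touch user memory here... */
	return syscall_enter_from_user_mode_work(regs, nr);	/* ptrace, seccomp */
}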
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index ca0976456a6b..6d2df1ee427b 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -159,8 +159,6 @@ static inline u64 x86_default_get_root_pointer(void)
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
-#define acpi_unlazy_tlb(x) leave_mm(x)
-
#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index a8f9315b9eae..6fe54b2813c1 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -18,8 +18,16 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
* state, not the interrupt state as imagined by Xen.
*/
unsigned long flags = native_save_fl();
- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
- X86_EFLAGS_NT));
+ unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;
+
+ /*
+ * For !SMAP hardware we patch out CLAC on entry.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMAP) ||
+ (IS_ENABLED(CONFIG_X86_64) && boot_cpu_has(X86_FEATURE_XENPV)))
+ mask |= X86_EFLAGS_AC;
+
+ WARN_ON_ONCE(flags & mask);
/* We think we came from user mode. Make sure pt_regs agrees. */
WARN_ON_ONCE(!user_mode(regs));
diff --git a/arch/x86/include/asm/frame.h b/arch/x86/include/asm/frame.h
index 296b346184b2..fb42659f6e98 100644
--- a/arch/x86/include/asm/frame.h
+++ b/arch/x86/include/asm/frame.h
@@ -60,12 +60,26 @@
#define FRAME_END "pop %" _ASM_BP "\n"
#ifdef CONFIG_X86_64
+
#define ENCODE_FRAME_POINTER \
"lea 1(%rsp), %rbp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs + 1;
+}
+
#else /* !CONFIG_X86_64 */
+
#define ENCODE_FRAME_POINTER \
"movl %esp, %ebp\n\t" \
"andl $0x7fffffff, %ebp\n\t"
+
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return (unsigned long)regs & 0x7fffffff;
+}
+
#endif /* CONFIG_X86_64 */
#endif /* __ASSEMBLY__ */
@@ -83,6 +97,11 @@
#define ENCODE_FRAME_POINTER
+static inline unsigned long encode_frame_pointer(struct pt_regs *regs)
+{
+ return 0;
+}
+
#endif
#define FRAME_BEGIN
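On x86-64 the encoding sets bit 0 of a pt_regs pointer; since pt_regs is always at least word-aligned, that bit is otherwise clear, and the frame-pointer unwinder can tell register frames from ordinary two-word frames. A sketch of the matching decode step, assuming the 64-bit encoding above:

static struct pt_regs *decode_frame_pointer(unsigned long bp)
{
	if (!(bp & 0x1))
		return NULL;		/* an ordinary frame pointer */

	return (struct pt_regs *)(bp & ~0x1UL);	/* undo the +1 tag */
}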
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index e7752b4038ff..e491c3d9f227 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -314,19 +314,19 @@ static inline void mds_idle_clear_cpu_buffers(void)
* lfence
* jmp spec_trap
* do_rop:
- * mov %rax,(%rsp) for x86_64
+ * mov %rcx,(%rsp) for x86_64
* mov %edx,(%esp) for x86_32
* retq
*
* Without retpolines configured:
*
- * jmp *%rax for x86_64
+ * jmp *%rcx for x86_64
* jmp *%edx for x86_32
*/
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
-# define RETPOLINE_RAX_BPF_JIT_SIZE 17
-# define RETPOLINE_RAX_BPF_JIT() \
+# define RETPOLINE_RCX_BPF_JIT_SIZE 17
+# define RETPOLINE_RCX_BPF_JIT() \
do { \
EMIT1_off32(0xE8, 7); /* callq do_rop */ \
/* spec_trap: */ \
@@ -334,7 +334,7 @@ do { \
EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
/* do_rop: */ \
- EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
+ EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */ \
EMIT1(0xC3); /* retq */ \
} while (0)
# else /* !CONFIG_X86_64 */
@@ -352,9 +352,9 @@ do { \
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
-# define RETPOLINE_RAX_BPF_JIT_SIZE 2
-# define RETPOLINE_RAX_BPF_JIT() \
- EMIT2(0xFF, 0xE0); /* jmp *%rax */
+# define RETPOLINE_RCX_BPF_JIT_SIZE 2
+# define RETPOLINE_RCX_BPF_JIT() \
+ EMIT2(0xFF, 0xE1); /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
# define RETPOLINE_EDX_BPF_JIT() \
EMIT2(0xFF, 0xE2) /* jmp *%edx */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 40aa69d04862..d8324a236696 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -327,8 +327,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
static const unsigned int argument_offs[] = {
#ifdef __i386__
offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
offsetof(struct pt_regs, di),
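The swap matters because 32-bit kernels are built with -mregparm=3, which passes the first three integer arguments in EAX, EDX and ECX, in that order; the table previously listed CX before DX. Illustration of the convention the fixed table mirrors:

/* i386 regparm(3): a -> %eax, b -> %edx, c -> %ecx,
 * hence the pt_regs offsets ax, dx, cx in that order.
 */
long __attribute__((regparm(3))) demo(long a, long b, long c);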
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08320b0b2b27..9663ba31347c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -270,9 +270,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
u32 token;
- irqentry_state_t state;
- state = irqentry_enter(regs);
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
@@ -283,7 +282,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
}
- irqentry_exit(regs, state);
set_irq_regs(old_regs);
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 13ce616cc7af..ba4593a913fa 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -42,6 +42,7 @@
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
+#include <asm/frame.h>
#include "process.h"
@@ -133,7 +134,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
- frame->bp = 0;
+ frame->bp = encode_frame_pointer(childregs);
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;
p->thread.io_bitmap = NULL;
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1f66d2d1e998..81a2fb711091 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -729,20 +729,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+static __always_inline unsigned long debug_read_clear_dr6(void)
{
- /*
- * Disable breakpoints during exception handling; recursive exceptions
- * are exceedingly 'fun'.
- *
- * Since this function is NOKPROBE, and that also applies to
- * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
- * HW_BREAKPOINT_W on our stack)
- *
- * Entry text is excluded for HW_BP_X and cpu_entry_area, which
- * includes the entry stack is excluded for everything.
- */
- *dr7 = local_db_save();
+ unsigned long dr6;
/*
* The Intel SDM says:
@@ -755,15 +744,12 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
*
* Keep it simple: clear DR6 immediately.
*/
- get_debugreg(*dr6, 6);
+ get_debugreg(dr6, 6);
set_debugreg(0, 6);
/* Filter out all the reserved bits which are preset to 1 */
- *dr6 &= ~DR6_RESERVED;
-}
+ dr6 &= ~DR6_RESERVED;
-static __always_inline void debug_exit(unsigned long dr7)
-{
- local_db_restore(dr7);
+ return dr6;
}
/*
@@ -863,6 +849,18 @@ out:
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+ * Since this function is NOKPROBE, and that also applies to
+ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+ * HW_BREAKPOINT_W on our stack)
+ *
+ * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
+ * includes the entry stack, is excluded for everything.
+ */
+ unsigned long dr7 = local_db_save();
bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
@@ -883,6 +881,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
instrumentation_end();
idtentry_exit_nmi(regs, irq_state);
+
+ local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
@@ -894,6 +894,15 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
*/
WARN_ON_ONCE(!user_mode(regs));
+ /*
+ * NB: We can't easily clear DR7 here because
+ * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+ * user memory, etc. This means that a recursive #DB is possible. If
+ * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
+ * Since we're not on the IST stack right now, everything will be
+ * fine.
+ */
+
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
@@ -907,36 +916,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_kernel(regs, dr6);
- debug_exit(dr7);
+ exc_debug_kernel(regs, debug_read_clear_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_user(regs, dr6);
- debug_exit(dr7);
+ exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
+ unsigned long dr6 = debug_read_clear_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);
else
exc_debug_kernel(regs, dr6);
-
- debug_exit(dr7);
}
#endif
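
What debug_read_clear_dr6() hands to the handlers after filtering can be checked with a quick sketch (DR6_RESERVED is 0xFFFF0FF0 on x86; the reserved bits read back as 1):

#include <stdio.h>

#define DR6_RESERVED 0xFFFF0FF0UL	/* matches asm/debugreg.h */

int main(void)
{
	/* a raw DR6 read: reserved bits preset to 1, plus B0 (bit 0)
	 * and BS (bit 14) from a single-step trap on breakpoint 0 */
	unsigned long dr6 = DR6_RESERVED | (1UL << 0) | (1UL << 14);

	dr6 &= ~DR6_RESERVED;	/* what debug_read_clear_dr6() does */
	printf("dr6 after filtering = %#lx\n", dr6);	/* 0x4001 */
	return 0;
}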
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5299ef5ff18d..2f6510de6b0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7fc8);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
@@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7f60);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
- ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
+
+ if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
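
The RSM changes above all follow one pattern: a state-setting callback that used to be fire-and-forget is now checked, and any failure aborts emulation with X86EMUL_UNHANDLEABLE. A reduced sketch of the pattern (the constants mirror the emulator's; the ops table here is hypothetical):

#include <stdio.h>

#define X86EMUL_CONTINUE	0	/* values mirror kvm's emulate.h */
#define X86EMUL_UNHANDLEABLE	1

struct ops {
	int (*set_dr)(int dr, unsigned long val);
};

static int rsm_load_state(const struct ops *ops, unsigned long dr6,
			  unsigned long dr7)
{
	if (ops->set_dr(6, dr6))	/* was fire-and-forget before the fix */
		return X86EMUL_UNHANDLEABLE;
	if (ops->set_dr(7, dr7))
		return X86EMUL_UNHANDLEABLE;
	return X86EMUL_CONTINUE;
}

static int reject_dr7(int dr, unsigned long val)
{
	return dr == 7;			/* pretend the DR7 write is refused */
}

int main(void)
{
	const struct ops ops = { .set_dr = reject_dr7 };

	printf("rsm_load_state = %d\n", rsm_load_state(&ops, 0, 0));
	return 0;
}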
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 43fdb0c12a5d..71aa3da2a0b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2469,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
}
if (sp->unsync_children)
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fb68467e6049..e90bc436f584 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/* Give the current vmcb to the guest */
- svm_set_gif(svm, false);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
@@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(&vmcb->control, &hsave->control);
+ /* On vmexit the GIF is set to false */
+ svm_set_gif(svm, false);
+
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset;
@@ -1132,6 +1134,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
load_nested_vmcb_control(svm, &ctl);
nested_prepare_vmcb_control(svm);
+ if (!nested_svm_vmrun_msrpm(svm))
+ return -EINVAL;
+
out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402dc4234e39..7bf7bf734979 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1106,6 +1106,7 @@ void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
+ cond_resched();
}
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0194336b64a4..c44f3e9140d5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2938,8 +2938,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- svm_complete_interrupts(svm);
-
if (is_guest_mode(vcpu)) {
int vmexit;
@@ -3504,7 +3502,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
/* Any pending NMI will happen here */
- exit_fastpath = svm_exit_handlers_fastpath(vcpu);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@@ -3518,6 +3515,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ vmcb_mark_all_clean(svm->vmcb);
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@@ -3537,7 +3535,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
- vmcb_mark_all_clean(svm->vmcb);
+ svm_complete_interrupts(svm);
+ exit_fastpath = svm_exit_handlers_fastpath(vcpu);
return exit_fastpath;
}
@@ -3900,21 +3899,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *nested_vmcb;
struct kvm_host_map map;
- u64 guest;
- u64 vmcb;
int ret = 0;
- guest = GET_SMSTATE(u64, smstate, 0x7ed8);
- vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
+ if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+ u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+ u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
- if (guest) {
- if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
- return 1;
- nested_vmcb = map.hva;
- ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ if (guest) {
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ return 1;
+
+ if (!(saved_efer & EFER_SVME))
+ return 1;
+
+ if (kvm_vcpu_map(&svm->vcpu,
+ gpa_to_gfn(vmcb), &map) == -EINVAL)
+ return 1;
+
+ ret = enter_svm_guest_mode(svm, vmcb, map.hva);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ }
}
return ret;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..1bb6b31eb646 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
+ /*
+ * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
+ * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
+ * up-to-date before switching to L1.
+ */
+ if (enable_ept && is_pae_paging(vcpu))
+ vmx_ept_load_pdptrs(vcpu);
+
leave_guest_mode(vcpu);
if (nested_cpu_has_preemption_timer(vmcs12))
@@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vmx->nested.msrs.entry_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high &=
- ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
}
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 819c185adf09..8646a797b7a8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2971,7 +2971,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
vpid_sync_context(to_vmx(vcpu)->vpid);
}
-static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3114,7 +3114,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
- ept_load_pdptrs(vcpu);
+ vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = pgd;
}
@@ -6054,6 +6054,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..a2f82127c170 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d39d6cf1d473..1994602a0851 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2731,7 +2731,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
if (!lapic_in_kernel(vcpu))
- return 1;
+ return data ? 1 : 0;
vcpu->arch.apf.msr_en_val = data;
@@ -3578,6 +3578,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SMALLER_MAXPHYADDR:
r = (int) allow_smaller_maxphyaddr;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = sched_info_on();
+ break;
default:
break;
}
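
With the new KVM_CAP_STEAL_TIME case, userspace can probe whether the host is able to expose steal time before enabling it. A minimal check, assuming a host with /dev/kvm and the uapi headers installed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return 1;
	/* non-zero iff the host scheduler tracks run delay (sched_info_on()) */
	printf("KVM_CAP_STEAL_TIME = %d\n",
	       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME));
	close(kvm);
	return 0;
}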
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index d46fff11f06f..aa067859a70b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
endif
-CFLAGS_cmdline.o := -fno-stack-protector
+CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 35f1498e9832..6e3e8a124903 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -190,6 +190,53 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ *
+ * This is needed because there is a race condition between the time
+ * when the vmalloc mapping code updates the PMD to the point in time
+ * where it synchronizes this update with the other page-tables in the
+ * system.
+ *
+ * In this race window another thread/CPU can map an area on the same
+ * PMD, find it already present, and not synchronize it with the
+ * rest of the system yet. As a result v[mz]alloc might return areas
+ * which are not mapped in every page-table in the system, causing an
+ * unhandled page-fault when they are accessed.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ unsigned long pgd_paddr;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3_pa();
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+
+ if (pmd_large(*pmd_k))
+ return 0;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(vmalloc_fault);
+
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -1110,6 +1157,37 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
*/
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
+#ifdef CONFIG_X86_32
+ /*
+ * We can fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * Before doing this on-demand faulting, ensure that the
+ * fault is not any of the following:
+ * 1. A fault on a PTE with a reserved bit set.
+ * 2. A fault caused by a user-mode access. (Do not demand-
+ * fault kernel memory due to user-mode accesses).
+ * 3. A fault caused by a page-level protection violation.
+ * (A demand fault would be on a non-present page which
+ * would have X86_PF_PROT==0).
+ *
+ * This is only needed to close a race condition on x86-32 in
+ * the vmalloc mapping/unmapping code. See the comment above
+ * vmalloc_fault() for details. On x86-64 the race does not
+ * exist as the vmalloc mappings don't need to be synchronized
+ * there.
+ */
+ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ }
+#endif
+
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index c5174b4e318b..683cd12f4793 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
u64 addr, u64 max_addr, u64 size)
{
return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
- 0, NULL, NUMA_NO_NODE);
+ 0, NULL, 0);
}
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 7d9ea7b41c71..26f43279b78b 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -221,14 +221,48 @@ struct jit_context {
/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE 5
+/* Number of bytes that will be skipped on tailcall */
+#define X86_TAIL_CALL_OFFSET 11
-#define PROLOGUE_SIZE 25
+static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (callee_regs_used[0])
+ EMIT1(0x53); /* push rbx */
+ if (callee_regs_used[1])
+ EMIT2(0x41, 0x55); /* push r13 */
+ if (callee_regs_used[2])
+ EMIT2(0x41, 0x56); /* push r14 */
+ if (callee_regs_used[3])
+ EMIT2(0x41, 0x57); /* push r15 */
+ *pprog = prog;
+}
+
+static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (callee_regs_used[3])
+ EMIT2(0x41, 0x5F); /* pop r15 */
+ if (callee_regs_used[2])
+ EMIT2(0x41, 0x5E); /* pop r14 */
+ if (callee_regs_used[1])
+ EMIT2(0x41, 0x5D); /* pop r13 */
+ if (callee_regs_used[0])
+ EMIT1(0x5B); /* pop rbx */
+ *pprog = prog;
+}
/*
- * Emit x86-64 prologue code for BPF program and check its size.
- * bpf_tail_call helper will skip it while jumping into another program
+ * Emit x86-64 prologue code for BPF program.
+ * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
+ * while jumping to another program
*/
-static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
+static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
+ bool tail_call_reachable, bool is_subprog)
{
u8 *prog = *pprog;
int cnt = X86_PATCH_SIZE;
@@ -238,19 +272,18 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
*/
memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
prog += cnt;
+ if (!ebpf_from_cbpf) {
+ if (tail_call_reachable && !is_subprog)
+ EMIT2(0x31, 0xC0); /* xor eax, eax */
+ else
+ EMIT2(0x66, 0x90); /* nop2 */
+ }
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
/* sub rsp, rounded_stack_depth */
EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
- EMIT1(0x53); /* push rbx */
- EMIT2(0x41, 0x55); /* push r13 */
- EMIT2(0x41, 0x56); /* push r14 */
- EMIT2(0x41, 0x57); /* push r15 */
- if (!ebpf_from_cbpf) {
- /* zero init tail_call_cnt */
- EMIT2(0x6a, 0x00);
- BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
- }
+ if (tail_call_reachable)
+ EMIT1(0x50); /* push rax */
*pprog = prog;
}
@@ -314,13 +347,14 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
mutex_lock(&text_mutex);
if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out;
+ ret = 1;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
if (text_live)
text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
else
memcpy(ip, new_insn, X86_PATCH_SIZE);
+ ret = 0;
}
- ret = 0;
out:
mutex_unlock(&text_mutex);
return ret;
@@ -337,6 +371,22 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
+static int get_pop_bytes(bool *callee_regs_used)
+{
+ int bytes = 0;
+
+ if (callee_regs_used[3])
+ bytes += 2;
+ if (callee_regs_used[2])
+ bytes += 2;
+ if (callee_regs_used[1])
+ bytes += 2;
+ if (callee_regs_used[0])
+ bytes += 1;
+
+ return bytes;
+}
+
/*
* Generate the following code:
*
@@ -351,12 +401,26 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
* goto *(prog->bpf_func + prologue_size);
* out:
*/
-static void emit_bpf_tail_call_indirect(u8 **pprog)
+static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
+ u32 stack_depth)
{
+ int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog;
- int label1, label2, label3;
+ int pop_bytes = 0;
+ int off1 = 49;
+ int off2 = 38;
+ int off3 = 16;
int cnt = 0;
+ /* count the additional bytes used for popping callee regs from stack
+ * that need to be taken into account for each of the offsets that
+ * are used for bailing out of the tail call
+ */
+ pop_bytes = get_pop_bytes(callee_regs_used);
+ off1 += pop_bytes;
+ off2 += pop_bytes;
+ off3 += pop_bytes;
+
/*
* rdi - pointer to ctx
* rsi - pointer to bpf_array
@@ -370,72 +434,106 @@ static void emit_bpf_tail_call_indirect(u8 **pprog)
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
+#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
- label1 = cnt;
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
+#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
EMIT2(X86_JA, OFFSET2); /* ja out */
- label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
+ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
/* prog = array->ptrs[index]; */
- EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
+ EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
* goto out;
*/
- EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
-#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
+ EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
+#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
EMIT2(X86_JE, OFFSET3); /* je out */
- label3 = cnt;
- /* goto *(prog->bpf_func + prologue_size); */
- EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */
- offsetof(struct bpf_prog, bpf_func));
- EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */
+ *pprog = prog;
+ pop_callee_regs(pprog, callee_regs_used);
+ prog = *pprog;
+
+ EMIT1(0x58); /* pop rax */
+ EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
+ round_up(stack_depth, 8));
+ /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
+ EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
+ offsetof(struct bpf_prog, bpf_func));
+ EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
+ X86_TAIL_CALL_OFFSET);
/*
- * Wow we're ready to jump into next BPF program
+ * Now we're ready to jump into the next BPF program
* rdi == ctx (1st arg)
- * rax == prog->bpf_func + prologue_size
+ * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
*/
- RETPOLINE_RAX_BPF_JIT();
+ RETPOLINE_RCX_BPF_JIT();
/* out: */
- BUILD_BUG_ON(cnt - label1 != OFFSET1);
- BUILD_BUG_ON(cnt - label2 != OFFSET2);
- BUILD_BUG_ON(cnt - label3 != OFFSET3);
*pprog = prog;
}
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
- u8 **pprog, int addr, u8 *image)
+ u8 **pprog, int addr, u8 *image,
+ bool *callee_regs_used, u32 stack_depth)
{
+ int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog;
+ int pop_bytes = 0;
+ int off1 = 27;
+ int poke_off;
int cnt = 0;
+ /* count the additional bytes used for popping callee regs from stack
+ * that need to be taken into account for the jump offset that is used
+ * for bailing out of the tail call when the limit is reached
+ */
+ pop_bytes = get_pop_bytes(callee_regs_used);
+ off1 += pop_bytes;
+
+ /*
+ * total bytes for:
+ * - nop5/ jmpq $off
+ * - pop callee regs
+ * - add rsp, $val
+ * - pop rax
+ */
+ poke_off = X86_PATCH_SIZE + pop_bytes + 7 + 1;
+
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
- EMIT2(X86_JA, 14); /* ja out */
+ EMIT2(X86_JA, off1); /* ja out */
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
+ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
+
+ poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
+ poke->adj_off = X86_TAIL_CALL_OFFSET;
+ poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
+ poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
- poke->ip = image + (addr - X86_PATCH_SIZE);
- poke->adj_off = PROLOGUE_SIZE;
+ emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
+ poke->tailcall_bypass);
+
+ *pprog = prog;
+ pop_callee_regs(pprog, callee_regs_used);
+ prog = *pprog;
+ EMIT1(0x58); /* pop rax */
+ EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); /* add rsp, sd */
memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
@@ -453,7 +551,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
- WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
+ WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
@@ -464,18 +562,25 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
if (target) {
/* Plain memcpy is used when image is not live yet
* and still not locked as read-only. Once poke
- * location is active (poke->ip_stable), any parallel
- * bpf_arch_text_poke() might occur still on the
- * read-write image until we finally locked it as
- * read-only. Both modifications on the given image
- * are under text_mutex to avoid interference.
+ * location is active (poke->tailcall_target_stable),
+ * any parallel bpf_arch_text_poke() might occur
+ * still on the read-write image until we finally
+ * locked it as read-only. Both modifications on
+ * the given image are under text_mutex to avoid
+ * interference.
*/
- ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
+ ret = __bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP, NULL,
(u8 *)target->bpf_func +
poke->adj_off, false);
BUG_ON(ret < 0);
+ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ (u8 *)poke->tailcall_target +
+ X86_PATCH_SIZE, NULL, false);
+ BUG_ON(ret < 0);
}
- WRITE_ONCE(poke->ip_stable, true);
+ WRITE_ONCE(poke->tailcall_target_stable, true);
mutex_unlock(&array->aux->poke_mutex);
}
}
@@ -652,19 +757,49 @@ static bool ex_handler_bpf(const struct exception_table_entry *x,
return true;
}
+static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
+ bool *regs_used, bool *tail_call_seen)
+{
+ int i;
+
+ for (i = 1; i <= insn_cnt; i++, insn++) {
+ if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
+ *tail_call_seen = true;
+ if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
+ regs_used[0] = true;
+ if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
+ regs_used[1] = true;
+ if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
+ regs_used[2] = true;
+ if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
+ regs_used[3] = true;
+ }
+}
+
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
+ bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
struct bpf_insn *insn = bpf_prog->insnsi;
+ bool callee_regs_used[4] = {};
int insn_cnt = bpf_prog->len;
+ bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i, cnt = 0, excnt = 0;
int proglen = 0;
u8 *prog = temp;
+ detect_reg_usage(insn, insn_cnt, callee_regs_used,
+ &tail_call_seen);
+
+ /* tail call's presence in current prog implies it is reachable */
+ tail_call_reachable |= tail_call_seen;
+
emit_prologue(&prog, bpf_prog->aux->stack_depth,
- bpf_prog_was_classic(bpf_prog));
+ bpf_prog_was_classic(bpf_prog), tail_call_reachable,
+ bpf_prog->aux->func_idx != 0);
+ push_callee_regs(&prog, callee_regs_used);
addrs[0] = prog - temp;
for (i = 1; i <= insn_cnt; i++, insn++) {
@@ -1102,16 +1237,27 @@ xadd: if (is_imm8(insn->off))
/* call */
case BPF_JMP | BPF_CALL:
func = (u8 *) __bpf_call_base + imm32;
- if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
- return -EINVAL;
+ if (tail_call_reachable) {
+ /* mov rax, qword ptr [rbp - stack_depth - 8]: reload tail_call_cnt */
+ EMIT3_off32(0x48, 0x8B, 0x85,
+ -(bpf_prog->aux->stack_depth + 8));
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ return -EINVAL;
+ } else {
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+ return -EINVAL;
+ }
break;
case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
- &prog, addrs[i], image);
+ &prog, addrs[i], image,
+ callee_regs_used,
+ bpf_prog->aux->stack_depth);
else
- emit_bpf_tail_call_indirect(&prog);
+ emit_bpf_tail_call_indirect(&prog,
+ callee_regs_used,
+ bpf_prog->aux->stack_depth);
break;
/* cond jump */
@@ -1294,12 +1440,9 @@ emit_jmp:
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
- if (!bpf_prog_was_classic(bpf_prog))
- EMIT1(0x5B); /* get rid of tail_call_cnt */
- EMIT2(0x41, 0x5F); /* pop r15 */
- EMIT2(0x41, 0x5E); /* pop r14 */
- EMIT2(0x41, 0x5D); /* pop r13 */
- EMIT1(0x5B); /* pop rbx */
+ pop_callee_regs(&prog, callee_regs_used);
+ if (tail_call_reachable)
+ EMIT1(0x59); /* pop rcx, get rid of tail_call_cnt */
EMIT1(0xC9); /* leave */
EMIT1(0xC3); /* ret */
break;
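
For context, the construct all of this JIT work serves is bpf_tail_call() through a BPF_MAP_TYPE_PROG_ARRAY: each successful jump lands X86_TAIL_CALL_OFFSET bytes into the target's prologue and carries tail_call_cnt along in rax, bounded by MAX_TAIL_CALL_CNT. A BPF-side sketch (the map layout and section names follow common libbpf conventions; jmp_table is a hypothetical name):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("tc")
int entry(struct __sk_buff *skb)
{
	/* jumps to jmp_table[0] if set, falls through otherwise;
	 * the chain length is bounded by MAX_TAIL_CALL_CNT */
	bpf_tail_call(skb, &jmp_table, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";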
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c34b090178a9..fa98470df3f0 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5896,18 +5896,6 @@ static void bfq_finish_requeue_request(struct request *rq)
struct bfq_data *bfqd;
/*
- * Requeue and finish hooks are invoked in blk-mq without
- * checking whether the involved request is actually still
- * referenced in the scheduler. To handle this fact, the
- * following two checks make this function exit in case of
- * spurious invocations, for which there is nothing to do.
- *
- * First, check whether rq has nothing to do with an elevator.
- */
- if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
- return;
-
- /*
* rq either is not associated with any icq, or is an already
* requeued request that has not (yet) been re-inserted into
* a bfq_queue.
diff --git a/block/bio.c b/block/bio.c
index a9931f23d933..e865ea55b9f9 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -879,8 +879,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
- if (bio->bi_iter.bi_size > UINT_MAX - len)
+ if (bio->bi_iter.bi_size > UINT_MAX - len) {
+ *same_page = false;
return false;
+ }
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;
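
The bio fix above is an instance of a general out-parameter rule: state reported through a pointer must be made consistent on every early return, or the caller acts on a stale value from an earlier call. Reduced sketch:

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

static bool try_grow(unsigned int size, unsigned int len, bool *same_page)
{
	*same_page = true;		/* the probe said the pages match... */
	if (size > UINT_MAX - len) {
		*same_page = false;	/* ...but the bail-out must undo it */
		return false;
	}
	return true;
}

int main(void)
{
	bool same_page;
	bool merged = try_grow(UINT_MAX, 16, &same_page);

	printf("merged = %d, same_page = %d\n", merged, same_page);
	return 0;
}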
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 126021fc3a11..e81ca1bf6e10 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.requeue_request)
+ if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
e->type->ops.requeue_request(rq);
}
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 5b4869c08fb3..722406b841df 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -537,7 +537,7 @@ int bdev_del_partition(struct block_device *bdev, int partno)
bdevp = bdget_disk(bdev->bd_disk, partno);
if (!bdevp)
- return -ENOMEM;
+ return -ENXIO;
mutex_lock(&bdevp->bd_mutex);
mutex_lock_nested(&bdev->bd_mutex, 1);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 71a30b0d0f05..7ecb90e90afd 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -161,18 +161,10 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
}
/* Power(C) State timer broadcast control */
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
- struct acpi_processor_cx *cx,
- int broadcast)
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
{
- int state = cx - pr->power.states;
-
- if (state >= pr->power.timer_broadcast_on_state) {
- if (broadcast)
- tick_broadcast_enter();
- else
- tick_broadcast_exit();
- }
+ return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}
#else
@@ -180,9 +172,9 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
-static void lapic_timer_state_broadcast(struct acpi_processor *pr,
- struct acpi_processor_cx *cx,
- int broadcast)
+
+static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
+ struct acpi_processor_cx *cx)
{
+	return false;
}
@@ -566,32 +558,43 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
/**
* acpi_idle_enter_bm - enters C3 with proper BM handling
+ * @drv: cpuidle driver
* @pr: Target processor
* @cx: Target state context
- * @timer_bc: Whether or not to change timer mode to broadcast
+ * @index: index of target state
*/
-static void acpi_idle_enter_bm(struct acpi_processor *pr,
- struct acpi_processor_cx *cx, bool timer_bc)
+static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
+ struct acpi_processor *pr,
+ struct acpi_processor_cx *cx,
+ int index)
{
- acpi_unlazy_tlb(smp_processor_id());
-
- /*
- * Must be done before busmaster disable as we might need to
- * access HPET !
- */
- if (timer_bc)
- lapic_timer_state_broadcast(pr, cx, 1);
+ static struct acpi_processor_cx safe_cx = {
+ .entry_method = ACPI_CSTATE_HALT,
+ };
/*
* disable bus master
* bm_check implies we need ARB_DIS
* bm_control implies whether we can do ARB_DIS
*
- * That leaves a case where bm_check is set and bm_control is
- * not set. In that case we cannot do much, we enter C3
- * without doing anything.
+ * That leaves a case where bm_check is set and bm_control is not set.
+ * In that case we cannot do much, we enter C3 without doing anything.
*/
- if (pr->flags.bm_control) {
+ bool dis_bm = pr->flags.bm_control;
+
+ /* If we can skip BM, demote to a safe state. */
+ if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
+ dis_bm = false;
+ index = drv->safe_state_index;
+ if (index >= 0) {
+ cx = this_cpu_read(acpi_cstate[index]);
+ } else {
+ cx = &safe_cx;
+ index = -EBUSY;
+ }
+ }
+
+ if (dis_bm) {
raw_spin_lock(&c3_lock);
c3_cpu_count++;
/* Disable bus master arbitration when all CPUs are in C3 */
@@ -600,18 +603,21 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr,
raw_spin_unlock(&c3_lock);
}
+ rcu_idle_enter();
+
acpi_idle_do_entry(cx);
+ rcu_idle_exit();
+
/* Re-enable bus master arbitration */
- if (pr->flags.bm_control) {
+ if (dis_bm) {
raw_spin_lock(&c3_lock);
acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
c3_cpu_count--;
raw_spin_unlock(&c3_lock);
}
- if (timer_bc)
- lapic_timer_state_broadcast(pr, cx, 0);
+ return index;
}
static int acpi_idle_enter(struct cpuidle_device *dev,
@@ -625,32 +631,21 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return -EINVAL;
if (cx->type != ACPI_STATE_C1) {
+ if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
+ return acpi_idle_enter_bm(drv, pr, cx, index);
+
+ /* C2 to C1 demotion. */
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
- } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
- if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
- acpi_idle_enter_bm(pr, cx, true);
- return index;
- } else if (drv->safe_state_index >= 0) {
- index = drv->safe_state_index;
- cx = per_cpu(acpi_cstate[index], dev->cpu);
- } else {
- acpi_safe_halt();
- return -EBUSY;
- }
}
}
- lapic_timer_state_broadcast(pr, cx, 1);
-
if (cx->type == ACPI_STATE_C3)
ACPI_FLUSH_CPU_CACHE();
acpi_idle_do_entry(cx);
- lapic_timer_state_broadcast(pr, cx, 0);
-
return index;
}
@@ -666,7 +661,13 @@ static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
return 0;
if (pr->flags.bm_check) {
- acpi_idle_enter_bm(pr, cx, false);
+ u8 bm_sts_skip = cx->bm_sts_skip;
+
+ /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
+ cx->bm_sts_skip = 1;
+ acpi_idle_enter_bm(drv, pr, cx, index);
+ cx->bm_sts_skip = bm_sts_skip;
+
return 0;
} else {
ACPI_FLUSH_CPU_CACHE();
@@ -682,11 +683,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
{
int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
+ struct cpuidle_state *state;
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+ state = &acpi_idle_driver.states[count];
cx = &pr->power.states[i];
if (!cx->valid)
@@ -694,6 +697,15 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
per_cpu(acpi_cstate[count], dev->cpu) = cx;
+ if (lapic_timer_needs_broadcast(pr, cx))
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+ if (cx->type == ACPI_STATE_C3) {
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+ if (pr->flags.bm_check)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+ }
+
count++;
if (count == CPUIDLE_STATE_MAX)
break;
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 39be444534d0..316a9947541f 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2224,7 +2224,7 @@ static int eni_init_one(struct pci_dev *pci_dev,
rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
if (rc < 0)
- goto out;
+ goto err_disable;
rc = -ENOMEM;
eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL);
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index dea031484cc4..0b1c99cca733 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -2,7 +2,7 @@
/*
* Driver for the on-board character LCD found on some ARM reference boards
* This is basically an Hitachi HD44780 LCD with a custom IP block to drive it
- * http://en.wikipedia.org/wiki/HD44780_Character_LCD
+ * https://en.wikipedia.org/wiki/HD44780_Character_LCD
* Currently it will just display the text "ARM Linux" and the linux version
*
* Author: Linus Walleij <triad@df.lth.se>
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f6f620aa9408..bb5806a2bd4c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -807,9 +807,7 @@ static void device_link_put_kref(struct device_link *link)
void device_link_del(struct device_link *link)
{
device_links_write_lock();
- device_pm_lock();
device_link_put_kref(link);
- device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
@@ -830,7 +828,6 @@ void device_link_remove(void *consumer, struct device *supplier)
return;
device_links_write_lock();
- device_pm_lock();
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
@@ -839,7 +836,6 @@ void device_link_remove(void *consumer, struct device *supplier)
}
}
- device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
@@ -4237,10 +4233,10 @@ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
vaf.va = &args;
if (err != -EPROBE_DEFER) {
- dev_err(dev, "error %d: %pV", err, &vaf);
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
} else {
device_set_deferred_probe_reason(dev, &vaf);
- dev_dbg(dev, "error %d: %pV", err, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
}
va_end(args);
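
After this change a deferred-probe-aware driver gets symbolic errno names for free. A typical call site, sketched with a hypothetical "vdd" supply:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *vdd = devm_regulator_get(&pdev->dev, "vdd");

	/* logs "error -ENODEV: ..." instead of "error -19: ...",
	 * and stays quiet (dev_dbg) for -EPROBE_DEFER */
	if (IS_ERR(vdd))
		return dev_err_probe(&pdev->dev, PTR_ERR(vdd),
				     "failed to get vdd supply\n");
	return 0;
}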
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 933e2192fbe8..d08efc77cf16 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -142,10 +142,12 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
+bool fw_is_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
#endif
#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 9da0c9d5f538..63b9714a0154 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref)
list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
- fw_free_paged_buf(fw_priv); /* free leftover pages */
- if (!fw_priv->allocated_size)
+ if (fw_is_paged_buf(fw_priv))
+ fw_free_paged_buf(fw_priv);
+ else if (!fw_priv->allocated_size)
vfree(fw_priv->data);
+
kfree_const(fw_priv->fw_name);
kfree(fw_priv);
}
@@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv)
}
#ifdef CONFIG_FW_LOADER_PAGED_BUF
+bool fw_is_paged_buf(struct fw_priv *fw_priv)
+{
+ return fw_priv->is_paged_buf;
+}
+
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
int i;
@@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
if (!fw_priv->pages)
return;
+ vunmap(fw_priv->data);
+
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kvfree(fw_priv->pages);
@@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
if (!fw_priv->data)
return -ENOMEM;
- /* page table is no longer needed after mapping, let's free */
- kvfree(fw_priv->pages);
- fw_priv->pages = NULL;
-
return 0;
}
#endif
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 011539039693..e77eaab5cf23 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5120,6 +5120,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
return sprintf(buf, "%s\n", rbd_dev->config_info);
}
@@ -5231,6 +5234,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
@@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_client *rbdc;
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@@ -7209,6 +7218,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
bool force = false;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
dev_id = -1;
opt_buf[0] = '\0';
sscanf(buf, "%d %5s", &dev_id, opt_buf);
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index 784f12c72365..ec738f74a026 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -5,6 +5,7 @@ config CLK_BCM2711_DVP
depends on ARCH_BCM2835 || COMPILE_TEST
depends on COMMON_CLK
default ARCH_BCM2835
+ select RESET_CONTROLLER
select RESET_SIMPLE
help
Enable common clock framework support for the Broadcom BCM2711
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index 6c35e4bb7940..0d750433eb42 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -491,7 +491,7 @@ struct clk *davinci_pll_clk_register(struct device *dev,
parent_name = postdiv_name;
}
- pllen = kzalloc(sizeof(*pllout), GFP_KERNEL);
+ pllen = kzalloc(sizeof(*pllen), GFP_KERNEL);
if (!pllen) {
ret = -ENOMEM;
goto err_unregister_postdiv;
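
The fix restores the sizeof(*ptr) idiom: when the size expression names the destination pointer, it cannot silently go stale the way sizeof(*pllout) did when the line was copied for the pllen allocation. A reduced demonstration with hypothetical stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct out_clk { char name[64]; long rate; };	/* hypothetical stand-ins */
struct en_clk  { long flags; };

int main(void)
{
	/* fragile: compiles, but sizes a different object's type */
	struct en_clk *bad  = malloc(sizeof(struct out_clk));
	/* robust: the size expression names the destination pointer */
	struct en_clk *good = malloc(sizeof(*good));

	printf("allocated %zu vs %zu bytes for the same struct\n",
	       sizeof(struct out_clk), sizeof(*good));
	free(bad);
	free(good);
	return 0;
}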
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index d4c1864e1ee9..228d08f5d26f 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -420,17 +420,18 @@ static int lpass_core_sc7180_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_clk_create(&pdev->dev);
if (ret)
- return ret;
+ goto disable_pm_runtime;
ret = pm_clk_add(&pdev->dev, "iface");
if (ret < 0) {
dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto disable_pm_runtime;
+ goto destroy_pm_clk;
}
+ ret = -EINVAL;
clk_probe = of_device_get_match_data(&pdev->dev);
if (!clk_probe)
- return -EINVAL;
+ goto destroy_pm_clk;
ret = clk_probe(pdev);
if (ret)
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index d7243c09cc84..47d6482dda9d 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -137,7 +137,7 @@ PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
PNAME(mux_hdmiphy_p) = { "hdmiphy_phy", "xin24m" };
PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu", "hdmiphy_aclk_cpu" };
-PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy" "usb480m" };
+PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "hdmiphy", "usb480m" };
PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "hdmiphy" };
PNAME(mux_pll_src_2plls_p) = { "cpll", "gpll" };
PNAME(mux_sclk_hdmi_cec_p) = { "cpll", "gpll", "xin24m" };
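
The rk3228 fix is the classic missing-comma trap: adjacent C string literals concatenate, so the broken initializer silently produced a three-entry array whose last parent was named "hdmiphyusb480m". Reduced demonstration:

#include <stdio.h>

int main(void)
{
	const char *broken[] = { "cpll", "gpll", "hdmiphy" "usb480m" };
	const char *fixed[]  = { "cpll", "gpll", "hdmiphy", "usb480m" };

	printf("broken: %zu entries, last = \"%s\"\n",
	       sizeof(broken) / sizeof(broken[0]), broken[2]);
	printf("fixed:  %zu entries\n",
	       sizeof(fixed) / sizeof(fixed[0]));
	return 0;
}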
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index ca798249544d..85c395df9c00 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -109,8 +109,10 @@ static int integrator_impd1_clk_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, child) {
ret = integrator_impd1_clk_spawn(dev, np, child);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 8eeafa82c03d..d17367dee02c 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -19,6 +19,11 @@
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/smp.h>
+#include <linux/timex.h>
+
+#ifndef CONFIG_RISCV_M_MODE
+#include <asm/clint.h>
+#endif
#define CLINT_IPI_OFF 0
#define CLINT_TIMER_CMP_OFF 0x4000
@@ -31,6 +36,10 @@ static u64 __iomem *clint_timer_val;
static unsigned long clint_timer_freq;
static unsigned int clint_timer_irq;
+#ifdef CONFIG_RISCV_M_MODE
+u64 __iomem *clint_time_val;
+#endif
+
static void clint_send_ipi(const struct cpumask *target)
{
unsigned int cpu;
@@ -184,6 +193,14 @@ static int __init clint_timer_init_dt(struct device_node *np)
clint_timer_val = base + CLINT_TIMER_VAL_OFF;
clint_timer_freq = riscv_timebase;
+#ifdef CONFIG_RISCV_M_MODE
+ /*
+ * Yes, that's an odd naming scheme. time_val is public, but hopefully
+ * will die in favor of something cleaner.
+ */
+ clint_time_val = clint_timer_val;
+#endif
+
pr_info("%pOFP: timer running at %ld Hz\n", np, clint_timer_freq);
rc = clocksource_register_hz(&clint_clocksource, clint_timer_freq);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 2d22d6bf52f2..7d59d18c6f26 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -197,17 +197,12 @@ int cn_add_callback(struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
- int err;
struct cn_dev *dev = &cdev;
if (!cn_already_initialized)
return -EAGAIN;
- err = cn_queue_add_callback(dev->cbdev, name, id, callback);
- if (err)
- return err;
-
- return 0;
+ return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index f7b7743ddb94..b7b252c5addf 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -320,8 +320,8 @@ static int mchp_tc_probe(struct platform_device *pdev)
}
regmap = syscon_node_to_regmap(np->parent);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
/* max. channels number is 2 when in QDEC mode */
priv->num_channels = of_property_count_u32_elems(np, "reg");
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index ff6d99e923a4..a2b5c6f60cf0 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -361,7 +361,10 @@ static void __init fixup_cede0_latency(void)
for (i = 0; i < nr_xcede_records; i++) {
struct xcede_latency_record *record = &payload->records[i];
u64 latency_tb = be64_to_cpu(record->latency_ticks);
- u64 latency_us = tb_to_ns(latency_tb) / NSEC_PER_USEC;
+ u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC);
+
+ if (latency_us == 0)
+ pr_warn("cpuidle: xcede record %d has an unrealistic latency of 0us.\n", i);
if (latency_us < min_latency_us)
min_latency_us = latency_us;
@@ -378,10 +381,14 @@ static void __init fixup_cede0_latency(void)
* Perform the fix-up.
*/
if (min_latency_us < dedicated_states[1].exit_latency) {
- u64 cede0_latency = min_latency_us - 1;
+ /*
+ * We set a minimum of 1us wakeup latency for cede0 to
+ * distinguish it from snooze
+ */
+ u64 cede0_latency = 1;
- if (cede0_latency <= 0)
- cede0_latency = min_latency_us;
+ if (min_latency_us > cede0_latency)
+ cede0_latency = min_latency_us - 1;
dedicated_states[1].exit_latency = cede0_latency;
dedicated_states[1].target_residency = 10 * (cede0_latency);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 04becd70cc41..6c7e5621cf9a 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -138,6 +138,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int index)
{
ktime_t time_start, time_end;
+ struct cpuidle_state *target_state = &drv->states[index];
time_start = ns_to_ktime(local_clock());
@@ -153,8 +154,9 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- rcu_idle_enter();
- drv->states[index].enter_s2idle(dev, drv, index);
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+ rcu_idle_enter();
+ target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
local_irq_disable();
/*
@@ -162,7 +164,8 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* first CPU executing it calls functions containing RCU read-side
* critical sections, so tell RCU about that.
*/
- rcu_idle_exit();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+ rcu_idle_exit();
tick_unfreeze();
start_critical_timings();
@@ -239,9 +242,11 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
- rcu_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+ rcu_idle_enter();
entered_state = target_state->enter(dev, drv, index);
- rcu_idle_exit();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
+ rcu_idle_exit();
start_critical_timings();
sched_clock_idle_wakeup_event();
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 4c0af2eb7e19..1e89513f3c59 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -429,7 +429,7 @@ int dev_dax_probe(struct device *dev)
return -EBUSY;
}
- dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX;
+ dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC;
addr = devm_memremap_pages(dev, &dev_dax->pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 32642634c1bb..e84070b55463 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -85,6 +85,12 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
+ if (!dax_dev) {
+ pr_debug("%s: error: dax unsupported by block device\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
if (err) {
pr_info("%s: error: unaligned partition for dax\n",
@@ -100,12 +106,6 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
- if (!dax_dev && !bdev_dax_supported(bdev, blocksize)) {
- pr_debug("%s: error: dax unsupported by block device\n",
- bdevname(bdev, buf));
- return false;
- }
-
id = dax_read_lock();
len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
@@ -325,11 +325,15 @@ EXPORT_SYMBOL_GPL(dax_direct_access);
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
int blocksize, sector_t start, sector_t len)
{
+ if (!dax_dev)
+ return false;
+
if (!dax_alive(dax_dev))
return false;
return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
}
+EXPORT_SYMBOL_GPL(dax_supported);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1699a8e309ef..58564d82a3a2 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -316,9 +316,9 @@ out:
* name of the dma-buf if the same piece of memory is used for multiple
* purpose between different devices.
*
- * @dmabuf [in] dmabuf buffer that will be renamed.
- * @buf: [in] A piece of userspace memory that contains the name of
- * the dma-buf.
+ * @dmabuf: [in] dmabuf buffer that will be renamed.
+ * @buf: [in] A piece of userspace memory that contains the name of
+ * the dma-buf.
*
* Returns 0 on success. If the dma-buf buffer is already attached to
* devices, return -EBUSY.
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 3d123502ff12..7d129e68ac70 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -222,6 +222,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
* @chain: the chain node to initialize
* @prev: the previous fence
* @fence: the current fence
+ * @seqno: the sequence number to use for the fence chain
*
* Initialize a new chain node and either start a new chain or add the node to
* the existing chain of the previous fence.
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 54ebc8afc6b1..94d1e3165052 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -508,6 +508,7 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (!force_load && idx < 0)
return -ENODEV;
} else {
+ force_load = true;
idx = 0;
}
@@ -629,9 +630,13 @@ void ghes_edac_unregister(struct ghes *ghes)
struct mem_ctl_info *mci;
unsigned long flags;
+ if (!force_load)
+ return;
+
mutex_lock(&ghes_reg_mutex);
system_scanned = false;
+ memset(&ghes_hw, 0, sizeof(struct ghes_hw_desc));
if (!refcount_dec_and_test(&ghes_refcount))
goto unlock;
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
index 35dccc88ac0a..15a47539dc56 100644
--- a/drivers/firmware/efi/efibc.c
+++ b/drivers/firmware/efi/efibc.c
@@ -84,7 +84,7 @@ static int __init efibc_init(void)
{
int ret;
- if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ if (!efivars_kobject() || !efivar_supports_writes())
return -ENODEV;
ret = register_reboot_notifier(&efibc_reboot_notifier);
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index e97a9c9d010c..21ae0c48232a 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -16,9 +16,9 @@
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
-EXPORT_SYMBOL_GPL(efi_embedded_fw_list);
-
-static bool checked_for_fw;
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+bool efi_embedded_fw_checked;
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
@@ -116,14 +116,14 @@ void __init efi_check_for_embedded_firmwares(void)
}
}
- checked_for_fw = true;
+ efi_embedded_fw_checked = true;
}
int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
{
struct efi_embedded_fw *iter, *fw = NULL;
- if (!checked_for_fw) {
+ if (!efi_embedded_fw_checked) {
pr_warn("Warning %s called while we did not check for embedded fw\n",
__func__);
return -ENOENT;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index d8c6520ff74a..06757681b2ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -178,7 +178,7 @@ static int psp_sw_init(void *handle)
return ret;
}
- if (adev->asic_type == CHIP_NAVI10) {
+ if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
ret = psp_sysfs_init(adev);
if (ret) {
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index e16874f30d5d..6c5d9612abcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -58,7 +58,7 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_asd.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e0e60b0d0669..0f4508b4903e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1216,6 +1216,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->sched_running = false;
dqm_unlock(dqm);
+ pm_release_ib(&dqm->packets);
+
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets, hanging);
@@ -1326,7 +1328,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (q->properties.is_active) {
increment_queue_count(dqm, q->properties.type);
- retval = execute_queues_cpsch(dqm,
+ execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b51c527a3f0d..4ba8b54a2695 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5278,19 +5278,6 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
-static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
-{
- struct drm_device *dev = new_crtc_state->crtc->dev;
- struct drm_plane *plane;
-
- drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- return true;
- }
-
- return false;
-}
-
static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
struct drm_atomic_state *state = new_crtc_state->state;
@@ -5354,19 +5341,20 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return ret;
}
- /* In some use cases, like reset, no stream is attached */
- if (!dm_crtc_state->stream)
- return 0;
-
/*
- * We want at least one hardware plane enabled to use
- * the stream with a cursor enabled.
+ * We require the primary plane to be enabled whenever the CRTC is, otherwise
+ * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
+ * planes are disabled, which is not supported by the hardware. And there is legacy
+ * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
*/
- if (state->enable && state->active &&
- does_crtc_have_active_cursor(state) &&
- dm_crtc_state->active_planes == 0)
+ if (state->enable &&
+ !(state->plane_mask & drm_plane_mask(crtc->primary)))
return -EINVAL;
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 9140b3fc767a..f31f48dd0da2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -409,8 +409,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 8.6,
- .sr_enter_plus_exit_time_us = 10.9,
+ .sr_exit_time_us = 11.6,
+ .sr_enter_plus_exit_time_us = 13.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index d3192b9d0c3d..47f8ee2832ff 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -27,7 +27,7 @@
#define MOD_HDCP_LOG_H_
#ifdef CONFIG_DRM_AMD_DC_HDCP
-#define HDCP_LOG_ERR(hdcp, ...) DRM_WARN(__VA_ARGS__)
+#define HDCP_LOG_ERR(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index fb1161dd7ea8..3a367a5968ae 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -88,7 +88,7 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.dtm_initialized) {
- DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ DRM_INFO("Failed to add display topology, DTM TA is not initialized.");
display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 0826625573dc..63f945f9f331 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -1126,7 +1126,7 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (smu->uploading_custom_pp_table &&
(adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_NAVI12))
+ (adev->asic_type <= CHIP_NAVY_FLOUNDER))
return 0;
/*
@@ -1211,7 +1211,9 @@ static int smu_hw_fini(void *handle)
int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ int ret;
+
+ amdgpu_gfx_off_ctrl(smu->adev, false);
ret = smu_hw_fini(adev);
if (ret)
@@ -1222,8 +1224,12 @@ int smu_reset(struct smu_context *smu)
return ret;
ret = smu_late_init(adev);
+ if (ret)
+ return ret;
- return ret;
+ amdgpu_gfx_off_ctrl(smu->adev, true);
+
+ return 0;
}
static int smu_suspend(void *handle)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 68325678f5ef..b18c5ac2934d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14956,12 +14956,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (dev_priv->wm.distrust_bios_wm)
any_ms = true;
- if (any_ms) {
- ret = intel_modeset_checks(state);
- if (ret)
- goto fail;
- }
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -14976,6 +14970,10 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
if (any_ms) {
+ ret = intel_modeset_checks(state);
+ if (ret)
+ goto fail;
+
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index d0bdb6d447ed..ef755dd5e68f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -439,29 +439,36 @@ static bool __cancel_engine(struct intel_engine_cs *engine)
return __reset_engine(engine);
}
-static struct intel_engine_cs *__active_engine(struct i915_request *rq)
+static bool
+__active_engine(struct i915_request *rq, struct intel_engine_cs **active)
{
struct intel_engine_cs *engine, *locked;
+ bool ret = false;
/*
* Serialise with __i915_request_submit() so that it sees
* is-banned?, or we know the request is already inflight.
+ *
+ * Note that rq->engine is unstable, and so we double
+ * check that we have acquired the lock on the final engine.
*/
locked = READ_ONCE(rq->engine);
spin_lock_irq(&locked->active.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
spin_unlock(&locked->active.lock);
- spin_lock(&engine->active.lock);
locked = engine;
+ spin_lock(&locked->active.lock);
}
- engine = NULL;
- if (i915_request_is_active(rq) && rq->fence.error != -EIO)
- engine = rq->engine;
+ if (!i915_request_completed(rq)) {
+ if (i915_request_is_active(rq) && rq->fence.error != -EIO)
+ *active = locked;
+ ret = true;
+ }
spin_unlock_irq(&locked->active.lock);
- return engine;
+ return ret;
}
static struct intel_engine_cs *active_engine(struct intel_context *ce)
@@ -472,17 +479,16 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
if (!ce->timeline)
return NULL;
- mutex_lock(&ce->timeline->mutex);
- list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
- if (i915_request_completed(rq))
- break;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rq, &ce->timeline->requests, link) {
+ if (i915_request_is_active(rq) && i915_request_completed(rq))
+ continue;
/* Check with the backend if the request is inflight */
- engine = __active_engine(rq);
- if (engine)
+ if (__active_engine(rq, &engine))
break;
}
- mutex_unlock(&ce->timeline->mutex);
+ rcu_read_unlock();
return engine;
}
@@ -713,6 +719,7 @@ __create_context(struct drm_i915_private *i915)
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
+ INIT_LIST_HEAD(&ctx->link);
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -740,10 +747,6 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
- spin_lock(&i915->gem.contexts.lock);
- list_add_tail(&ctx->link, &i915->gem.contexts.list);
- spin_unlock(&i915->gem.contexts.lock);
-
return ctx;
err_free:
@@ -931,6 +934,7 @@ static int gem_context_register(struct i915_gem_context *ctx,
struct drm_i915_file_private *fpriv,
u32 *id)
{
+ struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm;
int ret;
@@ -949,8 +953,16 @@ static int gem_context_register(struct i915_gem_context *ctx,
/* And finally expose ourselves to userspace via the idr */
ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
if (ret)
- put_pid(fetch_and_zero(&ctx->pid));
+ goto err_pid;
+
+ spin_lock(&i915->gem.contexts.lock);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ spin_unlock(&i915->gem.contexts.lock);
+
+ return 0;
+err_pid:
+ put_pid(fetch_and_zero(&ctx->pid));
return ret;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6b4ec66cb558..446e76e95c38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -45,6 +45,13 @@ struct eb_vma_array {
struct eb_vma vma[];
};
+enum {
+ FORCE_CPU_RELOC = 1,
+ FORCE_GTT_RELOC,
+ FORCE_GPU_RELOC,
+#define DBG_FORCE_RELOC 0 /* choose one of the above! */
+};
+
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@@ -253,6 +260,8 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
+ unsigned long vaddr; /** Current kmap address */
+ unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@@ -596,6 +605,23 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
+static inline int use_cpu_reloc(const struct reloc_cache *cache,
+ const struct drm_i915_gem_object *obj)
+{
+ if (!i915_gem_object_has_struct_page(obj))
+ return false;
+
+ if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
+ return false;
+
+ return (cache->has_llc ||
+ obj->cache_dirty ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@@ -926,6 +952,8 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
+ cache->page = -1;
+ cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@@ -937,6 +965,25 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
+static inline void *unmask_page(unsigned long p)
+{
+ return (void *)(uintptr_t)(p & PAGE_MASK);
+}
+
+static inline unsigned int unmask_flags(unsigned long p)
+{
+ return p & ~PAGE_MASK;
+}
+
+#define KMAP 0x4 /* after CLFLUSH_FLAGS */
+
+static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
+{
+ struct drm_i915_private *i915 =
+ container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
+ return &i915->ggtt;
+}
+
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@@ -1049,6 +1096,181 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
+static void reloc_cache_reset(struct reloc_cache *cache)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ vaddr = unmask_page(cache->vaddr);
+ if (cache->vaddr & KMAP) {
+ if (cache->vaddr & CLFLUSH_AFTER)
+ mb();
+
+ kunmap_atomic(vaddr);
+ i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
+ } else {
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
+ drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
+ } else {
+ i915_vma_unpin((struct i915_vma *)cache->node.mm);
+ }
+ }
+
+ cache->vaddr = 0;
+ cache->page = -1;
+}
+
+static void *reloc_kmap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ void *vaddr;
+
+ if (cache->vaddr) {
+ kunmap_atomic(unmask_page(cache->vaddr));
+ } else {
+ unsigned int flushes;
+ int err;
+
+ err = i915_gem_object_prepare_write(obj, &flushes);
+ if (err)
+ return ERR_PTR(err);
+
+ BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
+ BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
+
+ cache->vaddr = flushes | KMAP;
+ cache->node.mm = (void *)obj;
+ if (flushes)
+ mb();
+ }
+
+ vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+ cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
+ cache->page = page;
+
+ return vaddr;
+}
+
+static void *reloc_iomap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ unsigned long offset;
+ void *vaddr;
+
+ if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+ } else {
+ struct i915_vma *vma;
+ int err;
+
+ if (i915_gem_object_is_tiled(obj))
+ return ERR_PTR(-EINVAL);
+
+ if (use_cpu_reloc(cache, obj))
+ return NULL;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return ERR_PTR(err);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (IS_ERR(vma)) {
+ memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
+ err = drm_mm_insert_node_in_range
+ (&ggtt->vm.mm, &cache->node,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) /* no inactive aperture space, use cpu reloc */
+ return NULL;
+ } else {
+ cache->node.start = vma->node.start;
+ cache->node.mm = (void *)vma;
+ }
+ }
+
+ offset = cache->node.start;
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
+ } else {
+ offset += page << PAGE_SHIFT;
+ }
+
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
+ offset);
+ cache->page = page;
+ cache->vaddr = (unsigned long)vaddr;
+
+ return vaddr;
+}
+
+static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ void *vaddr;
+
+ if (cache->page == page) {
+ vaddr = unmask_page(cache->vaddr);
+ } else {
+ vaddr = NULL;
+ if ((cache->vaddr & KMAP) == 0)
+ vaddr = reloc_iomap(obj, cache, page);
+ if (!vaddr)
+ vaddr = reloc_kmap(obj, cache, page);
+ }
+
+ return vaddr;
+}
+
+static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
+{
+ if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
+ if (flushes & CLFLUSH_BEFORE) {
+ clflushopt(addr);
+ mb();
+ }
+
+ *addr = value;
+
+ /*
+ * Writes to the same cacheline are serialised by the CPU
+ * (including clflush). On the write path, we only require
+ * that it hits memory in an orderly fashion and place
+ * mb barriers at the start and end of the relocation phase
+ * to ensure ordering of clflush wrt to the system.
+ */
+ if (flushes & CLFLUSH_AFTER)
+ clflushopt(addr);
+ } else
+ *addr = value;
+}
+
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -1214,6 +1436,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
+static inline bool use_reloc_gpu(struct i915_vma *vma)
+{
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC)
+ return false;
+
+ return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
+
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@@ -1228,10 +1461,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
-static int __reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
+static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@@ -1247,7 +1480,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
- return PTR_ERR(batch);
+ return false;
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@@ -1296,21 +1529,55 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
- return 0;
+ return true;
+}
+
+static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ if (eb->reloc_cache.vaddr)
+ return false;
+
+ if (!use_reloc_gpu(vma))
+ return false;
+
+ return __reloc_entry_gpu(eb, vma, offset, target_addr);
}
static u64
-relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+relocate_entry(struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
- int err;
-
- err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
- if (err)
- return err;
+ u64 offset = reloc->offset;
+
+ if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+ bool wide = eb->reloc_cache.use_64bit_reloc;
+ void *vaddr;
+
+repeat:
+ vaddr = reloc_vaddr(vma->obj,
+ &eb->reloc_cache,
+ offset >> PAGE_SHIFT);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+ clflush_write32(vaddr + offset_in_page(offset),
+ lower_32_bits(target_addr),
+ eb->reloc_cache.vaddr);
+
+ if (wide) {
+ offset += sizeof(u32);
+ target_addr >>= 32;
+ wide = false;
+ goto repeat;
+ }
+ }
return target->node.start | UPDATE;
}
@@ -1375,7 +1642,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ if (!DBG_FORCE_RELOC &&
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -1407,7 +1675,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(eb, ev->vma, reloc, target->vma);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@@ -1445,8 +1713,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
- if (unlikely(copied))
- return -EFAULT;
+ if (unlikely(copied)) {
+ remain = -EFAULT;
+ goto out;
+ }
remain -= count;
do {
@@ -1454,7 +1724,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
- return (int)offset;
+ remain = (int)offset;
+ goto out;
} else {
/*
* Note that reporting an error now
@@ -1484,8 +1755,9 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
-
- return 0;
+out:
+ reloc_cache_reset(&eb->reloc_cache);
+ return remain;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -2392,7 +2664,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
- if (!(args->flags & I915_EXEC_NO_RELOC))
+ if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
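The restored CPU relocation paths are also wired to the DBG_FORCE_RELOC debug knob introduced above. To exercise one path exclusively while testing, the define is switched to one of the enum values (debug-only sketch):

    #define DBG_FORCE_RELOC FORCE_CPU_RELOC   /* or FORCE_GTT_RELOC / FORCE_GPU_RELOC */

With a non-zero value, eb_relocate_entry() also skips the presumed-offset fast path (see the hunk above), so every relocation is actually performed.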
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e5b9276d254c..9cf4ad78ece6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -258,6 +258,10 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index d15ff6748a50..e8a083743e09 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -548,6 +548,20 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 57c14d3340cd..a49016f8ee0d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -37,14 +37,20 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[0] * sizeof(u32),
+ 0)) {
+ err = -EIO;
goto unpin_vma;
+ }
/* !8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[1] * sizeof(u32),
+ 1)) {
+ err = -EIO;
goto unpin_vma;
+ }
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@@ -54,9 +60,12 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
- err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[2] * sizeof(u32),
+ 2)) {
+ err = -EIO;
goto unpin_vma;
+ }
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 24322ef08aa4..9eeaca957a7e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2060,6 +2060,14 @@ static inline void clear_ports(struct i915_request **ports, int count)
memset_p((void **)ports, NULL, count);
}
+static inline void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+ /* A memcpy_p() would be very useful here! */
+ while (count--)
+ WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -2648,10 +2656,9 @@ static void process_csb(struct intel_engine_cs *engine)
/* switch pending to inflight */
GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending));
+ copy_ports(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists));
smp_wmb(); /* complete the seqlock */
WRITE_ONCE(execlists->active, execlists->inflight);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0b2fe55e6194..781a6783affe 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -388,17 +388,38 @@ static bool __request_in_flight(const struct i915_request *signal)
* As we know that there are always preemption points between
* requests, we know that only the currently executing request
* may be still active even though we have cleared the flag.
- * However, we can't rely on our tracking of ELSP[0] to known
+ * However, we can't rely on our tracking of ELSP[0] to know
* which request is currently active and so may be stuck, as
* the tracking may be an event behind. Instead assume that
* if the context is still inflight, then it is still active
* even if the active flag has been cleared.
+ *
+ * To further complicate matters, if there is a pending promotion, the HW
+ * may either perform a context switch to the second inflight execlists,
+ * or it may switch to the pending set of execlists. In the case of the
+ * latter, it may send the ACK and we process the event copying the
+ * pending[] over top of inflight[], _overwriting_ our *active. Since
+ * this implies the HW is arbitrating and not stuck in *active, we do
+ * not worry about complete accuracy, but we do require no read/write
+ * tearing of the pointer [the read of the pointer must be valid, even
+ * as the array is being overwritten, for which we require the writes
+ * to avoid tearing.]
+ *
+ * Note that the read of *execlists->active may race with the promotion
+ * of execlists->pending[] to execlists->inflight[], overwriting
+ * the value at *execlists->active. This is fine. The promotion implies
+ * that we received an ACK from the HW, and so the context is not
+ * stuck -- if we do not see ourselves in *active, the inflight status
+ * is valid. If instead we see ourselves being copied into *active,
+ * we are inflight and may signal the callback.
*/
if (!intel_context_inflight(signal->context))
return false;
rcu_read_lock();
- for (port = __engine_active(signal->engine); (rq = *port); port++) {
+ for (port = __engine_active(signal->engine);
+ (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
+ port++) {
if (rq->context == signal->context) {
inflight = i915_seqno_passed(rq->fence.seqno,
signal->fence.seqno);
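The READ_ONCE() here pairs with the WRITE_ONCE() in copy_ports() (intel_lrc.c, earlier in this series): the writer publishes each pointer atomically so a racing reader sees either the old or the new request, never a torn mix of the two. Minimal sketch of the pairing:

    /* publisher, during promotion of pending[] to inflight[] */
    WRITE_ONCE(*dst, *src);

    /* concurrent reader walking the same ports */
    rq = READ_ONCE(*port);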
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 295b9829e2da..4cd2038cbe35 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -164,9 +164,13 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
do {
list_for_each_entry_safe(pos, next, &x->head, entry) {
- pos->func(pos,
- TASK_NORMAL, fence->error,
- &extra);
+ int wake_flags;
+
+ wake_flags = fence->error;
+ if (pos->func == autoremove_wake_function)
+ wake_flags = 0;
+
+ pos->func(pos, TASK_NORMAL, wake_flags, &extra);
}
if (list_empty(&extra))
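Background for the wake_flags change: wait-queue callbacks share the signature

    int (*func)(struct wait_queue_entry *wq_entry, unsigned int mode,
                int flags, void *key);

and autoremove_wake_function() forwards its flags argument to the scheduler as wake flags (e.g. WF_SYNC), not as an error code. Passing fence->error (a negative errno) there could be misinterpreted as a flag, so it is zeroed for the default wake function while custom callbacks still receive the error.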
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index ada990a7f911..b7074161ccf0 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -673,7 +673,7 @@ static void ingenic_drm_unbind_all(void *d)
component_unbind_all(priv->dev, &priv->drm);
}
-static int ingenic_drm_bind(struct device *dev)
+static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
const struct jz_soc_info *soc_info;
@@ -808,7 +808,7 @@ static int ingenic_drm_bind(struct device *dev)
return ret;
}
- if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
+ if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -939,6 +939,11 @@ err_pixclk_disable:
return ret;
}
+static int ingenic_drm_bind_with_components(struct device *dev)
+{
+ return ingenic_drm_bind(dev, true);
+}
+
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -957,7 +962,7 @@ static void ingenic_drm_unbind(struct device *dev)
}
static const struct component_master_ops ingenic_master_ops = {
- .bind = ingenic_drm_bind,
+ .bind = ingenic_drm_bind_with_components,
.unbind = ingenic_drm_unbind,
};
@@ -968,16 +973,15 @@ static int ingenic_drm_probe(struct platform_device *pdev)
struct device_node *np;
if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
- return ingenic_drm_bind(dev);
+ return ingenic_drm_bind(dev, false);
/* IPU is at port address 8 */
np = of_graph_get_remote_node(dev->of_node, 8, 0);
- if (!np) {
- dev_err(dev, "Unable to get IPU node\n");
- return -EINVAL;
- }
+ if (!np)
+ return ingenic_drm_bind(dev, false);
drm_of_component_match_add(dev, &match, compare_of, np);
+ of_node_put(np);
return component_master_add_with_match(dev, &ingenic_master_ops, match);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 3fc5511330b9..4d29568be3f5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -831,13 +831,19 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client = NULL;
}
- ret = of_property_read_u32_index(priv->mutex_node,
- "mediatek,gce-events",
- drm_crtc_index(&mtk_crtc->base),
- &mtk_crtc->cmdq_event);
- if (ret)
- dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
- drm_crtc_index(&mtk_crtc->base));
+
+ if (mtk_crtc->cmdq_client) {
+ ret = of_property_read_u32_index(priv->mutex_node,
+ "mediatek,gce-events",
+ drm_crtc_index(&mtk_crtc->base),
+ &mtk_crtc->cmdq_event);
+ if (ret) {
+ dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+ drm_crtc_index(&mtk_crtc->base));
+ cmdq_mbox_destroy(mtk_crtc->cmdq_client);
+ mtk_crtc->cmdq_client = NULL;
+ }
+ }
#endif
return 0;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 57c88de9a329..526648885b97 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -496,6 +496,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (of_address_to_resource(node, 0, &res) != 0) {
dev_err(dev, "Missing reg in %s node\n", node->full_name);
+ put_device(&larb_pdev->dev);
return -EINVAL;
}
comp->regs_pa = res.start;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 040a8f393fe2..2d982740b1a4 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -27,7 +27,6 @@
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
-#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
@@ -165,7 +164,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = drmm_mode_config_init(drm);
if (ret)
- return ret;
+ goto put_mutex_dev;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -182,7 +181,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = component_bind_all(drm->dev, drm);
if (ret)
- return ret;
+ goto put_mutex_dev;
/*
* We currently support two fixed data streams, each optional,
@@ -229,7 +228,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
if (!dma_dev->dma_parms) {
ret = -ENOMEM;
- goto err_component_unbind;
+ goto put_dma_dev;
}
ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
@@ -256,9 +255,12 @@ static int mtk_drm_kms_init(struct drm_device *drm)
err_unset_dma_parms:
if (private->dma_parms_allocated)
dma_dev->dma_parms = NULL;
+put_dma_dev:
+ put_device(private->dma_dev);
err_component_unbind:
component_unbind_all(drm->dev, drm);
-
+put_mutex_dev:
+ put_device(private->mutex_dev);
return ret;
}
@@ -544,8 +546,13 @@ err_pm:
pm_runtime_disable(dev);
err_node:
of_node_put(private->mutex_node);
- for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++) {
of_node_put(private->comp_node[i]);
+ if (private->ddp_comp[i]) {
+ put_device(private->ddp_comp[i]->larb_dev);
+ private->ddp_comp[i] = NULL;
+ }
+ }
return ret;
}
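The added labels follow the usual reverse-order unwind pattern: every reference taken on the way in gains a matching put on the way out, executed in the opposite order on failure. A self-contained sketch with hypothetical helpers:

    static int example_bind(struct device *a, struct device *b)
    {
            int ret;

            get_device(a);
            ret = setup_a(a);               /* hypothetical */
            if (ret)
                    goto put_a;

            get_device(b);
            ret = setup_b(b);               /* hypothetical */
            if (ret)
                    goto put_b;

            return 0;

    put_b:
            put_device(b);
    put_a:
            put_device(a);
            return ret;
    }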
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 16fd99dcdacf..80b7a082e874 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -466,14 +466,13 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- horizontal_backporch_byte =
- (vm->hback_porch * dsi_tmp_buf_bpp - 10);
+ horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
else
- horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
- dsi_tmp_buf_bpp - 10);
+ horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+ dsi_tmp_buf_bpp;
data_phy_cycles = timing->lpx + timing->da_hs_prepare +
- timing->da_hs_zero + timing->da_hs_exit + 3;
+ timing->da_hs_zero + timing->da_hs_exit;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index f2e9b429960b..a97725680d4e 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1507,25 +1507,30 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
dev_err(dev,
"Failed to get system configuration registers: %d\n",
ret);
- return ret;
+ goto put_device;
}
hdmi->sys_regmap = regmap;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ ret = PTR_ERR(hdmi->regs);
+ goto put_device;
+ }
remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote)
- return -EINVAL;
+ if (!remote) {
+ ret = -EINVAL;
+ goto put_device;
+ }
if (!of_device_is_compatible(remote, "hdmi-connector")) {
hdmi->next_bridge = of_drm_find_bridge(remote);
if (!hdmi->next_bridge) {
dev_err(dev, "Waiting for external bridge\n");
of_node_put(remote);
- return -EPROBE_DEFER;
+ ret = -EPROBE_DEFER;
+ goto put_device;
}
}
@@ -1534,7 +1539,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
dev_err(dev, "Failed to find ddc-i2c-bus node in %pOF\n",
remote);
of_node_put(remote);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_device;
}
of_node_put(remote);
@@ -1542,10 +1548,14 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_device;
}
return 0;
+put_device:
+ put_device(hdmi->cec_dev);
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 6021f8d9efd1..48fa49f69d6d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0a5ea9f56cb8..f6471145a7a6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* setup access protection: */
gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b9b26b2bf9c5..954753600625 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 84a5d9c1f2a2..91726da82ed6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- a5xx_preempt_hw_init(gpu);
-
if (!adreno_is_a510(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
@@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+ gpu->rb[0]->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ a5xx_preempt_hw_init(gpu);
+
/* Disable the interrupts through the initial bringup stage */
gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
@@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ /* Restricting nr_rings to 1 to temporarily disable preemption */
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 54868d4e3958..1e5b1a15a70f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -31,6 +31,7 @@ struct a5xx_gpu {
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 9cf9353a7ff1..9f3fe177b00e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
- struct drm_gem_object *bo = NULL;
- u64 iova = 0;
+ void *counters;
+ struct drm_gem_object *bo = NULL, *counters_bo = NULL;
+ u64 iova = 0, counters_iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
+ MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ /* The buffer to store counters needs to be unprivileged */
+ counters = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
+ if (IS_ERR(counters)) {
+ msm_gem_kernel_put(bo, gpu->aspace, true);
+ return PTR_ERR(counters);
+ }
+
msm_gem_object_set_name(bo, "preempt");
+ msm_gem_object_set_name(counters_bo, "preempt_counters");
a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
- ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+ ptr->counter = counters_iova;
return 0;
}
@@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++)
+ for (i = 0; i < gpu->nr_rings; i++) {
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
+ gpu->aspace, true);
+ }
}
void a5xx_preempt_init(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 3966abd523cc..66a95e22b7b3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
- if (adreno_is_a650(adreno_gpu)) {
+ /* Enable expanded apriv for targets that support it */
+ if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
}
@@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+ gpu->rb[0]->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
+ if (adreno_is_a650(adreno_gpu))
+ adreno_gpu->base.hw_apriv = true;
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 288141fe4c58..862dd35b27d3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->memptrs->rptr = 0;
}
- /*
- * Setup REG_CP_RB_CNTL. The same value is used across targets (with
- * the excpetion of A430 that disables the RPTR shadow) - the cacluation
- * for the ringbuffer size and block size is moved to msm_gpu.h for the
- * pre-processor to deal with and the A430 variant is ORed in here
- */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- MSM_GPU_RB_CNTL_DEFAULT |
- (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
-
- /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
- REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
-
- if (!adreno_is_a430(adreno_gpu)) {
- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- rbmemptr(gpu->rb[0], rptr));
- }
-
return 0;
}
@@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
- if (adreno_is_a430(adreno_gpu))
- return ring->memptrs->rptr = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
- else
- return ring->memptrs->rptr;
+ return ring->memptrs->rptr = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index d5645472b25d..57ddc9438351 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 0db117a7339b..37cffac4cbe3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -15,6 +15,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
+#include "msm_gem.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
@@ -139,6 +140,8 @@ struct msm_gpu {
} devfreq;
struct msm_gpu_state *crashstate;
+ /* True if the hardware supports expanded apriv (a650 and newer) */
+ bool hw_apriv;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->dev->struct_mutex);
}
+/*
+ * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
+ * support expanded privileges
+ */
+#define check_apriv(gpu, flags) \
+ (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
+
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 39ecb5a18431..935bf9b1d941 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -27,8 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
- &ring->iova);
+ check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
+ gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7b69d6dfe44a..e0ae911ef427 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
/* get matching reference and feedback divider */
*ref_div = min(max(den/post_div, 1u), ref_div_max);
- *fb_div = max(nom * *ref_div * post_div / den, 1u);
+ *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
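DIV_ROUND_CLOSEST() rounds to the nearest integer instead of truncating, roughly halving the worst-case error in the programmed feedback divider. Worked example with illustrative values: for nom * *ref_div * post_div = 1860 and den = 100, plain division gives 18 (error 0.6 from the exact 18.6), while DIV_ROUND_CLOSEST(1860, 100) = (1860 + 50) / 100 = 19 (error 0.4).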
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 072ea113e6be..ed5d86617802 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
/* We can't have an alpha plane at the lowest position */
if (!backend->quirks->supports_lowest_plane_alpha &&
- (plane_states[0]->fb->format->has_alpha ||
- (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
+ (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
@@ -995,7 +994,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
- .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index ced9a8287dd8..e40c542254f6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -1433,14 +1433,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
- if (ret)
+ if (ret) {
+ put_device(&pdev->dev);
return ret;
+ }
}
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
- if (ret)
+ if (ret) {
+ put_device(&pdev->dev);
return ret;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 7f13f4d715bf..de8a11abd66a 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -889,7 +889,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
- bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
+ bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
if (!bounce)
return -ENOMEM;
@@ -900,7 +900,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
len += sizeof(crc);
- regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
+ regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
kfree(bounce);
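Both fixes address the same 32-bit register granularity. Worked example, assuming the driver's 16-bit CRC: tx_len = 7 plus sizeof(crc) = 2 gives len = 9 payload bytes; the bounce buffer is now allocated with ALIGN(9, 4) = 12 bytes so the bulk write never reads past the allocation, and regmap_bulk_write() is told DIV_ROUND_UP(9, 4) = 3, the number of 32-bit TX registers to fill, rather than 9.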
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 22c8c5375d0d..c0147af6a840 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -211,7 +211,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
-static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
{
if (!format->is_yuv)
return SUN8I_CSC_MODE_OFF;
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index d733bbc4ac0e..17ff24d999d1 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -14,6 +14,7 @@
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
+#include <linux/delay.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
@@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_connector *connector = priv->connector;
u32 format = fb->format->format;
u32 ctrl1 = 0;
+ int retries;
clk_prepare_enable(priv->clk);
+ /* Reset the TVE200 and wait for it to come back online */
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
+ for (retries = 0; retries < 5; retries++) {
+ usleep_range(30000, 50000);
+ if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
+ continue;
+ else
+ break;
+ }
+ if (retries == 5 &&
+ readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
+ dev_err(drm->dev, "can't get hardware out of reset\n");
+ return;
+ }
+
/* Function 1 */
ctrl1 |= TVE200_CTRL_CSMODE;
/* Interlace mode for CCIR656: parameterize? */
@@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_vblank_off(crtc);
- /* Disable and Power Down */
+ /* Disable, put into reset and Power Down */
writel(0, priv->regs + TVE200_CTRL);
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
clk_disable_unprepare(priv->clk);
}
@@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
+ /* Clear any IRQs and enable */
+ writel(0xFF, priv->regs + TVE200_INT_CLR);
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
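The bounded poll above (up to five 30-50 ms sleeps, then a final check) could equally be expressed with the generic iopoll helper; a sketch under that assumption, with a comparable worst-case wait:

    #include <linux/iopoll.h>

    u32 val;
    int ret;

    ret = readl_poll_timeout(priv->regs + TVE200_CTRL_4, val,
                             !(val & TVE200_CTRL_4_RESET),
                             10000, 250000);
    if (ret)
            dev_err(drm->dev, "can't get hardware out of reset\n");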
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index af55b334be2f..afd0f9200f90 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -97,9 +97,6 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
-
- output->enabled = true;
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -111,7 +108,6 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
- output->enabled = false;
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
@@ -123,6 +119,17 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ /*
+ * virtio-gpu can't do modeset and plane update operations
+ * independent from each other. So the actual modeset happens
+ * in the plane update callback, and here we just check
+ * whenever we must force the modeset.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state)) {
+ output->needs_modeset = true;
+ }
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9ff9f4ac0522..fbc04272db4f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -137,7 +137,7 @@ struct virtio_gpu_output {
struct edid *edid;
int cur_x;
int cur_y;
- bool enabled;
+ bool needs_modeset;
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e83651b7747d..842f8b61aa89 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -151,7 +151,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0)
return -EINVAL;
- shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ /*
+ * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
+ * drm_gem_shmem_get_pages_sgt because virtio has its own set of
+ * dma-ops. This is discouraged for other drivers, but should be fine
+ * since virtio_gpu doesn't support dma-buf import from other devices.
+ */
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 52d24179bcec..6a311cd93440 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -142,7 +142,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (!plane->state->fb || !output->enabled) {
+ if (!plane->state->fb || !output->crtc.state->active) {
DRM_DEBUG("nofb\n");
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
plane->state->src_w >> 16,
@@ -163,7 +163,9 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w != old_state->src_w ||
plane->state->src_h != old_state->src_h ||
plane->state->src_x != old_state->src_x ||
- plane->state->src_y != old_state->src_y) {
+ plane->state->src_y != old_state->src_y ||
+ output->needs_modeset) {
+ output->needs_modeset = false;
DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
bo->hw_res_handle,
plane->state->crtc_w, plane->state->crtc_h,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 39ff95b75357..534daf37c97e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -18,6 +18,7 @@
#include <drm/drm_probe_helper.h>
#include <xen/balloon.h>
+#include <xen/xen.h>
#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"
@@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
* allocate ballooned pages which will be used to map
* grant references provided by the backend
*/
- ret = alloc_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
if (ret < 0) {
DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
xen_obj->num_pages, ret);
@@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
} else {
if (xen_obj->pages) {
if (xen_obj->be_alloc) {
- free_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ xen_free_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
gem_free_pages_array(xen_obj);
} else {
drm_gem_put_pages(&xen_obj->base,
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index aa6cd889bd11..b52c6cdfc0b8 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -2,6 +2,7 @@ config DRM_ZYNQMP_DPSUB
tristate "ZynqMP DisplayPort Controller Driver"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on COMMON_CLK && DRM && OF
+ depends on DMADEVICES
select DMA_ENGINE
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 591106cf58fc..1d44bb635bb8 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -731,7 +731,7 @@ static void vmbus_wait_for_unload(void)
void *page_addr;
struct hv_message *msg;
struct vmbus_channel_message_header *hdr;
- u32 message_type;
+ u32 message_type, i;
/*
* CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
@@ -741,8 +741,11 @@ static void vmbus_wait_for_unload(void)
* functional and vmbus_unload_response() will complete
* vmbus_connection.unload_event. If not, the last thing we can do is
* read message pages for all CPUs directly.
+ *
+ * Wait no more than 10 seconds so that the panic path can't hang
+ * forever waiting for a response message that never arrives.
*/
- while (1) {
+ for (i = 0; i < 1000; i++) {
if (completion_done(&vmbus_connection.unload_event))
break;
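
The unbounded wait above becomes a bounded poll. A sketch of the shape, assuming each iteration delays roughly 10 ms (the delay itself sits outside the quoted hunk), so 1000 iterations cap the wait at about 10 seconds:

    u32 i;

    for (i = 0; i < 1000; i++) {
            if (completion_done(&vmbus_connection.unload_event))
                    break;          /* unload response arrived */

            /* ... scan the per-CPU message pages for the response ... */

            mdelay(10);     /* panic path: busy-wait, the scheduler may be dead */
    }

Since this runs on the panic/crash path, sleeping primitives are off the table, which is why a busy delay rather than wait_for_completion_timeout() bounds the loop.
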
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 910b6e90866c..946d0aba101f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2382,7 +2382,10 @@ static int vmbus_bus_suspend(struct device *dev)
if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
wait_for_completion(&vmbus_connection.ready_for_suspend_event);
- WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
+ if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
+ pr_err("Can not suspend due to a previous failed resuming\n");
+ return -EBUSY;
+ }
mutex_lock(&vmbus_connection.channel_mutex);
@@ -2456,7 +2459,9 @@ static int vmbus_bus_resume(struct device *dev)
vmbus_request_offers();
- wait_for_completion(&vmbus_connection.ready_for_resume_event);
+ if (wait_for_completion_timeout(
+ &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
+ pr_err("Some vmbus device is missing after suspending?\n");
/* Reset the event for the next suspend. */
reinit_completion(&vmbus_connection.ready_for_suspend_event);
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 710fbef9a9c2..384af88e58ad 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap)
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET);
pca_outw(adap, I2C_PCA_IND, 0xA5);
pca_outw(adap, I2C_PCA_IND, 0x5A);
+
+ /*
+ * After a reset we need to re-apply any configuration
+ * (calculated in pca_init) to get the bus in a working state.
+ */
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode);
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow);
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi);
+
+ pca_set_con(adap, I2C_PCA_CON_ENSIO);
} else {
adap->reset_chip(adap->data);
+ pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq);
}
}
@@ -423,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap)
" Use the nominal frequency.\n", adap->name);
}
- pca_reset(pca_data);
-
clock = pca_clock(pca_data);
printk(KERN_INFO "%s: Clock frequency is %dkHz\n",
adap->name, freqs[clock]);
- pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
+ /* Store settings as these will be needed when the PCA chip is reset */
+ pca_data->bus_settings.clock_freq = clock;
+
+ pca_reset(pca_data);
} else {
int clock;
int mode;
@@ -496,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap)
thi = tlow * min_thi / min_tlow;
}
+ /* Store settings as these will be needed when the PCA chip is reset */
+ pca_data->bus_settings.mode = mode;
+ pca_data->bus_settings.tlow = tlow;
+ pca_data->bus_settings.thi = thi;
+
pca_reset(pca_data);
printk(KERN_INFO
"%s: Clock frequency is %dHz\n", adap->name, clock * 100);
-
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE);
- pca_outw(pca_data, I2C_PCA_IND, mode);
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
- pca_outw(pca_data, I2C_PCA_IND, tlow);
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
- pca_outw(pca_data, I2C_PCA_IND, thi);
-
- pca_set_con(pca_data, I2C_PCA_CON_ENSIO);
}
udelay(500); /* 500 us for oscillator to stabilise */
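
The pca reordering follows a save-and-replay pattern: pca_init() computes the bus parameters once and caches them in the adapter data, and pca_reset() replays them, so a chip reset issued mid-operation never leaves the bus unconfigured. In outline for the PCA9665 branch (field names as in the patch, reset details elided):

    static void pca_reset(struct i2c_algo_pca_data *adap)
    {
            /* ... issue the software reset sequence ... */

            /* Replay the configuration cached by pca_init(). */
            pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE);
            pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode);
            pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
            pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow);
            pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
            pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi);

            pca_set_con(adap, I2C_PCA_CON_ENSIO);
    }

Note that pca_init() must now store the settings *before* calling pca_reset(), which is exactly why the pca_reset(pca_data) call moves below the bus_settings assignments in both branches.
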
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 31268074c422..724bf30600d6 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -69,6 +69,7 @@
* These share bit definitions, so use the same values for the enable &
* status bits.
*/
+#define ASPEED_I2CD_INTR_RECV_MASK 0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7)
@@ -604,6 +605,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
bus->base + ASPEED_I2C_INTR_STS_REG);
readl(bus->base + ASPEED_I2C_INTR_STS_REG);
+ irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
irq_remaining = irq_received;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e32ef3f01fe8..ebb4c0b03057 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1709,6 +1709,16 @@ static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
static inline void i801_acpi_remove(struct i801_priv *priv) { }
#endif
+static unsigned char i801_setup_hstcfg(struct i801_priv *priv)
+{
+ unsigned char hstcfg = priv->original_hstcfg;
+
+ hstcfg &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
+ hstcfg |= SMBHSTCFG_HST_EN;
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
+ return hstcfg;
+}
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1830,14 +1840,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return err;
}
- pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
- priv->original_hstcfg = temp;
- temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */
- if (!(temp & SMBHSTCFG_HST_EN)) {
+ pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &priv->original_hstcfg);
+ temp = i801_setup_hstcfg(priv);
+ if (!(priv->original_hstcfg & SMBHSTCFG_HST_EN))
dev_info(&dev->dev, "Enabling SMBus device\n");
- temp |= SMBHSTCFG_HST_EN;
- }
- pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
if (temp & SMBHSTCFG_SMB_SMI_EN) {
dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
@@ -1952,10 +1958,9 @@ static void i801_shutdown(struct pci_dev *dev)
#ifdef CONFIG_PM_SLEEP
static int i801_suspend(struct device *dev)
{
- struct pci_dev *pci_dev = to_pci_dev(dev);
- struct i801_priv *priv = pci_get_drvdata(pci_dev);
+ struct i801_priv *priv = dev_get_drvdata(dev);
- pci_write_config_byte(pci_dev, SMBHSTCFG, priv->original_hstcfg);
+ pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
return 0;
}
@@ -1963,6 +1968,7 @@ static int i801_resume(struct device *dev)
{
struct i801_priv *priv = dev_get_drvdata(dev);
+ i801_setup_hstcfg(priv);
i801_enable_host_notify(&priv->adapter);
return 0;
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index efc14041d45b..0cbdfbe605b5 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -681,8 +681,8 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
unsigned int cnt_mul;
int ret = -EINVAL;
- if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
- target_speed = I2C_MAX_FAST_MODE_PLUS_FREQ;
+ if (target_speed > I2C_MAX_HIGH_SPEED_MODE_FREQ)
+ target_speed = I2C_MAX_HIGH_SPEED_MODE_FREQ;
max_step_cnt = mtk_i2c_max_step_cnt(target_speed);
base_step_cnt = max_step_cnt;
@@ -759,7 +759,7 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
for (clk_div = 1; clk_div <= max_clk_div; clk_div++) {
clk_src = parent_clk / clk_div;
- if (target_speed > I2C_MAX_FAST_MODE_FREQ) {
+ if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
/* Set master code speed register */
ret = mtk_i2c_calculate_speed(i2c, clk_src,
I2C_MAX_FAST_MODE_FREQ,
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 9587347447f0..c4b08a924461 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -25,6 +25,7 @@
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
#define DRIVER_NAME "mxs-i2c"
@@ -200,7 +201,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
dma_map_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[0], 1,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT |
+ MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(i2c->dev,
"Failed to get DMA data write descriptor.\n");
@@ -228,7 +230,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
dma_map_sg(i2c->dev, &i2c->sg_io[1], 1, DMA_FROM_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, &i2c->sg_io[1], 1,
DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT |
+ MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(i2c->dev,
"Failed to get DMA data write descriptor.\n");
@@ -260,7 +263,8 @@ static int mxs_i2c_dma_setup_xfer(struct i2c_adapter *adap,
dma_map_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
desc = dmaengine_prep_slave_sg(i2c->dmach, i2c->sg_io, 2,
DMA_MEM_TO_DEV,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_PREP_INTERRUPT |
+ MXS_DMA_CTRL_WAIT4END);
if (!desc) {
dev_err(i2c->dev,
"Failed to get DMA data write descriptor.\n");
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 75f07138a6fa..dfcf04e1967f 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2093,8 +2093,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
- /* Adaptive TimeOut: astimated time in usec + 100% margin */
- timeout_usec = (2 * 10000 / bus->bus_freq) * (2 + nread + nwrite);
+ /*
+ * Adaptive TimeOut: estimated time in usec + 100% margin:
+ * 2: double the timeout to allow for clock stretching
+ * 9: clock cycles per transferred byte (8 data bits plus ack/nack)
+ */
+ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
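
The corrected formula is easy to sanity-check with concrete numbers (USEC_PER_SEC = 1000000):

    /*
     * bus_freq = 100 kHz:
     *   per-byte time = 2 * 9 * USEC_PER_SEC / 100000 = 180 us
     *   nread + nwrite = 14  =>  timeout_usec = 180 * (2 + 14) = 2880 us
     *   timeout = max(35 ms, ~2.9 ms) = 35 ms      (the floor dominates)
     *
     * For a 4 KiB transfer the scaling term wins instead:
     *   180 * (2 + 4096) ~= 738 ms
     */

The old expression (2 * 10000 / bus_freq) truncated to zero in integer arithmetic for any bus frequency above 20 kHz, so the adaptive term vanished and the 35 ms floor was all that remained.
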
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5ec082e2039d..573b5da145d1 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1464,8 +1464,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
- i2c_acpi_register_devices(adap);
i2c_acpi_install_space_handler(adap);
+ i2c_acpi_register_devices(adap);
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 24864d9dfab5..48435865fdaf 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -189,6 +189,14 @@ struct bmc150_accel_data {
struct mutex mutex;
u8 fifo_mode, watermark;
s16 buffer[8];
+ /*
+ * Ensure there is sufficient space and correct alignment for
+ * the timestamp if enabled
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
u8 bw_bits;
u32 slope_dur;
u32 slope_thres;
@@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
* now.
*/
for (i = 0; i < count; i++) {
- u16 sample[8];
int j, bit;
j = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength)
- memcpy(&sample[j++], &buffer[i * 3 + bit], 2);
+ memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
+ sizeof(data->scan.channels[0]));
- iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ tstamp);
tstamp += sample_period;
}
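
This bmc150 change is the first of a long run of identical fixes in this series (kxsd9, mma7455, mma8452, ina2xx, max1118, adc081c, adc084s021, ccs811, ltr501, max44000, ak8975, mb1232): iio_push_to_buffers_with_timestamp() appends an s64 timestamp after the channel data, so the buffer handed to it must both leave room for the timestamp and be 8-byte aligned, which a bare stack array of u16 guarantees neither. The generic shape of the fix, with hypothetical driver names:

    struct my_sensor_data {
            /* ... other driver state ... */

            /* Room and natural alignment for the appended timestamp. */
            struct {
                    __le16 channels[3];
                    s64 ts __aligned(8);    /* compiler pads channels -> ts */
            } scan;
    };

    static irqreturn_t my_trigger_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;
            struct my_sensor_data *data = iio_priv(indio_dev);

            /* ... read the device into data->scan.channels[] ... */

            iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
                                               iio_get_time_ns(indio_dev));
            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }

Keeping the struct in the iio_priv() data rather than on the stack can also avoid pushing uninitialized stack padding out to userspace through the buffer.
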
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 66b2e4cf24cf..0e18b92e2099 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
const struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct kxsd9_state *st = iio_priv(indio_dev);
+ /*
+ * Ensure correct positioning and alignment of timestamp.
+ * No need to zero-initialize, as all elements are written.
+ */
+ struct {
+ __be16 chan[4];
+ s64 ts __aligned(8);
+ } hw_values;
int ret;
- /* 4 * 16bit values AND timestamp */
- __be16 hw_values[8];
ret = regmap_bulk_read(st->map,
KXSD9_REG_X,
- &hw_values,
- 8);
+ hw_values.chan,
+ sizeof(hw_values.chan));
if (ret) {
dev_err(st->dev,
"error reading data\n");
@@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev,
- hw_values,
+ &hw_values,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 7e99bcb3398d..922bd38ff6ea 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -52,6 +52,14 @@
struct mma7455_data {
struct regmap *regmap;
+ /*
+ * Used to reorganize data. Will ensure correct alignment of
+ * the timestamp if present
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
static int mma7455_drdy(struct mma7455_data *mma7455)
@@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma7455_data *mma7455 = iio_priv(indio_dev);
- u8 buf[16]; /* 3 x 16-bit channels + padding + ts */
int ret;
ret = mma7455_drdy(mma7455);
if (ret)
goto done;
- ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf,
- sizeof(__le16) * 3);
+ ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL,
+ mma7455->scan.channels,
+ sizeof(mma7455->scan.channels));
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 4e6e70250048..853febc29488 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -110,6 +110,12 @@ struct mma8452_data {
int sleep_val;
struct regulator *vdd_reg;
struct regulator *vddio_reg;
+
+ /* Ensure correct alignment of the timestamp when present */
+ struct {
+ __be16 channels[3];
+ s64 ts __aligned(8);
+ } buffer;
};
/**
@@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma8452_data *data = iio_priv(indio_dev);
- u8 buffer[16]; /* 3 16-bit channels + padding + ts */
int ret;
- ret = mma8452_read(data, (__be16 *)buffer);
+ ret = mma8452_read(data, data->buffer.channels);
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 66d9cc073157..d94dc800b842 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -865,6 +865,8 @@ config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
depends on RESET_CONTROLLER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the SARADC found in SoCs from
Rockchip.
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 5ed63e874292..b573ec60a8b8 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -146,6 +146,11 @@ struct ina2xx_chip_info {
int range_vbus; /* Bus voltage maximum in V */
int pga_gain_vshunt; /* Shunt voltage PGA gain */
bool allow_async_readout;
+ /* data buffer needs space for channel data and timestamp */
+ struct {
+ u16 chan[4];
+ u64 ts __aligned(8);
+ } scan;
};
static const struct ina2xx_config ina2xx_config[] = {
@@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev)
static int ina2xx_work_buffer(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- /* data buffer needs space for channel data and timestap */
- unsigned short data[4 + sizeof(s64)/sizeof(short)];
int bit, ret, i = 0;
s64 time;
@@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
if (ret < 0)
return ret;
- data[i++] = val;
+ chip->scan.chan[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, time);
+ iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time);
return 0;
};
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 01b20e420ac4..6efb0b43d938 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -36,6 +36,11 @@ struct max1118 {
struct spi_device *spi;
struct mutex lock;
struct regulator *reg;
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ u8 channels[2];
+ s64 ts __aligned(8);
+ } scan;
u8 data ____cacheline_aligned;
};
@@ -166,7 +171,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1118 *adc = iio_priv(indio_dev);
- u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */
int scan_index;
int i = 0;
@@ -184,10 +188,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
goto out;
}
- data[i] = ret;
+ adc->scan.channels[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 5f1706d1c3c0..da353dcb1e9d 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -96,16 +96,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
{
int ret;
- mutex_lock(&adc->lock);
-
ret = i2c_master_send(adc->i2c, &newconfig, 1);
if (ret > 0) {
adc->config = newconfig;
ret = 0;
}
- mutex_unlock(&adc->lock);
-
return ret;
}
@@ -138,6 +134,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
u8 config;
u8 req_channel = channel->channel;
+ mutex_lock(&adc->lock);
+
if (req_channel != MCP3422_CHANNEL(adc->config)) {
config = adc->config;
config &= ~MCP3422_CHANNEL_MASK;
@@ -145,12 +143,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
config &= ~MCP3422_PGA_MASK;
config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
ret = mcp3422_update_config(adc, config);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&adc->lock);
return ret;
+ }
msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
}
- return mcp3422_read(adc, value, &config);
+ ret = mcp3422_read(adc, value, &config);
+
+ mutex_unlock(&adc->lock);
+
+ return ret;
}
static int mcp3422_read_raw(struct iio_dev *iio,
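
The mcp3422 change widens the critical section: previously the mutex only guarded the config write inside mcp3422_update_config(), leaving a window between switching the channel and reading the conversion in which a concurrent caller could reconfigure the ADC. The resulting locking shape:

    mutex_lock(&adc->lock);                 /* one critical section ...   */

    if (req_channel != MCP3422_CHANNEL(adc->config)) {
            ret = mcp3422_update_config(adc, config);   /* now lock-free  */
            if (ret < 0) {
                    mutex_unlock(&adc->lock);
                    return ret;
            }
            msleep(conversion_time_ms);     /* wait with the lock held    */
    }

    ret = mcp3422_read(adc, value, &config);
    mutex_unlock(&adc->lock);               /* ... covering config + read */

    return ret;

Here conversion_time_ms stands in for the mcp3422_read_times[] lookup; holding a mutex across msleep() is fine since this is process context and the sleep is bounded.
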
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 93c2252c0b89..1a9189ba69ae 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -707,7 +707,7 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
size_t read_len;
int ret;
- temperature_calib = devm_nvmem_cell_get(&indio_dev->dev,
+ temperature_calib = devm_nvmem_cell_get(indio_dev->dev.parent,
"temperature_calib");
if (IS_ERR(temperature_calib)) {
ret = PTR_ERR(temperature_calib);
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 9426f70a8005..cf63983a54d9 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -33,6 +33,12 @@ struct adc081c {
/* 8, 10 or 12 */
int bits;
+
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ u16 channel;
+ s64 ts __aligned(8);
+ } scan;
};
#define REG_CONV_RES 0x00
@@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc081c *data = iio_priv(indio_dev);
- u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
int ret;
ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
if (ret < 0)
goto out;
- buf[0] = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ data->scan.channel = ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 9017e1e24273..dfba34834a57 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -26,6 +26,11 @@ struct adc084s021 {
struct spi_transfer spi_trans;
struct regulator *reg;
struct mutex lock;
+ /* Buffer used to align data */
+ struct {
+ __be16 channels[4];
+ s64 ts __aligned(8);
+ } scan;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache line.
@@ -141,14 +146,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc)
struct iio_poll_func *pf = pollfunc;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc084s021 *adc = iio_priv(indio_dev);
- __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */
mutex_lock(&adc->lock);
- if (adc084s021_adc_conversion(adc, &data) < 0)
+ if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0)
dev_err(&adc->spi->dev, "Failed to read data\n");
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
mutex_unlock(&adc->lock);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index f42ab112986e..9fef39bcf997 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -316,6 +316,7 @@ static const struct iio_chan_spec ads1115_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+#ifdef CONFIG_PM
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
int ret;
@@ -333,6 +334,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
return ret < 0 ? ret : 0;
}
+#else /* !CONFIG_PM */
+
+static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PM */
+
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 2b007e7568b2..60dd87e96f5f 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -78,6 +78,11 @@ struct ccs811_data {
struct iio_trigger *drdy_trig;
struct gpio_desc *wakeup_gpio;
bool drdy_trig_on;
+ /* Ensures correct alignment of timestamp if present */
+ struct {
+ s16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
static const struct iio_chan_spec ccs811_channels[] = {
@@ -327,17 +332,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct ccs811_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */
int ret;
- ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4,
- (u8 *)&buf);
+ ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA,
+ sizeof(data->scan.channels),
+ (u8 *)data->scan.channels);
if (ret != 4) {
dev_err(&client->dev, "cannot read sensor data\n");
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
err:
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index ea480c1d4349..1bc6efa47316 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -72,10 +72,13 @@ static void get_default_min_max_freq(enum motionsensor_type type,
switch (type) {
case MOTIONSENSE_TYPE_ACCEL:
- case MOTIONSENSE_TYPE_GYRO:
*min_freq = 12500;
*max_freq = 100000;
break;
+ case MOTIONSENSE_TYPE_GYRO:
+ *min_freq = 25000;
+ *max_freq = 100000;
+ break;
case MOTIONSENSE_TYPE_MAG:
*min_freq = 5000;
*max_freq = 25000;
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 4bac0646398d..b4323d2db0b1 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1243,13 +1243,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ltr501_data *data = iio_priv(indio_dev);
- u16 buf[8];
+ struct {
+ u16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
__le16 als_buf[2];
u8 mask = 0;
int j = 0;
int ret, psdata;
- memset(buf, 0, sizeof(buf));
+ memset(&scan, 0, sizeof(scan));
/* figure out which data needs to be ready */
if (test_bit(0, indio_dev->active_scan_mask) ||
@@ -1268,9 +1271,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
if (ret < 0)
return ret;
if (test_bit(0, indio_dev->active_scan_mask))
- buf[j++] = le16_to_cpu(als_buf[1]);
+ scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))
- buf[j++] = le16_to_cpu(als_buf[0]);
+ scan.channels[j++] = le16_to_cpu(als_buf[0]);
}
if (mask & LTR501_STATUS_PS_RDY) {
@@ -1278,10 +1281,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
&psdata, 2);
if (ret < 0)
goto done;
- buf[j++] = psdata & LTR501_PS_DATA_MASK;
+ scan.channels[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index aa8ed1e3e89a..b8e721bced5b 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -75,6 +75,11 @@
struct max44000_data {
struct mutex lock;
struct regmap *regmap;
+ /* Ensure naturally aligned timestamp */
+ struct {
+ u16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
@@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max44000_data *data = iio_priv(indio_dev);
- u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
int index = 0;
unsigned int regval;
int ret;
@@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
ret = max44000_read_alsval(data);
if (ret < 0)
goto out_unlock;
- buf[index++] = ret;
+ data->scan.channels[index++] = ret;
}
if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
if (ret < 0)
goto out_unlock;
- buf[index] = regval;
+ data->scan.channels[index] = regval;
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 03d71f796177..623766ff800b 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -366,6 +366,12 @@ struct ak8975_data {
struct iio_mount_matrix orientation;
struct regulator *vdd;
struct regulator *vid;
+
+ /* Ensure natural alignment of timestamp */
+ struct {
+ s16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
/* Enable attached power regulator if any. */
@@ -793,7 +799,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
int ret;
- s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
__le16 fval[3];
mutex_lock(&data->lock);
@@ -816,12 +821,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
- buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
- buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
+ data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+ data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+ data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
- iio_push_to_buffers_with_timestamp(indio_dev, buff,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
+
return;
unlock:
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index 654564c45248..ad4b1fb2607a 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -40,6 +40,11 @@ struct mb1232_data {
*/
struct completion ranging;
int irqnr;
+ /* Ensure correct alignment of data to push to IIO buffer */
+ struct {
+ s16 distance;
+ s64 ts __aligned(8);
+ } scan;
};
static irqreturn_t mb1232_handle_irq(int irq, void *dev_id)
@@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mb1232_data *data = iio_priv(indio_dev);
- /*
- * triggered buffer
- * 16-bit channel + 48-bit padding + 64-bit timestamp
- */
- s16 buffer[8] = { 0 };
- buffer[0] = mb1232_read_distance(data);
- if (buffer[0] < 0)
+ data->scan.distance = mb1232_read_distance(data);
+ if (data->scan.distance < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 513825e424bf..a92fc3f90bb5 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -379,7 +379,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
{
LIST_HEAD(tmp_list);
unsigned int nr_cqs, i;
- struct ib_cq *cq;
+ struct ib_cq *cq, *n;
int ret;
if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
@@ -412,7 +412,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
return 0;
out_free_cqs:
- list_for_each_entry(cq, &tmp_list, pool_entry) {
+ list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
cq->shared = false;
ib_free_cq(cq);
}
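
The CQ pool fix is the classic delete-while-iterating bug: ib_free_cq() frees the entry the cursor points at, so plain list_for_each_entry() would step through freed memory when advancing. The _safe variant caches the successor before running the body. A minimal self-contained sketch (struct item is hypothetical):

    struct item {
            struct list_head node;
            /* ... payload ... */
    };

    struct item *it, *tmp;      /* tmp caches the next node */

    list_for_each_entry_safe(it, tmp, &head, node) {
            list_del(&it->node);
            kfree(it);          /* safe: tmp was loaded before the free */
    }
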
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3096e73797b7..307886737646 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1801,7 +1801,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
dev_put(netdev);
- if (!rc) {
+ if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 5ee272d27aaa..1d7a9ca5240c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -752,12 +752,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
gsi_sqp = rdev->gsi_ctx.gsi_sqp;
gsi_sah = rdev->gsi_ctx.gsi_sah;
- /* remove from active qp list */
- mutex_lock(&rdev->qp_lock);
- list_del(&gsi_sqp->list);
- mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
-
ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
bnxt_qplib_destroy_ah(&rdev->qplib_res,
&gsi_sah->qplib_ah,
@@ -772,6 +766,12 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
}
bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
kfree(rdev->gsi_ctx.sqp_tbl);
kfree(gsi_sah);
kfree(gsi_sqp);
@@ -792,11 +792,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
- mutex_lock(&rdev->qp_lock);
- list_del(&qp->list);
- mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
-
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
@@ -819,6 +814,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
goto sh_fail;
}
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -3264,6 +3264,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
wc->wc_flags |= IB_WC_GRH;
}
+static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
+ u16 vlan_id)
+{
+ /*
+ * Check if the vlan is configured in the host. If it is not, this
+ * may be a transparent VLAN, so don't report the vlan id.
+ */
+ if (!__vlan_find_dev_deep_rcu(rdev->netdev,
+ htons(ETH_P_8021Q), vlan_id))
+ return false;
+ return true;
+}
+
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
u16 *vid, u8 *sl)
{
@@ -3332,9 +3345,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
- wc->vlan_id = vlan_id;
- wc->sl = sl;
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->sl = sl;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
}
wc->port_num = 1;
wc->vendor_err = orig_cqe->status;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 17ac8b7c5710..53aee5a42ab8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1009,7 +1009,6 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
struct bnxt_re_ring_attr rattr = {};
- struct bnxt_qplib_ctx *qplib_ctx;
int num_vec_created = 0;
int rc = 0, i;
u8 type;
@@ -1032,13 +1031,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc)
goto dealloc_res;
- qplib_ctx = &rdev->qplib_ctx;
for (i = 0; i < rdev->num_msix - 1; i++) {
struct bnxt_qplib_nq *nq;
nq = &rdev->nq[i];
- nq->hwq.max_elements = (qplib_ctx->cq_count +
- qplib_ctx->srqc_count + 2);
+ nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
if (rc) {
ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index d60e3dcea087..f78da54a0bc5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -818,6 +818,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u16 cmd_flags = 0;
u32 qp_flags = 0;
u8 pg_sz_lvl;
+ u32 tbl_indx;
int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -907,8 +908,9 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
return 0;
@@ -935,10 +937,10 @@ static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
sq = &qp->sq;
hwq = &sq->hwq;
+ /* First psn entry */
fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
if (!IS_ALIGNED(fpsne, PAGE_SIZE))
- indx_pad = ALIGN(fpsne, PAGE_SIZE) / size;
-
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
hwq->pad_pgofft = indx_pad;
hwq->pad_pg = (u64 *)psn_pg;
hwq->pad_stride = size;
@@ -959,6 +961,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u16 cmd_flags = 0;
u32 qp_flags = 0;
u8 pg_sz_lvl;
+ u32 tbl_indx;
u16 nsge;
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -1111,8 +1114,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
return 0;
fail:
@@ -1457,10 +1461,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp resp;
u16 cmd_flags = 0;
+ u32 tbl_indx;
int rc;
- rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
- rcfw->qp_tbl[qp->id].qp_handle = NULL;
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
+ rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
@@ -1468,8 +1474,8 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc) {
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = qp;
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = qp;
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 4e211162acee..f7736e34ac64 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -307,14 +307,15 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
__le16 mcookie;
u16 cookie;
int rc = 0;
- u32 qp_id;
+ u32 qp_id, tbl_indx;
pdev = rcfw->pdev;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
- qp = rcfw->qp_tbl[qp_id].qp_handle;
+ tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
+ qp = rcfw->qp_tbl[tbl_indx].qp_handle;
dev_dbg(&pdev->dev, "Received QP error notification\n");
dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
@@ -615,8 +616,9 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
cmdq->bmap_size = bmap_size;
- rcfw->qp_tbl_size = qp_tbl_sz;
- rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
+ /* Allocate one extra slot to hold the QP1 entry */
+ rcfw->qp_tbl_size = qp_tbl_sz + 1;
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
GFP_KERNEL);
if (!rcfw->qp_tbl)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 157387636d00..5f2f0a5a3560 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -216,4 +216,9 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
+{
+ /* The last index of the qp_tbl is reserved for QP1, i.e. qp_tbl_size - 1 */
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+}
#endif /* __BNXT_QPLIB_RCFW_H__ */
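
With the extra slot, the mapping in map_qp_id_to_tbl_indx() works out as follows (taking qp_tbl_sz = 128 purely for illustration, so qp_tbl_size = 129):

    /*
     *   QP1       -> qp_tbl_size - 1 = 128   (the reserved last slot)
     *   qid = 70  -> 70 % 129 - 2    = 68
     *
     * Note the C precedence: '%' binds tighter than '-', so the second
     * arm evaluates as (qid % qp_tbl_size) - 2.
     */

The reserved slot lets QP1 be looked up regardless of the actual id firmware assigned to it, instead of using that id as a direct table index.
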
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4cd475ea97a2..64d44f51db4b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -149,7 +149,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
- attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 6404f0da1051..967890cd81f2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -47,6 +47,7 @@
struct bnxt_qplib_dev_attr {
#define FW_VER_ARR_LEN 4
u8 fw_ver[FW_VER_ARR_LEN];
+#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 5e7910a517da..bd4f975e7f9a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -784,7 +784,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->ip_gids = true;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
- props->pkey_tbl_len = 1;
+ if (mdev->dev->caps.pkey_table_len[port])
+ props->pkey_tbl_len = 1;
props->max_mtu = IB_MTU_4096;
props->max_vl_num = 2;
props->state = IB_PORT_DOWN;
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 907203afbd99..77f2c7cd1216 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -40,6 +40,8 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
+bool rxe_initialized;
+
/* free resources for a rxe device all objects created for this device must
* have been destroyed
*/
@@ -315,6 +317,7 @@ static int __init rxe_module_init(void)
return err;
rdma_link_register(&rxe_link_ops);
+ rxe_initialized = true;
pr_info("loaded\n");
return 0;
}
@@ -326,6 +329,7 @@ static void __exit rxe_module_exit(void)
rxe_net_exit();
rxe_cache_exit();
+ rxe_initialized = false;
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index fb07eed9e402..cae1b0a24c85 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -67,6 +67,8 @@
#define RXE_ROCE_V2_SPORT (0xc000)
+extern bool rxe_initialized;
+
static inline u32 rxe_crc32(struct rxe_dev *rxe,
u32 crc, void *next, size_t len)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index cdd811a45120..ce24144de16a 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -205,6 +205,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
pr_warn("null vaddr\n");
+ ib_umem_release(umem);
err = -ENOMEM;
goto err1;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index ccda5f5a3bc0..2af31d421bfc 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
struct net_device *ndev;
struct rxe_dev *exists;
+ if (!rxe_initialized) {
+ pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
+ return -EAGAIN;
+ }
+
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
pr_err("add: invalid interface name\n");
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 658939e5c34a..8522e9a3e914 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1056,7 +1056,7 @@ static ssize_t parent_show(struct device *device,
struct rxe_dev *rxe =
rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
- return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index e86acda3cf8c..695f701dc43d 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -140,15 +140,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
- dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr))
goto dma_map_fail;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
- rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
rx_desc->rx_cqe.done = isert_recv_done;
@@ -160,7 +160,7 @@ dma_map_fail:
rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
@@ -181,7 +181,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
@@ -299,10 +299,9 @@ isert_free_login_buf(struct isert_conn *isert_conn)
ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
kfree(isert_conn->login_rsp_buf);
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE,
- DMA_FROM_DEVICE);
- kfree(isert_conn->login_req_buf);
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ kfree(isert_conn->login_desc);
}
static int
@@ -311,25 +310,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
{
int ret;
- isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+ isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
GFP_KERNEL);
- if (!isert_conn->login_req_buf)
+ if (!isert_conn->login_desc)
return -ENOMEM;
- isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
- isert_conn->login_req_buf,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
- ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
+ isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
+ isert_conn->login_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
if (ret) {
- isert_err("login_req_dma mapping error: %d\n", ret);
- isert_conn->login_req_dma = 0;
- goto out_free_login_req_buf;
+ isert_err("login_desc dma mapping error: %d\n", ret);
+ isert_conn->login_desc->dma_addr = 0;
+ goto out_free_login_desc;
}
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
ret = -ENOMEM;
- goto out_unmap_login_req_buf;
+ goto out_unmap_login_desc;
}
isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
@@ -346,11 +345,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
out_free_login_rsp_buf:
kfree(isert_conn->login_rsp_buf);
-out_unmap_login_req_buf:
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-out_free_login_req_buf:
- kfree(isert_conn->login_req_buf);
+out_unmap_login_desc:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+out_free_login_desc:
+ kfree(isert_conn->login_desc);
return ret;
}
@@ -476,7 +475,7 @@ isert_connect_release(struct isert_conn *isert_conn)
if (isert_conn->qp)
isert_destroy_qp(isert_conn);
- if (isert_conn->login_req_buf)
+ if (isert_conn->login_desc)
isert_free_login_buf(isert_conn);
isert_device_put(device);
@@ -862,17 +861,18 @@ isert_login_post_recv(struct isert_conn *isert_conn)
int ret;
memset(&sge, 0, sizeof(struct ib_sge));
- sge.addr = isert_conn->login_req_dma;
+ sge.addr = isert_conn->login_desc->dma_addr +
+ isert_get_hdr_offset(isert_conn->login_desc);
sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
- isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+ isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
+ rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
@@ -949,7 +949,7 @@ post_send:
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
- struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
+ struct iser_rx_desc *rx_desc = isert_conn->login_desc;
int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
@@ -961,7 +961,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
if (login->first_request) {
struct iscsi_login_req *login_req =
- (struct iscsi_login_req *)&rx_desc->iscsi_header;
+ (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
@@ -980,13 +980,13 @@ isert_rx_login_req(struct isert_conn *isert_conn)
login->tsih = be16_to_cpu(login_req->tsih);
}
- memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
+ memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
isert_dbg("Using login payload size: %d, rx_buflen: %d "
"MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
MAX_KEY_VALUE_PAIRS);
- memcpy(login->req_buf, &rx_desc->data[0], size);
+ memcpy(login->req_buf, isert_get_data(rx_desc), size);
if (login->first_request) {
complete(&isert_conn->login_comp);
@@ -1051,14 +1051,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
if (imm_data_len != data_len) {
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
- &rx_desc->data[0], imm_data_len);
+ isert_get_data(rx_desc), imm_data_len);
isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
sg_nents, imm_data_len);
} else {
sg_init_table(&isert_cmd->sg, 1);
cmd->se_cmd.t_data_sg = &isert_cmd->sg;
cmd->se_cmd.t_data_nents = 1;
- sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+ sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
+ imm_data_len);
isert_dbg("Transfer Immediate imm_data_len: %d\n",
imm_data_len);
}
@@ -1127,9 +1128,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
}
isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
"sg_nents: %u from %p %u\n", sg_start, sg_off,
- sg_nents, &rx_desc->data[0], unsol_data_len);
+ sg_nents, isert_get_data(rx_desc), unsol_data_len);
- sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
+ sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
unsol_data_len);
rc = iscsit_check_dataout_payload(cmd, hdr, false);
@@ -1188,7 +1189,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
}
cmd->text_in_ptr = text_in;
- memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
+ memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
return iscsit_process_text_cmd(conn, cmd, hdr);
}
@@ -1198,7 +1199,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
uint32_t read_stag, uint64_t read_va,
uint32_t write_stag, uint64_t write_va)
{
- struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
@@ -1296,8 +1297,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
- struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
- struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+ struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
@@ -1311,7 +1312,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rx_desc->in_use = true;
ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
@@ -1346,7 +1347,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
read_stag, read_va, write_stag, write_va);
ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
@@ -1360,8 +1361,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
@@ -1376,8 +1377,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
complete(&isert_conn->login_req_comp);
mutex_unlock(&isert_conn->mutex);
- ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c55f7d9bfced..7fee4a65e181 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -59,9 +59,11 @@
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
-#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
- (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
- sizeof(struct ib_cqe) + sizeof(bool)))
+/*
+ * RX size is the default of 8k plus headers, but the data needs to
+ * align to a 512-byte boundary, so use an extra 1024 bytes to leave
+ * room for the alignment.
+ */
+#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
/* Maximum support is 16MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE 4096
@@ -81,21 +83,41 @@ enum iser_conn_state {
};
struct iser_rx_desc {
- struct iser_ctrl iser_header;
- struct iscsi_hdr iscsi_header;
- char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
+ char buf[ISER_RX_SIZE];
u64 dma_addr;
struct ib_sge rx_sg;
struct ib_cqe rx_cqe;
bool in_use;
- char pad[ISER_RX_PAD_SIZE];
-} __packed;
+};
static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
{
return container_of(cqe, struct iser_rx_desc, rx_cqe);
}
+static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
+{
+ return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
+}
+
+static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) - (void *)desc->buf;
+}
+
+static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
+}
+
+static void *isert_get_data(struct iser_rx_desc *desc)
+{
+ void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
+
+ WARN_ON((uintptr_t)data & 511);
+ return data;
+}
+
struct iser_tx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
@@ -142,9 +164,8 @@ struct isert_conn {
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
- struct iser_rx_desc *login_req_buf;
+ struct iser_rx_desc *login_desc;
char *login_rsp_buf;
- u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
struct iser_rx_desc *rx_descs;
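
The new header accessors hide a small alignment computation that is worth unpacking. isert_get_iser_hdr() places the headers so that the payload following them is 512-byte aligned, and isert_get_data() then lands exactly on that boundary by construction:

    /*
     * H = ISER_HEADERS_LEN, buf assumed 512-aligned for the example:
     *
     *   buf             iser+iscsi hdrs         data
     *   |-- pad < 512 --|<------ H ------>|-- 8k payload ... --|
     *                                     ^ 512-byte boundary
     *
     *   isert_get_iser_hdr() = PTR_ALIGN(buf + H, 512) - H
     *   e.g. buf = 0x1000, 0 < H <= 512:
     *        PTR_ALIGN(0x1000 + H, 512) = 0x1200
     *        headers start at 0x1200 - H, data at exactly 0x1200
     *
     *   isert_get_data() = iser_hdr + H = PTR_ALIGN(buf + H, 512),
     *   which is why WARN_ON((uintptr_t)data & 511) can never fire.
     */

The extra 1024 bytes in ISER_RX_SIZE absorb both the worst-case alignment pad (just under 512 bytes) and the headers themselves, so the full 8k receive segment still fits.
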
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 3d7877534bcc..cf6a2be61695 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -152,13 +152,6 @@ static struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
-static void rtrs_srv_dev_release(struct device *dev)
-{
- struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
-
- kfree(srv);
-}
-
static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
{
struct rtrs_srv *srv = sess->srv;
@@ -172,7 +165,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
goto unlock;
}
srv->dev.class = rtrs_dev_class;
- srv->dev.release = rtrs_srv_dev_release;
err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
if (err)
goto unlock;
@@ -182,16 +174,16 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
* sysfs files are created
*/
dev_set_uevent_suppress(&srv->dev, true);
- err = device_register(&srv->dev);
+ err = device_add(&srv->dev);
if (err) {
- pr_err("device_register(): %d\n", err);
+ pr_err("device_add(): %d\n", err);
goto put;
}
srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
if (!srv->kobj_paths) {
err = -ENOMEM;
pr_err("kobject_create_and_add(): %d\n", err);
- device_unregister(&srv->dev);
+ device_del(&srv->dev);
goto unlock;
}
dev_set_uevent_suppress(&srv->dev, false);
@@ -216,7 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
kobject_del(srv->kobj_paths);
kobject_put(srv->kobj_paths);
mutex_unlock(&srv->paths_mutex);
- device_unregister(&srv->dev);
+ device_del(&srv->dev);
} else {
mutex_unlock(&srv->paths_mutex);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index a219bd1bdbc2..28f6414dfa3d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -1319,6 +1319,13 @@ static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
return sess->cur_cq_vector;
}
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+ kfree(srv);
+}
+
static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
const uuid_t *paths_uuid)
{
@@ -1336,6 +1343,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
uuid_copy(&srv->paths_uuid, paths_uuid);
srv->queue_depth = sess_queue_depth;
srv->ctx = ctx;
+ device_initialize(&srv->dev);
+ srv->dev.release = rtrs_srv_dev_release;
srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
GFP_KERNEL);
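Moving the release callback here and splitting device_register() into device_initialize() plus device_add() follows the standard device-model lifetime pattern: the refcount and release hook exist from allocation, sysfs add/del can happen independently, and only the final put_device() frees the object. A hedged outline of the pattern (not verbatim driver code):

	device_initialize(&srv->dev);			/* refcount = 1 */
	srv->dev.release = rtrs_srv_dev_release;	/* kfree(srv) at the end */

	err = device_add(&srv->dev);	/* make it visible in sysfs */
	...
	device_del(&srv->dev);		/* hide it again; srv stays alive */

	put_device(&srv->dev);		/* last ref gone: release() frees srv */

With device_unregister() (device_del() plus put_device() in one call), sysfs teardown and the kfree() were tied together, which is what the two patches above untangle.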
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 3eefee2ee2a1..854d5e758724 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -17,10 +17,12 @@
#include "trackpoint.h"
static const char * const trackpoint_variants[] = {
- [TP_VARIANT_IBM] = "IBM",
- [TP_VARIANT_ALPS] = "ALPS",
- [TP_VARIANT_ELAN] = "Elan",
- [TP_VARIANT_NXP] = "NXP",
+ [TP_VARIANT_IBM] = "IBM",
+ [TP_VARIANT_ALPS] = "ALPS",
+ [TP_VARIANT_ELAN] = "Elan",
+ [TP_VARIANT_NXP] = "NXP",
+ [TP_VARIANT_JYT_SYNAPTICS] = "JYT_Synaptics",
+ [TP_VARIANT_SYNAPTICS] = "Synaptics",
};
/*
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5cb93ed26085..eb5412904fe0 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -24,10 +24,12 @@
* 0x01 was the original IBM trackpoint, others implement very limited
* subset of trackpoint features.
*/
-#define TP_VARIANT_IBM 0x01
-#define TP_VARIANT_ALPS 0x02
-#define TP_VARIANT_ELAN 0x03
-#define TP_VARIANT_NXP 0x04
+#define TP_VARIANT_IBM 0x01
+#define TP_VARIANT_ALPS 0x02
+#define TP_VARIANT_ELAN 0x03
+#define TP_VARIANT_NXP 0x04
+#define TP_VARIANT_JYT_SYNAPTICS 0x05
+#define TP_VARIANT_SYNAPTICS 0x06
/*
* Commands
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7d7f73702726..37fb9aa88f9c 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -548,6 +548,14 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
},
},
+ {
+ /* Entroware Proteus */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ },
+ },
{ }
};
@@ -676,6 +684,14 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
},
},
+ {
+ /* Entroware Proteus */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Entroware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Proteus"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "EL07R4"),
+ },
+ },
{ }
};
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index befd111049c0..cf07491b7415 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -55,12 +55,18 @@ static int icc_summary_show(struct seq_file *s, void *data)
icc_summary_show_one(s, n);
hlist_for_each_entry(r, &n->req_list, req_node) {
+ u32 avg_bw = 0, peak_bw = 0;
+
if (!r->dev)
continue;
+ if (r->enabled) {
+ avg_bw = r->avg_bw;
+ peak_bw = r->peak_bw;
+ }
+
seq_printf(s, " %-27s %12u %12u %12u\n",
- dev_name(r->dev), r->tag, r->avg_bw,
- r->peak_bw);
+ dev_name(r->dev), r->tag, avg_bw, peak_bw);
}
}
}
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index a3d2ef1d9903..609db9c95fd7 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -52,8 +52,20 @@ static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
return 1;
}
+static u64 bcm_div(u64 num, u32 base)
+{
+ /* Ensure that small votes aren't lost. */
+ if (num && num < base)
+ return 1;
+
+ do_div(num, base);
+
+ return num;
+}
+
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
+ struct qcom_icc_node *node;
size_t i, bucket;
u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
@@ -61,22 +73,21 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
for (i = 0; i < bcm->num_nodes; i++) {
- temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ node = bcm->nodes[i];
+ temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
+ node->buswidth * node->channels);
agg_avg[bucket] = max(agg_avg[bucket], temp);
- temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth);
+ temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
+ node->buswidth);
agg_peak[bucket] = max(agg_peak[bucket], temp);
}
temp = agg_avg[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_x[bucket] = temp;
+ bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);
temp = agg_peak[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_y[bucket] = temp;
+ bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
}
if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
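bcm_div() exists because a plain do_div() rounds toward zero: a small but non-zero bandwidth vote would be divided down to 0 and silently dropped, so the helper clamps it to 1 instead. A stand-alone sketch of the rounding behaviour (ordinary 64-bit division in place of do_div()):

#include <stdint.h>
#include <stdio.h>

static uint64_t bcm_div(uint64_t num, uint32_t base)
{
	if (num && num < base)	/* don't round a small vote down to zero */
		return 1;
	return num / base;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)bcm_div(100, 256));	/* 1, not 0 */
	printf("%llu\n", (unsigned long long)bcm_div(0, 256));		/* 0 stays 0 */
	printf("%llu\n", (unsigned long long)bcm_div(512, 256));	/* 2 */
	return 0;
}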
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 1f061d91e0b8..626b97d0dd21 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,7 +10,7 @@ config AMD_IOMMU
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
- depends on X86_64 && PCI && ACPI
+ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c652f16eb702..445a08d23fed 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1511,7 +1511,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+
+ /*
+ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * GAM also requires GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
@@ -1520,8 +1527,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+
+ /*
+ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * XT, GAM also require GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling them.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ }
+
/*
* Note: Since iommu_update_intcapxt() leverages
* the IOMMU MMIO access to MSI capability block registers
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ba9f3dbc5b94..10e4200d3552 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2659,7 +2659,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
if (!dev_data)
return 0;
- if (dev_data->iommu_v2)
+ /*
+ * Do not identity map IOMMUv2 capable devices when memory encryption is
+ * active, because some of those devices (AMD GPUs) don't have the
+ * encryption bit in their DMA-mask and require remapping.
+ */
+ if (!mem_encrypt_active() && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
@@ -3292,6 +3297,7 @@ out:
static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
struct amd_ir_data *data)
{
+ bool ret;
struct irq_remap_table *table;
struct amd_iommu *iommu;
unsigned long flags;
@@ -3309,10 +3315,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
entry = (struct irte_ga *)table->table;
entry = &entry[index];
- entry->lo.fields_remap.valid = 0;
- entry->hi.val = irte->hi.val;
- entry->lo.val = irte->lo.val;
- entry->lo.fields_remap.valid = 1;
+
+ ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
+ entry->lo.val, entry->hi.val,
+ irte->lo.val, irte->hi.val);
+ /*
+ * We use cmpxchg16b to atomically update the 128-bit IRTE, and
+ * nothing else (hardware or another processor) may modify the
+ * entry behind our back, so the compare should always match and
+ * the exchange succeed.
+ */
+ WARN_ON(!ret);
+
if (data)
data->ref = entry;
@@ -3826,14 +3840,18 @@ int amd_iommu_activate_guest_mode(void *data)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+ u64 valid;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!entry || entry->lo.fields_vapic.guest_mode)
return 0;
+ valid = entry->lo.fields_vapic.valid;
+
entry->lo.val = 0;
entry->hi.val = 0;
+ entry->lo.fields_vapic.valid = valid;
entry->lo.fields_vapic.guest_mode = 1;
entry->lo.fields_vapic.ga_log_intr = 1;
entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
@@ -3850,14 +3868,18 @@ int amd_iommu_deactivate_guest_mode(void *data)
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
struct irq_cfg *cfg = ir_data->cfg;
+ u64 valid;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!entry || !entry->lo.fields_vapic.guest_mode)
return 0;
+ valid = entry->lo.fields_remap.valid;
+
entry->lo.val = 0;
entry->hi.val = 0;
+ entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->irq_dest_mode;
entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
entry->hi.fields.vector = cfg->vector;
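All three hunks above chase the same invariant: a 128-bit IRTE must never be observed half-written. cmpxchg_double() compiles to a single cmpxchg16b (hence the new X86_FEATURE_CX16 and HAVE_CMPXCHG_DOUBLE requirements), and the activate/deactivate paths now carry the valid bit across the rewrite instead of briefly clearing the entry. The update step, as a short sketch:

	bool ok;

	/* both 64-bit halves of the IRTE move in one 16-byte exchange */
	ok = cmpxchg_double(&entry->lo.val, &entry->hi.val,
			    entry->lo.val, entry->hi.val,	/* expected old */
			    irte->lo.val, irte->hi.val);	/* new value */
	WARN_ON(!ok);	/* nothing else may race us on this entry */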
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index c259108ab6dd..0d175aed1d92 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -737,6 +737,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
might_sleep();
+ /*
+ * When memory encryption is active the device is likely not in a
+ * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+ */
+ if (mem_encrypt_active())
+ return -ENODEV;
+
if (!amd_iommu_v2_supported())
return -ENODEV;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f8177c59d229..87b17bac04c2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level)
return (level - 1) * LEVEL_STRIDE;
}
-static inline int pfn_level_offset(unsigned long pfn, int level)
+static inline int pfn_level_offset(u64 pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline unsigned long level_mask(int level)
+static inline u64 level_mask(int level)
{
- return -1UL << level_to_offset_bits(level);
+ return -1ULL << level_to_offset_bits(level);
}
-static inline unsigned long level_size(int level)
+static inline u64 level_size(int level)
{
- return 1UL << level_to_offset_bits(level);
+ return 1ULL << level_to_offset_bits(level);
}
-static inline unsigned long align_to_level(unsigned long pfn, int level)
+static inline u64 align_to_level(u64 pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
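The unsigned long to u64 conversion matters on 32-bit kernels, where unsigned long is 32 bits: page frame numbers above 2^32 and masks built with -1UL truncate silently. A self-contained model of the fixed helpers (uint64_t standing in for u64):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LEVEL_STRIDE 9	/* each page-table level covers 9 PFN bits */

static unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static uint64_t level_mask(int level)
{
	return (uint64_t)-1 << level_to_offset_bits(level);
}

static uint64_t level_size(int level)
{
	return (uint64_t)1 << level_to_offset_bits(level);
}

int main(void)
{
	uint64_t pfn = 1ULL << 40;	/* only representable in 64-bit math */
	int level = 3;

	printf("mask 0x%" PRIx64 " size 0x%" PRIx64 " aligned 0x%" PRIx64 "\n",
	       level_mask(level), level_size(level),
	       (pfn + level_size(level) - 1) & level_mask(level));
	return 0;
}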
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -364,7 +364,6 @@ static int iommu_skip_te_disable;
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
struct device_domain_info *get_domain_info(struct device *dev)
{
@@ -374,8 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
return NULL;
info = dev_iommu_priv_get(dev);
- if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
- info == DEFER_DEVICE_DOMAIN_INFO))
+ if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
return info;
@@ -742,11 +740,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
return &context[devfn];
}
-static int iommu_dummy(struct device *dev)
-{
- return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
static bool attach_deferred(struct device *dev)
{
return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
@@ -779,6 +772,53 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
return false;
}
+static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+ struct dmar_drhd_unit *drhd;
+ u32 vtbar;
+ int rc;
+
+ /* We know that this device on this chipset has its own IOMMU.
+ * If we find it under a different IOMMU, then the BIOS is lying
+ * to us. Hope that the IOMMU for this device is actually
+ * disabled, and it needs no translation...
+ */
+ rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+ if (rc) {
+ /* "can't" happen */
+ dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+ return false;
+ }
+ vtbar &= 0xffff0000;
+
+ /* we know that this iommu should be at offset 0xa000 from vtbar */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+ pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return true;
+ }
+
+ return false;
+}
+
+static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
+{
+ if (!iommu || iommu->drhd->ignored)
+ return true;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
+ quirk_ioat_snb_local_iommu(pdev))
+ return true;
+ }
+
+ return false;
+}
+
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -788,7 +828,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
u16 segment = 0;
int i;
- if (!dev || iommu_dummy(dev))
+ if (!dev)
return NULL;
if (dev_is_pci(dev)) {
@@ -805,7 +845,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
if (pdev && segment != drhd->segment)
continue;
@@ -841,6 +881,9 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
}
iommu = NULL;
out:
+ if (iommu_is_dummy(iommu, dev))
+ iommu = NULL;
+
rcu_read_unlock();
return iommu;
@@ -2447,7 +2490,7 @@ struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
- if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
+ if (unlikely(attach_deferred(dev)))
return NULL;
/* No lock here, assumes no domain exit in normal case */
@@ -3989,35 +4032,6 @@ static void __init iommu_exit_mempool(void)
iova_cache_put();
}
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
-
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
-
- /* we know that the this iommu should be at offset 0xa000 from vtbar */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
- pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
- add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -4049,12 +4063,8 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx) {
+ if (!dmar_map_gfx)
drhd->ignored = 1;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
}
}
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 23583b0e66a5..8f4ce72570ce 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -508,12 +508,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+ /* Block compatibility-format MSIs */
+ if (sts & DMA_GSTS_CFIS) {
+ iommu->gcmd &= ~DMA_GCMD_CFI;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, !(sts & DMA_GSTS_CFIS), sts);
+ }
+
/*
* With CFI clear in the Global Command register, we should be
* protected from dangerous (i.e. compatibility) interrupts
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5edc3079e7c1..229f461e7def 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -860,10 +860,14 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- int blocksize = *(int *) data;
+ int blocksize = *(int *) data, id;
+ bool rc;
- return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
- start, len);
+ id = dax_read_lock();
+ rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+ dax_read_unlock(id);
+
+ return rc;
}
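The open-coded generic_fsdax_supported() call is replaced with dax_supported(), which dispatches through the dax_device's own ->dax_supported() operation and must therefore run under dax_read_lock(): the lock pins the dax_device against teardown, and the returned id pairs the unlock with the lock. The pattern in isolation:

	int id;
	bool rc;

	id = dax_read_lock();		/* pins dev->dax_dev */
	rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
	dax_read_unlock(id);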
/* Check devices support synchronous DAX */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fb0255d25e4b..4a40df8af7d3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1136,15 +1136,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
{
struct mapped_device *md = dax_get_private(dax_dev);
struct dm_table *map;
+ bool ret = false;
int srcu_idx;
- bool ret;
map = dm_get_live_table(md, &srcu_idx);
if (!map)
- return false;
+ goto out;
ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
+out:
dm_put_live_table(md, srcu_idx);
return ret;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2591c21b2b5d..26a23abc053d 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -692,10 +692,6 @@ static int at24_probe(struct i2c_client *client)
nvmem_config.word_size = 1;
nvmem_config.size = byte_len;
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem))
- return PTR_ERR(at24->nvmem);
-
i2c_set_clientdata(client, at24);
err = regulator_enable(at24->vcc_reg);
@@ -708,6 +704,13 @@ static int at24_probe(struct i2c_client *client)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ regulator_disable(at24->vcc_reg);
+ return PTR_ERR(at24->nvmem);
+ }
+
/*
* Perform a one-byte test read to verify that the
* chip is functional.
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 37701e4f9d5a..aa77771635d3 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -982,7 +982,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
return 0;
sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf) + 1);
return rc;
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 13ef6b2887fd..3510c42d24e3 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -378,15 +378,15 @@ enum axi_id {
((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
(((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
-#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 0)
-#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 0)
-#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 1)
+#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 1)
+#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 1)
#define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \
- RAZWI_INITIATOR_ID_X_Y(8, 0)
+ RAZWI_INITIATOR_ID_X_Y(8, 1)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0 RAZWI_INITIATOR_ID_X_Y(0, 1)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0 RAZWI_INITIATOR_ID_X_Y(9, 1)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1 RAZWI_INITIATOR_ID_X_Y(0, 2)
@@ -395,14 +395,14 @@ enum axi_id {
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0 RAZWI_INITIATOR_ID_X_Y(9, 3)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1 RAZWI_INITIATOR_ID_X_Y(0, 4)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1 RAZWI_INITIATOR_ID_X_Y(9, 4)
-#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 5)
-#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 5)
-#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 5)
-#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 6)
+#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 6)
+#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6)
+#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6)
#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 93d346c01110..4c229dd2b6e5 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
struct sg_table sgtable;
unsigned int nents, left_size, i;
unsigned int seg_size = card->host->max_seg_size;
+ int err;
WARN_ON(blksz == 0);
@@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
mmc_set_data_timeout(&data, card);
- mmc_wait_for_req(card->host, &mrq);
+ mmc_pre_req(card->host, &mrq);
- if (nents > 1)
- sg_free_table(&sgtable);
+ mmc_wait_for_req(card->host, &mrq);
if (cmd.error)
- return cmd.error;
- if (data.error)
- return data.error;
-
- if (mmc_host_is_spi(card->host)) {
+ err = cmd.error;
+ else if (data.error)
+ err = data.error;
+ else if (mmc_host_is_spi(card->host))
/* host driver already reported errors */
- } else {
- if (cmd.resp[0] & R5_ERROR)
- return -EIO;
- if (cmd.resp[0] & R5_FUNCTION_NUMBER)
- return -EINVAL;
- if (cmd.resp[0] & R5_OUT_OF_RANGE)
- return -ERANGE;
- }
+ err = 0;
+ else if (cmd.resp[0] & R5_ERROR)
+ err = -EIO;
+ else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+ err = -EINVAL;
+ else if (cmd.resp[0] & R5_OUT_OF_RANGE)
+ err = -ERANGE;
+ else
+ err = 0;
- return 0;
+ mmc_post_req(card->host, &mrq, err);
+
+ if (nents > 1)
+ sg_free_table(&sgtable);
+
+ return err;
}
int sdio_reset(struct mmc_host *host)
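The restructuring above is driven by one rule of the mmc_pre_req()/mmc_post_req() API: once mmc_pre_req() has been called (letting the host pre-map the scatterlist, possibly while a previous request still runs), every exit path must reach mmc_post_req(), so the early returns are folded into a single err that is only returned after cleanup. The required shape, roughly:

	mmc_pre_req(host, &mrq);	/* host may map the sg list here */
	mmc_wait_for_req(host, &mrq);	/* issue and wait */

	err = ...;			/* collect cmd/data/R5 status, no returns */

	mmc_post_req(host, &mrq, err);	/* always unmaps, even on error */
	sg_free_table(...);		/* only after post_req */
	return err;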
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9c89a5b780e8..9a34c827c96e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -602,7 +602,7 @@ config MMC_GOLDFISH
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
- depends on SPI_MASTER && HAS_DMA
+ depends on SPI_MASTER
select CRC7
select CRC_ITU_T
help
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 39bb1e30c2d7..5055a7eb134a 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1278,6 +1278,52 @@ mmc_spi_detect_irq(int irq, void *mmc)
return IRQ_HANDLED;
}
+#ifdef CONFIG_HAS_DMA
+static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+{
+ struct spi_device *spi = host->spi;
+ struct device *dev;
+
+ if (!spi->master->dev.parent->dma_mask)
+ return 0;
+
+ dev = spi->master->dev.parent;
+
+ host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, host->ones_dma))
+ return -ENOMEM;
+
+ host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, host->data_dma)) {
+ dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+
+ host->dma_dev = dev;
+ return 0;
+}
+
+static void mmc_spi_dma_free(struct mmc_spi_host *host)
+{
+ if (!host->dma_dev)
+ return;
+
+ dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+}
+#else
+static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+#endif
+
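Fencing the mapping helpers with CONFIG_HAS_DMA is what lets MMC_SPI drop its HAS_DMA dependency in Kconfig: on !HAS_DMA configs the stubs compile away and host->dma_dev stays NULL, so the driver falls back to PIO. Note the stub must still carry the int return type (fixed above); "static inline" with no type is not valid C. The generic shape of the pattern, with illustrative foo_* names:

#ifdef CONFIG_HAS_DMA
static int foo_dma_alloc(struct foo_host *host)
{
	/* real dma_map_single() calls live here */
	return 0;
}

static void foo_dma_free(struct foo_host *host)
{
	/* matching dma_unmap_single() calls */
}
#else
/* no-op fallbacks so callers need no #ifdefs of their own */
static inline int foo_dma_alloc(struct foo_host *host) { return 0; }
static inline void foo_dma_free(struct foo_host *host) {}
#endif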
static int mmc_spi_probe(struct spi_device *spi)
{
void *ones;
@@ -1374,23 +1420,9 @@ static int mmc_spi_probe(struct spi_device *spi)
if (!host->data)
goto fail_nobuf1;
- if (spi->master->dev.parent->dma_mask) {
- struct device *dev = spi->master->dev.parent;
-
- host->dma_dev = dev;
- host->ones_dma = dma_map_single(dev, ones,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, host->ones_dma))
- goto fail_ones_dma;
- host->data_dma = dma_map_single(dev, host->data,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, host->data_dma))
- goto fail_data_dma;
-
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
- }
+ status = mmc_spi_dma_alloc(host);
+ if (status)
+ goto fail_dma;
/* setup message for status/busy readback */
spi_message_init(&host->readback);
@@ -1458,20 +1490,12 @@ static int mmc_spi_probe(struct spi_device *spi)
fail_add_host:
mmc_remove_host(mmc);
fail_glue_init:
- if (host->dma_dev)
- dma_unmap_single(host->dma_dev, host->data_dma,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
-fail_data_dma:
- if (host->dma_dev)
- dma_unmap_single(host->dma_dev, host->ones_dma,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-fail_ones_dma:
+ mmc_spi_dma_free(host);
+fail_dma:
kfree(host->data);
-
fail_nobuf1:
mmc_free_host(mmc);
mmc_spi_put_pdata(spi);
-
nomem:
kfree(ones);
return status;
@@ -1489,13 +1513,7 @@ static int mmc_spi_remove(struct spi_device *spi)
mmc_remove_host(mmc);
- if (host->dma_dev) {
- dma_unmap_single(host->dma_dev, host->ones_dma,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
- dma_unmap_single(host->dma_dev, host->data_dma,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
- }
-
+ mmc_spi_dma_free(host);
kfree(host->data);
kfree(host->ones);
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 962f074ca174..284cba11e279 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -551,12 +551,18 @@ static int amd_select_drive_strength(struct mmc_card *card,
return MMC_SET_DRIVER_TYPE_A;
}
-static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
{
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
/* AMD Platform requires dll setting */
sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
usleep_range(10, 20);
- sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+ if (enable)
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+
+ amd_host->dll_enabled = enable;
}
/*
@@ -596,10 +602,8 @@ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* DLL is only required for HS400 */
if (host->timing == MMC_TIMING_MMC_HS400 &&
- !amd_host->dll_enabled) {
- sdhci_acpi_amd_hs400_dll(host);
- amd_host->dll_enabled = true;
- }
+ !amd_host->dll_enabled)
+ sdhci_acpi_amd_hs400_dll(host, true);
}
}
@@ -620,10 +624,23 @@ static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
return err;
}
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+ if (mask & SDHCI_RESET_ALL) {
+ amd_host->tuned_clock = false;
+ sdhci_acpi_amd_hs400_dll(host, false);
+ }
+
+ sdhci_reset(host, mask);
+}
+
static const struct sdhci_ops sdhci_acpi_ops_amd = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 5a33389037cd..729868abd2db 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1166,7 +1166,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
- int tuning_seq_cnt = 3;
+ int tuning_seq_cnt = 10;
u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
int rc;
struct mmc_ios ios = host->mmc->ios;
@@ -1222,6 +1222,22 @@ retry:
} while (++phase < ARRAY_SIZE(tuned_phases));
if (tuned_phase_cnt) {
+ if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
+ /*
+ * All phases valid is _almost_ as bad as no phases
+ * valid. Probably not all of the phases are really reliable,
+ * we just failed to detect where the unreliable window is.
+ * That means we'll essentially be guessing and hoping
+ * we get a good phase. Better to try a few times.
+ */
+ dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
+ mmc_hostname(mmc));
+ if (--tuning_seq_cnt) {
+ tuned_phase_cnt = 0;
+ goto retry;
+ }
+ }
+
rc = msm_find_most_appropriate_phase(host, tuned_phases,
tuned_phase_cnt);
if (rc < 0)
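The retry treats "every phase passed" as a non-result: msm_find_most_appropriate_phase() centres the chosen phase inside the passing window, and with no failing phase there is no window edge to centre on, so the sweep is repeated (now up to 10 times) in the hope that a later pass exposes the bad region. The decision, reduced to a sketch with hypothetical helpers:

	int attempt, passed;

	for (attempt = 0; attempt < 10; attempt++) {
		passed = run_tuning_sweep(phases);	/* hypothetical */
		if (passed > 0 && passed < 16)
			return pick_centre_of_window(phases);	/* hypothetical */
		/* 0 or 16 passing phases: no usable window edge, retry */
	}
	return -EIO;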
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 7c73d243dc6c..45881b309956 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -81,6 +81,7 @@ struct sdhci_esdhc {
bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
bool quirk_delay_before_data_reset;
+ bool quirk_trans_complete_erratum;
bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
@@ -1177,10 +1178,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host,
static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 command;
- if (of_find_compatible_node(NULL, NULL,
- "fsl,p2020-esdhc")) {
+ if (esdhc->quirk_trans_complete_erratum) {
command = SDHCI_GET_CMD(sdhci_readw(host,
SDHCI_COMMAND));
if (command == MMC_WRITE_MULTIPLE_BLOCK &&
@@ -1334,8 +1336,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
- if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
esdhc->quirk_delay_before_data_reset = true;
+ esdhc->quirk_trans_complete_erratum = true;
+ }
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 65eff4ce6ab1..0369d98b2d12 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1907,16 +1907,15 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
}
/**
- * spi_nor_sr1_bit6_quad_enable() - Set/Unset the Quad Enable BIT(6) in the
- * Status Register 1.
+ * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
+ * Register 1.
* @nor: pointer to a 'struct spi_nor'
- * @enable: true to enable Quad mode, false to disable Quad mode.
*
* Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
int ret;
@@ -1924,56 +1923,45 @@ int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable)
if (ret)
return ret;
- if ((enable && (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)) ||
- (!enable && !(nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)))
+ if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
return 0;
- if (enable)
- nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
- else
- nor->bouncebuf[0] &= ~SR1_QUAD_EN_BIT6;
+ nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}
/**
- * spi_nor_sr2_bit1_quad_enable() - set/unset the Quad Enable BIT(1) in the
- * Status Register 2.
+ * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
+ * Register 2.
* @nor: pointer to a 'struct spi_nor'.
- * @enable: true to enable Quad mode, false to disable Quad mode.
*
* Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
*
* Return: 0 on success, -errno otherwise.
*/
-int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
int ret;
if (nor->flags & SNOR_F_NO_READ_CR)
- return spi_nor_write_16bit_cr_and_check(nor,
- enable ? SR2_QUAD_EN_BIT1 : 0);
+ return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
ret = spi_nor_read_cr(nor, nor->bouncebuf);
if (ret)
return ret;
- if ((enable && (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)) ||
- (!enable && !(nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)))
+ if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
return 0;
- if (enable)
- nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
- else
- nor->bouncebuf[0] &= ~SR2_QUAD_EN_BIT1;
+ nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}
/**
- * spi_nor_sr2_bit7_quad_enable() - set/unset QE bit in Status Register 2.
+ * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
* @nor: pointer to a 'struct spi_nor'
- * @enable: true to enable Quad mode, false to disable Quad mode.
*
* Set the Quad Enable (QE) bit in the Status Register 2.
*
@@ -1983,7 +1971,7 @@ int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable)
*
* Return: 0 on success, -errno otherwise.
*/
-int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
u8 *sr2 = nor->bouncebuf;
int ret;
@@ -1993,15 +1981,11 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable)
ret = spi_nor_read_sr2(nor, sr2);
if (ret)
return ret;
- if ((enable && (*sr2 & SR2_QUAD_EN_BIT7)) ||
- (!enable && !(*sr2 & SR2_QUAD_EN_BIT7)))
+ if (*sr2 & SR2_QUAD_EN_BIT7)
return 0;
/* Update the Quad Enable bit. */
- if (enable)
- *sr2 |= SR2_QUAD_EN_BIT7;
- else
- *sr2 &= ~SR2_QUAD_EN_BIT7;
+ *sr2 |= SR2_QUAD_EN_BIT7;
ret = spi_nor_write_sr2(nor, sr2);
if (ret)
@@ -2914,13 +2898,12 @@ static int spi_nor_init_params(struct spi_nor *nor)
}
/**
- * spi_nor_quad_enable() - enable/disable Quad I/O if needed.
+ * spi_nor_quad_enable() - enable Quad I/O if needed.
* @nor: pointer to a 'struct spi_nor'
- * @enable: true to enable Quad mode. false to disable Quad mode.
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
+static int spi_nor_quad_enable(struct spi_nor *nor)
{
if (!nor->params->quad_enable)
return 0;
@@ -2929,7 +2912,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor, bool enable)
spi_nor_get_protocol_width(nor->write_proto) == 4))
return 0;
- return nor->params->quad_enable(nor, enable);
+ return nor->params->quad_enable(nor);
}
/**
@@ -2953,7 +2936,7 @@ static int spi_nor_init(struct spi_nor *nor)
{
int err;
- err = spi_nor_quad_enable(nor, true);
+ err = spi_nor_quad_enable(nor);
if (err) {
dev_dbg(nor->dev, "quad mode not supported\n");
return err;
@@ -3000,8 +2983,6 @@ void spi_nor_restore(struct spi_nor *nor)
if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
nor->flags & SNOR_F_BROKEN_RESET)
nor->params->set_4byte_addr_mode(nor, false);
-
- spi_nor_quad_enable(nor, false);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 95aa32f3ceb1..6f2f6b27173f 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -198,7 +198,7 @@ struct spi_nor_locking_ops {
* higher index in the array, the higher priority.
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
- * @quad_enable: enables/disables SPI NOR Quad mode.
+ * @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
* @convert_addr: converts an absolute address into something the flash
* will understand. Particularly useful when pagesize is
@@ -219,7 +219,7 @@ struct spi_nor_flash_parameter {
struct spi_nor_erase_map erase_map;
- int (*quad_enable)(struct spi_nor *nor, bool enable);
+ int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps);
@@ -406,9 +406,9 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear);
int spi_nor_wait_till_ready(struct spi_nor *nor);
int spi_nor_lock_and_prep(struct spi_nor *nor);
void spi_nor_unlock_and_unprep(struct spi_nor *nor);
-int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable);
-int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable);
-int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable);
+int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor);
+int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor);
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr);
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1c6c27f35ac4..ba8e70a8e312 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -70,6 +70,8 @@ static const char *version =
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <net/Space.h>
+
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 75a5a9b87c5a..c6f73aa3700c 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -229,6 +229,8 @@ static int dma;
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <net/Space.h>
+
#include <asm/dma.h>
#include <asm/io.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f07012a76c0c..424970939fd4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,8 +41,8 @@ config CAN_SLCAN
www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the SocketCAN SVN, see
- http://developer.berlios.de/projects/socketcan for details.
+ slcand) can be found in the can-utils at the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
The slcan driver supports up to 10 CAN netdevices by default which
can be changed by the 'maxdev=xx' module option. This driver can
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 38e9f80ed1ef..c14de95d2ca7 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -643,7 +643,7 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
- * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
* receive another message).
*
* lower mbxs upper
@@ -661,13 +661,13 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
- * mailbox but not reenable it.
+ * mailbox but not re-enable it.
*
- * With completion of the last of the lower mailboxes, we reenable the
+ * With completion of the last of the lower mailboxes, we re-enable the
* whole first group, but continue to look for filled mailboxes in the
* upper mailboxes. Imagine the second group like overflow mailboxes,
* which takes CAN messages if the lower group is full. While in the
- * upper group we reenable the mailbox right after reading it. Giving
+ * upper group we re-enable the mailbox right after reading it. Giving
* the chip more room to store messages.
*
* After finishing we look again in the lower group if we've still
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 8e9f5620c9a2..1ccdbe89585b 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -356,15 +356,6 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
}
}
-static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
- int iface)
-{
- int i;
-
- for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
- c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
-}
-
static int c_can_handle_lost_msg_obj(struct net_device *dev,
int iface, int objno, u32 ctrl)
{
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 7cdc232cbfea..07e2b8df5153 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -538,7 +538,7 @@ static int cc770_err(struct net_device *dev, u8 status)
priv->can.can_stats.error_warning++;
}
} else {
- /* Back to error avtive */
+ /* Back to error active */
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index 948541491ab5..0628fd9e1980 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -184,7 +184,7 @@ struct cc770_priv {
u8 control_normal_mode; /* Control register for normal mode */
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
- u8 bus_config; /* Bus conffiguration register */
+ u8 bus_config; /* Bus configuration register */
struct sk_buff *tx_skb;
};
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 68834a2853c9..3c40bba71d5b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -371,6 +371,28 @@ static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
}
}
+static const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+
+ return "<unknown>";
+}
+
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state)
{
@@ -382,7 +404,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
return;
}
- netdev_dbg(dev, "New error state: %d\n", new_state);
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
can_update_state_error_stats(dev, new_state);
priv->state = new_state;
@@ -434,8 +458,8 @@ static void can_flush_echo_skb(struct net_device *dev)
* of the device driver. The driver must protect access to
* priv->echo_skb, if necessary.
*/
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx)
{
struct can_priv *priv = netdev_priv(dev);
@@ -446,13 +470,13 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
(skb->protocol != htons(ETH_P_CAN) &&
skb->protocol != htons(ETH_P_CANFD))) {
kfree_skb(skb);
- return;
+ return 0;
}
if (!priv->echo_skb[idx]) {
skb = can_create_echo_skb(skb);
if (!skb)
- return;
+ return -ENOMEM;
/* make settings for echo to reduce code in irq context */
skb->pkt_type = PACKET_BROADCAST;
@@ -463,9 +487,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
priv->echo_skb[idx] = skb;
} else {
/* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
kfree_skb(skb);
+ return -EBUSY;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
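With can_put_echo_skb() now returning an error code, a driver's xmit path can react instead of silently losing the echo skb. A hedged example of how a caller might use it (the helper frees the skb itself on failure, so the frame is simply dropped):

	err = can_put_echo_skb(skb, dev, idx);
	if (err)
		return NETDEV_TX_OK;	/* skb already consumed by the helper */

	/* kick the hardware only after the echo skb is parked */
	priv->write(ctrl, &priv->tx_mb->can_ctrl);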
@@ -612,7 +639,11 @@ void can_bus_off(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- netdev_info(dev, "bus-off\n");
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
netif_carrier_off(dev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 94d10ec954a0..e86925134009 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -9,7 +9,7 @@
//
// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
-#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -21,12 +21,14 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#define DRV_NAME "flexcan"
@@ -52,6 +54,7 @@
#define FLEXCAN_MCR_IRMQ BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
+#define FLEXCAN_MCR_FDEN BIT(11)
/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f)
#define FLEXCAN_MCR_IDAM_A (0x0 << 8)
@@ -91,6 +94,7 @@
#define FLEXCAN_CTRL2_MRP BIT(18)
#define FLEXCAN_CTRL2_RRS BIT(17)
#define FLEXCAN_CTRL2_EACEN BIT(16)
+#define FLEXCAN_CTRL2_ISOCANFDEN BIT(12)
/* FLEXCAN memory error control register (MECR) bits */
#define FLEXCAN_MECR_ECRWRDIS BIT(31)
@@ -134,8 +138,35 @@
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
#define FLEXCAN_ESR_ALL_INT \
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
- FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT | \
- FLEXCAN_ESR_WAK_INT)
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+/* FLEXCAN Bit Timing register (CBT) bits */
+#define FLEXCAN_CBT_BTF BIT(31)
+#define FLEXCAN_CBT_EPRESDIV_MASK GENMASK(30, 21)
+#define FLEXCAN_CBT_ERJW_MASK GENMASK(20, 16)
+#define FLEXCAN_CBT_EPROPSEG_MASK GENMASK(15, 10)
+#define FLEXCAN_CBT_EPSEG1_MASK GENMASK(9, 5)
+#define FLEXCAN_CBT_EPSEG2_MASK GENMASK(4, 0)
+
+/* FLEXCAN FD control register (FDCTRL) bits */
+#define FLEXCAN_FDCTRL_FDRATE BIT(31)
+#define FLEXCAN_FDCTRL_MBDSR1 GENMASK(20, 19)
+#define FLEXCAN_FDCTRL_MBDSR0 GENMASK(17, 16)
+#define FLEXCAN_FDCTRL_MBDSR_8 0x0
+#define FLEXCAN_FDCTRL_MBDSR_12 0x1
+#define FLEXCAN_FDCTRL_MBDSR_32 0x2
+#define FLEXCAN_FDCTRL_MBDSR_64 0x3
+#define FLEXCAN_FDCTRL_TDCEN BIT(15)
+#define FLEXCAN_FDCTRL_TDCFAIL BIT(14)
+#define FLEXCAN_FDCTRL_TDCOFF GENMASK(12, 8)
+#define FLEXCAN_FDCTRL_TDCVAL GENMASK(5, 0)
+
+/* FLEXCAN FD Bit Timing register (FDCBT) bits */
+#define FLEXCAN_FDCBT_FPRESDIV_MASK GENMASK(29, 20)
+#define FLEXCAN_FDCBT_FRJW_MASK GENMASK(18, 16)
+#define FLEXCAN_FDCBT_FPROPSEG_MASK GENMASK(14, 10)
+#define FLEXCAN_FDCBT_FPSEG1_MASK GENMASK(7, 5)
+#define FLEXCAN_FDCBT_FPSEG2_MASK GENMASK(2, 0)
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
@@ -161,6 +192,9 @@
#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24)
#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24)
+#define FLEXCAN_MB_CNT_EDL BIT(31)
+#define FLEXCAN_MB_CNT_BRS BIT(30)
+#define FLEXCAN_MB_CNT_ESI BIT(29)
#define FLEXCAN_MB_CNT_SRR BIT(22)
#define FLEXCAN_MB_CNT_IDE BIT(21)
#define FLEXCAN_MB_CNT_RTR BIT(20)
@@ -172,26 +206,39 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
- * Filter? connected? Passive detection ception in MB
- * MX25 FlexCAN2 03.00.00.00 no no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no
- * MX35 FlexCAN2 03.00.00.00 no no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
- * VF610 FlexCAN3 ? no yes no yes yes?
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode
+ * Filter? connected? Passive detection ption in MB Supported?
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes
+ * VF610 FlexCAN3 ? no yes no yes yes? no
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
+ * LX2160A FlexCAN3 03.00.23.00 no yes no no yes yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
-#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
-#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
-#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
-#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
-#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8) /* Setup stop mode to support wakeup */
+
+/* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
+/* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
+/* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
+/* Disable non-correctable errors interrupt and freeze mode */
+#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
+/* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5)
+/* No interrupt for error passive */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
+/* default to BE register access */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
+/* Setup stop mode to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8)
+/* Support CAN-FD mode */
+#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -203,12 +250,12 @@ struct flexcan_mb {
/* Structure of the hardware registers */
struct flexcan_regs {
u32 mcr; /* 0x00 */
- u32 ctrl; /* 0x04 */
+ u32 ctrl; /* 0x04 - Not affected by Soft Reset */
u32 timer; /* 0x08 */
- u32 _reserved1; /* 0x0c */
- u32 rxgmask; /* 0x10 */
- u32 rx14mask; /* 0x14 */
- u32 rx15mask; /* 0x18 */
+ u32 tcr; /* 0x0c */
+ u32 rxgmask; /* 0x10 - Not affected by Soft Reset */
+ u32 rx14mask; /* 0x14 - Not affected by Soft Reset */
+ u32 rx15mask; /* 0x18 - Not affected by Soft Reset */
u32 ecr; /* 0x1c */
u32 esr; /* 0x20 */
u32 imask2; /* 0x24 */
@@ -217,20 +264,24 @@ struct flexcan_regs {
u32 iflag1; /* 0x30 */
union { /* 0x34 */
u32 gfwr_mx28; /* MX28, MX53 */
- u32 ctrl2; /* MX6, VF610 */
+ u32 ctrl2; /* MX6, VF610 - Not affected by Soft Reset */
};
u32 esr2; /* 0x38 */
u32 imeur; /* 0x3c */
u32 lrfr; /* 0x40 */
u32 crcr; /* 0x44 */
u32 rxfgmask; /* 0x48 */
- u32 rxfir; /* 0x4c */
- u32 _reserved3[12]; /* 0x50 */
- u8 mb[2][512]; /* 0x80 */
+ u32 rxfir; /* 0x4c - Not affected by Soft Reset */
+ u32 cbt; /* 0x50 - Not affected by Soft Reset */
+ u32 _reserved2; /* 0x54 */
+ u32 dbg1; /* 0x58 */
+ u32 dbg2; /* 0x5c */
+ u32 _reserved3[8]; /* 0x60 */
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
/* FIFO-mode:
* MB
* 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserverd
+ * 0x090...0x0df 1-5 reserved
* 0x0e0...0x0ff 6-7 8 entry ID table
* (mx25, mx28, mx35, mx53)
* 0x0e0...0x2df 6-7..37 8..128 entry ID table
@@ -238,7 +289,7 @@ struct flexcan_regs {
* (mx6, vf610)
*/
u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
u32 _reserved5[24]; /* 0x980 */
u32 gfwr_mx6; /* 0x9e0 - MX6 */
u32 _reserved6[63]; /* 0x9e4 */
@@ -250,8 +301,14 @@ struct flexcan_regs {
u32 rerrdr; /* 0xaf4 */
u32 rerrsynr; /* 0xaf8 */
u32 errsr; /* 0xafc */
+ u32 _reserved7[64]; /* 0xb00 */
+ u32 fdctrl; /* 0xc00 - Not affected by Soft Reset */
+ u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
+ u32 fdcrc; /* 0xc08 */
};
+static_assert(sizeof(struct flexcan_regs) == 0x4 + 0xc08);
+
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
};
@@ -313,6 +370,12 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
FLEXCAN_QUIRK_SETUP_STOP_MODE,
};
+static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_FD,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
@@ -325,6 +388,12 @@ static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
+static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -337,6 +406,30 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const flexcan_fd_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 96,
+ .tseg2_min = 2,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const flexcan_fd_data_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 39,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
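
The FD limits above line up with the FDCBT bit-field widths used further down
in flexcan_set_bittiming_cbt(): FPROPSEG is masked with 0x1f (stored as
prop_seg, so up to 31 tq) and FPSEG1 with 0x7 (stored as phase_seg1 - 1, so up
to 8 tq), which gives the data-phase tseg1_max of 31 + 8 = 39; the
nominal-phase limit of 96 appears to follow the same way from a 6-bit EPROPSEG
(64 tq) plus a 5-bit EPSEG1 (32 tq). A minimal sketch (the _MAX macro names
are made up):

	#define FLEXCAN_FDCBT_FPROPSEG_MAX	0x1f		/* 31 tq */
	#define FLEXCAN_FDCBT_FPSEG1_MAX	(0x7 + 1)	/*  8 tq */

	static_assert(FLEXCAN_FDCBT_FPROPSEG_MAX +
		      FLEXCAN_FDCBT_FPSEG1_MAX == 39);
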
+
/* FlexCAN module is essentially modelled as a little-endian IP in most
* SoCs, i.e the registers as well as the message buffer areas are
* implemented in a little-endian fashion.
@@ -457,7 +550,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
-
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
@@ -628,10 +720,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
u32 can_id;
u32 data;
- u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+	u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (can_len2dlc(cfd->len) << 16);
int i;
if (can_dropped_invalid_skb(dev, skb))
@@ -639,18 +731,25 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
netif_stop_queue(dev);
- if (cf->can_id & CAN_EFF_FLAG) {
- can_id = cf->can_id & CAN_EFF_MASK;
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ can_id = cfd->can_id & CAN_EFF_MASK;
ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
} else {
- can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+ can_id = (cfd->can_id & CAN_SFF_MASK) << 18;
}
- if (cf->can_id & CAN_RTR_FLAG)
+ if (cfd->can_id & CAN_RTR_FLAG)
ctrl |= FLEXCAN_MB_CNT_RTR;
- for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
- data = be32_to_cpup((__be32 *)&cf->data[i]);
+ if (can_is_canfd_skb(skb)) {
+ ctrl |= FLEXCAN_MB_CNT_EDL;
+
+ if (cfd->flags & CANFD_BRS)
+ ctrl |= FLEXCAN_MB_CNT_BRS;
+ }
+
+ for (i = 0; i < cfd->len; i += sizeof(u32)) {
+ data = be32_to_cpup((__be32 *)&cfd->data[i]);
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
@@ -822,7 +921,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
struct flexcan_regs __iomem *regs = priv->regs;
struct flexcan_mb __iomem *mb;
struct sk_buff *skb;
- struct can_frame *cf;
+ struct canfd_frame *cfd;
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
@@ -859,8 +958,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
- skb = alloc_can_skb(offload->dev, &cf);
- if (!skb) {
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
+ skb = alloc_canfd_skb(offload->dev, &cfd);
+ else
+ skb = alloc_can_skb(offload->dev, (struct can_frame **)&cfd);
+ if (unlikely(!skb)) {
skb = ERR_PTR(-ENOMEM);
goto mark_as_read;
}
@@ -870,17 +972,28 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_id = priv->read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
- cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cfd->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
- cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+ cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL) {
+ cfd->len = can_dlc2len(get_canfd_dlc((reg_ctrl >> 16) & 0xf));
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = get_can_dlc((reg_ctrl >> 16) & 0xf);
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+ }
- if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
- cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+ if (reg_ctrl & FLEXCAN_MB_CNT_ESI)
+ cfd->flags |= CANFD_ESI;
- for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+ for (i = 0; i < cfd->len; i += sizeof(u32)) {
__be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)]));
- *(__be32 *)(cf->data + i) = data;
+ *(__be32 *)(cfd->data + i) = data;
}
mark_as_read:
@@ -961,10 +1074,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
reg_esr = priv->read(&regs->esr);
- /* ACK all bus error and state change IRQ sources */
- if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+ /* ACK all bus error, state change and wake IRQ sources */
+ if (reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT)) {
handled = IRQ_HANDLED;
- priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ priv->write(reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT), &regs->esr);
}
/* state change interrupt or broken error state quirk fix is enabled */
@@ -1019,7 +1132,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
return handled;
}
-static void flexcan_set_bittiming(struct net_device *dev)
+static void flexcan_set_bittiming_ctrl(const struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
@@ -1031,10 +1144,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
FLEXCAN_CTRL_RJW(0x3) |
FLEXCAN_CTRL_PSEG1(0x7) |
FLEXCAN_CTRL_PSEG2(0x7) |
- FLEXCAN_CTRL_PROPSEG(0x7) |
- FLEXCAN_CTRL_LPB |
- FLEXCAN_CTRL_SMP |
- FLEXCAN_CTRL_LOM);
+ FLEXCAN_CTRL_PROPSEG(0x7));
reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
@@ -1042,6 +1152,130 @@ static void flexcan_set_bittiming(struct net_device *dev)
FLEXCAN_CTRL_RJW(bt->sjw - 1) |
FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
+ netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
+ priv->write(reg, &regs->ctrl);
+
+ /* print chip status */
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
+}
+
+static void flexcan_set_bittiming_cbt(const struct net_device *dev)
+{
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_cbt, reg_fdctrl;
+
+ /* CBT */
+	/* CBT[EPSEG1] is 5 bits wide and CBT[EPROPSEG] is 6 bits
+	 * wide. can_calc_bittiming() tries to divide tseg1 equally
+	 * between phase_seg1 and prop_seg, which may not fit into
+	 * the CBT register. Therefore, if phase_seg1 exceeds the
+	 * maximum representable value, move the excess to prop_seg.
+	 */
+ if (bt->phase_seg1 > 0x20) {
+ bt->prop_seg += (bt->phase_seg1 - 0x20);
+ bt->phase_seg1 = 0x20;
+ }
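
A worked example of the clamp above (numbers hypothetical): if
can_calc_bittiming() returns prop_seg = 35 and phase_seg1 = 35, the excess
3 tq move over and the result is prop_seg = 38, phase_seg1 = 0x20 (32);
tseg1 stays 70 in total, only the split between the two segments changes.
As a standalone sketch (helper name made up):

	static void flexcan_clamp_phase_seg1(u32 *prop_seg, u32 *phase_seg1,
					     u32 max)
	{
		if (*phase_seg1 > max) {
			/* keep tseg1 constant, shift the excess */
			*prop_seg += *phase_seg1 - max;
			*phase_seg1 = max;
		}
	}
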
+
+ reg_cbt = FLEXCAN_CBT_BTF |
+ FIELD_PREP(FLEXCAN_CBT_EPRESDIV_MASK, bt->brp - 1) |
+ FIELD_PREP(FLEXCAN_CBT_ERJW_MASK, bt->sjw - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPROPSEG_MASK, bt->prop_seg - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPSEG1_MASK, bt->phase_seg1 - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPSEG2_MASK, bt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing cbt=0x%08x\n", reg_cbt);
+ priv->write(reg_cbt, &regs->cbt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ u32 reg_fdcbt, reg_ctrl2;
+
+ if (bt->brp != dbt->brp)
+			netdev_warn(dev, "Data brp=%d and nominal brp=%d don't match; this may result in a phase error. Consider using a different bitrate and/or data bitrate.\n",
+ dbt->brp, bt->brp);
+
+ /* FDCBT */
+		/* FDCBT[FPSEG1] is 3 bits wide and FDCBT[FPROPSEG]
+		 * is 5 bits wide. can_calc_bittiming() tries to
+		 * divide tseg1 equally between phase_seg1 and
+		 * prop_seg, which may not fit into the FDCBT
+		 * register. Therefore, if phase_seg1 exceeds the
+		 * maximum representable value, move the excess to prop_seg.
+		 */
+ if (dbt->phase_seg1 > 0x8) {
+ dbt->prop_seg += (dbt->phase_seg1 - 0x8);
+ dbt->phase_seg1 = 0x8;
+ }
+
+ reg_fdcbt = priv->read(&regs->fdcbt);
+ reg_fdcbt &= ~(FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, 0x3ff) |
+ FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, 0x7) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, 0x1f) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, 0x7) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, 0x7));
+
+ reg_fdcbt |= FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, dbt->brp - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, dbt->sjw - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, dbt->prop_seg) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, dbt->phase_seg1 - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, dbt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing fdcbt=0x%08x\n", reg_fdcbt);
+ priv->write(reg_fdcbt, &regs->fdcbt);
+
+ /* CTRL2 */
+ reg_ctrl2 = priv->read(&regs->ctrl2);
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_ISOCANFDEN;
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ reg_ctrl2 |= FLEXCAN_CTRL2_ISOCANFDEN;
+
+ netdev_dbg(dev, "writing ctrl2=0x%08x\n", reg_ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
+ }
+
+ /* FDCTRL */
+ reg_fdctrl = priv->read(&regs->fdctrl);
+ reg_fdctrl &= ~(FLEXCAN_FDCTRL_FDRATE |
+ FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, 0x1f));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_fdctrl |= FLEXCAN_FDCTRL_FDRATE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* TDC must be disabled for Loop Back mode */
+ reg_fdctrl &= ~FLEXCAN_FDCTRL_TDCEN;
+ } else {
+ reg_fdctrl |= FLEXCAN_FDCTRL_TDCEN |
+ FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF,
+ ((dbt->phase_seg1 - 1) +
+ dbt->prop_seg + 2) *
+						   ((dbt->brp - 1) + 1));
+ }
+ }
+
+ netdev_dbg(dev, "writing fdctrl=0x%08x\n", reg_fdctrl);
+ priv->write(reg_fdctrl, &regs->fdctrl);
+
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x ctrl2=0x%08x fdctrl=0x%08x cbt=0x%08x fdcbt=0x%08x\n",
+ __func__,
+ priv->read(&regs->mcr), priv->read(&regs->ctrl),
+ priv->read(&regs->ctrl2), priv->read(&regs->fdctrl),
+ priv->read(&regs->cbt), priv->read(&regs->fdcbt));
+}
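
The TDCOFF value programmed above reduces to the sample-point position in CAN
clock cycles: (phase_seg1 - 1) + prop_seg + 2 equals 1 (sync) + prop_seg +
phase_seg1 time quanta, and (brp - 1) + 1 is simply brp clocks per quantum.
An equivalent sketch (helper name made up); for brp = 1, prop_seg = 7,
phase_seg1 = 4 it yields (1 + 7 + 4) * 1 = 12 clock cycles:

	static u32 flexcan_tdc_offset(const struct can_bittiming *dbt)
	{
		/* secondary sample point, in CAN clock cycles */
		return (1 + dbt->prop_seg + dbt->phase_seg1) * dbt->brp;
	}
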
+
+static void flexcan_set_bittiming(struct net_device *dev)
+{
+ const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg;
+
+ reg = priv->read(&regs->ctrl);
+ reg &= ~(FLEXCAN_CTRL_LPB | FLEXCAN_CTRL_SMP |
+ FLEXCAN_CTRL_LOM);
+
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
reg |= FLEXCAN_CTRL_LPB;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -1052,9 +1286,10 @@ static void flexcan_set_bittiming(struct net_device *dev)
netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
priv->write(reg, &regs->ctrl);
- /* print chip status */
- netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- priv->read(&regs->mcr), priv->read(&regs->ctrl));
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD)
+ return flexcan_set_bittiming_cbt(dev);
+ else
+ return flexcan_set_bittiming_ctrl(dev);
}
/* flexcan_chip_start
@@ -1127,6 +1362,12 @@ static int flexcan_chip_start(struct net_device *dev)
else
reg_mcr |= FLEXCAN_MCR_SRX_DIS;
+ /* MCR - CAN-FD */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ reg_mcr |= FLEXCAN_MCR_FDEN;
+ else
+ reg_mcr &= ~FLEXCAN_MCR_FDEN;
+
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
priv->write(reg_mcr, &regs->mcr);
@@ -1169,6 +1410,32 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ u32 reg_fdctrl;
+
+ reg_fdctrl = priv->read(&regs->fdctrl);
+ reg_fdctrl &= ~(FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, 0x3) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, 0x3));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_fdctrl |=
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1,
+ FLEXCAN_FDCTRL_MBDSR_64) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0,
+ FLEXCAN_FDCTRL_MBDSR_64);
+ } else {
+ reg_fdctrl |=
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1,
+ FLEXCAN_FDCTRL_MBDSR_8) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0,
+ FLEXCAN_FDCTRL_MBDSR_8);
+ }
+
+ netdev_dbg(dev, "%s: writing fdctrl=0x%08x",
+ __func__, reg_fdctrl);
+ priv->write(reg_fdctrl, &regs->fdctrl);
+ }
+
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
mb = flexcan_get_mb(priv, i);
@@ -1204,28 +1471,43 @@ static int flexcan_chip_start(struct net_device *dev)
for (i = 0; i < priv->mb_count; i++)
priv->write(0, &regs->rximr[i]);
- /* On Vybrid, disable memory error detection interrupts
- * and freeze mode.
- * This also works around errata e5295 which generates
- * false positive memory errors and put the device in
- * freeze mode.
+	/* On Vybrid, disable the non-correctable error interrupt and
+	 * freeze mode. The hardware can still correct correctable
+	 * errors when it supports ECC.
+ *
+ * This also works around errata e5295 which generates false
+ * positive memory errors and put the device in freeze mode.
*/
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
/* Follow the protocol as described in "Detection
* and Correction of Memory Errors" to write to
- * MECR register
+	 * MECR register (steps 1 - 5)
+ *
+ * 1. By default, CTRL2[ECRWRE] = 0, MECR[ECRWRDIS] = 1
+ * 2. set CTRL2[ECRWRE]
*/
reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
priv->write(reg_ctrl2, &regs->ctrl2);
+ /* 3. clear MECR[ECRWRDIS] */
reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
priv->write(reg_mecr, &regs->mecr);
- reg_mecr |= FLEXCAN_MECR_ECCDIS;
+
+ /* 4. all writes to MECR must keep MECR[ECRWRDIS] cleared */
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
priv->write(reg_mecr, &regs->mecr);
+
+ /* 5. after configuration done, lock MECR by either
+ * setting MECR[ECRWRDIS] or clearing CTRL2[ECRWRE]
+ */
+ reg_mecr |= FLEXCAN_MECR_ECRWRDIS;
+ priv->write(reg_mecr, &regs->mecr);
+
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_ECRWRE;
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
err = flexcan_transceiver_enable(priv);
@@ -1260,18 +1542,23 @@ static int flexcan_chip_start(struct net_device *dev)
return err;
}
-/* flexcan_chip_stop
+/* __flexcan_chip_stop
*
- * this functions is entered with clocks enabled
+ * this function is entered with clocks enabled
*/
-static void flexcan_chip_stop(struct net_device *dev)
+static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
+ int err;
/* freeze + disable module */
- flexcan_chip_freeze(priv);
- flexcan_chip_disable(priv);
+ err = flexcan_chip_freeze(priv);
+ if (err && !disable_on_error)
+ return err;
+ err = flexcan_chip_disable(priv);
+ if (err && !disable_on_error)
+ goto out_chip_unfreeze;
/* Disable all interrupts */
priv->write(0, &regs->imask2);
@@ -1281,6 +1568,23 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
+
+ return 0;
+
+ out_chip_unfreeze:
+ flexcan_chip_unfreeze(priv);
+
+ return err;
+}
+
+static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, true);
+}
+
+static inline int flexcan_chip_stop(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, false);
}
static int flexcan_open(struct net_device *dev)
@@ -1288,6 +1592,12 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ netdev_err(dev, "Three Samples mode and CAN-FD mode can't be used together\n");
+ return -EINVAL;
+ }
+
err = pm_runtime_get_sync(priv->dev);
if (err < 0)
return err;
@@ -1300,7 +1610,10 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_close;
- priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
+ else
+ priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
(sizeof(priv->regs->mb[1]) / priv->mb_size);
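
Worked numbers for the mailbox sizing above, assuming the 8-byte
can_ctrl/can_id header of struct flexcan_mb:

	mb_size = 8 + 64 = 72 (CAN-FD)   -> 512 / 72 = 7 per block, 14 total
	mb_size = 8 +  8 = 16 (classic)  -> 512 / 16 = 32 per block, 64 total
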
@@ -1362,7 +1675,7 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
can_rx_offload_disable(&priv->offload);
- flexcan_chip_stop(dev);
+ flexcan_chip_stop_disable_on_error(dev);
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
@@ -1531,6 +1844,7 @@ out_put_node:
}
static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -1539,6 +1853,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+ { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1562,11 +1877,13 @@ static int flexcan_probe(struct platform_device *pdev)
u8 clk_src = 1;
u32 clock_freq = 0;
- reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+ reg_xceiver = devm_regulator_get_optional(&pdev->dev, "xceiver");
if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- else if (IS_ERR(reg_xceiver))
+ else if (PTR_ERR(reg_xceiver) == -ENODEV)
reg_xceiver = NULL;
+ else if (IS_ERR(reg_xceiver))
+ return PTR_ERR(reg_xceiver);
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
@@ -1608,6 +1925,12 @@ static int flexcan_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
+ !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
+ dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n");
+ return -EINVAL;
+ }
+
dev = alloc_candev(sizeof(struct flexcan_priv), 1);
if (!dev)
return -ENOMEM;
@@ -1632,7 +1955,6 @@ static int flexcan_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->can.clock.freq = clock_freq;
- priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
priv->can.do_get_berr_counter = flexcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
@@ -1645,6 +1967,16 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+ priv->can.bittiming_const = &flexcan_fd_bittiming_const;
+ priv->can.data_bittiming_const =
+ &flexcan_fd_data_bittiming_const;
+ } else {
+ priv->can.bittiming_const = &flexcan_bittiming_const;
+ }
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1655,6 +1987,7 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ of_can_transceiver(dev);
devm_can_led_init(dev);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) {
@@ -1685,7 +2018,7 @@ static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
if (netif_running(dev)) {
/* if wakeup is enabled, enter stop mode
@@ -1697,25 +2030,27 @@ static int __maybe_unused flexcan_suspend(struct device *device)
if (err)
return err;
} else {
- err = flexcan_chip_disable(priv);
+ err = flexcan_chip_stop(dev);
if (err)
return err;
- err = pm_runtime_force_suspend(device);
+ err = pinctrl_pm_select_sleep_state(device);
+ if (err)
+ return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
}
priv->can.state = CAN_STATE_SLEEPING;
- return err;
+ return 0;
}
static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
@@ -1727,15 +2062,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
} else {
- err = pm_runtime_force_resume(device);
+ err = pinctrl_pm_select_default_state(device);
if (err)
return err;
- err = flexcan_chip_enable(priv);
+ err = flexcan_chip_start(dev);
+ if (err)
+ return err;
}
}
- return err;
+ return 0;
}
static int __maybe_unused flexcan_runtime_suspend(struct device *device)
@@ -1761,8 +2098,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- if (netif_running(dev) && device_may_wakeup(device))
- flexcan_enable_wakeup_irq(priv, true);
+ if (netif_running(dev)) {
+ int err;
+
+ if (device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, true);
+
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -1772,8 +2117,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- if (netif_running(dev) && device_may_wakeup(device))
- flexcan_enable_wakeup_irq(priv, false);
+ if (netif_running(dev)) {
+ int err;
+
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+
+ if (device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, false);
+ }
return 0;
}
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 378200b682fa..39802f107eb1 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -726,7 +726,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
txrx = "on rx ";
stats->rx_errors++;
}
- netdev_err(dev, "Fatal AHB buss error %s- halting device\n",
+ netdev_err(dev, "Fatal AHB bus error %s- halting device\n",
txrx);
spin_lock_irqsave(&priv->lock, flags);
@@ -1243,7 +1243,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
int rx_budget = budget / 2;
int tx_budget = budget - rx_budget;
- /* Half of the budget for receiveing messages */
+ /* Half of the budget for receiving messages */
rx_work_done = grcan_receive(dev, rx_budget);
/* Half of the budget for transmitting messages as that can trigger echo
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index d9216147ca93..48be627c85c2 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -20,5 +20,5 @@ config CAN_M_CAN_TCAN4X5X
tristate "TCAN4X5X M_CAN device"
help
Say Y here if you want support for Texas Instruments TCAN4x5x
- M_CAN controller. This device is a peripherial device that uses the
+ M_CAN controller. This device is a peripheral device that uses the
SPI bus for communication.
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e4f4b5c9ebd6..e254e04ae257 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -5,7 +5,7 @@
* Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 99101d7027a8..640ba1b356ec 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -209,6 +209,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
* since buffer with lower id have higher priority (hell..)
*/
netif_stop_queue(dev);
+ fallthrough;
case 2:
if (buf_id < priv->prev_buf_id) {
priv->cur_pri++;
@@ -540,16 +541,12 @@ static int mscan_open(struct net_device *dev)
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
- if (priv->clk_ipg) {
- ret = clk_prepare_enable(priv->clk_ipg);
- if (ret)
- goto exit_retcode;
- }
- if (priv->clk_can) {
- ret = clk_prepare_enable(priv->clk_can);
- if (ret)
- goto exit_dis_ipg_clock;
- }
+ ret = clk_prepare_enable(priv->clk_ipg);
+ if (ret)
+ goto exit_retcode;
+ ret = clk_prepare_enable(priv->clk_can);
+ if (ret)
+ goto exit_dis_ipg_clock;
/* common open */
ret = open_candev(dev);
@@ -583,11 +580,9 @@ exit_napi_disable:
napi_disable(&priv->napi);
close_candev(dev);
exit_dis_can_clock:
- if (priv->clk_can)
- clk_disable_unprepare(priv->clk_can);
+ clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
- if (priv->clk_ipg)
- clk_disable_unprepare(priv->clk_ipg);
+ clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
return ret;
}
@@ -606,10 +601,8 @@ static int mscan_close(struct net_device *dev)
close_candev(dev);
free_irq(dev->irq, dev);
- if (priv->clk_can)
- clk_disable_unprepare(priv->clk_can);
- if (priv->clk_ipg)
- clk_disable_unprepare(priv->clk_ipg);
+ clk_disable_unprepare(priv->clk_can);
+ clk_disable_unprepare(priv->clk_ipg);
return 0;
}
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index db41dddd5771..5c180d2f3c3c 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -461,7 +461,7 @@ static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
PCH_ID2_DIR | (0x7ff << 2));
iowrite32(0x0, &priv->regs->ifregs[1].id1);
- /* Claring NewDat, TxRqst & IntPnd */
+ /* Clearing NewDat, TxRqst & IntPnd */
pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
PCH_IF_MCONT_TXRQXT);
@@ -834,7 +834,7 @@ static int pch_can_open(struct net_device *ndev)
struct pch_can_priv *priv = netdev_priv(ndev);
int retval;
- /* Regstering the interrupt. */
+ /* Registering the interrupt. */
retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (retval) {
@@ -957,8 +957,7 @@ static void pch_can_remove(struct pci_dev *pdev)
free_candev(priv->ndev);
}
-#ifdef CONFIG_PM
-static void pch_can_set_int_custom(struct pch_can_priv *priv)
+static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
{
/* Clearing the IE, SIE and EIE bits of Can control register. */
pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
@@ -969,14 +968,14 @@ static void pch_can_set_int_custom(struct pch_can_priv *priv)
}
/* This function retrieves interrupt enabled for the CAN device. */
-static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
+static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
{
/* Obtaining the status of IE, SIE and EIE interrupt bits. */
return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
}
-static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
- enum pch_ifreg dir)
+static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
+ u32 buff_num, enum pch_ifreg dir)
{
u32 ie, enable;
@@ -997,8 +996,8 @@ static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
return enable;
}
-static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, int set)
+static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, int set)
{
iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
@@ -1013,7 +1012,8 @@ static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
}
-static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
+static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num)
{
u32 link;
@@ -1027,20 +1027,19 @@ static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
return link;
}
-static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
{
return (ioread32(&priv->regs->treq1) & 0xffff) |
(ioread32(&priv->regs->treq2) << 16);
}
-static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_can_suspend(struct device *dev_d)
{
int i;
- int retval;
u32 buf_stat; /* Variable for reading the transmit buffer status. */
int counter = PCH_COUNTER_LIMIT;
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct pch_can_priv *priv = netdev_priv(dev);
/* Stop the CAN controller */
@@ -1058,7 +1057,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
udelay(1);
}
if (!counter)
- dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+ dev_err(dev_d, "%s -> Transmission time out.\n", __func__);
/* Save interrupt configuration and then disable them */
priv->int_enables = pch_can_get_int_enables(priv);
@@ -1081,35 +1080,16 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
/* Disable all Receive buffers */
pch_can_set_rx_all(priv, 0);
- retval = pci_save_state(pdev);
- if (retval) {
- dev_err(&pdev->dev, "pci_save_state failed.\n");
- } else {
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- }
- return retval;
+ return 0;
}
-static int pch_can_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_can_resume(struct device *dev_d)
{
int i;
- int retval;
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct pch_can_priv *priv = netdev_priv(dev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "pci_enable_device failed.\n");
- return retval;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
-
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Disabling all interrupts. */
@@ -1146,12 +1126,8 @@ static int pch_can_resume(struct pci_dev *pdev)
/* Restore Run Mode */
pch_can_set_run_mode(priv, PCH_CAN_RUN);
- return retval;
+ return 0;
}
-#else
-#define pch_can_suspend NULL
-#define pch_can_resume NULL
-#endif
static int pch_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
@@ -1252,13 +1228,16 @@ probe_exit_endev:
return rc;
}
+static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
+ pch_can_suspend,
+ pch_can_resume);
+
static struct pci_driver pch_can_pci_driver = {
.name = "pch_can",
.id_table = pch_pci_tbl,
.probe = pch_can_probe,
.remove = pch_can_remove,
- .suspend = pch_can_suspend,
- .resume = pch_can_resume,
+ .driver.pm = &pch_can_pm_ops,
};
module_pci_driver(pch_can_pci_driver);
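
For reference, SIMPLE_DEV_PM_OPS(pch_can_pm_ops, ...) above wires the same
callback pair into all system-sleep transitions; roughly (a sketch of the
expansion via SET_SYSTEM_SLEEP_PM_OPS(), effective when CONFIG_PM_SLEEP is
set):

	static const struct dev_pm_ops pch_can_pm_ops = {
		.suspend  = pch_can_suspend,
		.resume   = pch_can_resume,
		.freeze   = pch_can_suspend,
		.thaw     = pch_can_resume,
		.poweroff = pch_can_suspend,
		.restore  = pch_can_resume,
	};

With .driver.pm set, the PCI core takes over the pci_save_state() /
pci_set_power_state() / pci_enable_device() bookkeeping that the old
.suspend/.resume callbacks did by hand, which is why those calls could be
dropped above.
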
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 9469d4421afe..0df1cdfa6835 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -116,8 +116,6 @@ MODULE_LICENSE("GPL v2");
#define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */
#define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */
-#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD)
-
/* Tx anticipation window (link logical address should be aligned on 2K
* boundary)
*/
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e8328910a234..3b180269a92d 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -351,6 +351,17 @@ int can_rx_offload_add_fifo(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
+{
+ if (offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
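
A hypothetical caller of the new helper (sketch): a driver that reads frames
itself and feeds them to the offload queue, e.g. via
can_rx_offload_queue_sorted(), must leave mailbox_read unset and only needs
the NAPI plumbing:

	priv->offload.mailbox_read = NULL;	/* required, see check above */
	err = can_rx_offload_add_manual(dev, &priv->offload,
					NAPI_POLL_WEIGHT);
	if (err)
		return err;
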
+
void can_rx_offload_enable(struct can_rx_offload *offload)
{
napi_enable(&offload->napi);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 8c0244f51059..4713921bd511 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
/* GPIOICR byte access offsets */
#define PITA_GPOUT 0x18 /* GPx output value */
#define PITA_GPIN 0x19 /* GPx input value */
-#define PITA_GPOEN 0x1A /* configure GPx as ouput pin */
+#define PITA_GPOEN 0x1A /* configure GPx as output pin */
/* I2C GP bits */
#define PITA_GPIN_SCL 0x01 /* Serial Clock Line */
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 5e0d5e8101c8..cf951a783078 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -671,7 +671,7 @@ static int pcan_probe(struct pcmcia_device *pdev)
card->fw_major = pcan_read_reg(card, PCC_FW_MAJOR);
card->fw_minor = pcan_read_reg(card, PCC_FW_MINOR);
- /* display board name and firware version */
+ /* display board name and firmware version */
dev_info(&pdev->dev, "PEAK-System pcmcia card %s fw %d.%d\n",
pdev->prod_id[1] ? pdev->prod_id[1] : "PCAN-PC Card",
card->fw_major, card->fw_minor);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 16b9eec63490..8afd7d0a1000 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -5,14 +5,14 @@ config CAN_SOFTING
help
Support for CAN cards from Softing Gmbh & some cards
from Vector Gmbh.
- Softing Gmbh CAN cards come with 1 or 2 physical busses.
+ Softing Gmbh CAN cards come with 1 or 2 physical buses.
Those cards typically use Dual Port RAM to communicate
with the host CPU. The interface is then identical for PCI
and PCMCIA cards. This driver operates on a platform device,
which has been created by softing_cs or softing_pci driver.
Warning:
The API of the card does not allow fine control per bus, but
- controls the 2 busses on the card together.
+ controls the 2 buses on the card together.
As such, some actions (start/stop/busoff recovery) on 1 bus
must bring down the other bus too temporarily.
@@ -24,7 +24,7 @@ config CAN_SOFTING_CS
Support for PCMCIA cards from Softing Gmbh & some cards
from Vector Gmbh.
You need firmware for these, which you can get at
- http://developer.berlios.de/projects/socketcan/
+ https://github.com/linux-can/can-firmware
This version of the driver is written against
firmware version 4.6 (softing-fw-4.6-binaries.tar.gz)
In order to use the card as CAN device, you need the Softing generic
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 8f44fdd8804b..ccd649a8e37b 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -273,7 +273,7 @@ int softing_load_app_fw(const char *file, struct softing *card)
goto failed;
}
- /* regualar data */
+ /* regular data */
for (sum = 0, j = 0; j < len; ++j)
sum += dat[j];
/* work in 16bit (target) */
@@ -474,14 +474,14 @@ int softing_startstop(struct net_device *dev, int up)
if (ret)
goto failed;
if (!bus_bitmask_start)
- /* no busses to be brought up */
+ /* no buses to be brought up */
goto card_done;
if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
&& (softing_error_reporting(card->net[0])
!= softing_error_reporting(card->net[1]))) {
dev_alert(&card->pdev->dev,
- "err_reporting flag differs for busses\n");
+ "err_reporting flag differs for buses\n");
goto invalid;
}
error_reporting = 0;
@@ -635,7 +635,7 @@ int softing_startstop(struct net_device *dev, int up)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
open_candev(netdev);
if (dev != netdev) {
- /* notify other busses on the restart */
+ /* notify other buses on the restart */
softing_netdev_rx(netdev, &msg, 0);
++priv->can.can_stats.restarts;
}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index d1ddf763b188..11b0f3bcfe80 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -170,8 +170,8 @@ static int softing_handle_1(struct softing *card)
msg.can_dlc = CAN_ERR_DLC;
msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
/*
- * service to all busses, we don't know which it was applicable
- * but only service busses that are online
+ * service to all buses, we don't know which it was applicable
+ * but only service buses that are online
*/
for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
netdev = card->net[j];
@@ -339,7 +339,7 @@ static irqreturn_t softing_irq_thread(int irq, void *dev_id)
continue;
priv = netdev_priv(netdev);
if (!canif_is_active(netdev))
- /* it makes no sense to wake dead busses */
+ /* it makes no sense to wake dead buses */
continue;
if (priv->tx.pending >= TX_ECHO_SKB_MAX)
continue;
@@ -374,7 +374,7 @@ static irqreturn_t softing_irq_v1(int irq, void *dev_id)
}
/*
- * netdev/candev inter-operability
+ * netdev/candev interoperability
*/
static int softing_netdev_open(struct net_device *ndev)
{
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
index 68a161547644..cd8d7904c5f0 100644
--- a/drivers/net/can/softing/softing_platform.h
+++ b/drivers/net/can/softing/softing_platform.h
@@ -19,7 +19,7 @@ struct softing_platform_data {
* 16bit, shared interrupt
*/
int generation;
- int nbus; /* # busses on device */
+ int nbus; /* # buses on device */
unsigned int freq; /* operating frequency in Hz */
unsigned int max_brp;
unsigned int max_sjw;
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index f780c79aac6f..a82240628c33 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -4,15 +4,15 @@ menu "CAN SPI interfaces"
config CAN_HI311X
tristate "Holt HI311x SPI CAN controllers"
- depends on CAN_DEV && SPI && HAS_DMA
help
Driver for the Holt HI311x SPI CAN controllers.
config CAN_MCP251X
tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
- depends on HAS_DMA
help
Driver for the Microchip MCP251x and MCP25625 SPI CAN
controllers.
+source "drivers/net/can/spi/mcp25xxfd/Kconfig"
+
endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index f115b2c46623..20c18ac96b1c 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_CAN_HI311X) += hi311x.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+obj-y += mcp25xxfd/
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index d17608870f2d..22d814ae4edc 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -19,6 +19,7 @@
* Copyright 2007
*/
+#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/led.h>
@@ -27,17 +28,20 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/freezer.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/property.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
-#include <linux/regulator/consumer.h>
/* SPI interface instruction set */
#define INSTRUCTION_WRITE 0x02
@@ -52,6 +56,30 @@
#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
/* MPC251x registers */
+#define BFPCTRL 0x0c
+# define BFPCTRL_B0BFM BIT(0)
+# define BFPCTRL_B1BFM BIT(1)
+# define BFPCTRL_BFM(n) (BFPCTRL_B0BFM << (n))
+# define BFPCTRL_BFM_MASK GENMASK(1, 0)
+# define BFPCTRL_B0BFE BIT(2)
+# define BFPCTRL_B1BFE BIT(3)
+# define BFPCTRL_BFE(n) (BFPCTRL_B0BFE << (n))
+# define BFPCTRL_BFE_MASK GENMASK(3, 2)
+# define BFPCTRL_B0BFS BIT(4)
+# define BFPCTRL_B1BFS BIT(5)
+# define BFPCTRL_BFS(n) (BFPCTRL_B0BFS << (n))
+# define BFPCTRL_BFS_MASK GENMASK(5, 4)
+#define TXRTSCTRL 0x0d
+# define TXRTSCTRL_B0RTSM BIT(0)
+# define TXRTSCTRL_B1RTSM BIT(1)
+# define TXRTSCTRL_B2RTSM BIT(2)
+# define TXRTSCTRL_RTSM(n) (TXRTSCTRL_B0RTSM << (n))
+# define TXRTSCTRL_RTSM_MASK GENMASK(2, 0)
+# define TXRTSCTRL_B0RTS BIT(3)
+# define TXRTSCTRL_B1RTS BIT(4)
+# define TXRTSCTRL_B2RTS BIT(5)
+# define TXRTSCTRL_RTS(n) (TXRTSCTRL_B0RTS << (n))
+# define TXRTSCTRL_RTS_MASK GENMASK(5, 3)
#define CANSTAT 0x0e
#define CANCTRL 0x0f
# define CANCTRL_REQOP_MASK 0xe0
@@ -225,6 +253,10 @@ struct mcp251x_priv {
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+ u8 reg_bfpctrl;
+#endif
};
#define MCP251X_IS(_model) \
@@ -290,8 +322,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
- mcp251x_spi_trans(spi, 3);
- val = priv->spi_rx_buf[2];
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ spi_write_then_read(spi, priv->spi_tx_buf, 2, &val, 1);
+ } else {
+ mcp251x_spi_trans(spi, 3);
+ val = priv->spi_rx_buf[2];
+ }
return val;
}
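
The SPI_CONTROLLER_HALF_DUPLEX branch above is needed because such
controllers cannot clock MISO in while MOSI is driven, so the single 3-byte
full-duplex transfer is split into a write of the 2-byte command followed by
a separate 1-byte read. spi_write_then_read() builds both halves into one
message; spelled out it is roughly equivalent to (a sketch):

	struct spi_transfer xfers[] = {
		{ .tx_buf = priv->spi_tx_buf, .len = 2, },	/* command */
		{ .rx_buf = &val, .len = 1, },			/* response */
	};

	spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
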
@@ -303,10 +339,18 @@ static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2)
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
- mcp251x_spi_trans(spi, 4);
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ u8 val[2] = { 0 };
- *v1 = priv->spi_rx_buf[2];
- *v2 = priv->spi_rx_buf[3];
+ spi_write_then_read(spi, priv->spi_tx_buf, 2, val, 2);
+ *v1 = val[0];
+ *v2 = val[1];
+ } else {
+ mcp251x_spi_trans(spi, 4);
+
+ *v1 = priv->spi_rx_buf[2];
+ *v2 = priv->spi_rx_buf[3];
+ }
}
static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
@@ -345,6 +389,222 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
mcp251x_spi_trans(spi, 4);
}
+static u8 mcp251x_read_stat(struct spi_device *spi)
+{
+ return mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK;
+}
+
+#define mcp251x_read_stat_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(mcp251x_read_stat, addr, val, cond, \
+ delay_us, timeout_us)
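
The wrapper above just binds mcp251x_read_stat() as the read op of
readx_poll_timeout(), which re-reads (sleeping in between) until the
condition holds or the timeout expires, returning 0 or -ETIMEDOUT. A typical
call, as used by the conversions further down:

	u8 value;
	int ret;

	ret = mcp251x_read_stat_poll_timeout(spi, value,
					     value == CANCTRL_REQOP_CONF,
					     MCP251X_OST_DELAY_MS * 1000,
					     USEC_PER_SEC);
	if (ret)
		return ret;
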
+
+#ifdef CONFIG_GPIOLIB
+enum {
+ MCP251X_GPIO_TX0RTS = 0, /* inputs */
+ MCP251X_GPIO_TX1RTS,
+ MCP251X_GPIO_TX2RTS,
+ MCP251X_GPIO_RX0BF, /* outputs */
+ MCP251X_GPIO_RX1BF,
+};
+
+#define MCP251X_GPIO_INPUT_MASK \
+ GENMASK(MCP251X_GPIO_TX2RTS, MCP251X_GPIO_TX0RTS)
+#define MCP251X_GPIO_OUTPUT_MASK \
+ GENMASK(MCP251X_GPIO_RX1BF, MCP251X_GPIO_RX0BF)
+
+static const char * const mcp251x_gpio_names[] = {
+ [MCP251X_GPIO_TX0RTS] = "TX0RTS", /* inputs */
+ [MCP251X_GPIO_TX1RTS] = "TX1RTS",
+ [MCP251X_GPIO_TX2RTS] = "TX2RTS",
+ [MCP251X_GPIO_RX0BF] = "RX0BF", /* outputs */
+ [MCP251X_GPIO_RX1BF] = "RX1BF",
+};
+
+static inline bool mcp251x_gpio_is_input(unsigned int offset)
+{
+ return offset <= MCP251X_GPIO_TX2RTS;
+}
+
+static int mcp251x_gpio_request(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 val;
+
+ /* nothing to be done for inputs */
+ if (mcp251x_gpio_is_input(offset))
+ return 0;
+
+ val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl |= val;
+
+ return 0;
+}
+
+static void mcp251x_gpio_free(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 val;
+
+ /* nothing to be done for inputs */
+ if (mcp251x_gpio_is_input(offset))
+ return;
+
+ val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, val, 0);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~val;
+}
+
+static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (mcp251x_gpio_is_input(offset))
+ return GPIOF_DIR_IN;
+
+ return GPIOF_DIR_OUT;
+}
+
+static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 reg, mask, val;
+
+ if (mcp251x_gpio_is_input(offset)) {
+ reg = TXRTSCTRL;
+ mask = TXRTSCTRL_RTS(offset);
+ } else {
+ reg = BFPCTRL;
+ mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
+ }
+
+ mutex_lock(&priv->mcp_lock);
+ val = mcp251x_read_reg(priv->spi, reg);
+ mutex_unlock(&priv->mcp_lock);
+
+ return !!(val & mask);
+}
+
+static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *maskp, unsigned long *bitsp)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ unsigned long bits = 0;
+ u8 val;
+
+ mutex_lock(&priv->mcp_lock);
+ if (maskp[0] & MCP251X_GPIO_INPUT_MASK) {
+ val = mcp251x_read_reg(priv->spi, TXRTSCTRL);
+ val = FIELD_GET(TXRTSCTRL_RTS_MASK, val);
+ bits |= FIELD_PREP(MCP251X_GPIO_INPUT_MASK, val);
+ }
+ if (maskp[0] & MCP251X_GPIO_OUTPUT_MASK) {
+ val = mcp251x_read_reg(priv->spi, BFPCTRL);
+ val = FIELD_GET(BFPCTRL_BFS_MASK, val);
+ bits |= FIELD_PREP(MCP251X_GPIO_OUTPUT_MASK, val);
+ }
+ mutex_unlock(&priv->mcp_lock);
+
+ bitsp[0] = bits;
+ return 0;
+}
+
+static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 mask, val;
+
+ mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
+ val = value ? mask : 0;
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~mask;
+ priv->reg_bfpctrl |= val;
+}
+
+static void
+mcp251x_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *maskp, unsigned long *bitsp)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 mask, val;
+
+ mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
+ mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
+
+ val = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, bitsp[0]);
+ val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
+
+ if (!mask)
+ return;
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~mask;
+ priv->reg_bfpctrl |= val;
+}
+
+static void mcp251x_gpio_restore(struct spi_device *spi)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+ mcp251x_write_reg(spi, BFPCTRL, priv->reg_bfpctrl);
+}
+
+static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
+{
+ struct gpio_chip *gpio = &priv->gpio;
+
+ if (!device_property_present(&priv->spi->dev, "gpio-controller"))
+ return 0;
+
+ /* gpiochip handles TX[0..2]RTS and RX[0..1]BF */
+ gpio->label = priv->spi->modalias;
+ gpio->parent = &priv->spi->dev;
+ gpio->owner = THIS_MODULE;
+ gpio->request = mcp251x_gpio_request;
+ gpio->free = mcp251x_gpio_free;
+ gpio->get_direction = mcp251x_gpio_get_direction;
+ gpio->get = mcp251x_gpio_get;
+ gpio->get_multiple = mcp251x_gpio_get_multiple;
+ gpio->set = mcp251x_gpio_set;
+ gpio->set_multiple = mcp251x_gpio_set_multiple;
+ gpio->base = -1;
+ gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
+ gpio->names = mcp251x_gpio_names;
+ gpio->can_sleep = true;
+#ifdef CONFIG_OF_GPIO
+ gpio->of_node = priv->spi->dev.of_node;
+#endif
+
+ return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
+}
+#else
+static inline void mcp251x_gpio_restore(struct spi_device *spi)
+{
+}
+
+static inline int mcp251x_gpio_setup(struct mcp251x_priv *priv)
+{
+ return 0;
+}
+#endif
+
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
int len, int tx_buf_idx)
{
@@ -409,8 +669,16 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
- mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
- memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ spi_write_then_read(spi, priv->spi_tx_buf, 1,
+ priv->spi_rx_buf,
+ SPI_TRANSFER_BUF_LEN);
+ memcpy(buf + 1, priv->spi_rx_buf,
+ SPI_TRANSFER_BUF_LEN - 1);
+ } else {
+ mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
+ memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+ }
}
}
@@ -471,7 +739,8 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
/* May only be called when device is sleeping! */
static int mcp251x_hw_wake(struct spi_device *spi)
{
- unsigned long timeout;
+ u8 value;
+ int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
disable_irq(spi->irq);
@@ -484,14 +753,12 @@ static int mcp251x_hw_wake(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF);
/* Wait for the device to enter config mode */
- timeout = jiffies + HZ;
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
- CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't enter in config mode\n");
- return -EBUSY;
- }
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret) {
+ dev_err(&spi->dev, "MCP251x didn't enter in config mode\n");
+ return ret;
}
/* Disable and clear pending interrupts */
@@ -546,7 +813,8 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- unsigned long timeout;
+ u8 value;
+ int ret;
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
@@ -564,13 +832,12 @@ static int mcp251x_set_normal_mode(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
/* Wait for the device to enter normal mode */
- timeout = jiffies + HZ;
- while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
- return -EBUSY;
- }
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == 0,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret) {
+ dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
+ return ret;
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -614,7 +881,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- unsigned long timeout;
+ u8 value;
int ret;
/* Wait for oscillator startup timer after power up */
@@ -629,19 +896,12 @@ static int mcp251x_hw_reset(struct spi_device *spi)
mdelay(MCP251X_OST_DELAY_MS);
/* Wait for reset to finish */
- timeout = jiffies + HZ;
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
- CANCTRL_REQOP_CONF) {
- usleep_range(MCP251X_OST_DELAY_MS * 1000,
- MCP251X_OST_DELAY_MS * 1000 * 2);
-
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev,
- "MCP251x didn't enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
- return 0;
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret)
+ dev_err(&spi->dev, "MCP251x didn't enter in conf mode after reset\n");
+ return ret;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -761,6 +1021,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
mcp251x_hw_reset(spi);
mcp251x_setup(net, spi);
+ mcp251x_gpio_restore(spi);
} else {
mcp251x_hw_wake(spi);
}
@@ -1136,6 +1397,10 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
+ ret = mcp251x_gpio_setup(priv);
+ if (ret)
+ goto error_probe;
+
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
diff --git a/drivers/net/can/spi/mcp25xxfd/Kconfig b/drivers/net/can/spi/mcp25xxfd/Kconfig
new file mode 100644
index 000000000000..9eb596019a58
--- /dev/null
+++ b/drivers/net/can/spi/mcp25xxfd/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CAN_MCP25XXFD
+ tristate "Microchip MCP25xxFD SPI CAN controllers"
+ select REGMAP
+ help
+ Driver for the Microchip MCP25XXFD SPI FD-CAN controller
+ family.
+
+config CAN_MCP25XXFD_SANITY
+ depends on CAN_MCP25XXFD
+ bool "Additional Sanity Checks"
+ help
+	  This option enables additional sanity checks in the driver
+	  that compare various internal counters with their on-chip
+	  counterparts. This comes with a runtime overhead.
+	  Disable if unsure.
diff --git a/drivers/net/can/spi/mcp25xxfd/Makefile b/drivers/net/can/spi/mcp25xxfd/Makefile
new file mode 100644
index 000000000000..4e17f592e22e
--- /dev/null
+++ b/drivers/net/can/spi/mcp25xxfd/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CAN_MCP25XXFD) += mcp25xxfd.o
+
+mcp25xxfd-objs :=
+mcp25xxfd-objs += mcp25xxfd-core.o
+mcp25xxfd-objs += mcp25xxfd-crc16.o
+mcp25xxfd-objs += mcp25xxfd-regmap.o
diff --git a/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-core.c b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-core.c
new file mode 100644
index 000000000000..bd2ba981ae36
--- /dev/null
+++ b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-core.c
@@ -0,0 +1,2911 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp25xxfd - Microchip MCP25xxFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+#include "mcp25xxfd.h"
+
+#define DEVICE_NAME "mcp25xxfd"
+
+static const struct mcp25xxfd_devtype_data mcp25xxfd_devtype_data_mcp2517fd = {
+ .quirks = MCP25XXFD_QUIRK_MAB_NO_WARN | MCP25XXFD_QUIRK_CRC_REG |
+ MCP25XXFD_QUIRK_CRC_RX | MCP25XXFD_QUIRK_CRC_TX |
+ MCP25XXFD_QUIRK_ECC,
+ .model = MCP25XXFD_MODEL_MCP2517FD,
+};
+
+static const struct mcp25xxfd_devtype_data mcp25xxfd_devtype_data_mcp2518fd = {
+ .quirks = MCP25XXFD_QUIRK_CRC_REG | MCP25XXFD_QUIRK_CRC_RX |
+ MCP25XXFD_QUIRK_CRC_TX | MCP25XXFD_QUIRK_ECC,
+ .model = MCP25XXFD_MODEL_MCP2518FD,
+};
+
+/* Autodetect model, start with CRC enabled. */
+static const struct mcp25xxfd_devtype_data mcp25xxfd_devtype_data_mcp25xxfd = {
+ .quirks = MCP25XXFD_QUIRK_CRC_REG | MCP25XXFD_QUIRK_CRC_RX |
+ MCP25XXFD_QUIRK_CRC_TX | MCP25XXFD_QUIRK_ECC,
+ .model = MCP25XXFD_MODEL_MCP25XXFD,
+};
+
+static const struct can_bittiming_const mcp25xxfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const mcp25xxfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const char *__mcp25xxfd_get_model_str(enum mcp25xxfd_model model)
+{
+ switch (model) {
+ case MCP25XXFD_MODEL_MCP2517FD:
+		return "MCP2517FD";
+	case MCP25XXFD_MODEL_MCP2518FD:
+		return "MCP2518FD";
+	case MCP25XXFD_MODEL_MCP25XXFD:
+		return "MCP25xxFD";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+mcp25xxfd_get_model_str(const struct mcp25xxfd_priv *priv)
+{
+ return __mcp25xxfd_get_model_str(priv->devtype_data.model);
+}
+
+static const char *mcp25xxfd_get_mode_str(const u8 mode)
+{
+ switch (mode) {
+ case MCP25XXFD_REG_CON_MODE_MIXED:
+		return "Mixed (CAN FD/CAN 2.0)";
+	case MCP25XXFD_REG_CON_MODE_SLEEP:
+		return "Sleep";
+	case MCP25XXFD_REG_CON_MODE_INT_LOOPBACK:
+		return "Internal Loopback";
+	case MCP25XXFD_REG_CON_MODE_LISTENONLY:
+		return "Listen Only";
+	case MCP25XXFD_REG_CON_MODE_CONFIG:
+		return "Configuration";
+	case MCP25XXFD_REG_CON_MODE_EXT_LOOPBACK:
+		return "External Loopback";
+	case MCP25XXFD_REG_CON_MODE_CAN2_0:
+		return "CAN 2.0";
+	case MCP25XXFD_REG_CON_MODE_RESTRICTED:
+		return "Restricted Operation";
+ }
+
+ return "<unknown>";
+}
+
+static inline int mcp25xxfd_vdd_enable(const struct mcp25xxfd_priv *priv)
+{
+ if (!priv->reg_vdd)
+ return 0;
+
+ return regulator_enable(priv->reg_vdd);
+}
+
+static inline int mcp25xxfd_vdd_disable(const struct mcp25xxfd_priv *priv)
+{
+ if (!priv->reg_vdd)
+ return 0;
+
+ return regulator_disable(priv->reg_vdd);
+}
+
+static inline int
+mcp25xxfd_transceiver_enable(const struct mcp25xxfd_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int
+mcp25xxfd_transceiver_disable(const struct mcp25xxfd_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
+static int mcp25xxfd_clks_and_vdd_enable(const struct mcp25xxfd_priv *priv)
+{
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ err = mcp25xxfd_vdd_enable(priv);
+ if (err)
+ clk_disable_unprepare(priv->clk);
+
+ /* Wait for oscillator stabilisation time after power up */
+ usleep_range(MCP25XXFD_OSC_STAB_SLEEP_US,
+ 2 * MCP25XXFD_OSC_STAB_SLEEP_US);
+
+ return err;
+}
+
+static int mcp25xxfd_clks_and_vdd_disable(const struct mcp25xxfd_priv *priv)
+{
+ int err;
+
+ err = mcp25xxfd_vdd_disable(priv);
+ if (err)
+ return err;
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static inline u8
+mcp25xxfd_cmd_prepare_write_reg(const struct mcp25xxfd_priv *priv,
+ union mcp25xxfd_write_reg_buf *write_reg_buf,
+ const u16 reg, const u32 mask, const u32 val)
+{
+ u8 first_byte, last_byte, len;
+ u8 *data;
+ __le32 val_le32;
+
+ first_byte = mcp25xxfd_first_byte_set(mask);
+ last_byte = mcp25xxfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ data = mcp25xxfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+ memcpy(data, &val_le32, len);
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_REG) {
+ u16 crc;
+
+ mcp25xxfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(write_reg_buf->crc.cmd);
+ crc = mcp25xxfd_crc16_compute(&write_reg_buf->crc, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->crc.crc);
+ } else {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ }
+
+ return len;
+}
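+
+/* Example for the byte-trimming above (illustrative): for
+ * mask = 0x0000ff00 only byte 1 of the register is touched:
+ * first_byte = last_byte = 1, len = 1, so a single data byte holding
+ * (val >> 8) is written to reg + 1. With MCP25XXFD_QUIRK_CRC_REG the
+ * CRC command header and a trailing big-endian CRC-16 are added
+ * around that payload.
+ */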
+
+static inline int
+mcp25xxfd_tef_tail_get_from_chip(const struct mcp25xxfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp25xxfd_hw_tef_obj);
+
+ return 0;
+}
+
+static inline int
+mcp25xxfd_tx_tail_get_from_chip(const struct mcp25xxfd_priv *priv,
+ u8 *tx_tail)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg,
+ MCP25XXFD_REG_FIFOSTA(MCP25XXFD_TX_FIFO),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *tx_tail = FIELD_GET(MCP25XXFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp25xxfd_rx_head_get_from_chip(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_rx_ring *ring,
+ u8 *rx_head)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *rx_head = FIELD_GET(MCP25XXFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp25xxfd_rx_tail_get_from_chip(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP25XXFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static void
+mcp25xxfd_tx_ring_init_tx_obj(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_tx_ring *ring,
+ struct mcp25xxfd_tx_obj *tx_obj,
+ const u8 rts_buf_len,
+ const u8 n)
+{
+ struct spi_transfer *xfer;
+ u16 addr;
+
+ /* FIFO load */
+ addr = mcp25xxfd_get_tx_obj_addr(ring, n);
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_TX)
+ mcp25xxfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
+ addr);
+ else
+ mcp25xxfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
+ addr);
+
+ xfer = &tx_obj->xfer[0];
+ xfer->tx_buf = &tx_obj->buf;
+ xfer->len = 0; /* actual len is assigned on the fly */
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* FIFO request to send */
+ xfer = &tx_obj->xfer[1];
+ xfer->tx_buf = &ring->rts_buf;
+ xfer->len = rts_buf_len;
+
+ /* SPI message */
+ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
+ ARRAY_SIZE(tx_obj->xfer));
+}
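+
+/* The two transfers above form one SPI message: transfer 0 loads the
+ * TX object into the controller's RAM, transfer 1 writes FIFOCON to
+ * request transmission. cs_change = 1 on the first transfer toggles
+ * chip select in between, so the controller sees two separate SPI
+ * commands within a single spi_async() call.
+ */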
+
+static void mcp25xxfd_ring_init(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_tx_ring *tx_ring;
+ struct mcp25xxfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
+ struct mcp25xxfd_tx_obj *tx_obj;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ priv->tef.head = 0;
+ priv->tef.tail = 0;
+
+ /* TX */
+ tx_ring = priv->tx;
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->base = mcp25xxfd_get_tef_obj_addr(tx_ring->obj_num);
+
+ /* FIFO request to send */
+ addr = MCP25XXFD_REG_FIFOCON(MCP25XXFD_TX_FIFO);
+ val = MCP25XXFD_REG_FIFOCON_TXREQ | MCP25XXFD_REG_FIFOCON_UINC;
+ len = mcp25xxfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
+ addr, val, val);
+
+ mcp25xxfd_for_each_tx_obj(tx_ring, tx_obj, i)
+ mcp25xxfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+
+ /* RX */
+ mcp25xxfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->nr = i;
+ rx_ring->fifo_nr = MCP25XXFD_RX_FIFO(i);
+
+ if (!prev_rx_ring)
+ rx_ring->base =
+ mcp25xxfd_get_tx_obj_addr(tx_ring,
+ tx_ring->obj_num);
+ else
+ rx_ring->base = prev_rx_ring->base +
+ prev_rx_ring->obj_size *
+ prev_rx_ring->obj_num;
+
+ prev_rx_ring = rx_ring;
+ }
+}
+
+static void mcp25xxfd_ring_free(struct mcp25xxfd_priv *priv)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
+ kfree(priv->rx[i]);
+ priv->rx[i] = NULL;
+ }
+}
+
+static int mcp25xxfd_ring_alloc(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_tx_ring *tx_ring;
+ struct mcp25xxfd_rx_ring *rx_ring;
+ int tef_obj_size, tx_obj_size, rx_obj_size;
+ int tx_obj_num;
+ int ram_free, i;
+
+ tef_obj_size = sizeof(struct mcp25xxfd_hw_tef_obj);
+ /* listen-only mode works like FD mode */
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
+ tx_obj_num = MCP25XXFD_TX_OBJ_NUM_CANFD;
+ tx_obj_size = sizeof(struct mcp25xxfd_hw_tx_obj_canfd);
+ rx_obj_size = sizeof(struct mcp25xxfd_hw_rx_obj_canfd);
+ } else {
+ tx_obj_num = MCP25XXFD_TX_OBJ_NUM_CAN;
+ tx_obj_size = sizeof(struct mcp25xxfd_hw_tx_obj_can);
+ rx_obj_size = sizeof(struct mcp25xxfd_hw_rx_obj_can);
+ }
+
+ tx_ring = priv->tx;
+ tx_ring->obj_num = tx_obj_num;
+ tx_ring->obj_size = tx_obj_size;
+
+ ram_free = MCP25XXFD_RAM_SIZE - tx_obj_num *
+ (tef_obj_size + tx_obj_size);
+
+ for (i = 0;
+ i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
+ i++) {
+ int rx_obj_num;
+
+ rx_obj_num = ram_free / rx_obj_size;
+ rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);
+
+ rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
+ GFP_KERNEL);
+ if (!rx_ring) {
+ mcp25xxfd_ring_free(priv);
+ return -ENOMEM;
+ }
+ rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+
+ ram_free -= rx_ring->obj_num * rx_ring->obj_size;
+ }
+ priv->rx_ring_num = i;
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
+ tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
+ tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+
+ mcp25xxfd_for_each_rx_ring(priv, rx_ring, i) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
+ i, rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_size * rx_ring->obj_num);
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %d bytes\n",
+ ram_free);
+
+ return 0;
+}
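+
+/* Worked example for the FIFO sizing above (illustrative): if
+ * ram_free / rx_obj_size = 23, then fls(23) = 5 and
+ * 1 << (5 - 1) = 16 RX objects are allocated for this ring (rounded
+ * down to a power of two, capped at 32); the remaining RAM is
+ * offered to the next ring.
+ */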
+
+static inline int
+mcp25xxfd_chip_get_mode(const struct mcp25xxfd_priv *priv, u8 *mode)
+{
+ u32 val;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_CON, &val);
+ if (err)
+ return err;
+
+ *mode = FIELD_GET(MCP25XXFD_REG_CON_OPMOD_MASK, val);
+
+ return 0;
+}
+
+static int
+__mcp25xxfd_chip_set_mode(const struct mcp25xxfd_priv *priv,
+ const u8 mode_req, bool nowait)
+{
+ u32 con, con_reqop;
+ int err;
+
+ con_reqop = FIELD_PREP(MCP25XXFD_REG_CON_REQOP_MASK, mode_req);
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_CON,
+ MCP25XXFD_REG_CON_REQOP_MASK, con_reqop);
+ if (err)
+ return err;
+
+ if (mode_req == MCP25XXFD_REG_CON_MODE_SLEEP || nowait)
+ return 0;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP25XXFD_REG_CON, con,
+ FIELD_GET(MCP25XXFD_REG_CON_OPMOD_MASK,
+ con) == mode_req,
+ MCP25XXFD_POLL_SLEEP_US,
+ MCP25XXFD_POLL_TIMEOUT_US);
+ if (err) {
+ u8 mode = FIELD_GET(MCP25XXFD_REG_CON_OPMOD_MASK, con);
+
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
+ mcp25xxfd_get_mode_str(mode_req), mode_req,
+ mcp25xxfd_get_mode_str(mode), mode);
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int
+mcp25xxfd_chip_set_mode(const struct mcp25xxfd_priv *priv,
+ const u8 mode_req)
+{
+ return __mcp25xxfd_chip_set_mode(priv, mode_req, false);
+}
+
+static inline int
+mcp25xxfd_chip_set_mode_nowait(const struct mcp25xxfd_priv *priv,
+ const u8 mode_req)
+{
+ return __mcp25xxfd_chip_set_mode(priv, mode_req, true);
+}
+
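+/* A reading of all zeros or all ones usually means the chip is
+ * absent or unpowered and MISO is stuck low or high, so treat both
+ * values as "no controller detected".
+ */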
+static inline bool mcp25xxfd_osc_invalid(u32 reg)
+{
+ return reg == 0x0 || reg == 0xffffffff;
+}
+
+static int mcp25xxfd_chip_clock_enable(const struct mcp25xxfd_priv *priv)
+{
+ u32 osc, osc_reference, osc_mask;
+ int err;
+
+ /* Set Power On Defaults for "Clock Output Divisor" and remove
+ * "Oscillator Disable" bit.
+ */
+ osc = FIELD_PREP(MCP25XXFD_REG_OSC_CLKODIV_MASK,
+ MCP25XXFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP25XXFD_REG_OSC_OSCRDY;
+ osc_mask = MCP25XXFD_REG_OSC_OSCRDY | MCP25XXFD_REG_OSC_PLLRDY;
+
+ /* Note:
+ *
+ * If the controller is in Sleep Mode the following write only
+ * removes the "Oscillator Disable" bit and powers it up. All
+ * other bits are unaffected.
+ */
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_OSC, osc);
+ if (err)
+ return err;
+
+ /* Wait for "Oscillator Ready" bit */
+ err = regmap_read_poll_timeout(priv->map_reg, MCP25XXFD_REG_OSC, osc,
+ (osc & osc_mask) == osc_reference,
+ MCP25XXFD_OSC_STAB_SLEEP_US,
+ MCP25XXFD_OSC_STAB_TIMEOUT_US);
+ if (mcp25xxfd_osc_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to detect %s (osc=0x%08x).\n",
+ mcp25xxfd_get_model_str(priv), osc);
+ return -ENODEV;
+ } else if (err == -ETIMEDOUT) {
+ netdev_err(priv->ndev,
+ "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
+ osc, osc_reference);
+ return -ETIMEDOUT;
+ } else if (err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static int mcp25xxfd_chip_softreset_do(const struct mcp25xxfd_priv *priv)
+{
+ const __be16 cmd = mcp25xxfd_cmd_reset();
+ int err;
+
+ /* The Set Mode and SPI Reset commands only seem to work if
+ * the controller is not in Sleep Mode.
+ */
+ err = mcp25xxfd_chip_clock_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp25xxfd_chip_set_mode(priv, MCP25XXFD_REG_CON_MODE_CONFIG);
+ if (err)
+ return err;
+
+ /* spi_write_then_read() works with non DMA-safe buffers */
+ return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
+}
+
+static int mcp25xxfd_chip_softreset_check(const struct mcp25xxfd_priv *priv)
+{
+ u32 osc, osc_reference;
+ u8 mode;
+ int err;
+
+ err = mcp25xxfd_chip_get_mode(priv, &mode);
+ if (err)
+ return err;
+
+ if (mode != MCP25XXFD_REG_CON_MODE_CONFIG) {
+ netdev_info(priv->ndev,
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp25xxfd_get_mode_str(mode), mode);
+ return -ETIMEDOUT;
+ }
+
+ osc_reference = MCP25XXFD_REG_OSC_OSCRDY |
+ FIELD_PREP(MCP25XXFD_REG_OSC_CLKODIV_MASK,
+ MCP25XXFD_REG_OSC_CLKODIV_10);
+
+ /* check reset defaults of OSC reg */
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_OSC, &osc);
+ if (err)
+ return err;
+
+ if (osc != osc_reference) {
+ netdev_info(priv->ndev,
+ "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
+ osc, osc_reference);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mcp25xxfd_chip_softreset(const struct mcp25xxfd_priv *priv)
+{
+ int err, i;
+
+ for (i = 0; i < MCP25XXFD_SOFTRESET_RETRIES_MAX; i++) {
+ if (i)
+ netdev_info(priv->ndev,
+ "Retrying to reset Controller.\n");
+
+ err = mcp25xxfd_chip_softreset_do(priv);
+ if (err == -ETIMEDOUT)
+ continue;
+ if (err)
+ return err;
+
+ err = mcp25xxfd_chip_softreset_check(priv);
+ if (err == -ETIMEDOUT)
+ continue;
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ if (err)
+ return err;
+
+ return -ETIMEDOUT;
+}
+
+static int mcp25xxfd_chip_clock_init(const struct mcp25xxfd_priv *priv)
+{
+ u32 osc;
+ int err;
+
+ /* Activate Low Power Mode on Oscillator Disable. This only
+ * works on the MCP2518FD. The MCP2517FD will go into normal
+ * Sleep Mode instead.
+ */
+ osc = MCP25XXFD_REG_OSC_LPMEN |
+ FIELD_PREP(MCP25XXFD_REG_OSC_CLKODIV_MASK,
+ MCP25XXFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_OSC, osc);
+ if (err)
+ return err;
+
+ /* Set Time Base Counter Prescaler to 1.
+ *
+ * This means an overflow of the 32 bit Time Base Counter
+ * register at 40 MHz every 107 seconds.
+ */
+ return regmap_write(priv->map_reg, MCP25XXFD_REG_TSCON,
+ MCP25XXFD_REG_TSCON_TBCEN);
+}
+
+static int mcp25xxfd_set_bittiming(const struct mcp25xxfd_priv *priv)
+{
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ u32 val = 0;
+ s8 tdco;
+ int err;
+
+ /* CAN Control Register
+ *
+ * - no transmit bandwidth sharing
+ * - config mode
+ * - disable transmit queue
+ * - store in transmit FIFO event
+ * - transition to restricted operation mode on system error
+ * - ESI is transmitted recessive when ESI of message is high or
+ * CAN controller error passive
+ * - restricted retransmission attempts,
+ * use TQXCON_TXAT and FIFOCON_TXAT
+ * - wake-up filter bits T11FILTER
+ * - use CAN bus line filter for wakeup
+ * - protocol exception is treated as a form error
+ * - Do not compare data bytes
+ */
+ val = FIELD_PREP(MCP25XXFD_REG_CON_REQOP_MASK,
+ MCP25XXFD_REG_CON_MODE_CONFIG) |
+ MCP25XXFD_REG_CON_STEF |
+ MCP25XXFD_REG_CON_ESIGM |
+ MCP25XXFD_REG_CON_RTXAT |
+ FIELD_PREP(MCP25XXFD_REG_CON_WFT_MASK,
+ MCP25XXFD_REG_CON_WFT_T11FILTER) |
+ MCP25XXFD_REG_CON_WAKFIL |
+ MCP25XXFD_REG_CON_PXEDIS;
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ val |= MCP25XXFD_REG_CON_ISOCRCEN;
+
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_CON, val);
+ if (err)
+ return err;
+
+ /* Nominal Bit Time */
+ val = FIELD_PREP(MCP25XXFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(MCP25XXFD_REG_NBTCFG_TSEG1_MASK,
+ bt->prop_seg + bt->phase_seg1 - 1) |
+ FIELD_PREP(MCP25XXFD_REG_NBTCFG_TSEG2_MASK,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(MCP25XXFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);
+
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_NBTCFG, val);
+ if (err)
+ return err;
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ /* Data Bit Time */
+ val = FIELD_PREP(MCP25XXFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
+ FIELD_PREP(MCP25XXFD_REG_DBTCFG_TSEG1_MASK,
+ dbt->prop_seg + dbt->phase_seg1 - 1) |
+ FIELD_PREP(MCP25XXFD_REG_DBTCFG_TSEG2_MASK,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(MCP25XXFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);
+
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_DBTCFG, val);
+ if (err)
+ return err;
+
+ /* Transmitter Delay Compensation */
+ tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
+ -64, 63);
+ val = FIELD_PREP(MCP25XXFD_REG_TDC_TDCMOD_MASK,
+ MCP25XXFD_REG_TDC_TDCMOD_AUTO) |
+ FIELD_PREP(MCP25XXFD_REG_TDC_TDCO_MASK, tdco);
+
+ return regmap_write(priv->map_reg, MCP25XXFD_REG_TDC, val);
+}
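+
+/* Worked TDC example (illustrative): with brp = 1 and prop_seg +
+ * phase_seg1 = 14 in the data phase, tdco = 1 * 14 = 14, clamped to
+ * the register's -64..63 range; in TDCMOD_AUTO mode the controller
+ * measures the transmitter delay itself and adds this offset for the
+ * secondary sample point.
+ */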
+
+static int mcp25xxfd_chip_rx_int_enable(const struct mcp25xxfd_priv *priv)
+{
+ u32 val;
+
+ if (!priv->rx_int)
+ return 0;
+
+ /* Configure GPIOs:
+ * - PIN0: GPIO Input
+ * - PIN1: GPIO Input/RX Interrupt
+ *
+ * PIN1 must be Input, otherwise there is a glitch on the
+ * rx-INT line. It happens between setting the PIN as output
+ * (in the first byte of the SPI transfer) and configuring the
+ * PIN as interrupt (in the last byte of the SPI transfer).
+ */
+ val = MCP25XXFD_REG_IOCON_PM0 | MCP25XXFD_REG_IOCON_TRIS1 |
+ MCP25XXFD_REG_IOCON_TRIS0;
+ return regmap_write(priv->map_reg, MCP25XXFD_REG_IOCON, val);
+}
+
+static int mcp25xxfd_chip_rx_int_disable(const struct mcp25xxfd_priv *priv)
+{
+ u32 val;
+
+ if (!priv->rx_int)
+ return 0;
+
+ /* Configure GPIOs:
+ * - PIN0: GPIO Input
+ * - PIN1: GPIO Input
+ */
+ val = MCP25XXFD_REG_IOCON_PM1 | MCP25XXFD_REG_IOCON_PM0 |
+ MCP25XXFD_REG_IOCON_TRIS1 | MCP25XXFD_REG_IOCON_TRIS0;
+ return regmap_write(priv->map_reg, MCP25XXFD_REG_IOCON, val);
+}
+
+static int
+mcp25xxfd_chip_rx_fifo_init_one(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_rx_ring *ring)
+{
+ u32 fifo_con;
+
+ /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
+ *
+ * FIFOs hit by an RX MAB overflow with RXOVIE enabled will
+ * generate an RXOVIF; use this to properly detect RX MAB
+ * overflows.
+ */
+ fifo_con = FIELD_PREP(MCP25XXFD_REG_FIFOCON_FSIZE_MASK,
+ ring->obj_num - 1) |
+ MCP25XXFD_REG_FIFOCON_RXTSEN |
+ MCP25XXFD_REG_FIFOCON_RXOVIE |
+ MCP25XXFD_REG_FIFOCON_TFNRFNIE;
+
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
+ fifo_con |= FIELD_PREP(MCP25XXFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP25XXFD_REG_FIFOCON_PLSIZE_64);
+ else
+ fifo_con |= FIELD_PREP(MCP25XXFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP25XXFD_REG_FIFOCON_PLSIZE_8);
+
+ return regmap_write(priv->map_reg,
+ MCP25XXFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
+}
+
+static int
+mcp25xxfd_chip_rx_filter_init_one(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_rx_ring *ring)
+{
+ u32 fltcon;
+
+ fltcon = MCP25XXFD_REG_FLTCON_FLTEN(ring->nr) |
+ MCP25XXFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
+
+ return regmap_update_bits(priv->map_reg,
+ MCP25XXFD_REG_FLTCON(ring->nr >> 2),
+ MCP25XXFD_REG_FLTCON_FLT_MASK(ring->nr),
+ fltcon);
+}
+
+static int mcp25xxfd_chip_fifo_init(const struct mcp25xxfd_priv *priv)
+{
+ const struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+ const struct mcp25xxfd_rx_ring *rx_ring;
+ u32 val;
+ int err, n;
+
+ /* TEF */
+ val = FIELD_PREP(MCP25XXFD_REG_TEFCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP25XXFD_REG_TEFCON_TEFTSEN |
+ MCP25XXFD_REG_TEFCON_TEFOVIE |
+ MCP25XXFD_REG_TEFCON_TEFNEIE;
+
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_TEFCON, val);
+ if (err)
+ return err;
+
+ /* FIFO 1 - TX */
+ val = FIELD_PREP(MCP25XXFD_REG_FIFOCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP25XXFD_REG_FIFOCON_TXEN |
+ MCP25XXFD_REG_FIFOCON_TXATIE;
+
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
+ val |= FIELD_PREP(MCP25XXFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP25XXFD_REG_FIFOCON_PLSIZE_64);
+ else
+ val |= FIELD_PREP(MCP25XXFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP25XXFD_REG_FIFOCON_PLSIZE_8);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ val |= FIELD_PREP(MCP25XXFD_REG_FIFOCON_TXAT_MASK,
+ MCP25XXFD_REG_FIFOCON_TXAT_ONE_SHOT);
+ else
+ val |= FIELD_PREP(MCP25XXFD_REG_FIFOCON_TXAT_MASK,
+ MCP25XXFD_REG_FIFOCON_TXAT_UNLIMITED);
+
+ err = regmap_write(priv->map_reg,
+ MCP25XXFD_REG_FIFOCON(MCP25XXFD_TX_FIFO),
+ val);
+ if (err)
+ return err;
+
+ /* RX FIFOs */
+ mcp25xxfd_for_each_rx_ring(priv, rx_ring, n) {
+ err = mcp25xxfd_chip_rx_fifo_init_one(priv, rx_ring);
+ if (err)
+ return err;
+
+ err = mcp25xxfd_chip_rx_filter_init_one(priv, rx_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mcp25xxfd_chip_ecc_init(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_ecc *ecc = &priv->ecc;
+ void *ram;
+ u32 val = 0;
+ int err;
+
+ ecc->ecc_stat = 0;
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_ECC)
+ val = MCP25XXFD_REG_ECCCON_ECCEN;
+
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_ECCCON,
+ MCP25XXFD_REG_ECCCON_ECCEN, val);
+ if (err)
+ return err;
+
+ ram = kzalloc(MCP25XXFD_RAM_SIZE, GFP_KERNEL);
+ if (!ram)
+ return -ENOMEM;
+
+ err = regmap_raw_write(priv->map_reg, MCP25XXFD_RAM_START, ram,
+ MCP25XXFD_RAM_SIZE);
+ kfree(ram);
+
+ return err;
+}
+
+static inline void mcp25xxfd_ecc_tefif_successful(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+static u8 mcp25xxfd_get_normal_mode(const struct mcp25xxfd_priv *priv)
+{
+ u8 mode;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mode = MCP25XXFD_REG_CON_MODE_LISTENONLY;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ mode = MCP25XXFD_REG_CON_MODE_MIXED;
+ else
+ mode = MCP25XXFD_REG_CON_MODE_CAN2_0;
+
+ return mode;
+}
+
+static int
+__mcp25xxfd_chip_set_normal_mode(const struct mcp25xxfd_priv *priv,
+ bool nowait)
+{
+ u8 mode;
+
+ mode = mcp25xxfd_get_normal_mode(priv);
+
+ return __mcp25xxfd_chip_set_mode(priv, mode, nowait);
+}
+
+static inline int
+mcp25xxfd_chip_set_normal_mode(const struct mcp25xxfd_priv *priv)
+{
+ return __mcp25xxfd_chip_set_normal_mode(priv, false);
+}
+
+static inline int
+mcp25xxfd_chip_set_normal_mode_nowait(const struct mcp25xxfd_priv *priv)
+{
+ return __mcp25xxfd_chip_set_normal_mode(priv, true);
+}
+
+static int mcp25xxfd_chip_interrupts_enable(const struct mcp25xxfd_priv *priv)
+{
+ u32 val;
+ int err;
+
+ val = MCP25XXFD_REG_CRC_FERRIE | MCP25XXFD_REG_CRC_CRCERRIE;
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_CRC, val);
+ if (err)
+ return err;
+
+ val = MCP25XXFD_REG_ECCCON_DEDIE | MCP25XXFD_REG_ECCCON_SECIE;
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_ECCCON, val, val);
+ if (err)
+ return err;
+
+ val = MCP25XXFD_REG_INT_CERRIE |
+ MCP25XXFD_REG_INT_SERRIE |
+ MCP25XXFD_REG_INT_RXOVIE |
+ MCP25XXFD_REG_INT_TXATIE |
+ MCP25XXFD_REG_INT_SPICRCIE |
+ MCP25XXFD_REG_INT_ECCIE |
+ MCP25XXFD_REG_INT_TEFIE |
+ MCP25XXFD_REG_INT_MODIE |
+ MCP25XXFD_REG_INT_RXIE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ val |= MCP25XXFD_REG_INT_IVMIE;
+
+ return regmap_write(priv->map_reg, MCP25XXFD_REG_INT, val);
+}
+
+static int mcp25xxfd_chip_interrupts_disable(const struct mcp25xxfd_priv *priv)
+{
+ int err;
+ u32 mask;
+
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_INT, 0);
+ if (err)
+ return err;
+
+ mask = MCP25XXFD_REG_ECCCON_DEDIE | MCP25XXFD_REG_ECCCON_SECIE;
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_ECCCON,
+ mask, 0x0);
+ if (err)
+ return err;
+
+ return regmap_write(priv->map_reg, MCP25XXFD_REG_CRC, 0);
+}
+
+static int mcp25xxfd_chip_stop(struct mcp25xxfd_priv *priv,
+ const enum can_state state)
+{
+ priv->can.state = state;
+
+ mcp25xxfd_chip_interrupts_disable(priv);
+ mcp25xxfd_chip_rx_int_disable(priv);
+ return mcp25xxfd_chip_set_mode(priv, MCP25XXFD_REG_CON_MODE_SLEEP);
+}
+
+static int mcp25xxfd_chip_start(struct mcp25xxfd_priv *priv)
+{
+ int err;
+
+ err = mcp25xxfd_chip_softreset(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp25xxfd_chip_clock_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp25xxfd_set_bittiming(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp25xxfd_chip_rx_int_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp25xxfd_chip_ecc_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ mcp25xxfd_ring_init(priv);
+
+ err = mcp25xxfd_chip_fifo_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ err = mcp25xxfd_chip_set_normal_mode(priv);
+ if (err)
+ goto out_chip_stop;
+
+ return 0;
+
+ out_chip_stop:
+ mcp25xxfd_chip_stop(priv, CAN_STATE_STOPPED);
+
+ return err;
+}
+
+static int mcp25xxfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct mcp25xxfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = mcp25xxfd_chip_start(priv);
+ if (err)
+ return err;
+
+ err = mcp25xxfd_chip_interrupts_enable(priv);
+ if (err) {
+ mcp25xxfd_chip_stop(priv, CAN_STATE_STOPPED);
+ return err;
+ }
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int __mcp25xxfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ const struct mcp25xxfd_priv *priv = netdev_priv(ndev);
+ u32 trec;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_TREC, &trec);
+ if (err)
+ return err;
+
+ if (trec & MCP25XXFD_REG_TREC_TXBO)
+ bec->txerr = 256;
+ else
+ bec->txerr = FIELD_GET(MCP25XXFD_REG_TREC_TEC_MASK, trec);
+ bec->rxerr = FIELD_GET(MCP25XXFD_REG_TREC_REC_MASK, trec);
+
+ return 0;
+}
+
+static int mcp25xxfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ const struct mcp25xxfd_priv *priv = netdev_priv(ndev);
+
+ /* Avoid waking up the controller if the interface is down */
+ if (!(ndev->flags & IFF_UP))
+ return 0;
+
+ /* The controller is powered down during Bus Off, use saved
+ * bec values.
+ */
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ *bec = priv->bec;
+ return 0;
+ }
+
+ return __mcp25xxfd_get_berr_counter(ndev, bec);
+}
+
+static int mcp25xxfd_check_tef_tail(const struct mcp25xxfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY))
+ return 0;
+
+ err = mcp25xxfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp25xxfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp25xxfd_check_rx_tail(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY))
+ return 0;
+
+ err = mcp25xxfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp25xxfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp25xxfd_handle_tefif_recover(const struct mcp25xxfd_priv *priv, const u32 seq)
+{
+ const struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+ u32 tef_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_TEFSTA, &tef_sta);
+ if (err)
+ return err;
+
+ if (tef_sta & MCP25XXFD_REG_TEFSTA_TEFOVIF) {
+ netdev_err(priv->ndev,
+ "Transmit Event FIFO buffer overflow.\n");
+ return -ENOBUFS;
+ }
+
+ netdev_info(priv->ndev,
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
+ tef_sta & MCP25XXFD_REG_TEFSTA_TEFFIF ?
+ "full" : tef_sta & MCP25XXFD_REG_TEFSTA_TEFNEIF ?
+ "not empty" : "empty",
+ seq, priv->tef.tail, priv->tef.head, tx_ring->head);
+
+ /* The Sequence Number in the TEF doesn't match our tef_tail. */
+ return -EAGAIN;
+}
+
+static int
+mcp25xxfd_handle_tefif_one(struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_hw_tef_obj *hw_tef_obj)
+{
+ struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, seq_masked, tef_tail_masked;
+ int err;
+
+ seq = FIELD_GET(MCP25XXFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ hw_tef_obj->flags);
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits, this should be enough to detect
+ * net-yet-completed, i.e. old TEF objects.
+ */
+ seq_masked = seq &
+ field_mask(MCP25XXFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef.tail &
+ field_mask(MCP25XXFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ if (seq_masked != tef_tail_masked)
+ return mcp25xxfd_handle_tefif_recover(priv, seq);
+
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ mcp25xxfd_get_tef_tail(priv),
+ hw_tef_obj->ts);
+ stats->tx_packets++;
+
+ /* finally increment the TEF pointer */
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_TEFCON,
+ GENMASK(15, 8),
+ MCP25XXFD_REG_TEFCON_UINC);
+ if (err)
+ return err;
+
+ priv->tef.tail++;
+ tx_ring->tail++;
+
+ return mcp25xxfd_check_tef_tail(priv);
+}
+
+static int mcp25xxfd_tef_ring_update(struct mcp25xxfd_priv *priv)
+{
+ const struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+ unsigned int new_head;
+ u8 chip_tx_tail;
+ int err;
+
+ err = mcp25xxfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ /* chip_tx_tail is the next TX object sent by the HW.
+ * The new TEF head must be >= the old head, ...
+ */
+ new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef.head)
+ new_head += tx_ring->obj_num;
+
+ /* ... but it cannot exceed the TX head. */
+ priv->tef.head = min(new_head, tx_ring->head);
+
+ return mcp25xxfd_check_tef_tail(priv);
+}
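+
+/* Example of the wrap-around arithmetic above (illustrative): with
+ * obj_num = 8, tef.head = 13 and chip_tx_tail = 6,
+ * round_down(13, 8) + 6 = 14 > 13, so the new head is
+ * min(14, tx_ring->head); had chip_tx_tail been 3, 8 + 3 = 11 <= 13
+ * and obj_num would be added to get 19.
+ */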
+
+static inline int
+mcp25xxfd_tef_obj_read(const struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+
+ if (IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp25xxfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / sizeof(u32) * len);
+}
+
+static int mcp25xxfd_handle_tefif(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_hw_tef_obj hw_tef_obj[MCP25XXFD_TX_OBJ_NUM_MAX];
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp25xxfd_tef_ring_update(priv);
+ if (err)
+ return err;
+
+ tef_tail = mcp25xxfd_get_tef_tail(priv);
+ len = mcp25xxfd_get_tef_len(priv);
+ l = mcp25xxfd_get_tef_linear_len(priv);
+ err = mcp25xxfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp25xxfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ err = mcp25xxfd_handle_tefif_one(priv, &hw_tef_obj[i]);
+ /* -EAGAIN means the Sequence Number in the TEF
+ * doesn't match our tef_tail. This can happen if we
+ * read the TEF objects too early. Leave the loop and let
+ * the interrupt handler call us again.
+ */
+ if (err == -EAGAIN)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+ }
+
+ out_netif_wake_queue:
+ mcp25xxfd_ecc_tefif_successful(priv);
+
+ if (mcp25xxfd_get_tx_free(priv->tx)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+}
+
+static int
+mcp25xxfd_rx_ring_update(const struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_rx_ring *ring)
+{
+ u32 new_head;
+ u8 chip_rx_head;
+ int err;
+
+ err = mcp25xxfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
+ if (err)
+ return err;
+
+ /* chip_rx_head is the next RX object filled by the HW.
+ * The new RX head must be >= the old head.
+ */
+ new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+ if (new_head <= ring->head)
+ new_head += ring->obj_num;
+
+ ring->head = new_head;
+
+ return mcp25xxfd_check_rx_tail(priv, ring);
+}
+
+static void
+mcp25xxfd_hw_rx_obj_to_skb(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_hw_rx_obj_canfd *hw_rx_obj,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (hw_rx_obj->flags & MCP25XXFD_OBJ_FLAGS_IDE) {
+ u32 sid, eid;
+
+ eid = FIELD_GET(MCP25XXFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
+ sid = FIELD_GET(MCP25XXFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
+
+ cfd->can_id = CAN_EFF_FLAG |
+ FIELD_PREP(MCP25XXFD_REG_FRAME_EFF_EID_MASK, eid) |
+ FIELD_PREP(MCP25XXFD_REG_FRAME_EFF_SID_MASK, sid);
+ } else {
+ cfd->can_id = FIELD_GET(MCP25XXFD_OBJ_ID_SID_MASK,
+ hw_rx_obj->id);
+ }
+
+ /* CANFD */
+ if (hw_rx_obj->flags & MCP25XXFD_OBJ_FLAGS_FDF) {
+ u8 dlc;
+
+ if (hw_rx_obj->flags & MCP25XXFD_OBJ_FLAGS_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ if (hw_rx_obj->flags & MCP25XXFD_OBJ_FLAGS_BRS)
+ cfd->flags |= CANFD_BRS;
+
+ dlc = FIELD_GET(MCP25XXFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
+ cfd->len = can_dlc2len(get_canfd_dlc(dlc));
+ } else {
+ if (hw_rx_obj->flags & MCP25XXFD_OBJ_FLAGS_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ cfd->len = get_can_dlc(FIELD_GET(MCP25XXFD_OBJ_FLAGS_DLC,
+ hw_rx_obj->flags));
+ }
+
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+}
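+
+/* The controller stores a 29-bit extended CAN ID split across two
+ * fields: SID holds the upper 11 bits and EID the lower 18, which is
+ * why the code above reassembles can_id from both masks for IDE
+ * frames.
+ */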
+
+static int
+mcp25xxfd_handle_rxif_one(struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_rx_ring *ring,
+ const struct mcp25xxfd_hw_rx_obj_canfd *hw_rx_obj)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
+ int err;
+
+ if (hw_rx_obj->flags & MCP25XXFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
+
+ if (!cfd) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ mcp25xxfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ ring->tail++;
+
+ /* finally increment the RX pointer */
+ return regmap_update_bits(priv->map_reg,
+ MCP25XXFD_REG_FIFOCON(ring->fifo_nr),
+ GENMASK(15, 8),
+ MCP25XXFD_REG_FIFOCON_UINC);
+}
+
+static inline int
+mcp25xxfd_rx_obj_read(const struct mcp25xxfd_priv *priv,
+ const struct mcp25xxfd_rx_ring *ring,
+ struct mcp25xxfd_hw_rx_obj_canfd *hw_rx_obj,
+ const u8 offset, const u8 len)
+{
+ return regmap_bulk_read(priv->map_rx,
+ mcp25xxfd_get_rx_obj_addr(ring, offset),
+ hw_rx_obj,
+ len * ring->obj_size / sizeof(u32));
+}
+
+static int
+mcp25xxfd_handle_rxif_ring(struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_rx_ring *ring)
+{
+ struct mcp25xxfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+ u8 rx_tail, len;
+ int err, i;
+
+ err = mcp25xxfd_rx_ring_update(priv, ring);
+ if (err)
+ return err;
+
+ while ((len = mcp25xxfd_get_rx_linear_len(ring))) {
+ rx_tail = mcp25xxfd_get_rx_tail(ring);
+
+ err = mcp25xxfd_rx_obj_read(priv, ring, hw_rx_obj,
+ rx_tail, len);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; i++) {
+ err = mcp25xxfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mcp25xxfd_handle_rxif(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_rx_ring *ring;
+ int err, n;
+
+ mcp25xxfd_for_each_rx_ring(priv, ring, n) {
+ err = mcp25xxfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int mcp25xxfd_get_timestamp(const struct mcp25xxfd_priv *priv,
+ u32 *timestamp)
+{
+ return regmap_read(priv->map_reg, MCP25XXFD_REG_TBC, timestamp);
+}
+
+static struct sk_buff *
+mcp25xxfd_alloc_can_err_skb(const struct mcp25xxfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ int err;
+
+ err = mcp25xxfd_get_timestamp(priv, timestamp);
+ if (err)
+ return NULL;
+
+ return alloc_can_err_skb(priv->ndev, cf);
+}
+
+static int mcp25xxfd_handle_rxovif(struct mcp25xxfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct mcp25xxfd_rx_ring *ring;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u32 timestamp, rxovif;
+ int err, i;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_RXOVIF, &rxovif);
+ if (err)
+ return err;
+
+ mcp25xxfd_for_each_rx_ring(priv, ring, i) {
+ if (!(rxovif & BIT(ring->fifo_nr)))
+ continue;
+
+ /* If SERRIF is active, there was a RX MAB overflow. */
+ if (priv->regs_status.intf & MCP25XXFD_REG_INT_SERRIF) {
+ netdev_info(priv->ndev,
+ "RX-%d: MAB overflow detected.\n",
+ ring->nr);
+ } else {
+ netdev_info(priv->ndev,
+ "RX-%d: FIFO overflow.\n", ring->nr);
+ }
+
+ err = regmap_update_bits(priv->map_reg,
+ MCP25XXFD_REG_FIFOSTA(ring->fifo_nr),
+ MCP25XXFD_REG_FIFOSTA_RXOVIF,
+ 0x0);
+ if (err)
+ return err;
+ }
+
+ skb = mcp25xxfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (!skb)
+ return 0;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int mcp25xxfd_handle_txatif(struct mcp25xxfd_priv *priv)
+{
+ netdev_info(priv->ndev, "%s\n", __func__);
+
+ return 0;
+}
+
+static int mcp25xxfd_handle_ivmif(struct mcp25xxfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 bdiag1, timestamp;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ int err;
+
+ err = mcp25xxfd_get_timestamp(priv, &timestamp);
+ if (err)
+ return err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_BDIAG1, &bdiag1);
+ if (err)
+ return err;
+
+ /* Write 0s to clear the error bits, don't write 1s to the
+ * non-active bits, as that would set them.
+ */
+ err = regmap_write(priv->map_reg, MCP25XXFD_REG_BDIAG1, 0x0);
+ if (err)
+ return err;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ /* Controller misconfiguration */
+ if (WARN_ON(bdiag1 & MCP25XXFD_REG_BDIAG1_DLCMM))
+ netdev_err(priv->ndev,
+ "recv'd DLC is larger than PLSIZE of FIFO element.");
+
+ /* RX errors */
+ if (bdiag1 & (MCP25XXFD_REG_BDIAG1_DCRCERR |
+ MCP25XXFD_REG_BDIAG1_NCRCERR)) {
+ netdev_dbg(priv->ndev, "CRC error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (bdiag1 & (MCP25XXFD_REG_BDIAG1_DSTUFERR |
+ MCP25XXFD_REG_BDIAG1_NSTUFERR)) {
+ netdev_dbg(priv->ndev, "Stuff error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+ if (bdiag1 & (MCP25XXFD_REG_BDIAG1_DFORMERR |
+ MCP25XXFD_REG_BDIAG1_NFORMERR)) {
+ netdev_dbg(priv->ndev, "Format error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+
+ /* TX errors */
+ if (bdiag1 & MCP25XXFD_REG_BDIAG1_NACKERR) {
+ netdev_dbg(priv->ndev, "NACK error\n");
+
+ stats->tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ }
+ if (bdiag1 & (MCP25XXFD_REG_BDIAG1_DBIT1ERR |
+ MCP25XXFD_REG_BDIAG1_NBIT1ERR)) {
+ netdev_dbg(priv->ndev, "Bit1 error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
+ }
+ if (bdiag1 & (MCP25XXFD_REG_BDIAG1_DBIT0ERR |
+ MCP25XXFD_REG_BDIAG1_NBIT0ERR)) {
+ netdev_dbg(priv->ndev, "Bit0 error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
+ }
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int mcp25xxfd_handle_cerrif(struct mcp25xxfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ enum can_state new_state, rx_state, tx_state;
+ u32 trec, timestamp;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_TREC, &trec);
+ if (err)
+ return err;
+
+ if (trec & MCP25XXFD_REG_TREC_TXBO)
+ tx_state = CAN_STATE_BUS_OFF;
+ else if (trec & MCP25XXFD_REG_TREC_TXBP)
+ tx_state = CAN_STATE_ERROR_PASSIVE;
+ else if (trec & MCP25XXFD_REG_TREC_TXWARN)
+ tx_state = CAN_STATE_ERROR_WARNING;
+ else
+ tx_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (trec & MCP25XXFD_REG_TREC_RXBP)
+ rx_state = CAN_STATE_ERROR_PASSIVE;
+ else if (trec & MCP25XXFD_REG_TREC_RXWARN)
+ rx_state = CAN_STATE_ERROR_WARNING;
+ else
+ rx_state = CAN_STATE_ERROR_ACTIVE;
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = mcp25xxfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ /* As we're going to switch off the chip now, let's
+ * save the error counters and return them to
+ * userspace, if do_get_berr_counter() is called while
+ * the chip is in Bus Off.
+ */
+ err = __mcp25xxfd_get_berr_counter(priv->ndev, &priv->bec);
+ if (err)
+ return err;
+
+ mcp25xxfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(priv->ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ struct can_berr_counter bec;
+
+ err = mcp25xxfd_get_berr_counter(priv->ndev, &bec);
+ if (err)
+ return err;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+mcp25xxfd_handle_modif(const struct mcp25xxfd_priv *priv, bool *set_normal_mode)
+{
+ const u8 mode_reference = mcp25xxfd_get_normal_mode(priv);
+ u8 mode;
+ int err;
+
+ err = mcp25xxfd_chip_get_mode(priv, &mode);
+ if (err)
+ return err;
+
+ if (mode == mode_reference) {
+ netdev_dbg(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp25xxfd_get_mode_str(mode), mode);
+ return 0;
+ }
+
+ /* According to MCP2517FD errata DS80000792B 1., during a TX
+ * MAB underflow, the controller will transition to Restricted
+ * Operation Mode or Listen Only Mode (depending on SERR2LOM).
+ *
+ * However this is not always the case. If SERR2LOM is
+ * configured for Restricted Operation Mode (SERR2LOM not set)
+ * the MCP2517FD will sometimes transition to Listen Only Mode
+ * first. When polling this bit we see that it will transition
+ * to Restricted Operation Mode shortly after.
+ */
+ if ((priv->devtype_data.quirks & MCP25XXFD_QUIRK_MAB_NO_WARN) &&
+ (mode == MCP25XXFD_REG_CON_MODE_RESTRICTED ||
+ mode == MCP25XXFD_REG_CON_MODE_LISTENONLY))
+ netdev_dbg(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp25xxfd_get_mode_str(mode), mode);
+ else
+ netdev_err(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp25xxfd_get_mode_str(mode), mode);
+
+ /* After the application requests Normal mode, the Controller
+ * will automatically attempt to retransmit the message that
+ * caused the TX MAB underflow.
+ *
+ * However, if there is an ECC error in the TX-RAM, we first
+ * have to reload the tx-object before requesting Normal
+ * mode. This is done later in mcp25xxfd_handle_eccif().
+ */
+ if (priv->regs_status.intf & MCP25XXFD_REG_INT_ECCIF) {
+ *set_normal_mode = true;
+ return 0;
+ }
+
+ return mcp25xxfd_chip_set_normal_mode_nowait(priv);
+}
+
+static int mcp25xxfd_handle_serrif(struct mcp25xxfd_priv *priv)
+{
+ struct mcp25xxfd_ecc *ecc = &priv->ecc;
+ struct net_device_stats *stats = &priv->ndev->stats;
+ bool handled = false;
+
+ /* TX MAB underflow
+ *
+ * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+ * underflow is indicated by SERRIF and MODIF.
+ *
+ * In addition to the effects mentioned in the Errata, there
+ * are Bus Errors due to the aborted CAN frame, so an IVMIF
+ * will be seen as well.
+ *
+ * Sometimes there is an ECC error in the TX-RAM, which leads
+ * to a TX MAB underflow.
+ *
+ * However, probably due to a race condition, there is no
+ * associated MODIF pending.
+ *
+ * Further, there are situations, where the SERRIF is caused
+ * by an ECC error in the TX-RAM, but not even the ECCIF is
+ * set. This only seems to happen _after_ the first occurrence
+ * of an ECCIF (which is tracked in ecc->cnt).
+ *
+ * Treat all of them as known system errors.
+ */
+ if ((priv->regs_status.intf & MCP25XXFD_REG_INT_MODIF &&
+ priv->regs_status.intf & MCP25XXFD_REG_INT_IVMIF) ||
+ priv->regs_status.intf & MCP25XXFD_REG_INT_ECCIF ||
+ ecc->cnt) {
+ const char *msg;
+
+ if (priv->regs_status.intf & MCP25XXFD_REG_INT_ECCIF ||
+ ecc->cnt)
+ msg = "TX MAB underflow due to ECC error detected.";
+ else
+ msg = "TX MAB underflow detected.";
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_MAB_NO_WARN)
+ netdev_dbg(priv->ndev, "%s\n", msg);
+ else
+ netdev_info(priv->ndev, "%s\n", msg);
+
+ stats->tx_aborted_errors++;
+ stats->tx_errors++;
+ handled = true;
+ }
+
+ /* RX MAB overflow
+ *
+ * According to MCP2517FD Errata DS80000792B 1. a RX MAB
+ * overflow is indicated by SERRIF.
+ *
+ * In addition to the effects mentioned in the Errata, (most
+ * of the times) a RXOVIF is raised, if the FIFO that is being
+ * received into has the RXOVIE activated (and we have enabled
+ * RXOVIE on all FIFOs).
+ *
+ * Sometimes there is no RXOVIF; just a RXIF is pending.
+ *
+ * Treat all of them as known system errors.
+ */
+ if (priv->regs_status.intf & MCP25XXFD_REG_INT_RXOVIF ||
+ priv->regs_status.intf & MCP25XXFD_REG_INT_RXIF) {
+ stats->rx_dropped++;
+ handled = true;
+ }
+
+ if (!handled)
+ netdev_err(priv->ndev,
+ "Unhandled System Error Interrupt (intf=0x%08x)!\n",
+ priv->regs_status.intf);
+
+ return 0;
+}
+
+static int
+mcp25xxfd_handle_eccif_recover(struct mcp25xxfd_priv *priv, u8 nr)
+{
+ struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+ struct mcp25xxfd_ecc *ecc = &priv->ecc;
+ struct mcp25xxfd_tx_obj *tx_obj;
+ u8 chip_tx_tail, tx_tail, offset;
+ u16 addr;
+ int err;
+
+ addr = FIELD_GET(MCP25XXFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);
+
+ err = mcp25xxfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ tx_tail = mcp25xxfd_get_tx_tail(tx_ring);
+ offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);
+
+ /* Bail out if one of the following is met:
+ * - tx_tail information is inconsistent
+ * - for mcp2517fd: offset not 0
+ * - for mcp2518fd: offset not 0 or 1
+ */
+ if (chip_tx_tail != tx_tail ||
+ !(offset == 0 || (offset == 1 && mcp25xxfd_is_2518(priv)))) {
+ netdev_err(priv->ndev,
+ "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
+ addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
+ offset);
+ return -EINVAL;
+ }
+
+ netdev_info(priv->ndev,
+ "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
+ ecc->ecc_stat & MCP25XXFD_REG_ECCSTAT_SECIF ?
+ "Single" : "Double",
+ addr, nr, tx_ring->tail, tx_tail, offset);
+
+ /* reload tx_obj into controller RAM ... */
+ tx_obj = &tx_ring->obj[nr];
+ err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
+ if (err)
+ return err;
+
+ /* ... and trigger retransmit */
+ return mcp25xxfd_chip_set_normal_mode(priv);
+}
+
+static int
+mcp25xxfd_handle_eccif(struct mcp25xxfd_priv *priv, bool set_normal_mode)
+{
+ struct mcp25xxfd_ecc *ecc = &priv->ecc;
+ const char *msg;
+ bool in_tx_ram;
+ u32 ecc_stat;
+ u16 addr;
+ u8 nr;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_ECCSTAT, &ecc_stat);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_ECCSTAT,
+ MCP25XXFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
+ if (err)
+ return err;
+
+ /* Check if ECC error occurred in TX-RAM */
+ addr = FIELD_GET(MCP25XXFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
+ err = mcp25xxfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
+ if (!err)
+ in_tx_ram = true;
+ else if (err == -ENOENT)
+ in_tx_ram = false;
+ else
+ return err;
+
+ if (ecc_stat & MCP25XXFD_REG_ECCSTAT_SECIF)
+ msg = "Single ECC Error corrected at address";
+ else if (ecc_stat & MCP25XXFD_REG_ECCSTAT_DEDIF)
+ msg = "Double ECC Error detected at address";
+ else
+ return -EINVAL;
+
+ if (!in_tx_ram) {
+ ecc->ecc_stat = 0;
+
+ if (ecc_stat & MCP25XXFD_REG_ECCSTAT_SECIF)
+ netdev_info(priv->ndev, "%s 0x%04x.\n",
+ msg, addr);
+ else
+ netdev_notice(priv->ndev, "%s 0x%04x.\n",
+ msg, addr);
+ } else {
+ /* Re-occurring error? */
+ if (ecc->ecc_stat == ecc_stat) {
+ ecc->cnt++;
+ } else {
+ ecc->ecc_stat = ecc_stat;
+ ecc->cnt = 1;
+ }
+
+ netdev_info(priv->ndev,
+ "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
+ msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");
+
+ if (ecc->cnt >= MCP25XXFD_ECC_CNT_MAX)
+ return mcp25xxfd_handle_eccif_recover(priv, nr);
+ }
+
+ if (set_normal_mode)
+ return mcp25xxfd_chip_set_normal_mode_nowait(priv);
+
+ return 0;
+}
+
+static int mcp25xxfd_handle_spicrcif(struct mcp25xxfd_priv *priv)
+{
+ int err;
+ u32 crc;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_CRC, &crc);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_CRC,
+ MCP25XXFD_REG_CRC_IF_MASK,
+ ~crc);
+ if (err)
+ return err;
+
+ if (crc & MCP25XXFD_REG_CRC_FERRIF)
+ netdev_notice(priv->ndev, "CRC write command format error.\n");
+ else if (crc & MCP25XXFD_REG_CRC_CRCERRIF)
+ netdev_notice(priv->ndev,
+ "CRC write error detected. CRC=0x%04lx.\n",
+ FIELD_GET(MCP25XXFD_REG_CRC_MASK, crc));
+
+ return 0;
+}
+
+#define mcp25xxfd_handle(priv, irq, ...) \
+({ \
+ struct mcp25xxfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = mcp25xxfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler mcp25xxfd_handle_%s() returned %d.\n", \
+ __stringify(irq), err); \
+ err; \
+})
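+
+/* Usage: "err = mcp25xxfd_handle(priv, rxif);" expands to a call of
+ * mcp25xxfd_handle_rxif(priv) and, on failure, logs the handler name
+ * and error code before handing err back via the statement
+ * expression.
+ */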
+
+static irqreturn_t mcp25xxfd_irq(int irq, void *dev_id)
+{
+ struct mcp25xxfd_priv *priv = dev_id;
+ irqreturn_t handled = IRQ_NONE;
+ int err;
+
+ if (priv->rx_int)
+ do {
+ int rx_pending;
+
+ rx_pending = gpiod_get_value_cansleep(priv->rx_int);
+ if (!rx_pending)
+ break;
+
+ err = mcp25xxfd_handle(priv, rxif);
+ if (err)
+ goto out_fail;
+
+ handled = IRQ_HANDLED;
+ } while (1);
+
+ do {
+ u32 intf_pending, intf_pending_clearable;
+ bool set_normal_mode = false;
+
+ err = regmap_bulk_read(priv->map_reg, MCP25XXFD_REG_INT,
+ &priv->regs_status,
+ sizeof(priv->regs_status) /
+ sizeof(u32));
+ if (err)
+ goto out_fail;
+
+ intf_pending = FIELD_GET(MCP25XXFD_REG_INT_IF_MASK,
+ priv->regs_status.intf) &
+ FIELD_GET(MCP25XXFD_REG_INT_IE_MASK,
+ priv->regs_status.intf);
+
+ if (!intf_pending)
+ return handled;
+
+ /* Some interrupts must be ACKed in the
+ * MCP25XXFD_REG_INT register.
+ * - First ACK then handle, to avoid lost-IRQ race
+ * condition on fast re-occurring interrupts.
+ * - Write "0" to clear active IRQs, "1" to all other,
+ * to avoid r/m/w race condition on the
+ * MCP25XXFD_REG_INT register.
+ */
+ intf_pending_clearable = intf_pending &
+ MCP25XXFD_REG_INT_IF_CLEARABLE_MASK;
+ if (intf_pending_clearable) {
+ err = regmap_update_bits(priv->map_reg,
+ MCP25XXFD_REG_INT,
+ MCP25XXFD_REG_INT_IF_MASK,
+ ~intf_pending_clearable);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_MODIF) {
+ err = mcp25xxfd_handle(priv, modif, &set_normal_mode);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_RXIF) {
+ err = mcp25xxfd_handle(priv, rxif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_TEFIF) {
+ err = mcp25xxfd_handle(priv, tefif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_RXOVIF) {
+ err = mcp25xxfd_handle(priv, rxovif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_TXATIF) {
+ err = mcp25xxfd_handle(priv, txatif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_IVMIF) {
+ err = mcp25xxfd_handle(priv, ivmif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_SERRIF) {
+ err = mcp25xxfd_handle(priv, serrif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_ECCIF) {
+ err = mcp25xxfd_handle(priv, eccif, set_normal_mode);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP25XXFD_REG_INT_SPICRCIF) {
+ err = mcp25xxfd_handle(priv, spicrcif);
+ if (err)
+ goto out_fail;
+ }
+
+ /* On the MCP2517FD and MCP2518FD, we don't get a
+ * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
+ * ERROR_ACTIVE.
+ */
+ if (intf_pending & MCP25XXFD_REG_INT_CERRIF ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ err = mcp25xxfd_handle(priv, cerrif);
+ if (err)
+ goto out_fail;
+
+ /* In Bus Off we completely shut down the
+ * controller. Every subsequent register read
+ * will read bogus data, and if
+ * MCP25XXFD_QUIRK_CRC_REG is enabled the CRC
+ * check will fail, too. So leave IRQ handler
+ * directly.
+ */
+ if (priv->can.state == CAN_STATE_BUS_OFF)
+ return IRQ_HANDLED;
+ }
+
+ handled = IRQ_HANDLED;
+ } while (1);
+
+ out_fail:
+ netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
+ err, priv->regs_status.intf);
+ mcp25xxfd_chip_interrupts_disable(priv);
+
+ return handled;
+}
+
+static inline struct
+mcp25xxfd_tx_obj *mcp25xxfd_get_tx_obj_next(struct mcp25xxfd_tx_ring *tx_ring)
+{
+ u8 tx_head;
+
+ tx_head = mcp25xxfd_get_tx_head(tx_ring);
+
+ return &tx_ring->obj[tx_head];
+}
+
+static void
+mcp25xxfd_tx_obj_from_skb(const struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_tx_obj *tx_obj,
+ const struct sk_buff *skb,
+ unsigned int seq)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct mcp25xxfd_hw_tx_obj_raw *hw_tx_obj;
+ union mcp25xxfd_tx_obj_load_buf *load_buf;
+ u8 dlc;
+ u32 id, flags;
+ int offset, len;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ u32 sid, eid;
+
+ sid = FIELD_GET(MCP25XXFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
+ eid = FIELD_GET(MCP25XXFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
+
+ id = FIELD_PREP(MCP25XXFD_OBJ_ID_EID_MASK, eid) |
+ FIELD_PREP(MCP25XXFD_OBJ_ID_SID_MASK, sid);
+
+ flags = MCP25XXFD_OBJ_FLAGS_IDE;
+ } else {
+ id = FIELD_PREP(MCP25XXFD_OBJ_ID_SID_MASK, cfd->can_id);
+ flags = 0;
+ }
+
+ /* Use the MCP2518FD mask even on the MCP2517FD. It does no
+ * harm, as only the lower 7 bits will be transferred into the
+ * TEF object.
+ */
+ dlc = can_len2dlc(cfd->len);
+ flags |= FIELD_PREP(MCP25XXFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
+ FIELD_PREP(MCP25XXFD_OBJ_FLAGS_DLC, dlc);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ flags |= MCP25XXFD_OBJ_FLAGS_RTR;
+
+ /* CANFD */
+ if (can_is_canfd_skb(skb)) {
+ if (cfd->flags & CANFD_ESI)
+ flags |= MCP25XXFD_OBJ_FLAGS_ESI;
+
+ flags |= MCP25XXFD_OBJ_FLAGS_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ flags |= MCP25XXFD_OBJ_FLAGS_BRS;
+ }
+
+ load_buf = &tx_obj->buf;
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_TX)
+ hw_tx_obj = &load_buf->crc.hw_tx_obj;
+ else
+ hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
+
+ put_unaligned_le32(id, &hw_tx_obj->id);
+ put_unaligned_le32(flags, &hw_tx_obj->flags);
+
+ /* Clear data at end of CAN frame */
+ offset = round_down(cfd->len, sizeof(u32));
+ len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset;
+ if (MCP25XXFD_SANITIZE_CAN && len)
+ memset(hw_tx_obj->data + offset, 0x0, len);
+ memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+
+ /* Number of bytes to be written into the RAM of the controller */
+ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
+ if (MCP25XXFD_SANITIZE_CAN)
+ len += round_up(can_dlc2len(dlc), sizeof(u32));
+ else
+ len += round_up(cfd->len, sizeof(u32));
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_TX) {
+ u16 crc;
+
+ mcp25xxfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(load_buf->crc.cmd);
+ crc = mcp25xxfd_crc16_compute(&load_buf->crc, len);
+ put_unaligned_be16(crc, (void *)load_buf + len);
+
+ /* Total length */
+ len += sizeof(load_buf->crc.crc);
+ } else {
+ len += sizeof(load_buf->nocrc.cmd);
+ }
+
+ tx_obj->xfer[0].len = len;
+}
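+
+/* With MCP25XXFD_QUIRK_CRC_TX the resulting SPI buffer built above is
+ * laid out as: CRC write command | TX object (id, flags, data) |
+ * big-endian CRC-16, and tx_obj->xfer[0].len covers the whole thing;
+ * without the quirk only the plain write command precedes the
+ * object.
+ */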
+
+static int mcp25xxfd_tx_obj_write(const struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_tx_obj *tx_obj)
+{
+ return spi_async(priv->spi, &tx_obj->msg);
+}
+
+static bool mcp25xxfd_tx_busy(const struct mcp25xxfd_priv *priv,
+ struct mcp25xxfd_tx_ring *tx_ring)
+{
+ if (mcp25xxfd_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (mcp25xxfd_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
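+
+/* The stop/recheck/wake dance above is the usual lock-free TX-queue
+ * pattern: after netif_stop_queue() the smp_mb() pairs with the
+ * barrier in mcp25xxfd_handle_tefif(), so either this side sees the
+ * updated tail and restarts the queue, or the TEF side sees the
+ * stopped queue and wakes it.
+ */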
+
+static netdev_tx_t mcp25xxfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mcp25xxfd_priv *priv = netdev_priv(ndev);
+ struct mcp25xxfd_tx_ring *tx_ring = priv->tx;
+ struct mcp25xxfd_tx_obj *tx_obj;
+ u8 tx_head;
+ int err;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (mcp25xxfd_tx_busy(priv, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp25xxfd_get_tx_obj_next(tx_ring);
+ mcp25xxfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
+
+ /* Stop queue if we occupy the complete TX FIFO */
+ tx_head = mcp25xxfd_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
+ netif_stop_queue(ndev);
+
+ can_put_echo_skb(skb, ndev, tx_head);
+
+ err = mcp25xxfd_tx_obj_write(priv, tx_obj);
+ if (err)
+ goto out_err;
+
+ return NETDEV_TX_OK;
+
+ out_err:
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+
+ return NETDEV_TX_OK;
+}
+
+static int mcp25xxfd_open(struct net_device *ndev)
+{
+ struct mcp25xxfd_priv *priv = netdev_priv(ndev);
+ const struct spi_device *spi = priv->spi;
+ int err;
+
+ err = pm_runtime_get_sync(ndev->dev.parent);
+ if (err < 0) {
+ pm_runtime_put_noidle(ndev->dev.parent);
+ return err;
+ }
+
+ err = open_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put;
+
+ err = mcp25xxfd_ring_alloc(priv);
+ if (err)
+ goto out_close_candev;
+
+ err = mcp25xxfd_transceiver_enable(priv);
+ if (err)
+ goto out_mcp25xxfd_ring_free;
+
+ err = mcp25xxfd_chip_start(priv);
+ if (err)
+ goto out_transceiver_disable;
+
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_threaded_irq(spi->irq, NULL, mcp25xxfd_irq,
+ IRQF_ONESHOT, dev_name(&spi->dev),
+ priv);
+ if (err)
+ goto out_can_rx_offload_disable;
+
+ err = mcp25xxfd_chip_interrupts_enable(priv);
+ if (err)
+ goto out_free_irq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+ out_free_irq:
+ free_irq(spi->irq, priv);
+ out_can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ out_transceiver_disable:
+ mcp25xxfd_transceiver_disable(priv);
+ out_mcp25xxfd_ring_free:
+ mcp25xxfd_ring_free(priv);
+ out_close_candev:
+ close_candev(ndev);
+ out_pm_runtime_put:
+ mcp25xxfd_chip_stop(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+
+ return err;
+}
+
+static int mcp25xxfd_stop(struct net_device *ndev)
+{
+ struct mcp25xxfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ mcp25xxfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ mcp25xxfd_chip_stop(priv, CAN_STATE_STOPPED);
+ mcp25xxfd_transceiver_disable(priv);
+ mcp25xxfd_ring_free(priv);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops mcp25xxfd_netdev_ops = {
+ .ndo_open = mcp25xxfd_open,
+ .ndo_stop = mcp25xxfd_stop,
+ .ndo_start_xmit = mcp25xxfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void
+mcp25xxfd_register_quirks(struct mcp25xxfd_priv *priv)
+{
+ const struct spi_device *spi = priv->spi;
+ const struct spi_controller *ctlr = spi->controller;
+
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ priv->devtype_data.quirks |= MCP25XXFD_QUIRK_HALF_DUPLEX;
+}
+
+static int mcp25xxfd_register_chip_detect(struct mcp25xxfd_priv *priv)
+{
+ const struct net_device *ndev = priv->ndev;
+ const struct mcp25xxfd_devtype_data *devtype_data;
+ u32 osc;
+ int err;
+
+ /* The OSC_LPMEN bit is only supported on the MCP2518FD, so use
+ * it to autodetect the model.
+ */
+ err = regmap_update_bits(priv->map_reg, MCP25XXFD_REG_OSC,
+ MCP25XXFD_REG_OSC_LPMEN,
+ MCP25XXFD_REG_OSC_LPMEN);
+ if (err)
+ return err;
+
+ err = regmap_read(priv->map_reg, MCP25XXFD_REG_OSC, &osc);
+ if (err)
+ return err;
+
+ if (osc & MCP25XXFD_REG_OSC_LPMEN)
+ devtype_data = &mcp25xxfd_devtype_data_mcp2518fd;
+ else
+ devtype_data = &mcp25xxfd_devtype_data_mcp2517fd;
+
+ if (!mcp25xxfd_is_25XX(priv) &&
+ priv->devtype_data.model != devtype_data->model) {
+ netdev_info(ndev,
+ "Detected %s, but firmware specifies a %s. Fixing up.\n",
+ __mcp25xxfd_get_model_str(devtype_data->model),
+ mcp25xxfd_get_model_str(priv));
+ }
+ priv->devtype_data = *devtype_data;
+
+ /* We need to preserve the half duplex quirk. */
+ mcp25xxfd_register_quirks(priv);
+
+ /* Re-init regmap with quirks of detected model. */
+ return mcp25xxfd_regmap_init(priv);
+}
+
+static int mcp25xxfd_register_check_rx_int(struct mcp25xxfd_priv *priv)
+{
+ int err, rx_pending;
+
+ if (!priv->rx_int)
+ return 0;
+
+ err = mcp25xxfd_chip_rx_int_enable(priv);
+ if (err)
+ return err;
+
+ /* Check if RX_INT is properly working. The RX_INT should not
+ * be active after a softreset.
+ */
+ rx_pending = gpiod_get_value_cansleep(priv->rx_int);
+
+ err = mcp25xxfd_chip_rx_int_disable(priv);
+ if (err)
+ return err;
+
+ if (!rx_pending)
+ return 0;
+
+ netdev_info(priv->ndev,
+ "RX_INT active after softreset, disabling RX_INT support.\n");
+ devm_gpiod_put(&priv->spi->dev, priv->rx_int);
+ priv->rx_int = NULL;
+
+ return 0;
+}
+
+static int
+mcp25xxfd_register_get_dev_id(const struct mcp25xxfd_priv *priv,
+ u32 *dev_id, u32 *effective_speed_hz)
+{
+ struct mcp25xxfd_map_buf_nocrc *buf_rx;
+ struct mcp25xxfd_map_buf_nocrc *buf_tx;
+ struct spi_transfer xfer[2] = { };
+ int err;
+
+ buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
+ if (!buf_rx)
+ return -ENOMEM;
+
+ buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
+ if (!buf_tx) {
+ err = -ENOMEM;
+ goto out_kfree_buf_rx;
+ }
+
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = sizeof(*dev_id);
+
+ mcp25xxfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP25XXFD_REG_DEVID);
+ err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
+ if (err)
+ goto out_kfree_buf_tx;
+
+ *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
+ *effective_speed_hz = xfer->effective_speed_hz;
+
+ out_kfree_buf_tx:
+ kfree(buf_tx);
+ out_kfree_buf_rx:
+ kfree(buf_rx);
+
+ return err;
+}
+
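+/* Evaluates to '+' if the given quirk is active on this device, '-'
+ * otherwise; mcp25xxfd_register_done() below uses it to print the
+ * quirk list, e.g. "+CRC_REG" when register accesses are CRC protected.
+ */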
+#define MCP25XXFD_QUIRK_ACTIVE(quirk) \
+ (priv->devtype_data.quirks & MCP25XXFD_QUIRK_##quirk ? '+' : '-')
+
+static int
+mcp25xxfd_register_done(const struct mcp25xxfd_priv *priv)
+{
+ u32 dev_id, effective_speed_hz;
+ int err;
+
+ err = mcp25xxfd_register_get_dev_id(priv, &dev_id,
+ &effective_speed_hz);
+ if (err)
+ return err;
+
+ netdev_info(priv->ndev,
+ "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ mcp25xxfd_get_model_str(priv),
+ FIELD_GET(MCP25XXFD_REG_DEVID_ID_MASK, dev_id),
+ FIELD_GET(MCP25XXFD_REG_DEVID_REV_MASK, dev_id),
+ priv->rx_int ? '+' : '-',
+ MCP25XXFD_QUIRK_ACTIVE(MAB_NO_WARN),
+ MCP25XXFD_QUIRK_ACTIVE(CRC_REG),
+ MCP25XXFD_QUIRK_ACTIVE(CRC_RX),
+ MCP25XXFD_QUIRK_ACTIVE(CRC_TX),
+ MCP25XXFD_QUIRK_ACTIVE(ECC),
+ MCP25XXFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ priv->can.clock.freq / 1000000,
+ priv->can.clock.freq % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_orig / 1000000,
+ priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
+ priv->spi->max_speed_hz / 1000000,
+ priv->spi->max_speed_hz % 1000000 / 1000 / 10,
+ effective_speed_hz / 1000000,
+ effective_speed_hz % 1000000 / 1000 / 10);
+
+ return 0;
+}
+
+static int mcp25xxfd_register(struct mcp25xxfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ err = mcp25xxfd_clks_and_vdd_enable(priv);
+ if (err)
+ return err;
+
+ pm_runtime_get_noresume(ndev->dev.parent);
+ err = pm_runtime_set_active(ndev->dev.parent);
+ if (err)
+ goto out_runtime_put_noidle;
+ pm_runtime_enable(ndev->dev.parent);
+
+ mcp25xxfd_register_quirks(priv);
+
+ err = mcp25xxfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp25xxfd_register_chip_detect(priv);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp25xxfd_register_check_rx_int(priv);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp25xxfd_register_done(priv);
+ if (err)
+ goto out_unregister_candev;
+
+ /* Put controller into sleep mode and let pm_runtime_put()
+ * disable the clocks and vdd. If CONFIG_PM is not enabled,
+ * the clocks and vdd will stay powered.
+ */
+ err = mcp25xxfd_chip_set_mode(priv, MCP25XXFD_REG_CON_MODE_SLEEP);
+ if (err)
+ goto out_unregister_candev;
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+ out_unregister_candev:
+ unregister_candev(ndev);
+ out_chip_set_mode_sleep:
+ mcp25xxfd_chip_set_mode(priv, MCP25XXFD_REG_CON_MODE_SLEEP);
+ out_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+ out_runtime_put_noidle:
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp25xxfd_clks_and_vdd_disable(priv);
+
+ return err;
+}
+
+static inline void mcp25xxfd_unregister(struct mcp25xxfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+
+ pm_runtime_get_sync(ndev->dev.parent);
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp25xxfd_clks_and_vdd_disable(priv);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id mcp25xxfd_of_match[] = {
+ {
+ .compatible = "microchip,mcp2517fd",
+ .data = &mcp25xxfd_devtype_data_mcp2517fd,
+ }, {
+ .compatible = "microchip,mcp2518fd",
+ .data = &mcp25xxfd_devtype_data_mcp2518fd,
+ }, {
+ .compatible = "microchip,mcp25xxfd",
+ .data = &mcp25xxfd_devtype_data_mcp25xxfd,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mcp25xxfd_of_match);
+
+static const struct spi_device_id mcp25xxfd_id_table[] = {
+ {
+ .name = "mcp2517fd",
+ .driver_data = (kernel_ulong_t)&mcp25xxfd_devtype_data_mcp2517fd,
+ }, {
+ .name = "mcp2518fd",
+ .driver_data = (kernel_ulong_t)&mcp25xxfd_devtype_data_mcp2518fd,
+ }, {
+ .name = "mcp25xxfd",
+ .driver_data = (kernel_ulong_t)&mcp25xxfd_devtype_data_mcp25xxfd,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(spi, mcp25xxfd_id_table);
+
+static int mcp25xxfd_probe(struct spi_device *spi)
+{
+ const void *match;
+ struct net_device *ndev;
+ struct mcp25xxfd_priv *priv;
+ struct gpio_desc *rx_int;
+ struct regulator *reg_vdd, *reg_xceiver;
+ struct clk *clk;
+ u32 freq;
+ int err;
+
+ rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
+ GPIOD_IN);
+ if (PTR_ERR(rx_int) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(rx_int))
+ return PTR_ERR(rx_int);
+
+ reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
+ if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (PTR_ERR(reg_vdd) == -ENODEV)
+ reg_vdd = NULL;
+ else if (IS_ERR(reg_vdd))
+ return PTR_ERR(reg_vdd);
+
+ reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (PTR_ERR(reg_xceiver) == -ENODEV)
+ reg_xceiver = NULL;
+ else if (IS_ERR(reg_xceiver))
+ return PTR_ERR(reg_xceiver);
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
+ return PTR_ERR(clk);
+ }
+ freq = clk_get_rate(clk);
+
+ /* Sanity check */
+ if (freq < MCP25XXFD_SYSCLOCK_HZ_MIN ||
+ freq > MCP25XXFD_SYSCLOCK_HZ_MAX) {
+ dev_err(&spi->dev,
+ "Oscillator frequency (%u Hz) is too low or too high.\n",
+ freq);
+ return -ERANGE;
+ }
+
+ if (freq <= MCP25XXFD_SYSCLOCK_HZ_MAX / MCP25XXFD_OSC_PLL_MULTIPLIER) {
+ dev_err(&spi->dev,
+ "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
+ freq);
+ return -ERANGE;
+ }
+
+ ndev = alloc_candev(sizeof(struct mcp25xxfd_priv),
+ MCP25XXFD_TX_OBJ_NUM_MAX);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ndev->netdev_ops = &mcp25xxfd_netdev_ops;
+ ndev->irq = spi->irq;
+ ndev->flags |= IFF_ECHO;
+
+ priv = netdev_priv(ndev);
+ spi_set_drvdata(spi, priv);
+ priv->can.clock.freq = freq;
+ priv->can.do_set_mode = mcp25xxfd_set_mode;
+ priv->can.do_get_berr_counter = mcp25xxfd_get_berr_counter;
+ priv->can.bittiming_const = &mcp25xxfd_bittiming_const;
+ priv->can.data_bittiming_const = &mcp25xxfd_data_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+ priv->ndev = ndev;
+ priv->spi = spi;
+ priv->rx_int = rx_int;
+ priv->clk = clk;
+ priv->reg_vdd = reg_vdd;
+ priv->reg_xceiver = reg_xceiver;
+
+ match = device_get_match_data(&spi->dev);
+ if (match)
+ priv->devtype_data = *(struct mcp25xxfd_devtype_data *)match;
+ else
+ priv->devtype_data = *(struct mcp25xxfd_devtype_data *)
+ spi_get_device_id(spi)->driver_data;
+
+ /* According to the datasheet the SPI clock must be less than or
+ * equal to SYSCLOCK / 2.
+ *
+ * It turns out that the controller is not stable at this
+ * rate. Known good and bad combinations are:
+ *
+ * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk Status config
+ *
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 9375000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 18750000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz good assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 9523809 Hz 95.24% 28571429 Hz bad assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
+ * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ *
+ * Limit SPI clock to 85% of SYSCLOCK / 2 for now.
+ */
+ priv->spi_max_speed_hz_orig = spi->max_speed_hz;
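+ /* i.e. min(max_speed_hz, 0.85 * SYSCLOCK / 2), in integer math */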
+ spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ spi->bits_per_word = 8;
+ spi->rt = true;
+ err = spi_setup(spi);
+ if (err)
+ goto out_free_candev;
+
+ err = mcp25xxfd_regmap_init(priv);
+ if (err)
+ goto out_free_candev;
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ MCP25XXFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = mcp25xxfd_register(priv);
+ if (err)
+ goto out_free_candev;
+
+ return 0;
+
+ out_free_candev:
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+
+ free_candev(ndev);
+
+ return err;
+}
+
+static int mcp25xxfd_remove(struct spi_device *spi)
+{
+ struct mcp25xxfd_priv *priv = spi_get_drvdata(spi);
+ struct net_device *ndev = priv->ndev;
+
+ can_rx_offload_del(&priv->offload);
+ mcp25xxfd_unregister(priv);
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ free_candev(ndev);
+
+ return 0;
+}
+
+static int __maybe_unused mcp25xxfd_runtime_suspend(struct device *device)
+{
+ const struct mcp25xxfd_priv *priv = dev_get_drvdata(device);
+
+ return mcp25xxfd_clks_and_vdd_disable(priv);
+}
+
+static int __maybe_unused mcp25xxfd_runtime_resume(struct device *device)
+{
+ const struct mcp25xxfd_priv *priv = dev_get_drvdata(device);
+
+ return mcp25xxfd_clks_and_vdd_enable(priv);
+}
+
+static const struct dev_pm_ops mcp25xxfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(mcp25xxfd_runtime_suspend,
+ mcp25xxfd_runtime_resume, NULL)
+};
+
+static struct spi_driver mcp25xxfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &mcp25xxfd_pm_ops,
+ .of_match_table = mcp25xxfd_of_match,
+ },
+ .probe = mcp25xxfd_probe,
+ .remove = mcp25xxfd_remove,
+ .id_table = mcp25xxfd_id_table,
+};
+module_spi_driver(mcp25xxfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Microchip MCP25xxFD Family CAN controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-crc16.c b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-crc16.c
new file mode 100644
index 000000000000..79d09aaebf33
--- /dev/null
+++ b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-crc16.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp25xxfd - Microchip MCP25xxFD Family CAN controller driver
+//
+// Copyright (c) 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include "mcp25xxfd.h"
+
+/* The standard crc16 in linux/crc16.h is unfortunately not computing
+ * the correct results (left shift vs. right shift). So here is an
+ * implementation with a table generated with the help of:
+ *
+ * http://lkml.iu.edu/hypermail/linux/kernel/0508.1/1085.html
+ */
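+/* MSB-first (left shifting) CRC-16 with polynomial 0x8005
+ * (x^16 + x^15 + x^2 + 1); the seed of 0xffff is applied in
+ * mcp25xxfd_crc16_compute() below. Each table entry is the CRC of the
+ * single byte equal to its index, e.g. mcp25xxfd_crc16_table[1] is the
+ * polynomial itself.
+ */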
+static const u16 mcp25xxfd_crc16_table[] = {
+ 0x0000, 0x8005, 0x800f, 0x000a, 0x801b, 0x001e, 0x0014, 0x8011,
+ 0x8033, 0x0036, 0x003c, 0x8039, 0x0028, 0x802d, 0x8027, 0x0022,
+ 0x8063, 0x0066, 0x006c, 0x8069, 0x0078, 0x807d, 0x8077, 0x0072,
+ 0x0050, 0x8055, 0x805f, 0x005a, 0x804b, 0x004e, 0x0044, 0x8041,
+ 0x80c3, 0x00c6, 0x00cc, 0x80c9, 0x00d8, 0x80dd, 0x80d7, 0x00d2,
+ 0x00f0, 0x80f5, 0x80ff, 0x00fa, 0x80eb, 0x00ee, 0x00e4, 0x80e1,
+ 0x00a0, 0x80a5, 0x80af, 0x00aa, 0x80bb, 0x00be, 0x00b4, 0x80b1,
+ 0x8093, 0x0096, 0x009c, 0x8099, 0x0088, 0x808d, 0x8087, 0x0082,
+ 0x8183, 0x0186, 0x018c, 0x8189, 0x0198, 0x819d, 0x8197, 0x0192,
+ 0x01b0, 0x81b5, 0x81bf, 0x01ba, 0x81ab, 0x01ae, 0x01a4, 0x81a1,
+ 0x01e0, 0x81e5, 0x81ef, 0x01ea, 0x81fb, 0x01fe, 0x01f4, 0x81f1,
+ 0x81d3, 0x01d6, 0x01dc, 0x81d9, 0x01c8, 0x81cd, 0x81c7, 0x01c2,
+ 0x0140, 0x8145, 0x814f, 0x014a, 0x815b, 0x015e, 0x0154, 0x8151,
+ 0x8173, 0x0176, 0x017c, 0x8179, 0x0168, 0x816d, 0x8167, 0x0162,
+ 0x8123, 0x0126, 0x012c, 0x8129, 0x0138, 0x813d, 0x8137, 0x0132,
+ 0x0110, 0x8115, 0x811f, 0x011a, 0x810b, 0x010e, 0x0104, 0x8101,
+ 0x8303, 0x0306, 0x030c, 0x8309, 0x0318, 0x831d, 0x8317, 0x0312,
+ 0x0330, 0x8335, 0x833f, 0x033a, 0x832b, 0x032e, 0x0324, 0x8321,
+ 0x0360, 0x8365, 0x836f, 0x036a, 0x837b, 0x037e, 0x0374, 0x8371,
+ 0x8353, 0x0356, 0x035c, 0x8359, 0x0348, 0x834d, 0x8347, 0x0342,
+ 0x03c0, 0x83c5, 0x83cf, 0x03ca, 0x83db, 0x03de, 0x03d4, 0x83d1,
+ 0x83f3, 0x03f6, 0x03fc, 0x83f9, 0x03e8, 0x83ed, 0x83e7, 0x03e2,
+ 0x83a3, 0x03a6, 0x03ac, 0x83a9, 0x03b8, 0x83bd, 0x83b7, 0x03b2,
+ 0x0390, 0x8395, 0x839f, 0x039a, 0x838b, 0x038e, 0x0384, 0x8381,
+ 0x0280, 0x8285, 0x828f, 0x028a, 0x829b, 0x029e, 0x0294, 0x8291,
+ 0x82b3, 0x02b6, 0x02bc, 0x82b9, 0x02a8, 0x82ad, 0x82a7, 0x02a2,
+ 0x82e3, 0x02e6, 0x02ec, 0x82e9, 0x02f8, 0x82fd, 0x82f7, 0x02f2,
+ 0x02d0, 0x82d5, 0x82df, 0x02da, 0x82cb, 0x02ce, 0x02c4, 0x82c1,
+ 0x8243, 0x0246, 0x024c, 0x8249, 0x0258, 0x825d, 0x8257, 0x0252,
+ 0x0270, 0x8275, 0x827f, 0x027a, 0x826b, 0x026e, 0x0264, 0x8261,
+ 0x0220, 0x8225, 0x822f, 0x022a, 0x823b, 0x023e, 0x0234, 0x8231,
+ 0x8213, 0x0216, 0x021c, 0x8219, 0x0208, 0x820d, 0x8207, 0x0202
+};
+
+static inline u16 mcp25xxfd_crc16_byte(u16 crc, const u8 data)
+{
+ u8 index = (crc >> 8) ^ data;
+
+ return (crc << 8) ^ mcp25xxfd_crc16_table[index];
+}
+
+static u16 mcp25xxfd_crc16(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = mcp25xxfd_crc16_byte(crc, *buffer++);
+
+ return crc;
+}
+
+u16 mcp25xxfd_crc16_compute(const void *data, size_t data_size)
+{
+ u16 crc = 0xffff;
+
+ return mcp25xxfd_crc16(crc, data, data_size);
+}
+
+u16 mcp25xxfd_crc16_compute2(const void *cmd, size_t cmd_size,
+ const void *data, size_t data_size)
+{
+ u16 crc;
+
+ crc = mcp25xxfd_crc16_compute(cmd, cmd_size);
+ crc = mcp25xxfd_crc16(crc, data, data_size);
+
+ return crc;
+}
diff --git a/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-regmap.c b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-regmap.c
new file mode 100644
index 000000000000..376649c7e443
--- /dev/null
+++ b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd-regmap.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp25xxfd - Microchip MCP25xxFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp25xxfd.h"
+
+#include <asm/unaligned.h>
+
+static const struct regmap_config mcp25xxfd_regmap_crc;
+
+static int
+mcp25xxfd_regmap_nocrc_write(void *context, const void *data, size_t count)
+{
+ struct spi_device *spi = context;
+
+ return spi_write(spi, data, count);
+}
+
+static int
+mcp25xxfd_regmap_nocrc_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp25xxfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp25xxfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len,
+ },
+ };
+
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd))
+ return -EINVAL;
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
+ memcpy(buf_tx->data, val, val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
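+/* Returns true if a read-modify-write of @reg must read the old value
+ * back first, i.e. if the bits outside the mask have to be preserved.
+ * For the registers listed as false, the unmasked bits within the
+ * accessed byte range are simply written as zero, which is safe there
+ * (e.g. interrupt flags that are cleared by writing zero).
+ */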
+static inline bool mcp25xxfd_update_bits_read_reg(unsigned int reg)
+{
+ switch (reg) {
+ case MCP25XXFD_REG_INT:
+ case MCP25XXFD_REG_TEFCON:
+ case MCP25XXFD_REG_FIFOCON(MCP25XXFD_RX_FIFO(0)):
+ case MCP25XXFD_REG_FLTCON(0):
+ case MCP25XXFD_REG_ECCSTAT:
+ case MCP25XXFD_REG_CRC:
+ return false;
+ case MCP25XXFD_REG_CON:
+ case MCP25XXFD_REG_FIFOSTA(MCP25XXFD_RX_FIFO(0)):
+ case MCP25XXFD_REG_OSC:
+ case MCP25XXFD_REG_ECCCON:
+ return true;
+ default:
+ WARN(1, "Status of reg 0x%04x unknown.\n", reg);
+ }
+
+ return true;
+}
+
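+/* Read-modify-write helper that only transfers the bytes covered by
+ * @mask: mcp25xxfd_first_byte_set()/mcp25xxfd_last_byte_set() shrink
+ * the SPI access to the affected byte range, and the read-back is
+ * skipped entirely for registers where mcp25xxfd_update_bits_read_reg()
+ * says the remaining bits need not be preserved.
+ */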
+static int
+mcp25xxfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct spi_device *spi = context;
+ struct mcp25xxfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp25xxfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx;
+ struct mcp25xxfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ __le32 orig_le32 = 0, mask_le32, val_le32, tmp_le32;
+ u8 first_byte, last_byte, len;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY) &&
+ mask == 0)
+ return -EINVAL;
+
+ first_byte = mcp25xxfd_first_byte_set(mask);
+ last_byte = mcp25xxfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ if (mcp25xxfd_update_bits_read_reg(reg)) {
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + len;
+
+ if (MCP25XXFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, len);
+ }
+
+ mcp25xxfd_spi_cmd_read_nocrc(&buf_tx->cmd, reg + first_byte);
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ memcpy(&orig_le32, buf_rx->data, len);
+ }
+
+ mask_le32 = cpu_to_le32(mask >> BITS_PER_BYTE * first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+
+ tmp_le32 = orig_le32 & ~mask_le32;
+ tmp_le32 |= val_le32 & mask_le32;
+
+ mcp25xxfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte);
+ memcpy(buf_tx->data, &tmp_le32, len);
+
+ return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len);
+}
+
+static int
+mcp25xxfd_regmap_nocrc_read(void *context,
+ const void *reg, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp25xxfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp25xxfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx;
+ struct mcp25xxfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd))
+ return -EINVAL;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = reg;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = val_buf;
+ xfer[1].len = val_len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
+ if (MCP25XXFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len);
+ }
+
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ if (!(priv->devtype_data.quirks & MCP25XXFD_QUIRK_HALF_DUPLEX))
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static int
+mcp25xxfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp25xxfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp25xxfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len +
+ sizeof(buf_tx->crc),
+ },
+ };
+ u16 reg = *(u16 *)reg_p;
+ u16 crc;
+
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd) +
+ mcp25xxfd_regmap_crc.pad_bits / BITS_PER_BYTE)
+ return -EINVAL;
+
+ mcp25xxfd_spi_cmd_write_crc(&buf_tx->cmd, reg, val_len);
+ memcpy(buf_tx->data, val, val_len);
+
+ crc = mcp25xxfd_crc16_compute(buf_tx, sizeof(buf_tx->cmd) + val_len);
+ put_unaligned_be16(crc, buf_tx->data + val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static int
+mcp25xxfd_regmap_crc_write(void *context,
+ const void *data, size_t count)
+{
+ const size_t data_offset = sizeof(__be16) +
+ mcp25xxfd_regmap_crc.pad_bits / BITS_PER_BYTE;
+
+ return mcp25xxfd_regmap_crc_gather_write(context,
+ data, data_offset,
+ data + data_offset,
+ count - data_offset);
+}
+
+static int
+mcp25xxfd_regmap_crc_read_one(struct mcp25xxfd_priv *priv,
+ struct spi_message *msg, unsigned int data_len)
+{
+ const struct mcp25xxfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
+ const struct mcp25xxfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ u16 crc_received, crc_calculated;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ err = spi_sync(priv->spi, msg);
+ if (err)
+ return err;
+
+ crc_received = get_unaligned_be16(buf_rx->data + data_len);
+ crc_calculated = mcp25xxfd_crc16_compute2(&buf_tx->cmd,
+ sizeof(buf_tx->cmd),
+ buf_rx->data,
+ data_len);
+ if (crc_received != crc_calculated)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int
+mcp25xxfd_regmap_crc_read(void *context,
+ const void *reg_p, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp25xxfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp25xxfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
+ struct mcp25xxfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ u16 reg = *(u16 *)reg_p;
+ int i, err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP25XXFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd) +
+ mcp25xxfd_regmap_crc.pad_bits / BITS_PER_BYTE)
+ return -EINVAL;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = val_len + sizeof(buf_tx->crc);
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len +
+ sizeof(buf_tx->crc);
+
+ if (MCP25XXFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len +
+ sizeof(buf_tx->crc));
+ }
+
+ mcp25xxfd_spi_cmd_read_crc(&buf_tx->cmd, reg, val_len);
+
+ for (i = 0; i < MCP25XXFD_READ_CRC_RETRIES_MAX; i++) {
+ err = mcp25xxfd_regmap_crc_read_one(priv, &msg, val_len);
+ if (!err)
+ goto out;
+ if (err != -EBADMSG)
+ return err;
+
+ /* MCP25XXFD_REG_OSC is the first ever reg we read from.
+ *
+ * The chip may be in deep sleep and this SPI transfer
+ * (i.e. the assertion of the CS) will wake the chip
+ * up. This takes about 3ms. The CRC of this transfer
+ * is wrong.
+ *
+ * Or there isn't a chip at all, in this case the CRC
+ * will be wrong, too.
+ *
+ * In both cases ignore the CRC and copy the read data
+ * to the caller. It will take care of both cases.
+ */
+ if (reg == MCP25XXFD_REG_OSC) {
+ err = 0;
+ goto out;
+ }
+
+ netdev_dbg(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
+ }
+
+ if (err) {
+ netdev_info(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
+
+ return err;
+ }
+ out:
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static const struct regmap_range mcp25xxfd_reg_table_yes_range[] = {
+ regmap_reg_range(0x000, 0x2ec), /* CAN FD Controller Module SFR */
+ regmap_reg_range(0x400, 0xbfc), /* RAM */
+ regmap_reg_range(0xe00, 0xe14), /* MCP2517/18FD SFR */
+};
+
+static const struct regmap_access_table mcp25xxfd_reg_table = {
+ .yes_ranges = mcp25xxfd_reg_table_yes_range,
+ .n_yes_ranges = ARRAY_SIZE(mcp25xxfd_reg_table_yes_range),
+};
+
+static const struct regmap_config mcp25xxfd_regmap_nocrc = {
+ .name = "nocrc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .pad_bits = 0,
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .wr_table = &mcp25xxfd_reg_table,
+ .rd_table = &mcp25xxfd_reg_table,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = (__force unsigned long)
+ cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_READ),
+ .write_flag_mask = (__force unsigned long)
+ cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus mcp25xxfd_bus_nocrc = {
+ .write = mcp25xxfd_regmap_nocrc_write,
+ .gather_write = mcp25xxfd_regmap_nocrc_gather_write,
+ .reg_update_bits = mcp25xxfd_regmap_nocrc_update_bits,
+ .read = mcp25xxfd_regmap_nocrc_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .max_raw_read = sizeof_field(struct mcp25xxfd_map_buf_nocrc, data),
+ .max_raw_write = sizeof_field(struct mcp25xxfd_map_buf_nocrc, data),
+};
+
+static const struct regmap_config mcp25xxfd_regmap_crc = {
+ .name = "crc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .pad_bits = 16, /* keep data bits aligned */
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .wr_table = &mcp25xxfd_reg_table,
+ .rd_table = &mcp25xxfd_reg_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_bus mcp25xxfd_bus_crc = {
+ .write = mcp25xxfd_regmap_crc_write,
+ .gather_write = mcp25xxfd_regmap_crc_gather_write,
+ .read = mcp25xxfd_regmap_crc_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .max_raw_read = sizeof_field(struct mcp25xxfd_map_buf_crc, data),
+ .max_raw_write = sizeof_field(struct mcp25xxfd_map_buf_crc, data),
+};
+
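+/* Both regmap variants may be in use at the same time: depending on
+ * the quirks a chip can e.g. use CRC transfers for register access but
+ * plain transfers for the RX/TEF RAM. The maps are therefore set up
+ * independently, and map_reg/map_rx point at the selected variant.
+ */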
+static inline bool
+mcp25xxfd_regmap_use_nocrc(struct mcp25xxfd_priv *priv)
+{
+ return (!(priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_REG)) ||
+ (!(priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_RX));
+}
+
+static inline bool
+mcp25xxfd_regmap_use_crc(struct mcp25xxfd_priv *priv)
+{
+ return (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_REG) ||
+ (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_RX);
+}
+
+static int
+mcp25xxfd_regmap_init_nocrc(struct mcp25xxfd_priv *priv)
+{
+ if (!priv->map_nocrc) {
+ struct regmap *map;
+
+ map = devm_regmap_init(&priv->spi->dev, &mcp25xxfd_bus_nocrc,
+ priv->spi, &mcp25xxfd_regmap_nocrc);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ priv->map_nocrc = map;
+ }
+
+ if (!priv->map_buf_nocrc_rx) {
+ priv->map_buf_nocrc_rx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_nocrc_rx),
+ GFP_KERNEL);
+ if (!priv->map_buf_nocrc_rx)
+ return -ENOMEM;
+ }
+
+ if (!priv->map_buf_nocrc_tx) {
+ priv->map_buf_nocrc_tx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_nocrc_tx),
+ GFP_KERNEL);
+ if (!priv->map_buf_nocrc_tx)
+ return -ENOMEM;
+ }
+
+ if (!(priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_REG))
+ priv->map_reg = priv->map_nocrc;
+
+ if (!(priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_RX))
+ priv->map_rx = priv->map_nocrc;
+
+ return 0;
+}
+
+static void mcp25xxfd_regmap_destroy_nocrc(struct mcp25xxfd_priv *priv)
+{
+ if (priv->map_buf_nocrc_rx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_rx);
+ priv->map_buf_nocrc_rx = NULL;
+ }
+ if (priv->map_buf_nocrc_tx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_tx);
+ priv->map_buf_nocrc_tx = NULL;
+ }
+}
+
+static int
+mcp25xxfd_regmap_init_crc(struct mcp25xxfd_priv *priv)
+{
+ if (!priv->map_crc) {
+ struct regmap *map;
+
+ map = devm_regmap_init(&priv->spi->dev, &mcp25xxfd_bus_crc,
+ priv->spi, &mcp25xxfd_regmap_crc);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ priv->map_crc = map;
+ }
+
+ if (!priv->map_buf_crc_rx) {
+ priv->map_buf_crc_rx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_crc_rx),
+ GFP_KERNEL);
+ if (!priv->map_buf_crc_rx)
+ return -ENOMEM;
+ }
+
+ if (!priv->map_buf_crc_tx) {
+ priv->map_buf_crc_tx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_crc_tx),
+ GFP_KERNEL);
+ if (!priv->map_buf_crc_tx)
+ return -ENOMEM;
+ }
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_REG)
+ priv->map_reg = priv->map_crc;
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_RX)
+ priv->map_rx = priv->map_crc;
+
+ return 0;
+}
+
+static void mcp25xxfd_regmap_destroy_crc(struct mcp25xxfd_priv *priv)
+{
+ if (priv->map_buf_crc_rx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_crc_rx);
+ priv->map_buf_crc_rx = NULL;
+ }
+ if (priv->map_buf_crc_tx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_crc_tx);
+ priv->map_buf_crc_tx = NULL;
+ }
+}
+
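+/* Set up the regmaps according to the CRC quirks of the (possibly just
+ * autodetected) model. Called from probe and again from
+ * mcp25xxfd_register_chip_detect(); a re-init frees the buffers of the
+ * variant that is no longer used.
+ */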
+int mcp25xxfd_regmap_init(struct mcp25xxfd_priv *priv)
+{
+ int err;
+
+ if (mcp25xxfd_regmap_use_nocrc(priv)) {
+ err = mcp25xxfd_regmap_init_nocrc(priv);
+
+ if (err)
+ return err;
+ } else {
+ mcp25xxfd_regmap_destroy_nocrc(priv);
+ }
+
+ if (mcp25xxfd_regmap_use_crc(priv)) {
+ err = mcp25xxfd_regmap_init_crc(priv);
+
+ if (err)
+ return err;
+ } else {
+ mcp25xxfd_regmap_destroy_crc(priv);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp25xxfd/mcp25xxfd.h b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd.h
new file mode 100644
index 000000000000..3bc799204cb0
--- /dev/null
+++ b/drivers/net/can/spi/mcp25xxfd/mcp25xxfd.h
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp25xxfd - Microchip MCP25xxFD Family CAN controller driver
+ *
+ * Copyright (c) 2019 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+ */
+
+#ifndef _MCP25XXFD_H
+#define _MCP25XXFD_H
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+/* MCP25xx registers */
+
+/* CAN FD Controller Module SFR */
+#define MCP25XXFD_REG_CON 0x00
+#define MCP25XXFD_REG_CON_TXBWS_MASK GENMASK(31, 28)
+#define MCP25XXFD_REG_CON_ABAT BIT(27)
+#define MCP25XXFD_REG_CON_REQOP_MASK GENMASK(26, 24)
+#define MCP25XXFD_REG_CON_MODE_MIXED 0
+#define MCP25XXFD_REG_CON_MODE_SLEEP 1
+#define MCP25XXFD_REG_CON_MODE_INT_LOOPBACK 2
+#define MCP25XXFD_REG_CON_MODE_LISTENONLY 3
+#define MCP25XXFD_REG_CON_MODE_CONFIG 4
+#define MCP25XXFD_REG_CON_MODE_EXT_LOOPBACK 5
+#define MCP25XXFD_REG_CON_MODE_CAN2_0 6
+#define MCP25XXFD_REG_CON_MODE_RESTRICTED 7
+#define MCP25XXFD_REG_CON_OPMOD_MASK GENMASK(23, 21)
+#define MCP25XXFD_REG_CON_TXQEN BIT(20)
+#define MCP25XXFD_REG_CON_STEF BIT(19)
+#define MCP25XXFD_REG_CON_SERR2LOM BIT(18)
+#define MCP25XXFD_REG_CON_ESIGM BIT(17)
+#define MCP25XXFD_REG_CON_RTXAT BIT(16)
+#define MCP25XXFD_REG_CON_BRSDIS BIT(12)
+#define MCP25XXFD_REG_CON_BUSY BIT(11)
+#define MCP25XXFD_REG_CON_WFT_MASK GENMASK(10, 9)
+#define MCP25XXFD_REG_CON_WFT_T00FILTER 0x0
+#define MCP25XXFD_REG_CON_WFT_T01FILTER 0x1
+#define MCP25XXFD_REG_CON_WFT_T10FILTER 0x2
+#define MCP25XXFD_REG_CON_WFT_T11FILTER 0x3
+#define MCP25XXFD_REG_CON_WAKFIL BIT(8)
+#define MCP25XXFD_REG_CON_PXEDIS BIT(6)
+#define MCP25XXFD_REG_CON_ISOCRCEN BIT(5)
+#define MCP25XXFD_REG_CON_DNCNT_MASK GENMASK(4, 0)
+
+#define MCP25XXFD_REG_NBTCFG 0x04
+#define MCP25XXFD_REG_NBTCFG_BRP_MASK GENMASK(31, 24)
+#define MCP25XXFD_REG_NBTCFG_TSEG1_MASK GENMASK(23, 16)
+#define MCP25XXFD_REG_NBTCFG_TSEG2_MASK GENMASK(14, 8)
+#define MCP25XXFD_REG_NBTCFG_SJW_MASK GENMASK(6, 0)
+
+#define MCP25XXFD_REG_DBTCFG 0x08
+#define MCP25XXFD_REG_DBTCFG_BRP_MASK GENMASK(31, 24)
+#define MCP25XXFD_REG_DBTCFG_TSEG1_MASK GENMASK(20, 16)
+#define MCP25XXFD_REG_DBTCFG_TSEG2_MASK GENMASK(11, 8)
+#define MCP25XXFD_REG_DBTCFG_SJW_MASK GENMASK(3, 0)
+
+#define MCP25XXFD_REG_TDC 0x0c
+#define MCP25XXFD_REG_TDC_EDGFLTEN BIT(25)
+#define MCP25XXFD_REG_TDC_SID11EN BIT(24)
+#define MCP25XXFD_REG_TDC_TDCMOD_MASK GENMASK(17, 16)
+#define MCP25XXFD_REG_TDC_TDCMOD_AUTO 2
+#define MCP25XXFD_REG_TDC_TDCMOD_MANUAL 1
+#define MCP25XXFD_REG_TDC_TDCMOD_DISABLED 0
+#define MCP25XXFD_REG_TDC_TDCO_MASK GENMASK(14, 8)
+#define MCP25XXFD_REG_TDC_TDCV_MASK GENMASK(5, 0)
+
+#define MCP25XXFD_REG_TBC 0x10
+
+#define MCP25XXFD_REG_TSCON 0x14
+#define MCP25XXFD_REG_TSCON_TSRES BIT(18)
+#define MCP25XXFD_REG_TSCON_TSEOF BIT(17)
+#define MCP25XXFD_REG_TSCON_TBCEN BIT(16)
+#define MCP25XXFD_REG_TSCON_TBCPRE_MASK GENMASK(9, 0)
+
+#define MCP25XXFD_REG_VEC 0x18
+#define MCP25XXFD_REG_VEC_RXCODE_MASK GENMASK(30, 24)
+#define MCP25XXFD_REG_VEC_TXCODE_MASK GENMASK(22, 16)
+#define MCP25XXFD_REG_VEC_FILHIT_MASK GENMASK(12, 8)
+#define MCP25XXFD_REG_VEC_ICODE_MASK GENMASK(6, 0)
+
+#define MCP25XXFD_REG_INT 0x1c
+#define MCP25XXFD_REG_INT_IF_MASK GENMASK(15, 0)
+#define MCP25XXFD_REG_INT_IE_MASK GENMASK(31, 16)
+#define MCP25XXFD_REG_INT_IVMIE BIT(31)
+#define MCP25XXFD_REG_INT_WAKIE BIT(30)
+#define MCP25XXFD_REG_INT_CERRIE BIT(29)
+#define MCP25XXFD_REG_INT_SERRIE BIT(28)
+#define MCP25XXFD_REG_INT_RXOVIE BIT(27)
+#define MCP25XXFD_REG_INT_TXATIE BIT(26)
+#define MCP25XXFD_REG_INT_SPICRCIE BIT(25)
+#define MCP25XXFD_REG_INT_ECCIE BIT(24)
+#define MCP25XXFD_REG_INT_TEFIE BIT(20)
+#define MCP25XXFD_REG_INT_MODIE BIT(19)
+#define MCP25XXFD_REG_INT_TBCIE BIT(18)
+#define MCP25XXFD_REG_INT_RXIE BIT(17)
+#define MCP25XXFD_REG_INT_TXIE BIT(16)
+#define MCP25XXFD_REG_INT_IVMIF BIT(15)
+#define MCP25XXFD_REG_INT_WAKIF BIT(14)
+#define MCP25XXFD_REG_INT_CERRIF BIT(13)
+#define MCP25XXFD_REG_INT_SERRIF BIT(12)
+#define MCP25XXFD_REG_INT_RXOVIF BIT(11)
+#define MCP25XXFD_REG_INT_TXATIF BIT(10)
+#define MCP25XXFD_REG_INT_SPICRCIF BIT(9)
+#define MCP25XXFD_REG_INT_ECCIF BIT(8)
+#define MCP25XXFD_REG_INT_TEFIF BIT(4)
+#define MCP25XXFD_REG_INT_MODIF BIT(3)
+#define MCP25XXFD_REG_INT_TBCIF BIT(2)
+#define MCP25XXFD_REG_INT_RXIF BIT(1)
+#define MCP25XXFD_REG_INT_TXIF BIT(0)
+/* These IRQ flags must be cleared by SW in the CAN_INT register */
+#define MCP25XXFD_REG_INT_IF_CLEARABLE_MASK \
+ (MCP25XXFD_REG_INT_IVMIF | MCP25XXFD_REG_INT_WAKIF | \
+ MCP25XXFD_REG_INT_CERRIF | MCP25XXFD_REG_INT_SERRIF | \
+ MCP25XXFD_REG_INT_MODIF)
+
+#define MCP25XXFD_REG_RXIF 0x20
+#define MCP25XXFD_REG_TXIF 0x24
+#define MCP25XXFD_REG_RXOVIF 0x28
+#define MCP25XXFD_REG_TXATIF 0x2c
+#define MCP25XXFD_REG_TXREQ 0x30
+
+#define MCP25XXFD_REG_TREC 0x34
+#define MCP25XXFD_REG_TREC_TXBO BIT(21)
+#define MCP25XXFD_REG_TREC_TXBP BIT(20)
+#define MCP25XXFD_REG_TREC_RXBP BIT(19)
+#define MCP25XXFD_REG_TREC_TXWARN BIT(18)
+#define MCP25XXFD_REG_TREC_RXWARN BIT(17)
+#define MCP25XXFD_REG_TREC_EWARN BIT(16)
+#define MCP25XXFD_REG_TREC_TEC_MASK GENMASK(15, 8)
+#define MCP25XXFD_REG_TREC_REC_MASK GENMASK(7, 0)
+
+#define MCP25XXFD_REG_BDIAG0 0x38
+#define MCP25XXFD_REG_BDIAG0_DTERRCNT_MASK GENMASK(31, 24)
+#define MCP25XXFD_REG_BDIAG0_DRERRCNT_MASK GENMASK(23, 16)
+#define MCP25XXFD_REG_BDIAG0_NTERRCNT_MASK GENMASK(15, 8)
+#define MCP25XXFD_REG_BDIAG0_NRERRCNT_MASK GENMASK(7, 0)
+
+#define MCP25XXFD_REG_BDIAG1 0x3c
+#define MCP25XXFD_REG_BDIAG1_DLCMM BIT(31)
+#define MCP25XXFD_REG_BDIAG1_ESI BIT(30)
+#define MCP25XXFD_REG_BDIAG1_DCRCERR BIT(29)
+#define MCP25XXFD_REG_BDIAG1_DSTUFERR BIT(28)
+#define MCP25XXFD_REG_BDIAG1_DFORMERR BIT(27)
+#define MCP25XXFD_REG_BDIAG1_DBIT1ERR BIT(25)
+#define MCP25XXFD_REG_BDIAG1_DBIT0ERR BIT(24)
+#define MCP25XXFD_REG_BDIAG1_TXBOERR BIT(23)
+#define MCP25XXFD_REG_BDIAG1_NCRCERR BIT(21)
+#define MCP25XXFD_REG_BDIAG1_NSTUFERR BIT(20)
+#define MCP25XXFD_REG_BDIAG1_NFORMERR BIT(19)
+#define MCP25XXFD_REG_BDIAG1_NACKERR BIT(18)
+#define MCP25XXFD_REG_BDIAG1_NBIT1ERR BIT(17)
+#define MCP25XXFD_REG_BDIAG1_NBIT0ERR BIT(16)
+#define MCP25XXFD_REG_BDIAG1_BERR_MASK \
+ (MCP25XXFD_REG_BDIAG1_DLCMM | MCP25XXFD_REG_BDIAG1_ESI | \
+ MCP25XXFD_REG_BDIAG1_DCRCERR | MCP25XXFD_REG_BDIAG1_DSTUFERR | \
+ MCP25XXFD_REG_BDIAG1_DFORMERR | MCP25XXFD_REG_BDIAG1_DBIT1ERR | \
+ MCP25XXFD_REG_BDIAG1_DBIT0ERR | MCP25XXFD_REG_BDIAG1_TXBOERR | \
+ MCP25XXFD_REG_BDIAG1_NCRCERR | MCP25XXFD_REG_BDIAG1_NSTUFERR | \
+ MCP25XXFD_REG_BDIAG1_NFORMERR | MCP25XXFD_REG_BDIAG1_NACKERR | \
+ MCP25XXFD_REG_BDIAG1_NBIT1ERR | MCP25XXFD_REG_BDIAG1_NBIT0ERR)
+#define MCP25XXFD_REG_BDIAG1_EFMSGCNT_MASK GENMASK(15, 0)
+
+#define MCP25XXFD_REG_TEFCON 0x40
+#define MCP25XXFD_REG_TEFCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP25XXFD_REG_TEFCON_FRESET BIT(10)
+#define MCP25XXFD_REG_TEFCON_UINC BIT(8)
+#define MCP25XXFD_REG_TEFCON_TEFTSEN BIT(5)
+#define MCP25XXFD_REG_TEFCON_TEFOVIE BIT(3)
+#define MCP25XXFD_REG_TEFCON_TEFFIE BIT(2)
+#define MCP25XXFD_REG_TEFCON_TEFHIE BIT(1)
+#define MCP25XXFD_REG_TEFCON_TEFNEIE BIT(0)
+
+#define MCP25XXFD_REG_TEFSTA 0x44
+#define MCP25XXFD_REG_TEFSTA_TEFOVIF BIT(3)
+#define MCP25XXFD_REG_TEFSTA_TEFFIF BIT(2)
+#define MCP25XXFD_REG_TEFSTA_TEFHIF BIT(1)
+#define MCP25XXFD_REG_TEFSTA_TEFNEIF BIT(0)
+
+#define MCP25XXFD_REG_TEFUA 0x48
+
+#define MCP25XXFD_REG_TXQCON 0x50
+#define MCP25XXFD_REG_TXQCON_PLSIZE_MASK GENMASK(31, 29)
+#define MCP25XXFD_REG_TXQCON_PLSIZE_8 0
+#define MCP25XXFD_REG_TXQCON_PLSIZE_12 1
+#define MCP25XXFD_REG_TXQCON_PLSIZE_16 2
+#define MCP25XXFD_REG_TXQCON_PLSIZE_20 3
+#define MCP25XXFD_REG_TXQCON_PLSIZE_24 4
+#define MCP25XXFD_REG_TXQCON_PLSIZE_32 5
+#define MCP25XXFD_REG_TXQCON_PLSIZE_48 6
+#define MCP25XXFD_REG_TXQCON_PLSIZE_64 7
+#define MCP25XXFD_REG_TXQCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP25XXFD_REG_TXQCON_TXAT_UNLIMITED 3
+#define MCP25XXFD_REG_TXQCON_TXAT_THREE_SHOT 1
+#define MCP25XXFD_REG_TXQCON_TXAT_ONE_SHOT 0
+#define MCP25XXFD_REG_TXQCON_TXAT_MASK GENMASK(22, 21)
+#define MCP25XXFD_REG_TXQCON_TXPRI_MASK GENMASK(20, 16)
+#define MCP25XXFD_REG_TXQCON_FRESET BIT(10)
+#define MCP25XXFD_REG_TXQCON_TXREQ BIT(9)
+#define MCP25XXFD_REG_TXQCON_UINC BIT(8)
+#define MCP25XXFD_REG_TXQCON_TXEN BIT(7)
+#define MCP25XXFD_REG_TXQCON_TXATIE BIT(4)
+#define MCP25XXFD_REG_TXQCON_TXQEIE BIT(2)
+#define MCP25XXFD_REG_TXQCON_TXQNIE BIT(0)
+
+#define MCP25XXFD_REG_TXQSTA 0x54
+#define MCP25XXFD_REG_TXQSTA_TXQCI_MASK GENMASK(12, 8)
+#define MCP25XXFD_REG_TXQSTA_TXABT BIT(7)
+#define MCP25XXFD_REG_TXQSTA_TXLARB BIT(6)
+#define MCP25XXFD_REG_TXQSTA_TXERR BIT(5)
+#define MCP25XXFD_REG_TXQSTA_TXATIF BIT(4)
+#define MCP25XXFD_REG_TXQSTA_TXQEIF BIT(2)
+#define MCP25XXFD_REG_TXQSTA_TXQNIF BIT(0)
+
+#define MCP25XXFD_REG_TXQUA 0x58
+
+#define MCP25XXFD_REG_FIFOCON(x) (0x50 + 0xc * (x))
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_MASK GENMASK(31, 29)
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_8 0
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_12 1
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_16 2
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_20 3
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_24 4
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_32 5
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_48 6
+#define MCP25XXFD_REG_FIFOCON_PLSIZE_64 7
+#define MCP25XXFD_REG_FIFOCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP25XXFD_REG_FIFOCON_TXAT_MASK GENMASK(22, 21)
+#define MCP25XXFD_REG_FIFOCON_TXAT_ONE_SHOT 0
+#define MCP25XXFD_REG_FIFOCON_TXAT_THREE_SHOT 1
+#define MCP25XXFD_REG_FIFOCON_TXAT_UNLIMITED 3
+#define MCP25XXFD_REG_FIFOCON_TXPRI_MASK GENMASK(20, 16)
+#define MCP25XXFD_REG_FIFOCON_FRESET BIT(10)
+#define MCP25XXFD_REG_FIFOCON_TXREQ BIT(9)
+#define MCP25XXFD_REG_FIFOCON_UINC BIT(8)
+#define MCP25XXFD_REG_FIFOCON_TXEN BIT(7)
+#define MCP25XXFD_REG_FIFOCON_RTREN BIT(6)
+#define MCP25XXFD_REG_FIFOCON_RXTSEN BIT(5)
+#define MCP25XXFD_REG_FIFOCON_TXATIE BIT(4)
+#define MCP25XXFD_REG_FIFOCON_RXOVIE BIT(3)
+#define MCP25XXFD_REG_FIFOCON_TFERFFIE BIT(2)
+#define MCP25XXFD_REG_FIFOCON_TFHRFHIE BIT(1)
+#define MCP25XXFD_REG_FIFOCON_TFNRFNIE BIT(0)
+
+#define MCP25XXFD_REG_FIFOSTA(x) (0x54 + 0xc * (x))
+#define MCP25XXFD_REG_FIFOSTA_FIFOCI_MASK GENMASK(12, 8)
+#define MCP25XXFD_REG_FIFOSTA_TXABT BIT(7)
+#define MCP25XXFD_REG_FIFOSTA_TXLARB BIT(6)
+#define MCP25XXFD_REG_FIFOSTA_TXERR BIT(5)
+#define MCP25XXFD_REG_FIFOSTA_TXATIF BIT(4)
+#define MCP25XXFD_REG_FIFOSTA_RXOVIF BIT(3)
+#define MCP25XXFD_REG_FIFOSTA_TFERFFIF BIT(2)
+#define MCP25XXFD_REG_FIFOSTA_TFHRFHIF BIT(1)
+#define MCP25XXFD_REG_FIFOSTA_TFNRFNIF BIT(0)
+
+#define MCP25XXFD_REG_FIFOUA(x) (0x58 + 0xc * (x))
+
+#define MCP25XXFD_REG_FLTCON(x) (0x1d0 + 0x4 * (x))
+#define MCP25XXFD_REG_FLTCON_FLTEN3 BIT(31)
+#define MCP25XXFD_REG_FLTCON_F3BP_MASK GENMASK(28, 24)
+#define MCP25XXFD_REG_FLTCON_FLTEN2 BIT(23)
+#define MCP25XXFD_REG_FLTCON_F2BP_MASK GENMASK(20, 16)
+#define MCP25XXFD_REG_FLTCON_FLTEN1 BIT(15)
+#define MCP25XXFD_REG_FLTCON_F1BP_MASK GENMASK(12, 8)
+#define MCP25XXFD_REG_FLTCON_FLTEN0 BIT(7)
+#define MCP25XXFD_REG_FLTCON_F0BP_MASK GENMASK(4, 0)
+#define MCP25XXFD_REG_FLTCON_FLTEN(x) (BIT(7) << 8 * ((x) & 0x3))
+#define MCP25XXFD_REG_FLTCON_FLT_MASK(x) (GENMASK(7, 0) << (8 * ((x) & 0x3)))
+#define MCP25XXFD_REG_FLTCON_FBP(x, fifo) ((fifo) << 8 * ((x) & 0x3))
+
+#define MCP25XXFD_REG_FLTOBJ(x) (0x1f0 + 0x8 * (x))
+#define MCP25XXFD_REG_FLTOBJ_EXIDE BIT(30)
+#define MCP25XXFD_REG_FLTOBJ_SID11 BIT(29)
+#define MCP25XXFD_REG_FLTOBJ_EID_MASK GENMASK(28, 11)
+#define MCP25XXFD_REG_FLTOBJ_SID_MASK GENMASK(10, 0)
+
+#define MCP25XXFD_REG_FLTMASK(x) (0x1f4 + 0x8 * (x))
+#define MCP25XXFD_REG_MASK_MIDE BIT(30)
+#define MCP25XXFD_REG_MASK_MSID11 BIT(29)
+#define MCP25XXFD_REG_MASK_MEID_MASK GENMASK(28, 11)
+#define MCP25XXFD_REG_MASK_MSID_MASK GENMASK(10, 0)
+
+/* RAM */
+#define MCP25XXFD_RAM_START 0x400
+#define MCP25XXFD_RAM_SIZE SZ_2K
+
+/* Message Object */
+#define MCP25XXFD_OBJ_ID_SID11 BIT(29)
+#define MCP25XXFD_OBJ_ID_EID_MASK GENMASK(28, 11)
+#define MCP25XXFD_OBJ_ID_SID_MASK GENMASK(10, 0)
+#define MCP25XXFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK GENMASK(31, 9)
+#define MCP25XXFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK GENMASK(15, 9)
+#define MCP25XXFD_OBJ_FLAGS_SEQ_MASK MCP25XXFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK
+#define MCP25XXFD_OBJ_FLAGS_ESI BIT(8)
+#define MCP25XXFD_OBJ_FLAGS_FDF BIT(7)
+#define MCP25XXFD_OBJ_FLAGS_BRS BIT(6)
+#define MCP25XXFD_OBJ_FLAGS_RTR BIT(5)
+#define MCP25XXFD_OBJ_FLAGS_IDE BIT(4)
+#define MCP25XXFD_OBJ_FLAGS_DLC GENMASK(3, 0)
+
+#define MCP25XXFD_REG_FRAME_EFF_SID_MASK GENMASK(28, 18)
+#define MCP25XXFD_REG_FRAME_EFF_EID_MASK GENMASK(17, 0)
+
+/* MCP2517/18FD SFR */
+#define MCP25XXFD_REG_OSC 0xe00
+#define MCP25XXFD_REG_OSC_SCLKRDY BIT(12)
+#define MCP25XXFD_REG_OSC_OSCRDY BIT(10)
+#define MCP25XXFD_REG_OSC_PLLRDY BIT(8)
+#define MCP25XXFD_REG_OSC_CLKODIV_10 3
+#define MCP25XXFD_REG_OSC_CLKODIV_4 2
+#define MCP25XXFD_REG_OSC_CLKODIV_2 1
+#define MCP25XXFD_REG_OSC_CLKODIV_1 0
+#define MCP25XXFD_REG_OSC_CLKODIV_MASK GENMASK(6, 5)
+#define MCP25XXFD_REG_OSC_SCLKDIV BIT(4)
+#define MCP25XXFD_REG_OSC_LPMEN BIT(3) /* MCP2518FD only */
+#define MCP25XXFD_REG_OSC_OSCDIS BIT(2)
+#define MCP25XXFD_REG_OSC_PLLEN BIT(0)
+
+#define MCP25XXFD_REG_IOCON 0xe04
+#define MCP25XXFD_REG_IOCON_INTOD BIT(30)
+#define MCP25XXFD_REG_IOCON_SOF BIT(29)
+#define MCP25XXFD_REG_IOCON_TXCANOD BIT(28)
+#define MCP25XXFD_REG_IOCON_PM1 BIT(25)
+#define MCP25XXFD_REG_IOCON_PM0 BIT(24)
+#define MCP25XXFD_REG_IOCON_GPIO1 BIT(17)
+#define MCP25XXFD_REG_IOCON_GPIO0 BIT(16)
+#define MCP25XXFD_REG_IOCON_LAT1 BIT(9)
+#define MCP25XXFD_REG_IOCON_LAT0 BIT(8)
+#define MCP25XXFD_REG_IOCON_XSTBYEN BIT(6)
+#define MCP25XXFD_REG_IOCON_TRIS1 BIT(1)
+#define MCP25XXFD_REG_IOCON_TRIS0 BIT(0)
+
+#define MCP25XXFD_REG_CRC 0xe08
+#define MCP25XXFD_REG_CRC_FERRIE BIT(25)
+#define MCP25XXFD_REG_CRC_CRCERRIE BIT(24)
+#define MCP25XXFD_REG_CRC_FERRIF BIT(17)
+#define MCP25XXFD_REG_CRC_CRCERRIF BIT(16)
+#define MCP25XXFD_REG_CRC_IF_MASK GENMASK(17, 16)
+#define MCP25XXFD_REG_CRC_MASK GENMASK(15, 0)
+
+#define MCP25XXFD_REG_ECCCON 0xe0c
+#define MCP25XXFD_REG_ECCCON_PARITY_MASK GENMASK(14, 8)
+#define MCP25XXFD_REG_ECCCON_DEDIE BIT(2)
+#define MCP25XXFD_REG_ECCCON_SECIE BIT(1)
+#define MCP25XXFD_REG_ECCCON_ECCEN BIT(0)
+
+#define MCP25XXFD_REG_ECCSTAT 0xe10
+#define MCP25XXFD_REG_ECCSTAT_ERRADDR_MASK GENMASK(27, 16)
+#define MCP25XXFD_REG_ECCSTAT_IF_MASK GENMASK(2, 1)
+#define MCP25XXFD_REG_ECCSTAT_DEDIF BIT(2)
+#define MCP25XXFD_REG_ECCSTAT_SECIF BIT(1)
+
+#define MCP25XXFD_REG_DEVID 0xe14 /* MCP2518FD only */
+#define MCP25XXFD_REG_DEVID_ID_MASK GENMASK(7, 4)
+#define MCP25XXFD_REG_DEVID_REV_MASK GENMASK(3, 0)
+
+/* number of TX FIFO objects, depending on CAN mode
+ *
+ * Classic CAN FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
+ * CAN FD FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
+ */
+#define MCP25XXFD_TX_OBJ_NUM_CAN 8
+#define MCP25XXFD_TX_OBJ_NUM_CANFD 4
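+/* Where the object sizes come from: a TEF object is 3 u32 = 12 bytes
+ * (id, flags, timestamp); a TX object is 2 u32 = 8 bytes of id + flags
+ * plus the payload, i.e. 8 + 8 = 16 bytes for Classic CAN and
+ * 8 + 64 = 72 bytes for CAN FD, see the mcp25xxfd_hw_*_obj structs
+ * below.
+ */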
+
+#if MCP25XXFD_TX_OBJ_NUM_CAN > MCP25XXFD_TX_OBJ_NUM_CANFD
+#define MCP25XXFD_TX_OBJ_NUM_MAX MCP25XXFD_TX_OBJ_NUM_CAN
+#else
+#define MCP25XXFD_TX_OBJ_NUM_MAX MCP25XXFD_TX_OBJ_NUM_CANFD
+#endif
+
+#define MCP25XXFD_NAPI_WEIGHT 32
+#define MCP25XXFD_TX_FIFO 1
+#define MCP25XXFD_RX_FIFO(x) (MCP25XXFD_TX_FIFO + 1 + (x))
+
+/* SPI commands */
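+/* A command word combines the 4-bit instruction in the upper nibble
+ * with a 12-bit register/RAM address, e.g. a READ of register 0xe00 is
+ * sent as 0x3e00, big-endian on the wire (see
+ * mcp25xxfd_spi_cmd_read_nocrc() below).
+ */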
+#define MCP25XXFD_SPI_INSTRUCTION_RESET 0x0000
+#define MCP25XXFD_SPI_INSTRUCTION_WRITE 0x2000
+#define MCP25XXFD_SPI_INSTRUCTION_READ 0x3000
+#define MCP25XXFD_SPI_INSTRUCTION_WRITE_CRC 0xa000
+#define MCP25XXFD_SPI_INSTRUCTION_READ_CRC 0xb000
+#define MCP25XXFD_SPI_INSTRUCTION_WRITE_CRC_SAFE 0xc000
+#define MCP25XXFD_SPI_ADDRESS_MASK GENMASK(11, 0)
+
+#define MCP25XXFD_SYSCLOCK_HZ_MAX 40000000
+#define MCP25XXFD_SYSCLOCK_HZ_MIN 1000000
+#define MCP25XXFD_SPICLOCK_HZ_MAX 20000000
+#define MCP25XXFD_OSC_PLL_MULTIPLIER 10
+#define MCP25XXFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC)
+#define MCP25XXFD_OSC_STAB_TIMEOUT_US (10 * MCP25XXFD_OSC_STAB_SLEEP_US)
+#define MCP25XXFD_POLL_SLEEP_US (10)
+#define MCP25XXFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+#define MCP25XXFD_SOFTRESET_RETRIES_MAX 3
+#define MCP25XXFD_READ_CRC_RETRIES_MAX 3
+#define MCP25XXFD_ECC_CNT_MAX 2
+#define MCP25XXFD_SANITIZE_SPI 1
+#define MCP25XXFD_SANITIZE_CAN 1
+
+/* Silence TX MAB overflow warnings */
+#define MCP25XXFD_QUIRK_MAB_NO_WARN BIT(0)
+/* Use CRC to access registers */
+#define MCP25XXFD_QUIRK_CRC_REG BIT(1)
+/* Use CRC to access RX/TEF-RAM */
+#define MCP25XXFD_QUIRK_CRC_RX BIT(2)
+/* Use CRC to access TX-RAM */
+#define MCP25XXFD_QUIRK_CRC_TX BIT(3)
+/* Enable ECC for RAM */
+#define MCP25XXFD_QUIRK_ECC BIT(4)
+/* Use Half Duplex SPI transfers */
+#define MCP25XXFD_QUIRK_HALF_DUPLEX BIT(5)
+
+struct mcp25xxfd_hw_tef_obj {
+ u32 id;
+ u32 flags;
+ u32 ts;
+};
+
+/* The tx_obj_raw version is used with spi_async(), i.e. without
+ * regmap, so we have to take care of endianness ourselves.
+ */
+struct mcp25xxfd_hw_tx_obj_raw {
+ __le32 id;
+ __le32 flags;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp25xxfd_hw_tx_obj_can {
+ u32 id;
+ u32 flags;
+ u8 data[sizeof_field(struct can_frame, data)];
+};
+
+struct mcp25xxfd_hw_tx_obj_canfd {
+ u32 id;
+ u32 flags;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp25xxfd_hw_rx_obj_can {
+ u32 id;
+ u32 flags;
+ u32 ts;
+ u8 data[sizeof_field(struct can_frame, data)];
+};
+
+struct mcp25xxfd_hw_rx_obj_canfd {
+ u32 id;
+ u32 flags;
+ u32 ts;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp25xxfd_tef_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ /* u8 obj_num equals tx_ring->obj_num */
+ /* u8 obj_size equals sizeof(struct mcp25xxfd_hw_tef_obj) */
+};
+
+struct __packed mcp25xxfd_buf_cmd {
+ __be16 cmd;
+};
+
+struct __packed mcp25xxfd_buf_cmd_crc {
+ __be16 cmd;
+ u8 len;
+};
+
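+/* SPI buffer used to load a TX object into the controller's RAM via
+ * spi_async(). The crc variant carries the length byte of the CRC
+ * write command and a trailing big-endian CRC-16 over command and
+ * payload, both filled in by mcp25xxfd_tx_obj_from_skb().
+ */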
+union mcp25xxfd_tx_obj_load_buf {
+ struct __packed {
+ struct mcp25xxfd_buf_cmd cmd;
+ struct mcp25xxfd_hw_tx_obj_raw hw_tx_obj;
+ } nocrc;
+ struct __packed {
+ struct mcp25xxfd_buf_cmd_crc cmd;
+ struct mcp25xxfd_hw_tx_obj_raw hw_tx_obj;
+ __be16 crc;
+ } crc;
+} ____cacheline_aligned;
+
+union mcp25xxfd_write_reg_buf {
+ struct __packed {
+ struct mcp25xxfd_buf_cmd cmd;
+ u8 data[4];
+ } nocrc;
+ struct __packed {
+ struct mcp25xxfd_buf_cmd_crc cmd;
+ u8 data[4];
+ __be16 crc;
+ } crc;
+} ____cacheline_aligned;
+
+struct mcp25xxfd_tx_obj {
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ union mcp25xxfd_tx_obj_load_buf buf;
+};
+
+struct mcp25xxfd_tx_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ u16 base;
+ u8 obj_num;
+ u8 obj_size;
+
+ struct mcp25xxfd_tx_obj obj[MCP25XXFD_TX_OBJ_NUM_MAX];
+ union mcp25xxfd_write_reg_buf rts_buf;
+};
+
+struct mcp25xxfd_rx_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ u16 base;
+ u8 nr;
+ u8 fifo_nr;
+ u8 obj_num;
+ u8 obj_size;
+
+ struct mcp25xxfd_hw_rx_obj_canfd obj[];
+};
+
+struct __packed mcp25xxfd_map_buf_nocrc {
+ struct mcp25xxfd_buf_cmd cmd;
+ u8 data[256];
+} ____cacheline_aligned;
+
+struct __packed mcp25xxfd_map_buf_crc {
+ struct mcp25xxfd_buf_cmd_crc cmd;
+ u8 data[256 - 4];
+ __be16 crc;
+} ____cacheline_aligned;
+
+struct mcp25xxfd_ecc {
+ u32 ecc_stat;
+ int cnt;
+};
+
+struct mcp25xxfd_regs_status {
+ u32 intf;
+};
+
+enum mcp25xxfd_model {
+ MCP25XXFD_MODEL_MCP2517FD = 0x2517,
+ MCP25XXFD_MODEL_MCP2518FD = 0x2518,
+ MCP25XXFD_MODEL_MCP25XXFD = 0xffff, /* autodetect model */
+};
+
+struct mcp25xxfd_devtype_data {
+ enum mcp25xxfd_model model;
+ u32 quirks;
+};
+
+struct mcp25xxfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ struct regmap *map_reg; /* register access */
+ struct regmap *map_rx; /* RX/TEF RAM access */
+
+ struct regmap *map_nocrc;
+ struct mcp25xxfd_map_buf_nocrc *map_buf_nocrc_rx;
+ struct mcp25xxfd_map_buf_nocrc *map_buf_nocrc_tx;
+
+ struct regmap *map_crc;
+ struct mcp25xxfd_map_buf_crc *map_buf_crc_rx;
+ struct mcp25xxfd_map_buf_crc *map_buf_crc_tx;
+
+ struct spi_device *spi;
+ u32 spi_max_speed_hz_orig;
+
+ struct mcp25xxfd_tef_ring tef;
+ struct mcp25xxfd_tx_ring tx[1];
+ struct mcp25xxfd_rx_ring *rx[1];
+
+ u8 rx_ring_num;
+
+ struct mcp25xxfd_ecc ecc;
+ struct mcp25xxfd_regs_status regs_status;
+
+ struct gpio_desc *rx_int;
+ struct clk *clk;
+ struct regulator *reg_vdd;
+ struct regulator *reg_xceiver;
+
+ struct mcp25xxfd_devtype_data devtype_data;
+ struct can_berr_counter bec;
+};
+
+#define MCP25XXFD_IS(_model) \
+static inline bool \
+mcp25xxfd_is_##_model(const struct mcp25xxfd_priv *priv) \
+{ \
+ return priv->devtype_data.model == MCP25XXFD_MODEL_MCP##_model##FD; \
+}
+
+MCP25XXFD_IS(2517);
+MCP25XXFD_IS(2518);
+MCP25XXFD_IS(25XX);
+
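+/* Index of the lowest/highest byte set in a 32-bit mask, e.g. for the
+ * mask 0x00ffff00 the first byte set is 1 and the last byte set is 2.
+ * Both assume a non-zero mask; they are used to shrink register
+ * accesses to the bytes that are actually modified.
+ */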
+static inline u8 mcp25xxfd_first_byte_set(u32 mask)
+{
+ return (mask & 0x0000ffff) ?
+ ((mask & 0x000000ff) ? 0 : 1) :
+ ((mask & 0x00ff0000) ? 2 : 3);
+}
+
+static inline u8 mcp25xxfd_last_byte_set(u32 mask)
+{
+ return (mask & 0xffff0000) ?
+ ((mask & 0xff000000) ? 3 : 2) :
+ ((mask & 0x0000ff00) ? 1 : 0);
+}
+
+static inline __be16 mcp25xxfd_cmd_reset(void)
+{
+ return cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_RESET);
+}
+
+static inline void
+mcp25xxfd_spi_cmd_read_nocrc(struct mcp25xxfd_buf_cmd *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_READ | addr);
+}
+
+static inline void
+mcp25xxfd_spi_cmd_write_nocrc(struct mcp25xxfd_buf_cmd *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_WRITE | addr);
+}
+
+static inline bool mcp25xxfd_reg_in_ram(unsigned int reg)
+{
+ static const struct regmap_range range =
+ regmap_reg_range(MCP25XXFD_RAM_START,
+ MCP25XXFD_RAM_START + MCP25XXFD_RAM_SIZE - 4);
+
+ return regmap_reg_in_range(reg, &range);
+}
+
+static inline void
+__mcp25xxfd_spi_cmd_crc_set_len(struct mcp25xxfd_buf_cmd_crc *cmd,
+ u16 len, bool in_ram)
+{
+ /* Number of u32 for RAM access, number of u8 otherwise. */
+ if (in_ram)
+ cmd->len = len >> 2;
+ else
+ cmd->len = len;
+}
+
+static inline void
+mcp25xxfd_spi_cmd_crc_set_len_in_ram(struct mcp25xxfd_buf_cmd_crc *cmd, u16 len)
+{
+ __mcp25xxfd_spi_cmd_crc_set_len(cmd, len, true);
+}
+
+static inline void
+mcp25xxfd_spi_cmd_crc_set_len_in_reg(struct mcp25xxfd_buf_cmd_crc *cmd, u16 len)
+{
+ __mcp25xxfd_spi_cmd_crc_set_len(cmd, len, false);
+}
+
+static inline void
+mcp25xxfd_spi_cmd_read_crc_set_addr(struct mcp25xxfd_buf_cmd_crc *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_READ_CRC | addr);
+}
+
+static inline void
+mcp25xxfd_spi_cmd_read_crc(struct mcp25xxfd_buf_cmd_crc *cmd,
+ u16 addr, u16 len)
+{
+ mcp25xxfd_spi_cmd_read_crc_set_addr(cmd, addr);
+ __mcp25xxfd_spi_cmd_crc_set_len(cmd, len, mcp25xxfd_reg_in_ram(addr));
+}
+
+static inline void
+mcp25xxfd_spi_cmd_write_crc_set_addr(struct mcp25xxfd_buf_cmd_crc *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP25XXFD_SPI_INSTRUCTION_WRITE_CRC | addr);
+}
+
+static inline void
+mcp25xxfd_spi_cmd_write_crc(struct mcp25xxfd_buf_cmd_crc *cmd,
+ u16 addr, u16 len)
+{
+ mcp25xxfd_spi_cmd_write_crc_set_addr(cmd, addr);
+ __mcp25xxfd_spi_cmd_crc_set_len(cmd, len, mcp25xxfd_reg_in_ram(addr));
+}
+
+static inline u8 *
+mcp25xxfd_spi_cmd_write(const struct mcp25xxfd_priv *priv,
+ union mcp25xxfd_write_reg_buf *write_reg_buf,
+ u16 addr)
+{
+ u8 *data;
+
+ if (priv->devtype_data.quirks & MCP25XXFD_QUIRK_CRC_REG) {
+ mcp25xxfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ } else {
+ mcp25xxfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
+ addr);
+ data = write_reg_buf->nocrc.data;
+ }
+
+ return data;
+}
+
+static inline u16 mcp25xxfd_get_tef_obj_addr(u8 n)
+{
+ return MCP25XXFD_RAM_START +
+ sizeof(struct mcp25xxfd_hw_tef_obj) * n;
+}
+
+static inline u16
+mcp25xxfd_get_tx_obj_addr(const struct mcp25xxfd_tx_ring *ring, u8 n)
+{
+ return ring->base + ring->obj_size * n;
+}
+
+static inline u16
+mcp25xxfd_get_rx_obj_addr(const struct mcp25xxfd_rx_ring *ring, u8 n)
+{
+ return ring->base + ring->obj_size * n;
+}
+
+static inline u8 mcp25xxfd_get_tef_head(const struct mcp25xxfd_priv *priv)
+{
+ return priv->tef.head & (priv->tx->obj_num - 1);
+}
+
+static inline u8 mcp25xxfd_get_tef_tail(const struct mcp25xxfd_priv *priv)
+{
+ return priv->tef.tail & (priv->tx->obj_num - 1);
+}
+
+static inline u8 mcp25xxfd_get_tef_len(const struct mcp25xxfd_priv *priv)
+{
+ return priv->tef.head - priv->tef.tail;
+}
+
+static inline u8 mcp25xxfd_get_tef_linear_len(const struct mcp25xxfd_priv *priv)
+{
+ u8 len;
+
+ len = mcp25xxfd_get_tef_len(priv);
+
+ return min_t(u8, len, priv->tx->obj_num - mcp25xxfd_get_tef_tail(priv));
+}
+
+static inline u8 mcp25xxfd_get_tx_head(const struct mcp25xxfd_tx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 mcp25xxfd_get_tx_tail(const struct mcp25xxfd_tx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 mcp25xxfd_get_tx_free(const struct mcp25xxfd_tx_ring *ring)
+{
+ return ring->obj_num - (ring->head - ring->tail);
+}
+
+static inline int
+mcp25xxfd_get_tx_nr_by_addr(const struct mcp25xxfd_tx_ring *tx_ring, u8 *nr,
+ u16 addr)
+{
+ if (addr < mcp25xxfd_get_tx_obj_addr(tx_ring, 0) ||
+ addr >= mcp25xxfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num))
+ return -ENOENT;
+
+ *nr = (addr - mcp25xxfd_get_tx_obj_addr(tx_ring, 0)) /
+ tx_ring->obj_size;
+
+ return 0;
+}
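+
+/* Worked example (illustrative numbers): with tx_ring->base = 0x400,
+ * tx_ring->obj_size = 0x48 and addr = 0x490, this computes
+ * *nr = (0x490 - 0x400) / 0x48 = 2; any address outside
+ * [base, base + obj_num * obj_size) returns -ENOENT instead.
+ */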
+
+static inline u8 mcp25xxfd_get_rx_head(const struct mcp25xxfd_rx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 mcp25xxfd_get_rx_tail(const struct mcp25xxfd_rx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 mcp25xxfd_get_rx_len(const struct mcp25xxfd_rx_ring *ring)
+{
+ return ring->head - ring->tail;
+}
+
+static inline u8
+mcp25xxfd_get_rx_linear_len(const struct mcp25xxfd_rx_ring *ring)
+{
+ u8 len;
+
+ len = mcp25xxfd_get_rx_len(ring);
+
+ return min_t(u8, len, ring->obj_num - mcp25xxfd_get_rx_tail(ring));
+}
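+
+/* Worked example (illustrative only): head and tail are free-running
+ * counters, reduced modulo the power-of-two obj_num by masking. With
+ * obj_num = 8, head = 10 and tail = 6:
+ *
+ *	len    = head - tail			= 4 objects used
+ *	free   = obj_num - (head - tail)	= 4 objects free
+ *	index  = head & (obj_num - 1)		= 2 (wrapped position)
+ *	linear = min(len, obj_num - (tail & 7))	= min(4, 2) = 2 objects
+ *						  readable before the wrap
+ */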
+
+#define mcp25xxfd_for_each_tx_obj(ring, _obj, n) \
+ for ((n) = 0, (_obj) = &(ring)->obj[(n)]; \
+ (n) < (ring)->obj_num; \
+ (n)++, (_obj) = &(ring)->obj[(n)])
+
+#define mcp25xxfd_for_each_rx_ring(priv, ring, n) \
+ for ((n) = 0, (ring) = *((priv)->rx + (n)); \
+ (n) < (priv)->rx_ring_num; \
+ (n)++, (ring) = *((priv)->rx + (n)))
+
+int mcp25xxfd_regmap_init(struct mcp25xxfd_priv *priv);
+u16 mcp25xxfd_crc16_compute2(const void *cmd, size_t cmd_size,
+ const void *data, size_t data_size);
+u16 mcp25xxfd_crc16_compute(const void *data, size_t data_size);
+
+#endif
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 94b1491b569f..1d63006c97bc 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -454,7 +454,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev,
/* ti_hecc_xmit: HECC Transmit
*
* The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
- * priority of the mailbox for tranmission is dependent upon priority setting
+ * priority of the mailbox for transmission is dependent upon priority setting
* field in mailbox registers. The mailbox with highest value in priority field
* is transmitted first. Only when two mailboxes have the same value in
* priority field the highest numbered mailbox is transmitted first.
@@ -857,7 +857,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
struct device_node *np = pdev->dev.of_node;
- struct resource *res, *irq;
+ struct resource *irq;
struct regulator *reg_xceiver;
int err = -ENODEV;
@@ -878,39 +878,22 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
/* handle hecc memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n");
- return -EINVAL;
- }
-
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
return PTR_ERR(priv->base);
}
/* handle hecc-ram memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n");
- return -EINVAL;
- }
-
- priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+ priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev,
+ "hecc-ram");
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
return PTR_ERR(priv->hecc_ram);
}
/* handle mbx memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n");
- return -EINVAL;
- }
-
- priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+ priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
return PTR_ERR(priv->mbx);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 77fa830fe7dd..bcb331b0c958 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -90,7 +90,7 @@ config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
This driver supports the PEAK-System Technik USB adapters that enable
- access to the CAN bus, with repect to the CAN 2.0b and/or CAN-FD
+ access to the CAN bus, with respect to the CAN 2.0b and/or CAN-FD
standards, that is:
PCAN-USB single CAN 2.0b channel USB adapter
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a4b4b742c80c..3005157059ca 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -828,7 +828,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
- /* dev settup */
+ /* dev setup */
strcpy(dev->bt_const.name, "gs_usb");
dev->bt_const.tseg1_min = bt_const->tseg1_min;
dev->bt_const.tseg1_max = bt_const->tseg1_max;
@@ -852,7 +852,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
}
- /* can settup */
+ /* can setup */
dev->can.state = CAN_STATE_STOPPED;
dev->can.clock.freq = bt_const->fclk_can;
dev->can.bittiming_const = &dev->bt_const;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 21faa2ec4632..5857b37dcd96 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -28,7 +28,7 @@
#define MCBA_CTX_FREE MCBA_MAX_TX_URBS
/* RX buffer must be bigger than msg size since at the
- * beggining USB messages are stacked.
+ * beginning USB messages are stacked.
*/
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
@@ -793,7 +793,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
{
struct net_device *netdev;
struct mcba_priv *priv;
- int err = -ENOMEM;
+ int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 66d0198e7834..63bd2ed96697 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -34,6 +34,23 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \
PCAN_USB_CMD_ARGS_LEN)
+/* PCAN-USB commands */
+#define PCAN_USB_CMD_BITRATE 1
+#define PCAN_USB_CMD_SET_BUS 3
+#define PCAN_USB_CMD_DEVID 4
+#define PCAN_USB_CMD_SN 6
+#define PCAN_USB_CMD_REGISTER 9
+#define PCAN_USB_CMD_EXT_VCC 10
+#define PCAN_USB_CMD_ERR_FR 11
+
+/* PCAN_USB_CMD_SET_BUS 'number' argument values */
+#define PCAN_USB_BUS_XCVER 2
+#define PCAN_USB_BUS_SILENT_MODE 3
+
+/* PCAN_USB_CMD_xxx functions */
+#define PCAN_USB_GET 1
+#define PCAN_USB_SET 2
+
/* PCAN-USB command timeout (ms.) */
#define PCAN_USB_COMMAND_TIMEOUT 1000
@@ -66,6 +83,10 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_ERROR_QOVR 0x40
#define PCAN_USB_ERROR_TXQFULL 0x80
+#define PCAN_USB_ERROR_BUS (PCAN_USB_ERROR_BUS_LIGHT | \
+ PCAN_USB_ERROR_BUS_HEAVY | \
+ PCAN_USB_ERROR_BUS_OFF)
+
/* SJA1000 modes */
#define SJA1000_MODE_NORMAL 0x00
#define SJA1000_MODE_INIT 0x01
@@ -85,11 +106,25 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_REC_TS 4
#define PCAN_USB_REC_BUSEVT 5
+/* CAN bus events notifications selection mask */
+#define PCAN_USB_ERR_RXERR 0x02 /* ask for rxerr counter */
+#define PCAN_USB_ERR_TXERR 0x04 /* ask for txerr counter */
+
+/* This mask requests a USB packet each time the state of the bus changes.
+ * In other words, it is used to know which side, rx or tx, is responsible
+ * for the change of the bus state.
+ */
+#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
+
+/* identify bus event packets with rx/tx error counters */
+#define PCAN_USB_ERR_CNT 0x80
+
/* private to PCAN-USB adapter */
struct pcan_usb {
struct peak_usb_device dev;
struct peak_time_ref time_ref;
struct timer_list restart_timer;
+ struct can_berr_counter bec;
};
/* incoming message context for decoding */
@@ -172,7 +207,8 @@ static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode)
[1] = mode,
};
- return pcan_usb_send_cmd(dev, 9, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_REGISTER, PCAN_USB_SET,
+ args);
}
static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
@@ -181,7 +217,8 @@ static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 3, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_XCVER,
+ args);
}
static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
@@ -190,7 +227,18 @@ static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 3, 3, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS,
+ PCAN_USB_BUS_SILENT_MODE, args);
+}
+
+/* send the cmd to be notified from bus errors */
+static int pcan_usb_set_err_frame(struct peak_usb_device *dev, u8 err_mask)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = err_mask,
+ };
+
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_ERR_FR, PCAN_USB_SET, args);
}
static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
@@ -199,7 +247,7 @@ static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 10, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args);
}
/*
@@ -223,7 +271,7 @@ static int pcan_usb_set_bittiming(struct peak_usb_device *dev,
args[0] = btr1;
args[1] = btr0;
- return pcan_usb_send_cmd(dev, 1, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_BITRATE, PCAN_USB_SET, args);
}
/*
@@ -307,7 +355,7 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
- err = pcan_usb_wait_rsp(dev, 6, 1, args);
+ err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args);
if (err) {
netdev_err(dev->netdev, "getting serial failure: %d\n", err);
} else if (serial_number) {
@@ -328,7 +376,7 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
- err = pcan_usb_wait_rsp(dev, 4, 1, args);
+ err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
netdev_err(dev->netdev, "getting device id failure: %d\n", err);
else if (device_id)
@@ -426,7 +474,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_BUS_OFF;
break;
}
- if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ if (n & ~PCAN_USB_ERROR_BUS) {
/*
* trick to bypass next comparison and process other
* errors
@@ -450,7 +498,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
- if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ if (n & ~PCAN_USB_ERROR_BUS) {
/*
* trick to bypass next comparison and process other
* errors
@@ -489,29 +537,50 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
case CAN_STATE_ERROR_PASSIVE:
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE |
- CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+
mc->pdev->dev.can.can_stats.error_passive++;
break;
case CAN_STATE_ERROR_WARNING:
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING |
- CAN_ERR_CRTL_RX_WARNING;
+ cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+
mc->pdev->dev.can.can_stats.error_warning++;
break;
case CAN_STATE_ERROR_ACTIVE:
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+
+ /* sync local copies of rxerr/txerr counters */
+ mc->pdev->bec.txerr = 0;
+ mc->pdev->bec.rxerr = 0;
break;
default:
/* CAN_STATE_MAX (trick to handle other errors) */
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- mc->netdev->stats.rx_over_errors++;
- mc->netdev->stats.rx_errors++;
+ if (n & PCAN_USB_ERROR_TXQFULL)
+ netdev_dbg(mc->netdev, "device Tx queue full)\n");
+
+ if (n & PCAN_USB_ERROR_RXQOVR) {
+ netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ mc->netdev->stats.rx_over_errors++;
+ mc->netdev->stats.rx_errors++;
+ }
+
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
new_state = mc->pdev->dev.can.state;
break;
@@ -533,6 +602,30 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
return 0;
}
+/* Decode a bus event USB packet: the first byte contains rxerr, the
+ * second one txerr.
+ */
+static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
+{
+ struct pcan_usb *pdev = mc->pdev;
+
+ /* according to the content of the packet */
+ switch (ir) {
+ case PCAN_USB_ERR_CNT:
+
+ /* save the rx/tx error counters in the device context */
+ pdev->bec.rxerr = mc->ptr[0];
+ pdev->bec.txerr = mc->ptr[1];
+ break;
+
+ default:
+ /* reserved */
+ break;
+ }
+
+ return 0;
+}
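+
+/* Example packet (illustrative values): with ir = PCAN_USB_ERR_CNT and
+ * mc->ptr = { 0x60, 0x20 }, this saves rxerr = 0x60 (96) and
+ * txerr = 0x20 (32); pcan_usb_decode_error() then copies them into
+ * cf->data[6] (txerr) and cf->data[7] (rxerr) of the error frame.
+ */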
+
/*
* decode non-data usb message
*/
@@ -587,9 +680,10 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
break;
case PCAN_USB_REC_BUSEVT:
- /* error frame/bus event */
- if (n & PCAN_USB_ERROR_TXQFULL)
- netdev_dbg(mc->netdev, "device Tx queue full)\n");
+ /* bus event notifications (get rxerr/txerr) */
+ err = pcan_usb_handle_bus_evt(mc, n);
+ if (err)
+ return err;
break;
default:
netdev_err(mc->netdev, "unexpected function %u\n", f);
@@ -773,20 +867,44 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
return 0;
}
+/* socket callback used to copy berr counter values received through USB */
+static int pcan_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+
+ *bec = pdev->bec;
+
+ /* must return 0 */
+ return 0;
+}
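+
+/* Usage note (assumed iproute2 behaviour): once do_get_berr_counter is
+ * wired up, user space can query the counters through the CAN netlink
+ * interface, e.g.:
+ *
+ *	$ ip -details link show can0
+ *	... berr-counter tx 32 rx 96 ...
+ *
+ * (illustrative output; the exact formatting depends on iproute2.)
+ */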
+
/*
* start interface
*/
static int pcan_usb_start(struct peak_usb_device *dev)
{
struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+ int err;
/* number of bits used in timestamps read from adapter struct */
peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb);
+ pdev->bec.rxerr = 0;
+ pdev->bec.txerr = 0;
+
+ /* be notified on error counter changes (if requested by user) */
+ if (dev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
+ if (err)
+ netdev_warn(dev->netdev,
+ "Asking for BERR reporting error %u\n",
+ err);
+ }
+
/* if revision greater than 3, can put silent mode on/off */
if (dev->device_rev > 3) {
- int err;
-
err = pcan_usb_set_silent(dev,
dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY);
if (err)
@@ -873,7 +991,8 @@ const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
- .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
@@ -906,4 +1025,5 @@ const struct peak_usb_adapter pcan_usb = {
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
.dev_restart_async = pcan_usb_restart_async,
+ .do_get_berr_counter = pcan_usb_get_berr_counter,
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 47cc1ff5b88e..ab63fd9eb982 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -35,7 +35,7 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
-/* read some versions info from the hw devcie */
+/* read some versions info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
__le16 type; /* type of this structure */
@@ -796,7 +796,7 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
return err;
}
-/* socket callback used to copy berr counters values receieved through USB */
+/* socket callback used to copy berr counter values received through USB */
static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
struct can_berr_counter *bec)
{
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 1689ab387612..c7564773fb2b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -186,7 +186,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
len = pc - pm->rec_ptr;
if (len > 0) {
- *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1);
+ le32_add_cpu(pm->u.rec_cnt, 1);
*pm->rec_ptr = id;
pm->rec_ptr = pc;
@@ -973,7 +973,7 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc;
/*
- * below is the list of valid ep addreses. Any other ep address
+ * below is the list of valid ep addresses. Any other ep address
* is considered as not-CAN interface address => no dev created
*/
switch (ep->bEndpointAddress) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 81e942f713e6..dc5290b36598 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1445,7 +1445,7 @@ static int ucan_probe(struct usb_interface *intf,
/* request the device information and store it in ctl_msg_buffer
*
- * note: ucan_ctrl_command_* wrappers connot be used yet
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
* because `up` is initialised in Stage 3
*/
ret = usb_control_msg(udev,
@@ -1494,7 +1494,7 @@ static int ucan_probe(struct usb_interface *intf,
up = netdev_priv(netdev);
- /* initialze data */
+ /* initialize data */
up->udev = udev;
up->intf = intf;
up->netdev = netdev;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8fa224b28218..62749c67c959 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -88,7 +88,7 @@ enum usb_8dev_cmd {
/* status */
#define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */
-#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occured when sending */
+#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */
#define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */
#define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. has reached 128 */
#define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */
@@ -165,7 +165,7 @@ struct __packed usb_8dev_rx_msg {
/* command frame */
struct __packed usb_8dev_cmd_msg {
u8 begin;
- u8 channel; /* unkown - always 0 */
+ u8 channel; /* unknown - always 0 */
u8 command; /* command to execute */
u8 opt1; /* optional parameter / return value */
u8 opt2; /* optional parameter 2 */
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c1dbab8c896d..7ae268468d2c 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1308,7 +1308,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
/**
* xcan_interrupt - CAN Isr
* @irq: irq number
- * @dev_id: device id poniter
+ * @dev_id: device id pointer
*
* This is the xilinx CAN Isr. It checks for the type of interrupt
* and invokes the corresponding ISR.
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 468b3c4273c5..2451f61a38e4 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,12 +33,12 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "Mediatek MT7530 Ethernet switch support"
+ tristate "MediaTek MT753x and MT7621 Ethernet switch support"
depends on NET_DSA
select NET_DSA_TAG_MTK
help
- This enables support for the Mediatek MT7530 Ethernet switch
- chip.
+ This enables support for the MediaTek MT7530, MT7531, and MT7621
+ Ethernet switch chips.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 6a5796c32721..73507cff3bc4 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1377,23 +1377,6 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct b53_device *dev = ds->priv;
- u16 pvid, new_pvid;
-
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- if (!vlan_filtering) {
- /* Filtering is currently enabled, use the default PVID since
- * the bridge does not expect tagging anymore
- */
- dev->ports[port].pvid = pvid;
- new_pvid = b53_default_pvid(dev);
- } else {
- /* Filtering is currently disabled, restore the previous PVID */
- new_pvid = dev->ports[port].pvid;
- }
-
- if (pvid != new_pvid)
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- new_pvid);
b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
@@ -2619,6 +2602,8 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->configure_vlan_while_not_filtering = true;
+ dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index c55c0a9f1b47..24893b592216 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,7 +91,6 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
struct ethtool_eee eee;
- u16 pvid;
};
struct b53_vlan {
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 723820603107..0b5b2b33b3b6 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -457,6 +457,7 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
{
struct device_node *port;
unsigned int port_num;
+ struct property *prop;
phy_interface_t mode;
int err;
@@ -483,6 +484,16 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
priv->brcm_tag_mask |= 1 << port_num;
+
+ /* Ensure that port 5 is not picked up as a DSA CPU port
+ * flavour but a regular port instead. We should be using
+ * devlink to be able to set the port flavour.
+ */
+ if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
+ prop = of_find_property(port, "ethernet", NULL);
+ if (prop)
+ of_remove_property(port, prop);
+ }
}
}
@@ -527,7 +538,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
* driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
- priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
else
priv->indir_phy_mask = 0;
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 8f1d15ea15d9..f5779e152377 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -932,11 +932,19 @@ static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
if (cpu_port) {
+ if (!p->interface && dev->compat_interface) {
+ dev_warn(dev->dev,
+ "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
+ "Please update your device tree.\n",
+ port);
+ p->interface = dev->compat_interface;
+ }
+
/* Configure MII interface for proper network communication. */
ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
data8 &= ~PORT_INTERFACE_TYPE;
data8 &= ~PORT_GMII_1GPS_MODE;
- switch (dev->interface) {
+ switch (p->interface) {
case PHY_INTERFACE_MODE_MII:
p->phydev.speed = SPEED_100;
break;
@@ -952,11 +960,11 @@ static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port)
default:
data8 &= ~PORT_RGMII_ID_IN_ENABLE;
data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
- if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
data8 |= PORT_RGMII_ID_IN_ENABLE;
- if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
data8 |= PORT_RGMII_ID_OUT_ENABLE;
data8 |= PORT_GMII_1GPS_MODE;
data8 |= PORT_INTERFACE_RGMII;
@@ -1252,7 +1260,7 @@ static int ksz8795_switch_init(struct ksz_device *dev)
}
/* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
+ dev->ds->num_ports = dev->port_cnt + 1;
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index b62dd64470a8..153664bf0e20 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1208,7 +1208,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* configure MAC to 1G & RGMII mode */
ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- switch (dev->interface) {
+ switch (p->interface) {
case PHY_INTERFACE_MODE_MII:
ksz9477_set_xmii(dev, 0, &data8);
ksz9477_set_gbit(dev, false, &data8);
@@ -1229,11 +1229,11 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_set_gbit(dev, true, &data8);
data8 &= ~PORT_RGMII_ID_IG_ENABLE;
data8 &= ~PORT_RGMII_ID_EG_ENABLE;
- if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
data8 |= PORT_RGMII_ID_IG_ENABLE;
- if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
data8 |= PORT_RGMII_ID_EG_ENABLE;
/* On KSZ9893, disable RGMII in-band status support */
if (dev->features & IS_9893)
@@ -1274,15 +1274,25 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
dev->cpu_port = i;
dev->host_mask = (1 << dev->cpu_port);
dev->port_mask |= dev->host_mask;
+ p = &dev->ports[i];
/* Read from XMII register to determine host port
* interface. If set specifically in device tree
* note the difference to help debugging.
*/
interface = ksz9477_get_interface(dev, i);
- if (!dev->interface)
- dev->interface = interface;
- if (interface && interface != dev->interface) {
+ if (!p->interface) {
+ if (dev->compat_interface) {
+ dev_warn(dev->dev,
+ "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
+ "Please update your device tree.\n",
+ i);
+ p->interface = dev->compat_interface;
+ } else {
+ p->interface = interface;
+ }
+ }
+ if (interface && interface != p->interface) {
prev_msg = " instead of ";
prev_mode = phy_modes(interface);
} else {
@@ -1292,13 +1302,12 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
dev_info(dev->dev,
"Port%d: using phy mode %s%s%s\n",
i,
- phy_modes(dev->interface),
+ phy_modes(p->interface),
prev_msg,
prev_mode);
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p = &dev->ports[dev->cpu_port];
p->vid_member = dev->port_mask;
p->on = 1;
}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a31738662d95..cb534547c715 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -388,6 +388,8 @@ int ksz_switch_register(struct ksz_device *dev,
const struct ksz_dev_ops *ops)
{
phy_interface_t interface;
+ struct device_node *port;
+ unsigned int port_num;
int ret;
if (dev->pdata)
@@ -422,10 +424,19 @@ int ksz_switch_register(struct ksz_device *dev,
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
+ for (port_num = 0; port_num < dev->port_cnt; ++port_num)
+ dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
- dev->interface = interface;
+ dev->compat_interface = interface;
+ for_each_available_child_of_node(dev->dev->of_node, port) {
+ if (of_property_read_u32(port, "reg", &port_num))
+ continue;
+ if (port_num >= dev->port_cnt)
+ return -EINVAL;
+ of_get_phy_mode(port, &dev->ports[port_num].interface);
+ }
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
}
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 206838160f49..cf866e48ff66 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -39,6 +39,7 @@ struct ksz_port {
u32 freeze:1; /* MIB counter freeze is enabled */
struct ksz_port_mib mib;
+ phy_interface_t interface;
};
struct ksz_device {
@@ -72,7 +73,7 @@ struct ksz_device {
int mib_cnt;
int mib_port_cnt;
int last_port; /* ports after that not used */
- phy_interface_t interface;
+ phy_interface_t compat_interface;
u32 regs_size;
bool phy_errata_9477;
bool synclko_125;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1aaf47a0da2b..cb3efa7de7a8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -234,6 +234,12 @@ mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
}
static u32
+_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
+{
+ return mt7530_mii_read(p->priv, p->reg);
+}
+
+static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
struct mii_bus *bus = p->priv->bus;
@@ -372,8 +378,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
+/* Setup TX circuit including relevant PAD and driving */
static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
+mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
u32 ncpo1, ssc_delta, trgint, i, xtal;
@@ -387,7 +394,7 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
return -EINVAL;
}
- switch (mode) {
+ switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
/* PLL frequency: 125MHz */
@@ -409,7 +416,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
}
break;
default:
- dev_err(priv->dev, "xMII mode %d not supported\n", mode);
+ dev_err(priv->dev, "xMII interface %d not supported\n",
+ interface);
return -EINVAL;
}
@@ -481,6 +489,108 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
return 0;
}
+static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
+{
+ u32 val;
+
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+
+ return (val & PAD_DUAL_SGMII_EN) != 0;
+}
+
+static int
+mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 top_sig;
+ u32 hwstrap;
+ u32 xtal;
+ u32 val;
+
+ if (mt7531_dual_sgmii_supported(priv))
+ return 0;
+
+ val = mt7530_read(priv, MT7531_CREV);
+ top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ if ((val & CHIP_REV_M) > 0)
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
+ HWTRAP_XTAL_FSEL_25MHZ;
+ else
+ xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+
+ /* Step 1: Disable MT7531 COREPLL */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val &= ~EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 2: switch to XTAL output */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_CLKSW;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Step 3: disable PLLGP and enable program PLLGP */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_PLLGP;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 4: program COREPLL output frequency to 500MHz */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_POSDIV_M;
+ val |= 2 << RG_COREPLL_POSDIV_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ usleep_range(25, 35);
+
+ switch (xtal) {
+ case HWTRAP_XTAL_FSEL_25MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ case HWTRAP_XTAL_FSEL_40MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ }
+
+ /* Set feedback divide ratio update signal to high */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ /* Wait for at least 16 XTAL clocks */
+ usleep_range(10, 20);
+
+ /* Step 5: set feedback divide ratio update signal to low */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Enable 325M clock for SGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
+
+ /* Enable 250SSC clock for RGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
+
+ /* Step 6: Enable MT7531 PLL */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+ usleep_range(25, 35);
+
+ return 0;
+}
+
static void
mt7530_mib_reset(struct dsa_switch *ds)
{
@@ -505,6 +615,217 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int
+mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
+ int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 reg, val;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad);
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u32 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 val, reg;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | data;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 val;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum);
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
+ u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 reg;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum) | data;
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_read(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK);
+ } else {
+ ret = mt7531_ind_c22_phy_read(priv, port, regnum);
+ }
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 data)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_write(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK,
+ data);
+ } else {
+ ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
+ }
+
+ return ret;
+}
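+
+/* Caller reference (standard kernel MDIO encoding, shown here for
+ * illustration): a C45 access is requested by folding the device
+ * address into regnum, e.g.
+ *
+ *	int regnum = MII_ADDR_C45 |
+ *		     (MDIO_MMD_VEND2 << MII_DEVADDR_C45_SHIFT) |
+ *		     CORE_PLL_GROUP4;
+ *	int val = mt7531_ind_phy_read(ds, port, regnum);
+ *
+ * which mt7531_ind_phy_read() decodes back into devad and the 16-bit
+ * register offset.
+ */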
+
static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
@@ -621,9 +942,18 @@ unlock_exit:
}
static int
-mt7530_cpu_port_enable(struct mt7530_priv *priv,
- int port)
+mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+
+ /* Set up the maximum capability of the CPU port first */
+ if (priv->info->cpu_port_config) {
+ ret = priv->info->cpu_port_config(ds, port);
+ if (ret)
+ return ret;
+ }
+
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
@@ -636,7 +966,7 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
/* CPU port gets connected to all user ports of
- * the switch
+ * the switch.
*/
mt7530_write(priv, MT7530_PCR_P(port),
PCR_MATRIX(dsa_user_ports(priv->ds)));
@@ -1130,27 +1460,42 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
+static int mt753x_mirror_port_get(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_mirror_port_set(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress)
{
struct mt7530_priv *priv = ds->priv;
+ int monitor_port;
u32 val;
/* Check for existent entry */
if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
return -EEXIST;
- val = mt7530_read(priv, MT7530_MFC);
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
+ monitor_port = mt753x_mirror_port_get(priv->id, val);
+ if (val & MT753X_MIRROR_EN(priv->id) &&
+ monitor_port != mirror->to_local_port)
return -EEXIST;
- val |= MIRROR_EN;
- val &= ~MIRROR_MASK;
- val |= mirror->to_local_port;
- mt7530_write(priv, MT7530_MFC, val);
+ val |= MT753X_MIRROR_EN(priv->id);
+ val &= ~MT753X_MIRROR_MASK(priv->id);
+ val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
if (ingress) {
@@ -1165,7 +1510,7 @@ static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
+static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct mt7530_priv *priv = ds->priv;
@@ -1182,9 +1527,9 @@ static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
mt7530_write(priv, MT7530_PCR_P(port), val);
if (!priv->mirror_rx && !priv->mirror_tx) {
- val = mt7530_read(priv, MT7530_MFC);
- val &= ~MIRROR_EN;
- mt7530_write(priv, MT7530_MFC, val);
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
+ val &= ~MT753X_MIRROR_EN(priv->id);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
}
}
@@ -1290,9 +1635,11 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- if (dsa_is_cpu_port(ds, i))
- mt7530_cpu_port_enable(priv, i);
- else
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else
mt7530_port_disable(ds, i);
/* Enable consistent egress tag */
@@ -1352,51 +1699,492 @@ mt7530_setup(struct dsa_switch *ds)
return 0;
}
-static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static int
+mt7531_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_dummy_poll p;
+ u32 val, id;
+ int ret, i;
+
+ /* Reset whole chip through gpio pin or memory-mapped registers for
+ * different types of hardware
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+ /* Wait for the switch to become stable */
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+ 20, 1000000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ id = mt7530_read(priv, MT7531_CREV);
+ id >>= CHIP_NAME_SHIFT;
+
+ if (id != MT7531_ID) {
+ dev_err(priv->dev, "chip %x can't be supported\n", id);
+ return -ENODEV;
+ }
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL,
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+ if (mt7531_dual_sgmii_supported(priv)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+
+ /* Let ds->slave_mii_bus access the external PHY. */
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
+ MT7531_EXT_P_MDC_11);
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
+ MT7531_EXT_P_MDIO_12);
+ } else {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+ }
+ dev_dbg(ds->dev, "P5 support %s interface\n",
+ p5_intf_modes(priv->p5_intf_sel));
+
+ mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
+ MT7531_GPIO0_INTERRUPT);
+
+ /* Let phylink decide the interface later. */
+ priv->p5_interface = PHY_INTERFACE_MODE_NA;
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
+ /* Enable the PHY core PLL. Since no phy_device has been created yet
+ * for phy_[read,write]_mmd_indirect() to operate on, use our own
+ * mt7531_ind_c45_phy_[read,write]() helpers to access the PHY
+ * registers directly.
+ */
+ val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4);
+ val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val &= ~MT7531_PHY_PLL_OFF;
+ mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ CORE_PLL_GROUP4, val);
+
+ /* BPDU to CPU port */
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(MT7530_CPU_PORT));
+ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else
+ mt7530_port_disable(ds, i);
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool
+mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
switch (port) {
- case 0: /* Internal phy */
- case 1:
- case 2:
- case 3:
- case 4:
+ case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
- return;
+ return false;
break;
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
if (!phy_interface_mode_is_rgmii(state->interface) &&
state->interface != PHY_INTERFACE_MODE_MII &&
state->interface != PHY_INTERFACE_MODE_GMII)
- return;
+ return false;
+ break;
+ case 6: /* 1st cpu port */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_TRGMII)
+ return false;
+ break;
+ default:
+ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
+ port);
+ return false;
+ }
+
+ return true;
+}
- mt7530_setup_port5(ds, state->interface);
+static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
+{
+ return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
+}
+
+static bool
+mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ return false;
+ break;
+ case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
+ if (mt7531_is_rgmii_port(priv, port))
+ return phy_interface_mode_is_rgmii(state->interface);
+ fallthrough;
+ case 6: /* 1st cpu port supports sgmii/8023z only */
+ if (state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface))
+ return false;
+ break;
+ default:
+ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
+ port);
+ return false;
+ }
+
+ return true;
+}
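+
+/* Summary of the per-port capabilities checked above (derived from
+ * mt7530_phy_mode_supported() and mt7531_phy_mode_supported()):
+ *
+ *	port 0-4: internal PHY, GMII only (both chips)
+ *	port 5:   MT7530: RGMII/MII/GMII; MT7531: RGMII, or SGMII/802.3z
+ *		  when strapped for dual SGMII
+ *	port 6:   MT7530: RGMII/TRGMII; MT7531: SGMII/802.3z only
+ */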
+
+static bool
+mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_mode_supported(ds, port, state);
+}
+
+static int
+mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->pad_setup(ds, state->interface);
+}
+
+static int
+mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Only need to setup port5. */
+ if (port != 5)
+ return 0;
+
+ mt7530_setup_port5(priv->ds, interface);
+
+ return 0;
+}
+
+static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ u32 val;
+
+ if (!mt7531_is_rgmii_port(priv, port)) {
+ dev_err(priv->dev, "RGMII mode is not available for port %d\n",
+ port);
+ return -EINVAL;
+ }
+
+ val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
+ val |= GP_CLK_EN;
+ val &= ~GP_MODE_MASK;
+ val |= GP_MODE(MT7531_GP_MODE_RGMII);
+ val &= ~CLK_SKEW_IN_MASK;
+ val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
+ val &= ~CLK_SKEW_OUT_MASK;
+ val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
+ val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
+
+ /* Do not adjust the rgmii delay when a vendor PHY driver is present. */
+ if (!phydev || phy_driver_is_genphy(phydev)) {
+ val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= TXCLK_NO_REVERSE;
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= TXCLK_NO_REVERSE;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
+
+ return 0;
+}
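+
+/* Delay mapping applied above when the switch itself must provide the
+ * RGMII delays (no PHY driver, or only the generic one):
+ *
+ *	PHY_INTERFACE_MODE_RGMII:      TXCLK_NO_REVERSE | RXCLK_NO_DELAY
+ *				       (no delay added on either path)
+ *	PHY_INTERFACE_MODE_RGMII_RXID: TXCLK_NO_REVERSE (rx delay added)
+ *	PHY_INTERFACE_MODE_RGMII_TXID: RXCLK_NO_DELAY (tx delay added)
+ *	PHY_INTERFACE_MODE_RGMII_ID:   neither bit (delay on both paths)
+ */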
+
+static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
+ unsigned long *supported)
+{
+ /* Port5 supports either RGMII or SGMII.
+ * Port6 supports SGMII only.
+ */
+ switch (port) {
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ break;
+ fallthrough;
+ case 6:
+ phylink_set(supported, 1000baseX_Full);
+ phylink_set(supported, 2500baseX_Full);
+ phylink_set(supported, 2500baseT_Full);
+ }
+}
+
+static void
+mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mt7530_priv *priv = ds->priv;
+ unsigned int val;
+
+ /* Only adjust speed and duplex in SGMII force mode. */
+ if (interface != PHY_INTERFACE_MODE_SGMII ||
+ phylink_autoneg_inband(mode))
+ return;
+
+ /* SGMII force mode setting */
+ val = mt7530_read(priv, MT7531_SGMII_MODE(port));
+ val &= ~MT7531_SGMII_IF_MODE_MASK;
+
+ switch (speed) {
+ case SPEED_10:
+ val |= MT7531_SGMII_FORCE_SPEED_10;
+ break;
+ case SPEED_100:
+ val |= MT7531_SGMII_FORCE_SPEED_100;
+ break;
+ case SPEED_1000:
+ val |= MT7531_SGMII_FORCE_SPEED_1000;
+ break;
+ }
+
+ /* MT7531 SGMII 1G force mode can only work in full duplex mode,
+ * whether or not MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ */
+ if ((speed == SPEED_10 || speed == SPEED_100) &&
+ duplex != DUPLEX_FULL)
+ val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
+
+ mt7530_write(priv, MT7531_SGMII_MODE(port), val);
+}
+
+static bool mt753x_is_mac_port(u32 port)
+{
+ return (port == 5 || port == 6);
+}
+
+static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
+ val &= ~MT7531_RG_TPHY_SPEED_MASK;
+ /* Set up a 2.5 times faster clock for 2.5 Gbps data rates with 8b/10b
+ * encoding: 2.5 Gbit/s * 10 / 8 = 3.125 GBd line rate.
+ */
+ val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
+ MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
+ mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
+
+ mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ /* MT7531 SGMII 1G and 2.5G force mode can only work in full duplex
+ * mode, whether or not MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ */
+ mt7530_rmw(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
+ MT7531_SGMII_FORCE_SPEED_1000);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
+ phy_interface_t interface)
+{
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
+ MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
+
+ mt7530_set(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_REMOTE_FAULT_DIS |
+ MT7531_SGMII_SPEED_DUPLEX_AN);
+
+ mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
+ MT7531_SGMII_TX_CONFIG_MASK, 1);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ /* Only restart AN when AN is enabled */
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ if (val & MT7531_SGMII_AN_ENABLE) {
+ val |= MT7531_SGMII_AN_RESTART;
+ mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
+ }
+}
+
+static int
+mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct phy_device *phydev;
+ struct dsa_port *dp;
+
+ if (!mt753x_is_mac_port(port)) {
+ dev_err(priv->dev, "port %d is not a MAC port\n", port);
+ return -EINVAL;
+ }
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dp = dsa_to_port(ds, port);
+ phydev = dp->slave->phydev;
+ return mt7531_rgmii_setup(priv, port, interface, phydev);
+ case PHY_INTERFACE_MODE_SGMII:
+ return mt7531_sgmii_setup_mode_an(priv, port, interface);
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (phylink_autoneg_inband(mode))
+ return -EINVAL;
+
+ return mt7531_sgmii_setup_mode_force(priv, port, interface);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_config(ds, port, mode, state->interface);
+}
+
+static void
+mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr_cur, mcr_new;
+
+ if (!mt753x_phy_mode_supported(ds, port, state))
+ goto unsupported;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ if (priv->p5_interface == state->interface)
+ break;
+
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
+
+ if (priv->p5_intf_sel != P5_DISABLED)
+ priv->p5_interface = state->interface;
break;
case 6: /* 1st cpu port */
if (priv->p6_interface == state->interface)
break;
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return;
+ mt753x_pad_setup(ds, state);
- /* Setup TX circuit incluing relevant PAD and driving */
- mt7530_pad_clk_setup(ds, state->interface);
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
priv->p6_interface = state->interface;
break;
default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+unsupported:
+ dev_err(ds->dev, "%s: unsupported %s port: %i\n",
+ __func__, phy_modes(state->interface), port);
return;
}
- if (phylink_autoneg_inband(mode)) {
+ if (phylink_autoneg_inband(mode) &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
__func__);
return;
@@ -1406,7 +2194,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE;
+ PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
@@ -1416,7 +2204,18 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void
+mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!priv->info->mac_pcs_an_restart)
+ return;
+
+ priv->info->mac_pcs_an_restart(ds, port);
+}
+
+static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
@@ -1425,7 +2224,19 @@ static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!priv->info->mac_pcs_link_up)
+ return;
+
+ priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+}
+
+static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
@@ -1435,8 +2246,19 @@ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
+ mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+ /* MT753x MAC works in 1G full duplex mode for all up-clocked
+ * variants.
+ */
+ if (interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(interface)) {
+ speed = SPEED_1000;
+ duplex = DUPLEX_FULL;
+ }
+
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_SPEED_1000;
@@ -1456,66 +2278,107 @@ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static int
+mt7531_cpu_port_config(struct dsa_switch *ds, int port)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mt7530_priv *priv = ds->priv;
+ phy_interface_t interface;
+ int speed;
+ int ret;
switch (port) {
- case 0: /* Internal phy */
- case 1:
- case 2:
- case 3:
- case 4:
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ interface = PHY_INTERFACE_MODE_RGMII;
+ else
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ priv->p5_interface = interface;
break;
- case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- goto unsupported;
+ case 6:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ mt7531_pad_setup(ds, interface);
+
+ priv->p6_interface = interface;
break;
default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
-unsupported:
+ return -EINVAL;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else
+ speed = SPEED_1000;
+
+ ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
+ if (ret)
+ return ret;
+ mt7530_write(priv, MT7530_PMCR_P(port),
+ PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
+ speed, DUPLEX_FULL, true, true);
+
+ return 0;
+}
+
+static void
+mt7530_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+{
+ if (port == 5)
+ phylink_set(supported, 1000baseX_Full);
+}
+
+static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7531_sgmii_validate(priv, port, supported);
+}
+
+static void
+mt753x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mt7530_priv *priv = ds->priv;
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !mt753x_phy_mode_supported(ds, port, state)) {
linkmode_zero(supported);
return;
}
phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- if (state->interface == PHY_INTERFACE_MODE_TRGMII) {
- phylink_set(mask, 1000baseT_Full);
- } else {
+ if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
+ !phy_interface_mode_is_8023z(state->interface)) {
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
-
- if (state->interface != PHY_INTERFACE_MODE_MII) {
- /* This switch only supports 1G full-duplex. */
- phylink_set(mask, 1000baseT_Full);
- if (port == 5)
- phylink_set(mask, 1000baseX_Full);
- }
+ phylink_set(mask, Autoneg);
}
+ /* This switch only supports 1G full-duplex. */
+ if (state->interface != PHY_INTERFACE_MODE_MII)
+ phylink_set(mask, 1000baseT_Full);
+
+ priv->info->mac_port_validate(ds, port, mask);
+
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
static int
@@ -1558,12 +2421,96 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
return 1;
}
+static int
+mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ u32 status, val;
+ u16 config_reg;
+
+ status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
+ (status & MT7531_SGMII_AN_ENABLE)) {
+ val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
+ config_reg = val >> 16;
+
+ switch (config_reg & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case LPA_SGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case LPA_SGMII_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(priv->dev, "invalid sgmii PHY speed\n");
+ state->link = false;
+ return -EINVAL;
+ }
+
+ if (config_reg & LPA_SGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int
+mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ return mt7531_sgmii_pcs_get_state_an(priv, port, state);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_get_state(ds, port, state);
+}
+
+static int
+mt753x_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->sw_setup(ds);
+}
+
+static int
+mt753x_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_read(ds, port, regnum);
+}
+
+static int
+mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_write(ds, port, regnum, val);
+}
+
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
- .setup = mt7530_setup,
+ .setup = mt753x_setup,
.get_strings = mt7530_get_strings,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read = mt753x_phy_read,
+ .phy_write = mt753x_phy_write,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
.port_enable = mt7530_port_enable,
@@ -1578,18 +2525,59 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
- .port_mirror_add = mt7530_port_mirror_add,
- .port_mirror_del = mt7530_port_mirror_del,
- .phylink_validate = mt7530_phylink_validate,
- .phylink_mac_link_state = mt7530_phylink_mac_link_state,
- .phylink_mac_config = mt7530_phylink_mac_config,
- .phylink_mac_link_down = mt7530_phylink_mac_link_down,
- .phylink_mac_link_up = mt7530_phylink_mac_link_up,
+ .port_mirror_add = mt753x_port_mirror_add,
+ .port_mirror_del = mt753x_port_mirror_del,
+ .phylink_validate = mt753x_phylink_validate,
+ .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_mac_config = mt753x_phylink_mac_config,
+ .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
+ .phylink_mac_link_down = mt753x_phylink_mac_link_down,
+ .phylink_mac_link_up = mt753x_phylink_mac_link_up,
+};
+
+static const struct mt753x_info mt753x_table[] = {
+ [ID_MT7621] = {
+ .id = ID_MT7621,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .phy_mode_supported = mt7530_phy_mode_supported,
+ .mac_port_validate = mt7530_mac_port_validate,
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7530] = {
+ .id = ID_MT7530,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .phy_mode_supported = mt7530_phy_mode_supported,
+ .mac_port_validate = mt7530_mac_port_validate,
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7531] = {
+ .id = ID_MT7531,
+ .sw_setup = mt7531_setup,
+ .phy_read = mt7531_ind_phy_read,
+ .phy_write = mt7531_ind_phy_write,
+ .pad_setup = mt7531_pad_setup,
+ .cpu_port_config = mt7531_cpu_port_config,
+ .phy_mode_supported = mt7531_phy_mode_supported,
+ .mac_port_validate = mt7531_mac_port_validate,
+ .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_config = mt7531_mac_config,
+ .mac_pcs_an_restart = mt7531_sgmii_restart_an,
+ .mac_pcs_link_up = mt7531_sgmii_link_up_force,
+ },
};
static const struct of_device_id mt7530_of_match[] = {
- { .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, },
- { .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, },
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
@@ -1630,8 +2618,21 @@ mt7530_probe(struct mdio_device *mdiodev)
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup.
*/
- priv->id = (unsigned int)(unsigned long)
- of_device_get_match_data(&mdiodev->dev);
+ priv->info = of_device_get_match_data(&mdiodev->dev);
+ if (!priv->info)
+ return -EINVAL;
+
+ /* Sanity check that the required device operations are all
+ * filled in properly.
+ */
+ if (!priv->info->sw_setup || !priv->info->pad_setup ||
+ !priv->info->phy_read || !priv->info->phy_write ||
+ !priv->info->phy_mode_supported ||
+ !priv->info->mac_port_validate ||
+ !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ return -EINVAL;
+
+ priv->id = priv->info->id;
if (priv->id == ID_MT7530) {
priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 14de60d0b9ca..9278a8e3d04e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -11,9 +11,10 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
-enum {
+enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
+ ID_MT7531 = 2,
};
#define NUM_TRGMII_CTRL 5
@@ -41,6 +42,33 @@ enum {
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
+/* Registers for CPU forward control */
+#define MT7531_CFC 0x4
+#define MT7531_MIRROR_EN BIT(19)
+#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
+#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
+#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
+
+#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
+ MT7531_CFC : MT7530_MFC)
+#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_EN : MIRROR_EN)
+#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_MASK : MIRROR_MASK)
+
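A minimal sketch of how the ID-dispatching mirror macros above are meant to be used, assuming the driver's mt7530_read()/mt7530_write() accessors; the helper name is illustrative:

	static void mt753x_mirror_sketch(struct mt7530_priv *priv, int to_port)
	{
		u32 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));

		/* Select the mirror destination port in the field that
		 * matches the running chip, then enable mirroring.
		 */
		val &= ~MT753X_MIRROR_MASK(priv->id);
		val |= priv->id == ID_MT7531 ? MT7531_MIRROR_PORT_SET(to_port)
					     : MIRROR_PORT(to_port);
		val |= MT753X_MIRROR_EN(priv->id);
		mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
	}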
+/* Registers for BPDU and PAE frame control */
+#define MT753X_BPC 0x24
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_bpdu_port_fw {
+ MT753X_BPDU_FOLLOW_MFC,
+ MT753X_BPDU_CPU_EXCLUDE = 4,
+ MT753X_BPDU_CPU_INCLUDE = 5,
+ MT753X_BPDU_CPU_ONLY = 6,
+ MT753X_BPDU_DROP = 7,
+};
+
/* Registers for address table access */
#define MT7530_ATA1 0x74
#define STATIC_EMP 0
@@ -220,10 +248,30 @@ enum mt7530_vlan_port_attr {
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
PMCR_FORCE_SPEED_1000)
+#define MT7531_FORCE_LNK BIT(31)
+#define MT7531_FORCE_SPD BIT(30)
+#define MT7531_FORCE_DPX BIT(29)
+#define MT7531_FORCE_RX_FC BIT(28)
+#define MT7531_FORCE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
+ MT7531_FORCE_SPD | \
+ MT7531_FORCE_DPX | \
+ MT7531_FORCE_RX_FC | \
+ MT7531_FORCE_TX_FC)
+#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
+ MT7531_FORCE_MODE : \
+ PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
+ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
+ PMCR_TX_EN | PMCR_RX_EN | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
#define PMSR_EEE1G BIT(7)
@@ -237,6 +285,10 @@ enum mt7530_vlan_port_attr {
#define PMSR_DPX BIT(1)
#define PMSR_LINK BIT(0)
+/* Register for port debug count */
+#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
+#define MT7531_DIS_CLR BIT(31)
+
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
#define MT7530_MIB_CCR 0x4fe0
@@ -254,12 +306,118 @@ enum mt7530_vlan_port_attr {
CCR_RX_OCT_CNT_BAD | \
CCR_TX_OCT_CNT_GOOD | \
CCR_TX_OCT_CNT_BAD)
+
+/* MT7531 SGMII register group */
+#define MT7531_SGMII_REG_BASE 0x5000
+#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
+ ((p) - 5) * 0x1000 + (r))
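For reference, the macro places each MAC port's SGMII block in its own 4 KiB window, so the arithmetic resolves as:

	/* MT7531_SGMII_REG(5, 0x00) = 0x5000   (port 5 PCS_CONTROL_1)
	 * MT7531_SGMII_REG(6, 0x20) = 0x6020   (port 6 SGMII_MODE)
	 */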
+
+/* Register for SGMII PCS_CONTROL_1 */
+#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
+#define MT7531_SGMII_LINK_STATUS BIT(18)
+#define MT7531_SGMII_AN_ENABLE BIT(12)
+#define MT7531_SGMII_AN_RESTART BIT(9)
+
+/* Register for SGMII PCS_SPEED_ABILITY */
+#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
+#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
+#define MT7531_SGMII_TX_CONFIG BIT(0)
+
+/* Register for SGMII_MODE */
+#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
+#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
+#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
+#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
+#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
+#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
+#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
+#define MT7531_SGMII_FORCE_SPEED_10 0
+#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
+
+enum mt7531_sgmii_force_duplex {
+ MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
+ MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
+};
+
+/* Fields of QPHY_PWR_STATE_CTRL */
+#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
+#define MT7531_SGMII_PHYA_PWD BIT(4)
+
+/* Values of SGMII SPEED */
+#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
+#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
+#define MT7531_RG_TPHY_SPEED_1_25G 0x0
+#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
+
/* Register for system reset */
#define MT7530_SYS_CTRL 0x7000
#define SYS_CTRL_PHY_RST BIT(2)
#define SYS_CTRL_SW_RST BIT(1)
#define SYS_CTRL_REG_RST BIT(0)
+/* Register for PHY Indirect Access Control */
+#define MT7531_PHY_IAC 0x701C
+#define MT7531_PHY_ACS_ST BIT(31)
+#define MT7531_MDIO_REG_ADDR_MASK (0x1f << 25)
+#define MT7531_MDIO_PHY_ADDR_MASK (0x1f << 20)
+#define MT7531_MDIO_CMD_MASK (0x3 << 18)
+#define MT7531_MDIO_ST_MASK (0x3 << 16)
+#define MT7531_MDIO_RW_DATA_MASK (0xffff)
+#define MT7531_MDIO_REG_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_DEV_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_PHY_ADDR(x) (((x) & 0x1f) << 20)
+#define MT7531_MDIO_CMD(x) (((x) & 0x3) << 18)
+#define MT7531_MDIO_ST(x) (((x) & 0x3) << 16)
+
+enum mt7531_phy_iac_cmd {
+ MT7531_MDIO_ADDR = 0,
+ MT7531_MDIO_WRITE = 1,
+ MT7531_MDIO_READ = 2,
+ MT7531_MDIO_READ_CL45 = 3,
+};
+
+/* MDIO_ST: MDIO start field */
+enum mt7531_mdio_st {
+ MT7531_MDIO_ST_CL45 = 0,
+ MT7531_MDIO_ST_CL22 = 1,
+};
+
+#define MT7531_MDIO_CL22_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL22_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+#define MT7531_MDIO_CL45_ADDR (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_ADDR))
+#define MT7531_MDIO_CL45_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL45_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
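A minimal sketch of how these fields compose into a single MT7531_PHY_IAC command word, here for a Clause 22 register read; the mt7530_mii_read()/mt7530_mii_write() accessors and the polling step are assumptions in line with this driver's style:

	u32 val = MT7531_MDIO_CL22_READ |
		  MT7531_MDIO_PHY_ADDR(phy_addr) |
		  MT7531_MDIO_REG_ADDR(regnum);

	mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
	/* ... poll until MT7531_PHY_ACS_ST clears, then: */
	data = mt7530_mii_read(priv, MT7531_PHY_IAC) & MT7531_MDIO_RW_DATA_MASK;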
+
+/* Register for RGMII clock phase */
+#define MT7531_CLKGEN_CTRL 0x7500
+#define CLK_SKEW_OUT(x) (((x) & 0x3) << 8)
+#define CLK_SKEW_OUT_MASK GENMASK(9, 8)
+#define CLK_SKEW_IN(x) (((x) & 0x3) << 6)
+#define CLK_SKEW_IN_MASK GENMASK(7, 6)
+#define RXCLK_NO_DELAY BIT(5)
+#define TXCLK_NO_REVERSE BIT(4)
+#define GP_MODE(x) (((x) & 0x3) << 1)
+#define GP_MODE_MASK GENMASK(2, 1)
+#define GP_CLK_EN BIT(0)
+
+enum mt7531_gp_mode {
+ MT7531_GP_MODE_RGMII = 0,
+ MT7531_GP_MODE_MII = 1,
+ MT7531_GP_MODE_REV_MII = 2
+};
+
+enum mt7531_clk_skew {
+ MT7531_CLK_SKEW_NO_CHG = 0,
+ MT7531_CLK_SKEW_DLY_100PPS = 1,
+ MT7531_CLK_SKEW_DLY_200PPS = 2,
+ MT7531_CLK_SKEW_REVERSE = 3,
+};
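As an illustrative composition of MT7531_CLKGEN_CTRL, a sketch of the plain-RGMII case, where the PHY rather than the switch inserts any clock delay:

	u32 val = mt7530_read(priv, MT7531_CLKGEN_CTRL);

	val |= GP_CLK_EN | TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
	val &= ~(GP_MODE_MASK | CLK_SKEW_IN_MASK | CLK_SKEW_OUT_MASK);
	val |= GP_MODE(MT7531_GP_MODE_RGMII) |
	       CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG) |
	       CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
	mt7530_write(priv, MT7531_CLKGEN_CTRL, val);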
+
/* Register for hw trap status */
#define MT7530_HWTRAP 0x7800
#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
@@ -267,6 +425,16 @@ enum mt7530_vlan_port_attr {
#define HWTRAP_XTAL_40MHZ (BIT(10))
#define HWTRAP_XTAL_20MHZ (BIT(9))
+#define MT7531_HWTRAP 0x7800
+#define HWTRAP_XTAL_FSEL_MASK BIT(7)
+#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
+#define HWTRAP_XTAL_FSEL_40MHZ 0
+/* Unique fields of (M)HWSTRAP for MT7531 */
+#define XTAL_FSEL_S 7
+#define XTAL_FSEL_M BIT(7)
+#define PHY_EN BIT(6)
+#define CHG_STRAP BIT(8)
+
/* Register for hw trap modification */
#define MT7530_MHWTRAP 0x7804
#define MHWTRAP_PHY0_SEL BIT(20)
@@ -281,14 +449,37 @@ enum mt7530_vlan_port_attr {
#define MT7530_TOP_SIG_CTRL 0x7808
#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
+#define MT7531_TOP_SIG_SR 0x780c
+#define PAD_DUAL_SGMII_EN BIT(1)
+#define PAD_MCM_SMI_EN BIT(0)
+
#define MT7530_IO_DRV_CR 0x7810
#define P5_IO_CLK_DRV(x) ((x) & 0x3)
#define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4)
+#define MT7531_CHIP_REV 0x781C
+
+#define MT7531_PLLGP_EN 0x7820
+#define EN_COREPLL BIT(2)
+#define SW_CLKSW BIT(1)
+#define SW_PLLGP BIT(0)
+
#define MT7530_P6ECR 0x7830
#define P6_INTF_MODE_MASK 0x3
#define P6_INTF_MODE(x) ((x) & 0x3)
+#define MT7531_PLLGP_CR0 0x78a8
+#define RG_COREPLL_EN BIT(22)
+#define RG_COREPLL_POSDIV_S 23
+#define RG_COREPLL_POSDIV_M 0x3800000
+#define RG_COREPLL_SDM_PCW_S 1
+#define RG_COREPLL_SDM_PCW_M 0x3ffffe
+#define RG_COREPLL_SDM_PCW_CHG BIT(0)
+
+/* Registers for RGMII and SGMII PLL clock */
+#define MT7531_ANA_PLLGP_CR2 0x78b0
+#define MT7531_ANA_PLLGP_CR5 0x78bc
+
/* Registers for TRGMII on the both side */
#define MT7530_TRGMII_RCK_CTRL 0x7a00
#define RX_RST BIT(31)
@@ -327,10 +518,25 @@ enum mt7530_vlan_port_attr {
#define MT7530_P5RGMIITXCR 0x7b04
#define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f)
+/* Registers for GPIO mode */
+#define MT7531_GPIO_MODE0 0x7c0c
+#define MT7531_GPIO0_MASK GENMASK(3, 0)
+#define MT7531_GPIO0_INTERRUPT 1
+
+#define MT7531_GPIO_MODE1 0x7c10
+#define MT7531_GPIO11_RG_RXD2_MASK GENMASK(15, 12)
+#define MT7531_EXT_P_MDC_11 (2 << 12)
+#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
+#define MT7531_EXT_P_MDIO_12 (2 << 16)
+
#define MT7530_CREV 0x7ffc
#define CHIP_NAME_SHIFT 16
#define MT7530_ID 0x7530
+#define MT7531_CREV 0x781C
+#define CHIP_REV_M 0x0f
+#define MT7531_ID 0x7531
+
/* Registers for core PLL access through mmd indirect */
#define CORE_PLL_GROUP2 0x401
#define RG_SYSPLL_EN_NORMAL BIT(15)
@@ -347,6 +553,10 @@ enum mt7530_vlan_port_attr {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_PHY_PLL_OFF BIT(5)
+#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
+
+#define MT753X_CTRL_PHY_ADDR 0
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
@@ -425,6 +635,7 @@ enum p5_interface_select {
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
+ P5_INTF_SEL_GMAC5_SGMII,
};
static const char *p5_intf_modes(unsigned int p5_interface)
@@ -438,11 +649,56 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
+ case P5_INTF_SEL_GMAC5_SGMII:
+ return "GMAC5_SGMII";
default:
return "unknown";
}
}
+/* struct mt753x_info - This is the main data structure for holding the
+ * device-specific part of each supported model
+ * @sw_setup: Handler for the device initialization
+ * @phy_read: Handler for reading a PHY port register
+ * @phy_write: Handler for writing a PHY port register
+ * @pad_setup: Handler for setting up the bus pad for a certain
+ * MAC port
+ * @cpu_port_config: Handler for setting up a CPU port
+ * @phy_mode_supported: Check if the PHY type is supported on a certain
+ * port
+ * @mac_port_validate: Handler for adding the additional supported link
+ * modes of a certain MAC port
+ * @mac_port_get_state: Handler for getting the MAC/PCS state of a certain
+ * MAC port
+ * @mac_port_config: Handler for setting up the PHY attributes of a
+ * certain MAC port
+ * @mac_pcs_an_restart: Handler for restarting PCS autonegotiation on a
+ * certain MAC port
+ * @mac_pcs_link_up: Handler for setting up the PHY attributes of the PCS
+ * of a certain MAC port
+ */
+struct mt753x_info {
+ enum mt753x_id id;
+
+ int (*sw_setup)(struct dsa_switch *ds);
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
+ int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
+ int (*cpu_port_config)(struct dsa_switch *ds, int port);
+ bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state);
+ void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ unsigned long *supported);
+ int (*mac_port_get_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ int (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+ void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
+ void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex);
+};
+
/* struct mt7530_priv - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
@@ -468,6 +724,7 @@ struct mt7530_priv {
struct regulator *core_pwr;
struct regulator *io_pwr;
struct gpio_desc *reset;
+ const struct mt753x_info *info;
unsigned int id;
bool mcm;
phy_interface_t p6_interface;
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index aa645ff86f64..4b080b448ce7 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += devlink.o
mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 15b97a4f8d93..9417412e5fce 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -32,6 +32,7 @@
#include <net/dsa.h>
#include "chip.h"
+#include "devlink.h"
#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
@@ -1465,21 +1466,21 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
return chip->info->ops->vtu_loadpurge(chip, entry);
}
-static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_entry vlan;
int i, err;
+ u16 fid;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, fid);
+ err = mv88e6xxx_port_get_fid(chip, i, &fid);
if (err)
return err;
- set_bit(*fid, fid_bitmap);
+ set_bit(fid, fid_bitmap);
}
/* Set every FID bit used by the VLAN entries */
@@ -1497,6 +1498,18 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
set_bit(vlan.fid, fid_bitmap);
} while (vlan.vid < chip->info->max_vid);
+ return 0;
+}
+
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ int err;
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err)
+ return err;
+
/* The reset value 0x000 is used to indicate that multiple address
* databases are not needed. Return the next positive available.
*/
@@ -1508,22 +1521,6 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
-static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
-{
- if (chip->info->ops->atu_get_hash)
- return chip->info->ops->atu_get_hash(chip, hash);
-
- return -EOPNOTSUPP;
-}
-
-static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
-{
- if (chip->info->ops->atu_set_hash)
- return chip->info->ops->atu_set_hash(chip, hash);
-
- return -EOPNOTSUPP;
-}
-
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -2837,248 +2834,11 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
-enum mv88e6xxx_devlink_param_id {
- MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
-};
-
-static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- switch (id) {
- case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
- err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- switch (id) {
- case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
- err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static const struct devlink_param mv88e6xxx_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
- "ATU_hash", DEVLINK_PARAM_TYPE_U8,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
-};
-
-static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
- ARRAY_SIZE(mv88e6xxx_devlink_params));
-}
-
-static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
- ARRAY_SIZE(mv88e6xxx_devlink_params));
-}
-
-enum mv88e6xxx_devlink_resource_id {
- MV88E6XXX_RESOURCE_ID_ATU,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
-};
-
-static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
- u16 bin)
-{
- u16 occupancy = 0;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
- bin);
- if (err) {
- dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
- goto unlock;
- }
-
- err = mv88e6xxx_g1_atu_get_next(chip, 0);
- if (err) {
- dev_err(chip->dev, "failed to perform ATU get next\n");
- goto unlock;
- }
-
- err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
- if (err) {
- dev_err(chip->dev, "failed to get ATU stats\n");
- goto unlock;
- }
-
- occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
-
-unlock:
- mv88e6xxx_reg_unlock(chip);
-
- return occupancy;
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_0);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_1);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_2);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_3);
-}
-
-static u64 mv88e6xxx_devlink_atu_get(void *priv)
-{
- return mv88e6xxx_devlink_atu_bin_0_get(priv) +
- mv88e6xxx_devlink_atu_bin_1_get(priv) +
- mv88e6xxx_devlink_atu_bin_2_get(priv) +
- mv88e6xxx_devlink_atu_bin_3_get(priv);
-}
-
-static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
-{
- struct devlink_resource_size_params size_params;
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- devlink_resource_size_params_init(&size_params,
- mv88e6xxx_num_macs(chip),
- mv88e6xxx_num_macs(chip),
- 1, DEVLINK_RESOURCE_UNIT_ENTRY);
-
- err = dsa_devlink_resource_register(ds, "ATU",
- mv88e6xxx_num_macs(chip),
- MV88E6XXX_RESOURCE_ID_ATU,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
- if (err)
- goto out;
-
- devlink_resource_size_params_init(&size_params,
- mv88e6xxx_num_macs(chip) / 4,
- mv88e6xxx_num_macs(chip) / 4,
- 1, DEVLINK_RESOURCE_UNIT_ENTRY);
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_0",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_1",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_2",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_3",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU,
- mv88e6xxx_devlink_atu_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- mv88e6xxx_devlink_atu_bin_0_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- mv88e6xxx_devlink_atu_bin_1_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- mv88e6xxx_devlink_atu_bin_2_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
- mv88e6xxx_devlink_atu_bin_3_get,
- chip);
-
- return 0;
-
-out:
- dsa_devlink_resources_unregister(ds);
- return err;
-}
-
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
+ mv88e6xxx_teardown_devlink_regions(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3211,7 +2971,18 @@ unlock:
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
- dsa_devlink_resources_unregister(ds);
+ goto out_resources;
+
+ err = mv88e6xxx_setup_devlink_regions(ds);
+ if (err)
+ goto out_params;
+
+ return 0;
+
+out_params:
+ mv88e6xxx_teardown_devlink_params(ds);
+out_resources:
+ dsa_devlink_resources_unregister(ds);
return err;
}
@@ -5607,6 +5378,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_ts_info = mv88e6xxx_get_ts_info,
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
+ .devlink_info_get = mv88e6xxx_devlink_info_get,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 823ae89e5fca..81c244fc0419 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -238,6 +238,19 @@ struct mv88e6xxx_port {
bool mirror_egress;
unsigned int serdes_irq;
char serdes_irq_name[64];
+ struct devlink_region *region;
+};
+
+enum mv88e6xxx_region_id {
+ MV88E6XXX_REGION_GLOBAL1 = 0,
+ MV88E6XXX_REGION_GLOBAL2,
+ MV88E6XXX_REGION_ATU,
+
+ _MV88E6XXX_REGION_MAX,
+};
+
+struct mv88e6xxx_region_priv {
+ enum mv88e6xxx_region_id id;
};
struct mv88e6xxx_chip {
@@ -334,6 +347,9 @@ struct mv88e6xxx_chip {
/* Array of port structures. */
struct mv88e6xxx_port ports[DSA_MAX_PORTS];
+
+ /* devlink regions */
+ struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
};
struct mv88e6xxx_bus_ops {
@@ -689,4 +705,6 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
+
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
new file mode 100644
index 000000000000..81e1560db206
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/dsa.h>
+
+#include "chip.h"
+#include "devlink.h"
+#include "global1.h"
+#include "global2.h"
+#include "port.h"
+
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
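Once registered, the ATU_hash parameter is reachable through the standard devlink interface, e.g. "devlink dev param set <dev> name ATU_hash value 3 cmode runtime"; the device handle depends on how the switch is registered.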
+
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+ occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
+
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
+static int mv88e6xxx_region_global_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_region_priv *region_priv = ops->priv;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ switch (region_priv->id) {
+ case MV88E6XXX_REGION_GLOBAL1:
+ err = mv88e6xxx_g1_read(chip, i, &registers[i]);
+ break;
+ case MV88E6XXX_REGION_GLOBAL2:
+ err = mv88e6xxx_g2_read(chip, i, &registers[i]);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+/* The ATU entry varies between mv88e6xxx chipset generations. Define
+ * a generic format which covers all the current and hopefully future
+ * mv88e6xxx generations
+ */
+
+struct mv88e6xxx_devlink_atu_entry {
+ /* The FID is scattered over multiple registers. */
+ u16 fid;
+ u16 atu_op;
+ u16 atu_data;
+ u16 atu_01;
+ u16 atu_23;
+ u16 atu_45;
+};
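For scale: each entry is six u16 registers (12 bytes), so on a chip with 4096 address databases the "atu" region snapshot below comes to 4096 * 12 = 48 KiB.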
+
+static int mv88e6xxx_region_atu_snapshot_fid(struct mv88e6xxx_chip *chip,
+ int fid,
+ struct mv88e6xxx_devlink_atu_entry *table,
+ int *count)
+{
+ u16 atu_op, atu_data, atu_01, atu_23, atu_45;
+ struct mv88e6xxx_atu_entry addr;
+ int err;
+
+ addr.state = 0;
+ eth_broadcast_addr(addr.mac);
+
+ do {
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
+ if (err)
+ return err;
+
+ if (!addr.state)
+ break;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &atu_op);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &atu_data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01, &atu_01);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC23, &atu_23);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC45, &atu_45);
+ if (err)
+ return err;
+
+ table[*count].fid = fid;
+ table[*count].atu_op = atu_op;
+ table[*count].atu_data = atu_data;
+ table[*count].atu_01 = atu_01;
+ table[*count].atu_23 = atu_23;
+ table[*count].atu_45 = atu_45;
+ (*count)++;
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return 0;
+}
+
+static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_devlink_atu_entry *table;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int fid = -1, count, err;
+
+ table = kcalloc(mv88e6xxx_num_databases(chip),
+ sizeof(struct mv88e6xxx_devlink_atu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ count = 0;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err) {
+ kfree(table);
+ goto out;
+ }
+
+ while (1) {
+ fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
+ if (fid == MV88E6XXX_N_FID)
+ break;
+
+ err = mv88e6xxx_region_atu_snapshot_fid(chip, fid, table,
+ &count);
+ if (err) {
+ kfree(table);
+ goto out;
+ }
+ }
+ *data = (u8 *)table;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL1,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+ .name = "global1",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global1_priv,
+};
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL2,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+ .name = "global2",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global2_priv,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+ .name = "atu",
+ .snapshot = mv88e6xxx_region_atu_snapshot,
+ .destructor = kfree,
+};
+
+struct mv88e6xxx_region {
+ struct devlink_region_ops *ops;
+ u64 size;
+};
+
+static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+ [MV88E6XXX_REGION_GLOBAL1] = {
+ .ops = &mv88e6xxx_region_global1_ops,
+ .size = 32 * sizeof(u16)
+ },
+ [MV88E6XXX_REGION_GLOBAL2] = {
+ .ops = &mv88e6xxx_region_global2_ops,
+ .size = 32 * sizeof(u16)
+ },
+ [MV88E6XXX_REGION_ATU] = {
+ .ops = &mv88e6xxx_region_atu_ops
+ /* calculated at runtime */
+ },
+};
+
+static void
+mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
+ dsa_devlink_region_destroy(chip->regions[i]);
+}
+
+void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mv88e6xxx_teardown_devlink_regions_global(chip);
+}
+
+static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip)
+{
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) {
+ ops = mv88e6xxx_regions[i].ops;
+ size = mv88e6xxx_regions[i].size;
+
+ if (i == MV88E6XXX_REGION_ATU)
+ size = mv88e6xxx_num_databases(chip) *
+ sizeof(struct mv88e6xxx_devlink_atu_entry);
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region))
+ goto out;
+ chip->regions[i] = region;
+ }
+ return 0;
+
+out:
+ for (j = 0; j < i; j++)
+ dsa_devlink_region_destroy(chip->regions[j]);
+
+ return PTR_ERR(region);
+}
+
+int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return mv88e6xxx_setup_devlink_regions_global(ds, chip);
+}
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ err = devlink_info_driver_name_put(req, "mv88e6xxx");
+ if (err)
+ return err;
+
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ chip->info->name);
+}
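With these callbacks wired up, userspace can take and read register snapshots through the devlink tool (devlink region new / devlink region dump) and query the ASIC identity with devlink dev info; the region names match the ops above ("global1", "global2", "atu").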
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
new file mode 100644
index 000000000000..3d72db3dcf95
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* Marvell 88E6xxx Switch devlink support. */
+
+#ifndef _MV88E6XXX_DEVLINK_H
+#define _MV88E6XXX_DEVLINK_H
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds);
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+#endif /* _MV88E6XXX_DEVLINK_H */
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index e19718d4a7d4..c110e82a7973 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -10,11 +10,17 @@ config NET_DSA_MSCC_FELIX
select FSL_ENETC_MDIO
select PCS_LYNX
help
- This driver supports network switches from the Vitesse /
- Microsemi / Microchip Ocelot family of switching cores that are
- connected to their host CPU via Ethernet.
- The following switches are supported:
- - VSC9959 (Felix): embedded as a PCIe function of the NXP LS1028A
- ENETC integrated endpoint.
- - VSC9953 (Seville): embedded as a platform device on the
- NXP T1040 SoC.
+ This driver supports the VSC9959 (Felix) switch, which is embedded as
+ a PCIe function of the NXP LS1028A ENETC RCiEP.
+
+config NET_DSA_MSCC_SEVILLE
+ tristate "Ocelot / Seville Ethernet switch support"
+ depends on NET_DSA
+ depends on NET_VENDOR_MICROSEMI
+ depends on HAS_IOMEM
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT
+ select PCS_LYNX
+ help
+ This driver supports the VSC9953 (Seville) switch, which is embedded
+ as a platform device on the NXP T1040 SoC.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index ec57a5a12330..f6dd131e7491 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
mscc_felix-objs := \
felix.o \
- felix_vsc9959.o \
+ felix_vsc9959.o
+
+mscc_seville-objs := \
+ felix.o \
seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index a1e1d3824110..a56fc50f5be4 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -538,23 +538,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
-static struct ptp_clock_info ocelot_ptp_clock_info = {
- .owner = THIS_MODULE,
- .name = "felix ptp",
- .max_adj = 0x7fffffff,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = OCELOT_PTP_PINS_NUM,
- .n_pins = OCELOT_PTP_PINS_NUM,
- .pps = 0,
- .gettime64 = ocelot_ptp_gettime64,
- .settime64 = ocelot_ptp_settime64,
- .adjtime = ocelot_ptp_adjtime,
- .adjfine = ocelot_ptp_adjfine,
- .verify = ocelot_ptp_verify,
- .enable = ocelot_ptp_enable,
-};
-
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
* us to allocate structures twice (leak memory) and map PCI memory twice
@@ -571,9 +554,12 @@ static int felix_setup(struct dsa_switch *ds)
if (err)
return err;
- ocelot_init(ocelot);
+ err = ocelot_init(ocelot);
+ if (err)
+ return err;
+
if (ocelot->ptp) {
- err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
if (err) {
dev_err(ocelot->dev,
"Timestamp initialization failed\n");
@@ -621,10 +607,13 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
if (felix->info->mdio_bus_free)
felix->info->mdio_bus_free(ocelot);
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_deinit_port(ocelot, port);
ocelot_deinit_timestamp(ocelot);
/* stop workqueue thread */
ocelot_deinit(ocelot);
@@ -680,8 +669,11 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
- if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone))
+ if (ocelot->ptp && (skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP) &&
+ ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
return true;
+ }
return false;
}
@@ -797,31 +789,5 @@ const struct dsa_switch_ops felix_switch_ops = {
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
- .port_setup_tc = felix_port_setup_tc,
+ .port_setup_tc = felix_port_setup_tc,
};
-
-static int __init felix_init(void)
-{
- int err;
-
- err = pci_register_driver(&felix_vsc9959_pci_driver);
- if (err)
- return err;
-
- err = platform_driver_register(&seville_vsc9953_driver);
- if (err)
- return err;
-
- return 0;
-}
-module_init(felix_init);
-
-static void __exit felix_exit(void)
-{
- pci_unregister_driver(&felix_vsc9959_pci_driver);
- platform_driver_unregister(&seville_vsc9953_driver);
-}
-module_exit(felix_exit);
-
-MODULE_DESCRIPTION("Felix Switch driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 9bceb994b7db..cc3ec83a600a 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -20,12 +20,13 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
- int num_tx_queues;
+ int num_tx_queues;
struct vcap_field *vcap_is2_keys;
struct vcap_field *vcap_is2_actions;
const struct vcap_props *vcap;
int switch_pci_bar;
int imdio_pci_bar;
+ const struct ptp_clock_info *ptp_caps;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
void (*phylink_validate)(struct ocelot *ocelot, int port,
@@ -41,8 +42,6 @@ struct felix_info {
};
extern const struct dsa_switch_ops felix_switch_ops;
-extern struct pci_driver felix_vsc9959_pci_driver;
-extern struct platform_driver seville_vsc9953_driver;
/* DSA glue / front-end for struct ocelot */
struct felix {
@@ -55,6 +54,4 @@ struct felix {
resource_size_t imdio_base;
};
-void vsc9959_mdio_bus_free(struct ocelot *ocelot);
-
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 126a53a811f7..3ab6d6847c5b 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -296,15 +296,15 @@ static const u32 vsc9959_sys_regmap[] = {
};
static const u32 vsc9959_ptp_regmap[] = {
- REG(PTP_PIN_CFG, 0x000000),
- REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
- REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
- REG(PTP_PIN_TOD_NSEC, 0x00000c),
- REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
- REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
- REG(PTP_CFG_MISC, 0x0000a0),
- REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
- REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
static const u32 vsc9959_gcb_regmap[] = {
@@ -646,17 +646,17 @@ static struct vcap_field vsc9959_vcap_is2_keys[] = {
[VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1},
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = {119, 1},
- [VCAP_IS2_HK_L4_SPORT] = {120, 16},
- [VCAP_IS2_HK_L4_DPORT] = {136, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {120, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {136, 16},
[VCAP_IS2_HK_L4_RNG] = {152, 8},
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1},
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1},
- [VCAP_IS2_HK_L4_URG] = {162, 1},
- [VCAP_IS2_HK_L4_ACK] = {163, 1},
- [VCAP_IS2_HK_L4_PSH] = {164, 1},
- [VCAP_IS2_HK_L4_RST] = {165, 1},
- [VCAP_IS2_HK_L4_SYN] = {166, 1},
- [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_FIN] = {162, 1},
+ [VCAP_IS2_HK_L4_SYN] = {163, 1},
+ [VCAP_IS2_HK_L4_RST] = {164, 1},
+ [VCAP_IS2_HK_L4_PSH] = {165, 1},
+ [VCAP_IS2_HK_L4_ACK] = {166, 1},
+ [VCAP_IS2_HK_L4_URG] = {167, 1},
[VCAP_IS2_HK_L4_1588_DOM] = {168, 8},
[VCAP_IS2_HK_L4_1588_VER] = {176, 4},
/* IP4_OTHER (TYPE=101) */
@@ -719,6 +719,23 @@ static const struct vcap_props vsc9959_vcap_props[] = {
},
};
+static const struct ptp_clock_info vsc9959_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "felix ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
@@ -727,7 +744,7 @@ static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
{
int val;
- regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val);
+ ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
return val;
}
@@ -737,12 +754,15 @@ static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
return ocelot_read(ocelot, SYS_RAM_INIT);
}
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * RAM_INIT is in SYS:RAM_CTRL:RAM_INIT
+ */
static int vsc9959_reset(struct ocelot *ocelot)
{
int val, err;
/* soft-reset the switch core */
- regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1);
+ ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
@@ -762,7 +782,7 @@ static int vsc9959_reset(struct ocelot *ocelot)
}
/* enable switch core */
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
}
@@ -933,7 +953,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return 0;
}
-void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
int port;
@@ -1166,12 +1186,13 @@ static const struct felix_info felix_info_vsc9959 = {
.num_tx_queues = FELIX_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
+ .ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
.phylink_validate = vsc9959_phylink_validate,
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
- .port_setup_tc = vsc9959_port_setup_tc,
- .port_sched_speed_set = vsc9959_sched_speed_set,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
.xmit_template_populate = vsc9959_xmit_template_populate,
};
@@ -1307,9 +1328,13 @@ static struct pci_device_id felix_ids[] = {
};
MODULE_DEVICE_TABLE(pci, felix_ids);
-struct pci_driver felix_vsc9959_pci_driver = {
+static struct pci_driver felix_vsc9959_pci_driver = {
.name = "mscc_felix",
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
};
+module_pci_driver(felix_vsc9959_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
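
With the two drivers now registered independently, the hand-rolled felix_init()/felix_exit() pair becomes dead weight; it also leaked a registration, since a failing platform_driver_register() left the already-registered PCI driver in place. As a sketch (assuming the standard helper in include/linux/pci.h), module_pci_driver() expands to boilerplate roughly equivalent to:

#include <linux/module.h>
#include <linux/pci.h>

/* Approximate expansion of module_pci_driver(felix_vsc9959_pci_driver);
 * the macro wraps module_driver(), which emits init/exit stubs like these.
 */
static int __init felix_vsc9959_pci_driver_init(void)
{
	return pci_register_driver(&felix_vsc9959_pci_driver);
}
module_init(felix_vsc9959_pci_driver_init);

static void __exit felix_vsc9959_pci_driver_exit(void)
{
	pci_unregister_driver(&felix_vsc9959_pci_driver);
}
module_exit(felix_vsc9959_pci_driver_exit);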
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 2d6a5f5758f8..b0ff90c0ae16 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -16,23 +16,12 @@
#define VSC9953_VCAP_IS2_ENTRY_WIDTH 376
#define VSC9953_VCAP_PORT_CNT 10
-#define MSCC_MIIM_REG_STATUS 0x0
-#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3)
-#define MSCC_MIIM_REG_CMD 0x8
-#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
-#define MSCC_MIIM_CMD_OPR_READ BIT(2)
-#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
-#define MSCC_MIIM_CMD_REGAD_SHIFT 20
-#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
-#define MSCC_MIIM_CMD_VLD BIT(31)
-#define MSCC_MIIM_REG_DATA 0xC
-#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
-
-#define MSCC_PHY_REG_PHY_CFG 0x0
-#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-#define PHY_CFG_PHY_COMMON_RESET BIT(4)
-#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
-#define MSCC_PHY_REG_PHY_STATUS 0x4
+#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
+#define MSCC_MIIM_CMD_OPR_READ BIT(2)
+#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
+#define MSCC_MIIM_CMD_REGAD_SHIFT 20
+#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
+#define MSCC_MIIM_CMD_VLD BIT(31)
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -660,17 +649,17 @@ static struct vcap_field vsc9953_vcap_is2_keys[] = {
[VCAP_IS2_HK_DIP_EQ_SIP] = {122, 1},
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = {123, 1},
- [VCAP_IS2_HK_L4_SPORT] = {124, 16},
- [VCAP_IS2_HK_L4_DPORT] = {140, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {124, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {140, 16},
[VCAP_IS2_HK_L4_RNG] = {156, 8},
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {164, 1},
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {165, 1},
- [VCAP_IS2_HK_L4_URG] = {166, 1},
- [VCAP_IS2_HK_L4_ACK] = {167, 1},
- [VCAP_IS2_HK_L4_PSH] = {168, 1},
- [VCAP_IS2_HK_L4_RST] = {169, 1},
- [VCAP_IS2_HK_L4_SYN] = {170, 1},
- [VCAP_IS2_HK_L4_FIN] = {171, 1},
+ [VCAP_IS2_HK_L4_FIN] = {166, 1},
+ [VCAP_IS2_HK_L4_SYN] = {167, 1},
+ [VCAP_IS2_HK_L4_RST] = {168, 1},
+ [VCAP_IS2_HK_L4_PSH] = {169, 1},
+ [VCAP_IS2_HK_L4_ACK] = {170, 1},
+ [VCAP_IS2_HK_L4_URG] = {171, 1},
/* IP4_OTHER (TYPE=101) */
[VCAP_IS2_HK_IP4_L3_PROTO] = {123, 8},
[VCAP_IS2_HK_L3_PAYLOAD] = {131, 56},
@@ -820,6 +809,10 @@ out:
return err;
}
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * MEM_INIT is in SYS:SYSTEM:RESET_CFG
+ * MEM_ENA is in SYS:SYSTEM:RESET_CFG
+ */
static int vsc9953_reset(struct ocelot *ocelot)
{
int val, err;
@@ -835,8 +828,8 @@ static int vsc9953_reset(struct ocelot *ocelot)
}
/* initialize switch mem ~40us */
- ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
VSC9953_SYS_RAMINIT_SLEEP,
@@ -847,7 +840,6 @@ static int vsc9953_reset(struct ocelot *ocelot)
}
/* enable switch core */
- ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
@@ -989,6 +981,23 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
return 0;
}
+static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct lynx_pcs *pcs = felix->pcs[port];
+
+ if (!pcs)
+ continue;
+
+ mdio_device_free(pcs->mdio);
+ lynx_pcs_destroy(pcs);
+ }
+ mdiobus_unregister(felix->imdio);
+}
+
static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -1018,11 +1027,11 @@ static const struct felix_info seville_info_vsc9953 = {
.vcap_is2_keys = vsc9953_vcap_is2_keys,
.vcap_is2_actions = vsc9953_vcap_is2_actions,
.vcap = vsc9953_vcap_props,
- .shared_queue_sz = 128 * 1024,
+ .shared_queue_sz = 2048 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
- .mdio_bus_free = vsc9959_mdio_bus_free,
+ .mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
.xmit_template_populate = vsc9953_xmit_template_populate,
@@ -1101,7 +1110,7 @@ static const struct of_device_id seville_of_match[] = {
};
MODULE_DEVICE_TABLE(of, seville_of_match);
-struct platform_driver seville_vsc9953_driver = {
+static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
.driver = {
@@ -1109,3 +1118,7 @@ struct platform_driver seville_vsc9953_driver = {
.of_match_table = of_match_ptr(seville_of_match),
},
};
+module_platform_driver(seville_vsc9953_driver);
+
+MODULE_DESCRIPTION("Seville Switch driver");
+MODULE_LICENSE("GPL v2");
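
The reordering in vsc9953_reset() above is the functional core of that hunk: the switch memories must be enabled (MEM_ENA) before initialization is requested (MEM_INIT), and MEM_ENA must not be written again once RAM init has completed. Condensed from the patch into one place (the timeout constant name is assumed by analogy with the VSC9959 code):

static int vsc9953_reset_sketch(struct ocelot *ocelot)
{
	int val, err;

	/* enable the memories before asking the hardware to initialize them */
	ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
	ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);

	/* MEM_INIT self-clears when the ~40us initialization finishes */
	err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val,
				 !val, VSC9953_SYS_RAMINIT_SLEEP,
				 VSC9953_INIT_TIMEOUT);
	if (err)
		return err;

	/* only then bring the switch core up */
	ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
	return 0;
}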
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 2dcde7a91721..c58ca324a4b2 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -436,6 +436,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
"failed to set up VLAN %04x",
vid);
+ if (!pvid)
+ continue;
+
ret = rtl8366_set_pvid(smi, port, vid);
if (ret)
dev_err(smi->dev,
@@ -471,13 +474,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
return ret;
if (vid == vlanmc.vid) {
- /* clear VLAN member configurations */
- vlanmc.vid = 0;
- vlanmc.priority = 0;
- vlanmc.member = 0;
- vlanmc.untag = 0;
- vlanmc.fid = 0;
-
+ /* Remove this port from the VLAN */
+ vlanmc.member &= ~BIT(port);
+ vlanmc.untag &= ~BIT(port);
+ /*
+ * If no ports are members of this VLAN
+ * anymore then clear the whole member
+ * config so it can be reused.
+ */
+ if (!vlanmc.member && !vlanmc.untag) {
+ vlanmc.vid = 0;
+ vlanmc.priority = 0;
+ vlanmc.fid = 0;
+ }
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
if (ret) {
dev_err(smi->dev,
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index ddc24f5e4123..a7ceffc2a70b 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -1318,7 +1318,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan >= max)
+ if (vlan == 0 || vlan > max)
return false;
return true;
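
The one-character change above fixes an off-by-one: max already holds the highest valid VID, so the old "vlan >= max" rejected it. A standalone illustration of the boundary (the 4096-entry VID space is assumed to mirror RTL8366RB_NUM_VIDS):

#include <stdbool.h>

#define NUM_VIDS 4096 /* assumed stand-in for RTL8366RB_NUM_VIDS */

static bool is_vlan_valid_sketch(unsigned int vlan)
{
	unsigned int max = NUM_VIDS - 1; /* 4095, the last usable VID */

	/* "vlan >= max" would wrongly reject vlan == 4095 */
	return vlan != 0 && vlan <= max;
}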
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 967430e8ceb8..4a298729937b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3038,7 +3038,11 @@ static int sja1105_setup(struct dsa_switch *ds)
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
*/
- return sja1105_setup_8021q_tagging(ds, true);
+ rtnl_lock();
+ rc = sja1105_setup_8021q_tagging(ds, true);
+ rtnl_unlock();
+
+ return rc;
}
static void sja1105_teardown(struct dsa_switch *ds)
@@ -3532,6 +3536,7 @@ static int sja1105_probe(struct spi_device *spi)
return -ENOMEM;
priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
+ priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
priv->dsa_8021q_ctx->ds = ds;
INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index b3b8a8010142..862ea44beea7 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -640,13 +640,11 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct emac_board_info *db = netdev_priv(dev);
int int_status;
- unsigned long flags;
unsigned int reg_val;
/* A real interrupt coming */
- /* holders of db->lock must always block IRQs */
- spin_lock_irqsave(&db->lock, flags);
+ spin_lock(&db->lock);
/* Disable all interrupts */
writel(0, db->membase + EMAC_INT_CTL_REG);
@@ -680,7 +678,7 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
reg_val |= (0xf << 0) | (0x01 << 8);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
- spin_unlock_irqrestore(&db->lock, flags);
+ spin_unlock(&db->lock);
return IRQ_HANDLED;
}
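
Dropping the _irqsave variant is safe here because emac_interrupt() runs in hard-IRQ context, where local interrupts are already disabled; saving and restoring flags was pure overhead. The convention it relies on, sketched with hypothetical function names:

/* Process-context holders of db->lock must still mask IRQs so the
 * handler cannot deadlock against them on the same CPU.
 */
static void emac_process_context_sketch(struct emac_board_info *db)
{
	unsigned long flags;

	spin_lock_irqsave(&db->lock, flags);
	/* ... touch state shared with emac_interrupt() ... */
	spin_unlock_irqrestore(&db->lock, flags);
}

/* In the handler itself a plain spin_lock() suffices. */
static irqreturn_t emac_irq_context_sketch(int irq, void *dev_id)
{
	struct emac_board_info *db = netdev_priv(dev_id);

	spin_lock(&db->lock);
	/* ... */
	spin_unlock(&db->lock);
	return IRQ_HANDLED;
}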
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8470c836fa18..1a7e4df9b3e9 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -465,6 +465,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
ap = netdev_priv(dev);
+ ap->ndev = dev;
ap->pdev = pdev;
ap->name = pci_name(pdev);
@@ -1562,10 +1563,10 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(unsigned long arg)
+static void ace_tasklet(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *) arg;
- struct ace_private *ap = netdev_priv(dev);
+ struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct net_device *dev = ap->ndev;
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -2269,7 +2270,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
+ tasklet_setup(&ap->ace_tasklet, ace_tasklet);
return 0;
}
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index c670067b1541..265fa601a258 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -633,6 +633,7 @@ struct ace_skb
*/
struct ace_private
{
+ struct net_device *ndev; /* backpointer */
struct ace_info *info;
struct ace_regs __iomem *regs; /* register base */
struct ace_skb *skb;
@@ -776,7 +777,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(unsigned long dev);
+static void ace_tasklet(struct tasklet_struct *t);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
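
This pair of hunks follows the tasklet API migration: the callback now receives the tasklet_struct itself, and from_tasklet(), a container_of() wrapper, recovers the enclosing private structure, which is why ace_private gains the ndev backpointer. The generic shape, sketched with hypothetical names:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device *ndev;	/* backpointer, like ace_private::ndev */
	struct tasklet_struct tl;
};

static void foo_tasklet(struct tasklet_struct *t)
{
	/* container_of(t, struct foo_priv, tl) under the hood */
	struct foo_priv *priv = from_tasklet(priv, t, tl);
	struct net_device *dev = priv->ndev;

	/* ... bottom-half work using dev ... */
}

static void foo_open(struct foo_priv *priv)
{
	tasklet_setup(&priv->tl, foo_tasklet);	/* no more (unsigned long) arg */
}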
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 86869baa7b8e..4164eacc5c28 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
+#define ENA_ADMIN_RSS_KEY_PARTS 10
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
@@ -55,6 +29,7 @@ enum ena_admin_aq_completion_status {
ENA_ADMIN_RESOURCE_BUSY = 7,
};
+/* subcommands for the set/get feature admin commands */
enum ena_admin_aq_feature_id {
ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
ENA_ADMIN_MAX_QUEUES_NUM = 2,
@@ -63,7 +38,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_EXT = 7,
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG = 12,
ENA_ADMIN_MTU = 14,
ENA_ADMIN_RSS_HASH_INPUT = 18,
ENA_ADMIN_INTERRUPT_MODERATION = 20,
@@ -195,7 +170,7 @@ struct ena_admin_acq_common_desc {
u16 extended_status;
/* indicates to the driver which AQ entry has been consumed by the
- * device and could be reused
+ * device and could be reused
*/
u16 sq_head_indx;
};
@@ -240,8 +215,8 @@ struct ena_admin_aq_create_sq_cmd {
*/
u8 sq_caps_3;
- /* associated completion queue id. This CQ must be created prior to
- * SQ creation
+ /* associated completion queue id. This CQ must be created prior to SQ
+ * creation
*/
u16 cq_idx;
@@ -380,7 +355,7 @@ struct ena_admin_aq_get_stats_cmd {
u16 queue_idx;
/* device id, value 0xFFFF means mine. only privileged device can get
- * stats of other device
+ * stats of other device
*/
u16 device_id;
};
@@ -475,7 +450,9 @@ struct ena_admin_device_attr_feature_desc {
u32 device_version;
- /* bitmap of ena_admin_aq_feature_id */
+ /* bitmap of ena_admin_aq_feature_id, which represents supported
+ * subcommands for the set/get feature admin commands.
+ */
u32 supported_features;
u32 reserved3;
@@ -561,32 +538,30 @@ struct ena_admin_feature_llq_desc {
u32 max_llq_depth;
- /* specify the header locations the device supports. bitfield of
- * enum ena_admin_llq_header_location.
+ /* specify the header locations the device supports. bitfield of enum
+ * ena_admin_llq_header_location.
*/
u16 header_location_ctrl_supported;
/* the header location the driver selected to use. */
u16 header_location_ctrl_enabled;
- /* if inline header is specified - this is the size of descriptor
- * list entry. If header in a separate ring is specified - this is
- * the size of header ring entry. bitfield of enum
- * ena_admin_llq_ring_entry_size. specify the entry sizes the device
- * supports
+ /* if inline header is specified - this is the size of descriptor list
+ * entry. If header in a separate ring is specified - this is the size
+ * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size.
+ * specify the entry sizes the device supports
*/
u16 entry_size_ctrl_supported;
/* the entry size the driver selected to use. */
u16 entry_size_ctrl_enabled;
- /* valid only if inline header is specified. First entry associated
- * with the packet includes descriptors and header. Rest of the
- * entries occupied by descriptors. This parameter defines the max
- * number of descriptors precedding the header in the first entry.
- * The field is bitfield of enum
- * ena_admin_llq_num_descs_before_header and specify the values the
- * device supports
+ /* valid only if inline header is specified. First entry associated with
+ * the packet includes descriptors and header. Rest of the entries
+ * occupied by descriptors. This parameter defines the max number of
+ * descriptors preceding the header in the first entry. The field is a
+ * bitfield of enum ena_admin_llq_num_descs_before_header and specifies
+ * the values the device supports
*/
u16 desc_num_before_header_supported;
@@ -594,7 +569,7 @@ struct ena_admin_feature_llq_desc {
u16 desc_num_before_header_enabled;
/* valid only if inline was chosen. bitfield of enum
- * ena_admin_llq_stride_ctrl
+ * ena_admin_llq_stride_ctrl
*/
u16 descriptors_stride_ctrl_supported;
@@ -629,8 +604,8 @@ struct ena_admin_queue_ext_feature_fields {
u32 max_tx_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
u16 max_per_packet_tx_descs;
@@ -653,8 +628,8 @@ struct ena_admin_queue_feature_desc {
u32 max_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
u16 max_packet_tx_descs;
@@ -742,11 +717,11 @@ enum ena_admin_hash_functions {
};
struct ena_admin_feature_rss_flow_hash_control {
- u32 keys_num;
+ u32 key_parts;
u32 reserved;
- u32 key[10];
+ u32 key[ENA_ADMIN_RSS_KEY_PARTS];
};
struct ena_admin_feature_rss_flow_hash_function {
@@ -1042,7 +1017,7 @@ struct ena_admin_set_feat_resp {
struct ena_admin_aenq_common_desc {
u16 group;
- u16 syndrom;
+ u16 syndrome;
/* 0 : phase
* 7:1 : reserved - MBZ
@@ -1066,7 +1041,7 @@ enum ena_admin_aenq_group {
ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-enum ena_admin_aenq_notification_syndrom {
+enum ena_admin_aenq_notification_syndrome {
ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 452e66b39a17..34434c6b6000 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_com.h"
@@ -98,7 +71,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- pr_err("dma address has more bits that the device supports\n");
+ pr_err("DMA address has more bits than the device supports\n");
return -EINVAL;
}
@@ -108,16 +81,16 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_sq *sq = &queue->sq;
- u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+ &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -130,16 +103,16 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_cq *cq = &queue->cq;
- u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+ &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -149,20 +122,20 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
struct ena_aenq_handlers *aenq_handlers)
{
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u32 addr_low, addr_high, aenq_caps;
u16 size;
- dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+ &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -172,18 +145,18 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
- writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
- aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
<< ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+ writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- pr_err("aenq handlers pointer is NULL\n");
+ pr_err("AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -199,31 +172,31 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
atomic_dec(&queue->outstanding_cmds);
}
-static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
u16 command_id, bool capture)
{
- if (unlikely(command_id >= queue->q_depth)) {
- pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, queue->q_depth);
+ if (unlikely(command_id >= admin_queue->q_depth)) {
+ pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, admin_queue->q_depth);
return NULL;
}
- if (unlikely(!queue->comp_ctx)) {
+ if (unlikely(!admin_queue->comp_ctx)) {
pr_err("Completion context is NULL\n");
return NULL;
}
- if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
pr_err("Completion context is occupied\n");
return NULL;
}
if (capture) {
- atomic_inc(&queue->outstanding_cmds);
- queue->comp_ctx[command_id].occupied = true;
+ atomic_inc(&admin_queue->outstanding_cmds);
+ admin_queue->comp_ctx[command_id].occupied = true;
}
- return &queue->comp_ctx[command_id];
+ return &admin_queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
@@ -244,7 +217,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("admin queue is full.\n");
+ pr_debug("Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -284,20 +257,21 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
return comp_ctx;
}
-static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
- size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
- queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
- if (unlikely(!queue->comp_ctx)) {
- pr_err("memory allocation failed\n");
+ admin_queue->comp_ctx =
+ devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!admin_queue->comp_ctx)) {
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
- for (i = 0; i < queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(queue, i, false);
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
if (comp_ctx)
init_completion(&comp_ctx->wait_event);
}
@@ -363,7 +337,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -389,7 +363,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("bounce buffer memory allocation failed\n");
+ pr_err("Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -449,7 +423,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -525,7 +499,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
static int ena_com_comp_status_to_errno(u8 comp_status)
{
if (unlikely(comp_status != 0))
- pr_err("admin command failed[%u]\n", comp_status);
+ pr_err("Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -539,6 +513,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_ILLEGAL_PARAMETER:
case ENA_ADMIN_UNKNOWN_ERROR:
return -EINVAL;
+ case ENA_ADMIN_RESOURCE_BUSY:
+ return -EAGAIN;
}
return -EINVAL;
@@ -717,7 +693,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
+ pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -858,7 +834,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
- pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
@@ -925,7 +901,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("failed to destroy io sq error: %d\n", ret);
+ pr_err("Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -1034,7 +1010,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -1081,11 +1057,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
(ena_dev->rss).hash_key;
netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
- /* The key is stored in the device in u32 array
- * as well as the API requires the key to be passed in this
- * format. Thus the size of our array should be divided by 4
+ /* The key buffer is stored in the device in an array of
+ * uint32 elements.
*/
- hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+ hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1149,13 +1124,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
int ret;
ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
if (unlikely(ret))
return ret;
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
- pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1 << log_size, 1 << get_resp.u.ind_table.min_size,
1 << get_resp.u.ind_table.max_size);
return -EINVAL;
@@ -1248,7 +1223,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
}
@@ -1277,7 +1252,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
- pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1390,7 +1365,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -1419,7 +1394,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1612,12 +1587,12 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
return -ETIME;
}
- pr_info("ena device version: %d.%d\n",
+ pr_info("ENA device version: %d.%d\n",
(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- pr_info("ena controller version: %d.%d.%d implementation version %d\n",
+ pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1640,6 +1615,19 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
return 0;
}
+static void
+ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (!admin_queue->comp_ctx)
+ return;
+
+ devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
+
+ admin_queue->comp_ctx = NULL;
+}
+
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -1648,9 +1636,8 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
struct ena_com_aenq *aenq = &ena_dev->aenq;
u16 size;
- if (admin_queue->comp_ctx)
- devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
- admin_queue->comp_ctx = NULL;
+ ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
+
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
dma_free_coherent(ena_dev->dmadev, size, sq->entries,
@@ -1928,6 +1915,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
sizeof(get_resp.u.dev_attr));
+
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -2006,10 +1994,10 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
/* ena_handle_specific_aenq_event:
* return the handler that is relevant to the specific event group
*/
-static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
u16 group)
{
- struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+ struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
return aenq_handlers->handlers[group];
@@ -2021,11 +2009,11 @@ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
* handles the aenq incoming events.
* pop events from the queue and apply the specific handler
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
@@ -2045,12 +2033,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
dma_rmb();
timestamp = (u64)aenq_common->timestamp_low |
- ((u64)aenq_common->timestamp_high << 32);
- pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom, timestamp);
+ ((u64)aenq_common->timestamp_high << 32);
+
+ pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
- handler_cb = ena_com_get_specific_aenq_cb(dev,
+ handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
aenq_common->group);
handler_cb(data, aenq_e); /* call the actual event handler*/
@@ -2075,7 +2064,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head,
+ ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2288,7 +2278,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2346,7 +2336,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
}
memcpy(hash_key->key, key, key_len);
rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
break;
case ENA_ADMIN_CRC32:
@@ -2401,7 +2391,8 @@ int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
ena_dev->rss.hash_key;
if (key)
- memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+ memcpy(key, hash_key->key,
+ (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
return 0;
}
@@ -2457,7 +2448,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2518,7 +2509,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
- pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ pr_err("Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
return -EOPNOTSUPP;
@@ -2556,7 +2547,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
- pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
proto, hash_fields, supported_fields);
}
@@ -2596,9 +2587,9 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
int ret;
if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
}
@@ -2613,7 +2604,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
cmd.u.ind_table.size = rss->tbl_log_size;
cmd.u.ind_table.inline_index = 0xFFFFFFFF;
@@ -2621,7 +2612,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2651,7 +2642,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
sizeof(struct ena_admin_rss_ind_table_entry);
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
rss->rss_ind_tbl_dma_addr,
tbl_size, 0);
if (unlikely(rc))
@@ -2734,8 +2725,7 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
host_attr->debug_area_virt_addr =
dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr,
- GFP_KERNEL);
+ &host_attr->debug_area_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->debug_area_virt_addr)) {
host_attr->debug_area_size = 0;
return -ENOMEM;
@@ -2792,7 +2782,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2800,7 +2790,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2919,7 +2909,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- pr_err("the size of the LLQ entry is smaller than needed\n");
+ pr_err("The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
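
Two renames in this file travel together: the RSS key is carried as an array of u32 "parts", so keys_num becomes key_parts and the magic 10 becomes ENA_ADMIN_RSS_KEY_PARTS. A standalone sketch of the sizing arithmetic (userspace, hypothetical struct name):

#include <stdint.h>
#include <stdio.h>

#define ENA_ADMIN_RSS_KEY_PARTS 10

struct rss_hash_control_sketch {
	uint32_t key_parts;
	uint32_t key[ENA_ADMIN_RSS_KEY_PARTS];
};

int main(void)
{
	struct rss_hash_control_sketch hk;
	size_t key_len = sizeof(hk.key);	/* 40 bytes */

	/* mirrors: hash_key->key_parts = key_len / sizeof(hash_key->key[0]) */
	hk.key_parts = key_len / sizeof(hk.key[0]);
	printf("%u parts carry a %zu-byte key\n",
	       (unsigned int)hk.key_parts, key_len);
	return 0;
}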
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index e4aafeda0cae..55097750d062 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_COM
@@ -536,7 +509,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
* This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
* @ena_dev: ENA communication layer struct
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index 8a8ded0de9ac..e210c8a81fc0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ccd440589565..ad30cacc1622 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_eth_com.h"
@@ -45,8 +18,9 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ desc_phase = (READ_ONCE(cdesc->status) &
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
return NULL;
@@ -89,7 +63,7 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
}
io_sq->entries_in_tx_burst_left--;
- pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
io_sq->qid, io_sq->entries_in_tx_burst_left);
}
@@ -128,12 +102,12 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
if (unlikely((header_offset + header_len) >
llq_info->desc_list_entry_size)) {
- pr_err("trying to write header larger than llq entry can accommodate\n");
+ pr_err("Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- pr_err("bounce buffer is NULL\n");
+ pr_err("Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -151,7 +125,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- pr_err("bounce buffer is NULL\n");
+ pr_err("Bounce buffer is NULL\n");
return NULL;
}
@@ -262,8 +236,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (READ_ONCE(cdesc->status) &
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
@@ -275,7 +250,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
- pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
@@ -291,6 +266,9 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
meta_desc = get_sq_desc(io_sq);
+ if (unlikely(!meta_desc))
+ return -EFAULT;
+
memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
@@ -298,7 +276,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
/* bits 0-9 of the mss */
- meta_desc->word2 |= (ena_meta->mss <<
+ meta_desc->word2 |= ((u32)ena_meta->mss <<
ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
@@ -308,7 +286,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
- meta_desc->len_ctrl |= (io_sq->phase <<
+ meta_desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
@@ -321,7 +299,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
- meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
@@ -358,7 +336,7 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
}
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
- struct ena_eth_io_rx_cdesc_base *cdesc)
+ struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
@@ -379,7 +357,7 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
- pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
@@ -412,7 +390,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
- pr_err("header size is too large %d max header: %d\n",
+ pr_err("Header size is too large %d max header: %d\n",
header_len, io_sq->tx_max_header_size);
return -EINVAL;
}
@@ -427,7 +405,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
- pr_err("failed to create and store tx meta desc\n");
+ pr_err("Failed to create and store tx meta desc\n");
return rc;
}
@@ -447,16 +425,16 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (!have_meta)
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
- desc->buff_addr_hi_hdr_sz |= (header_len <<
+ desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
- desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
/* Bits 0-9 */
- desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
@@ -502,7 +480,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
- desc->len_ctrl |= (io_sq->phase <<
+ desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
@@ -550,7 +528,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
- pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
@@ -606,9 +584,9 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->length = ena_buf->len;
desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
- ENA_ETH_IO_RX_DESC_LAST_MASK |
- (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
- ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ ENA_ETH_IO_RX_DESC_LAST_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
desc->req_id = req_id;
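
The recurring (u32) casts in this file guard against C integer promotion: a u16 operand of << is promoted to signed int before shifting, so a shift that reaches bit 31 overflows a signed value, which is undefined behaviour. A minimal userspace demonstration of the safe form:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t phase = 1;

	/* Without the cast, "phase << 31" shifts a promoted signed int into
	 * its sign bit; the cast keeps the whole expression unsigned.
	 */
	uint32_t mask = (uint32_t)phase << 31;

	printf("0x%08" PRIx32 "\n", mask);	/* 0x80000000 */
	return 0;
}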
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index b6592cb93b04..2c16c218818a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_ETH_COM_H_
@@ -167,7 +140,7 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
- pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+ pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
@@ -178,13 +151,13 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
- pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
+ pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
- pr_debug("reset available entries in tx burst for queue %d to %d\n",
+ pr_debug("Reset available entries in tx burst for queue %d to %d\n",
io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index d105c9c56192..332ac0d28ac7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 1e6457dd662a..3b2cd28f962d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/pci.h>
@@ -966,7 +939,7 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
- "failed to alloc strings_buf\n");
+ "Failed to allocate strings_buf\n");
return;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index b52e8d0c7951..e8131dadc22c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -139,7 +112,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
if (!ret) {
- netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
+ netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
dev->mtu = new_mtu;
} else {
@@ -178,7 +151,7 @@ static int ena_xmit_common(struct net_device *dev,
*/
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
- "failed to prepare tx bufs\n");
+ "Failed to prepare tx bufs\n");
u64_stats_update_begin(&ring->syncp);
ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&ring->syncp);
@@ -292,7 +265,7 @@ error_report_dma_error:
u64_stats_update_begin(&xdp_ring->syncp);
xdp_ring->tx_stats.dma_mapping_err++;
u64_stats_update_end(&xdp_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map xdp buff\n");
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
xdp_return_frame_rx_napi(tx_info->xdpf);
tx_info->xdpf = NULL;
@@ -564,7 +537,7 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (!old_bpf_prog)
netif_info(adapter, drv, adapter->netdev,
- "xdp program set, changing the max_mtu from %d to %d",
+ "XDP program is set, changing the max_mtu from %d to %d",
prev_mtu, netdev->max_mtu);
} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
@@ -983,7 +956,7 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
return -EIO;
}
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "alloc page %p, rx_info %p\n", page, rx_info);
+ "Allocate page %p, rx_info %p\n", page, rx_info);
rx_info->page = page;
rx_info->page_offset = 0;
@@ -1033,7 +1006,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
- "failed to alloc buffer for rx queue %d\n",
+ "Failed to allocate buffer for rx queue %d\n",
rx_ring->qid);
break;
}
@@ -1042,7 +1015,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
req_id);
if (unlikely(rc)) {
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
- "failed to add buffer for rx queue %d\n",
+ "Failed to add buffer for rx queue %d\n",
rx_ring->qid);
break;
}
@@ -1054,9 +1027,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.refil_partial++;
u64_stats_update_end(&rx_ring->syncp);
- netdev_warn(rx_ring->netdev,
- "refilled rx qid %d with only %d buffers (from %d)\n",
- rx_ring->qid, i, num);
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Refilled rx qid %d with only %d buffers (from %d)\n",
+ rx_ring->qid, i, num);
}
/* ena_com_write_sq_doorbell issues a wmb() */
@@ -1097,7 +1070,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
if (unlikely(rc != bufs_num))
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
- "refilling Queue %d failed. allocated %d buffers from: %d\n",
+ "Refilling Queue %d failed. allocated %d buffers from: %d\n",
i, rc, bufs_num);
}
}
@@ -1155,14 +1128,14 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
continue;
if (print_once) {
- netdev_notice(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
+ "Free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
print_once = false;
} else {
- netdev_dbg(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
+ "Free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
}
ena_unmap_tx_buff(tx_ring, tx_info);
@@ -1414,7 +1387,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return NULL;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "rx allocated small packet. len %d. data_len %d\n",
+ "RX allocated small packet. len %d. data_len %d\n",
skb->len, skb->data_len);
/* sync this buffer for CPU use */
@@ -1451,7 +1424,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info->page_offset = 0;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "rx skb updated. len %d. data_len %d\n",
+ "RX skb updated. len %d. data_len %d\n",
skb->len, skb->data_len);
rx_info->page = NULL;
@@ -1658,6 +1631,11 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
+ /* The page might not actually be freed here since the
+ * page reference count is incremented in
* ena_xdp_xmit_buff(), and it will be decremented only
* when a send completion is received from the device
+ */
if (xdp_verdict == XDP_TX)
ena_free_rx_page(rx_ring,
&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
@@ -1785,6 +1763,7 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.unmask_interrupt++;
u64_stats_update_end(&tx_ring->syncp);
+
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
@@ -2002,7 +1981,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
/* Reserve the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
- "trying to enable MSI-X, vectors %d\n", msix_vecs);
+ "Trying to enable MSI-X, vectors %d\n", msix_vecs);
irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
msix_vecs, PCI_IRQ_MSIX);
@@ -2015,7 +1994,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
if (irq_cnt != msix_vecs) {
netif_notice(adapter, probe, adapter->netdev,
- "enable only %d MSI-X (out of %d), reduce the number of queues\n",
+ "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
irq_cnt, msix_vecs);
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
@@ -2085,12 +2064,12 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
irq->data);
if (rc) {
netif_err(adapter, probe, adapter->netdev,
- "failed to request admin irq\n");
+ "Failed to request admin irq\n");
return rc;
}
netif_dbg(adapter, probe, adapter->netdev,
- "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
+ "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
irq->affinity_hint_mask.bits[0], irq->vector);
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2123,7 +2102,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
}
netif_dbg(adapter, ifup, adapter->netdev,
- "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+ "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
i, irq->affinity_hint_mask.bits[0], irq->vector);
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2563,7 +2542,7 @@ static int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
- netdev_dbg(adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
ena_setup_io_intr(adapter);
@@ -2647,7 +2626,8 @@ static void ena_down(struct ena_adapter *adapter)
rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
- dev_err(&adapter->pdev->dev, "Device reset failed\n");
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Device reset failed\n");
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(adapter->ena_dev, false);
}
@@ -2969,7 +2949,7 @@ error_report_dma_error:
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.dma_mapping_err++;
u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
tx_info->skb = NULL;
@@ -3107,13 +3087,14 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
+ struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
int rc;
/* Allocate only the host info */
rc = ena_com_allocate_host_info(ena_dev);
if (rc) {
- pr_err("Cannot allocate host info\n");
+ dev_err(dev, "Cannot allocate host info\n");
return;
}
@@ -3143,9 +3124,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- pr_warn("Cannot set host attributes\n");
+ dev_warn(dev, "Cannot set host attributes\n");
else
- pr_err("Cannot set host attributes\n");
+ dev_err(dev, "Cannot set host attributes\n");
goto err;
}
@@ -3173,7 +3154,8 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
if (rc) {
- pr_err("Cannot allocate debug area\n");
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot allocate debug area\n");
return;
}
@@ -3377,7 +3359,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_com_mmio_reg_read_request_init(ena_dev);
if (rc) {
- dev_err(dev, "failed to init mmio read less\n");
+ dev_err(dev, "Failed to init mmio read less\n");
return rc;
}
@@ -3395,7 +3377,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_com_validate_version(ena_dev);
if (rc) {
- dev_err(dev, "device version is too low\n");
+ dev_err(dev, "Device version is too low\n");
goto err_mmio_read_less;
}
@@ -3464,7 +3446,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(&pdev->dev, "ena device init failed\n");
+ dev_err(dev, "ENA device init failed\n");
goto err_admin_init;
}
@@ -3600,9 +3582,10 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
adapter->last_keep_alive_jiffies = jiffies;
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
+
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3804,7 +3787,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
u64_stats_update_end(&rx_ring->syncp);
netif_err(adapter, drv, adapter->netdev,
- "trigger refill for ring %d\n", i);
+ "Trigger refill for ring %d\n", i);
napi_schedule(rx_ring->napi);
rx_ring->empty_rx_queue = 0;
@@ -4166,14 +4149,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
+ struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
u32 max_num_io_queues;
- char *queue_type_str;
bool wd_state;
int bars, rc;
@@ -4205,7 +4187,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_resource_start(pdev, ENA_REG_BAR),
pci_resource_len(pdev, ENA_REG_BAR));
if (!ena_dev->reg_bar) {
- dev_err(&pdev->dev, "failed to remap regs bar\n");
+ dev_err(&pdev->dev, "Failed to remap regs bar\n");
rc = -EFAULT;
goto err_free_region;
}
@@ -4216,7 +4198,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
if (rc) {
- dev_err(&pdev->dev, "ena device init failed\n");
+ dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
goto err_free_region;
@@ -4224,7 +4206,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
- dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+ dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
goto err_free_ena_dev;
}
@@ -4351,15 +4333,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- queue_type_str = "Regular";
- else
- queue_type_str = "Low Latency";
-
dev_info(&pdev->dev,
- "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
+ "%s found at mem %lx, mac addr %pM\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, queue_type_str);
+ netdev->dev_addr);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -4489,7 +4466,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
rtnl_lock();
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
dev_err(&pdev->dev,
- "ignoring device reset request as the device is being suspended\n");
+ "Ignoring device reset request as the device is being suspended\n");
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
ena_destroy_device(adapter, true);
@@ -4564,7 +4541,7 @@ static void ena_update_on_link_change(void *adapter_data,
ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
if (status) {
- netdev_dbg(adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
netif_carrier_on(adapter->netdev);
@@ -4608,7 +4585,7 @@ static void ena_notification(void *adapter_data,
aenq_e->aenq_common_desc.group,
ENA_ADMIN_NOTIFICATION);
- switch (aenq_e->aenq_common_desc.syndrom) {
+ switch (aenq_e->aenq_common_desc.syndrome) {
case ENA_ADMIN_UPDATE_HINTS:
hints = (struct ena_admin_ena_hw_hints *)
(&aenq_e->inline_data_w4);
@@ -4617,7 +4594,7 @@ static void ena_notification(void *adapter_data,
default:
netif_err(adapter, drv, adapter->netdev,
"Invalid aenq notification link state %d\n",
- aenq_e->aenq_common_desc.syndrom);
+ aenq_e->aenq_common_desc.syndrome);
}
}
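
A thread running through the ena_netdev.c hunks above: netdev_dbg()/netdev_warn() print unconditionally, while the netif_dbg()/netif_warn()/netif_err() family first checks the driver's msg_enable bitmap, so each message class can be toggled per interface via ethtool's msglvl setting. A minimal illustration of the difference, reusing the driver's own names:

    /* Gated: prints only if the tx_queued bit is set in
     * adapter->msg_enable (ethtool -s <dev> msglvl ...). */
    netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");

    /* Unconditional: always prints at warn level. */
    netdev_warn(adapter->netdev, "Failed to map skb\n");
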
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 52abb6a4f87e..30eb686749dc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_H
diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
index 426e57e10a7f..3ecdf29160ca 100644
--- a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
+++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_PCI_ID_TBL_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index b514bb1b855d..1e007a41a525 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4ba75551cb17..2709a2db5657 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(unsigned long data)
+static void xgbe_ecc_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
unsigned int ecc_isr;
bool stop = false;
@@ -468,14 +468,14 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_ecc);
else
- xgbe_ecc_isr_task((unsigned long)pdata);
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
return IRQ_HANDLED;
}
-static void xgbe_isr_task(unsigned long data)
+static void xgbe_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task((unsigned long)pdata);
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -607,7 +607,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_dev);
else
- xgbe_isr_task((unsigned long)pdata);
+ xgbe_isr_task(&pdata->tasklet_dev);
return IRQ_HANDLED;
}
@@ -991,9 +991,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
- tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
+ tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
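
The xgbe conversions above follow the tree-wide tasklet API migration: tasklet_init() passed the context as an opaque unsigned long, while tasklet_setup() hands the callback the tasklet pointer itself, and from_tasklet() (a typed container_of() wrapper) recovers the embedding structure. A minimal sketch of the pattern, with an illustrative structure name:

    #include <linux/interrupt.h>

    struct my_priv {
        struct tasklet_struct my_tasklet;
        int pending;
    };

    static void my_tasklet_fn(struct tasklet_struct *t)
    {
        /* Map the tasklet pointer back to the structure embedding it;
         * no more casting an unsigned long cookie. */
        struct my_priv *priv = from_tasklet(priv, t, my_tasklet);

        priv->pending = 0;
    }

    static void my_priv_init(struct my_priv *priv)
    {
        tasklet_setup(&priv->my_tasklet, my_tasklet_fn);
    }
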
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 4d9062d35930..22d4fc547a0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(unsigned long data)
+static void xgbe_i2c_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -324,7 +324,7 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_i2c);
else
- xgbe_i2c_isr_task((unsigned long)pdata);
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
return IRQ_HANDLED;
}
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task((unsigned long)pdata);
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
return IRQ_HANDLED;
}
@@ -462,8 +462,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_init(&pdata->tasklet_i2c, xgbe_i2c_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 8a3a60bb2688..93ef5a30cb8d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -688,9 +688,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(unsigned long data)
+static void xgbe_an_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -715,14 +715,14 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_an);
else
- xgbe_an_isr_task((unsigned long)pdata);
+ xgbe_an_isr_task(&pdata->tasklet_an);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task((unsigned long)pdata);
+ xgbe_an_isr_task(&pdata->tasklet_an);
return IRQ_HANDLED;
}
@@ -1414,8 +1414,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_init(&pdata->tasklet_an, xgbe_an_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 6fb620e25208..74c1778d841e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2210,12 +2210,12 @@ static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
- bool status_changed = 0;
+ bool status_changed = false;
BUG_ON(!phydev);
if (bp->old_link != phydev->link) {
- status_changed = 1;
+ status_changed = true;
bp->old_link = phydev->link;
}
@@ -2223,11 +2223,11 @@ static void b44_adjust_link(struct net_device *dev)
if (phydev->link) {
if ((phydev->duplex == DUPLEX_HALF) &&
(bp->flags & B44_FLAG_FULL_DUPLEX)) {
- status_changed = 1;
+ status_changed = true;
bp->flags &= ~B44_FLAG_FULL_DUPLEX;
} else if ((phydev->duplex == DUPLEX_FULL) &&
!(bp->flags & B44_FLAG_FULL_DUPLEX)) {
- status_changed = 1;
+ status_changed = true;
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3c543dd7a8f3..35f659310084 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12390,7 +12390,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
}
if (CHIP_IS_E1(bp))
- bp->dropless_fc = 0;
+ bp->dropless_fc = false;
else
bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
@@ -15412,7 +15412,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
return -EINVAL;
}
- bp->hwtstamp_ioctl_called = 1;
+ bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
@@ -15494,7 +15494,7 @@ void bnx2x_init_ptp(struct bnx2x *bp)
bnx2x_init_cyclecounter(bp);
timecounter_init(&bp->timecounter, &bp->cyclecounter,
ktime_to_ns(ktime_get_real()));
- bp->timecounter_init_done = 1;
+ bp->timecounter_init_done = true;
}
DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 53f64ca673c3..65c298f1f333 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3782,6 +3782,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
return -EOPNOTSUPP;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -3852,7 +3853,7 @@ static void bnxt_init_stats(struct bnxt *bp)
tx_masks = stats->hw_masks;
tx_count = sizeof(struct tx_port_stats_ext) / 8;
- flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
+ flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
rc = bnxt_hwrm_port_qstats_ext(bp, flags);
if (rc) {
mask = (1ULL << 40) - 1;
@@ -4305,7 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (BNXT_NO_FW_ACCESS(bp))
return -EBUSY;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
@@ -5723,7 +5724,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code;
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (BNXT_NO_FW_ACCESS(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
@@ -7817,7 +7818,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
if (set_tpa)
tpa_flags = bp->flags & BNXT_FLAG_TPA;
- else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
@@ -9310,18 +9311,16 @@ static ssize_t bnxt_show_temp(struct device *dev,
struct hwrm_temp_monitor_query_output *resp;
struct bnxt *bp = dev_get_drvdata(dev);
u32 len = 0;
+ int rc;
resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (len)
- return len;
-
- return sprintf(buf, "unknown\n");
+ return rc ?: len;
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
@@ -9341,7 +9340,16 @@ static void bnxt_hwmon_close(struct bnxt *bp)
static void bnxt_hwmon_open(struct bnxt *bp)
{
+ struct hwrm_temp_monitor_query_input req = {0};
struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_close(bp);
+ return;
+ }
if (bp->hwmon_dev)
return;
@@ -11778,6 +11786,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_cancel_sp_work(bp);
+ bp->sp_event = 0;
+
bnxt_dl_fw_reporters_destroy(bp, true);
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
@@ -11785,9 +11797,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
@@ -12088,7 +12097,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
static void bnxt_vpd_read_info(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- int i, len, pos, ro_size;
+ int i, len, pos, ro_size, size;
ssize_t vpd_size;
u8 *vpd_data;
@@ -12123,7 +12132,8 @@ static void bnxt_vpd_read_info(struct bnxt *bp)
if (len + pos > vpd_size)
goto read_sn;
- strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+ size = min(len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn:
pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
@@ -12136,7 +12146,8 @@ read_sn:
if (len + pos > vpd_size)
goto exit;
- strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN));
+ size = min(len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
kfree(vpd_data);
}
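
The strlcpy()-to-memcpy() change above matters because strlcpy() keeps scanning the source until it finds a NUL, but PCI VPD fields are length-delimited and not NUL-terminated, so the old code could read past the field. A minimal sketch of the bounded copy, assuming (as with the zero-allocated bnxt private struct) a zero-initialized destination that therefore stays NUL-terminated:

    #include <string.h>

    #define FLD_LEN 32    /* stand-in for BNXT_VPD_FLD_LEN */

    static void copy_vpd_field(char dst[FLD_LEN], const char *src, int len)
    {
        int size = len < FLD_LEN - 1 ? len : FLD_LEN - 1;

        /* memcpy() reads exactly 'size' bytes; strlcpy() would keep
         * scanning src for a NUL the VPD field does not contain. */
        memcpy(dst, src, size);
    }
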
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5a13eb66beda..0ef89dabfd61 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1737,6 +1737,10 @@ struct bnxt {
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_NO_FW_ACCESS(bp) \
+ (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
+ pci_channel_offline((bp)->pdev))
+
struct bnxt_irq *irq_tbl;
int total_irqs;
u8 mac_addr[ETH_ALEN];
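
The new macro tightens every HWRM call site converted above: besides the existing fatal-firmware-state flag, pci_channel_offline() reports a PCI channel that has entered error recovery (AER/EEH), another situation where sending a firmware message could only time out. Usage mirrors the bnxt.c hunks:

    /* Guard before issuing any HWRM command: */
    if (BNXT_NO_FW_ACCESS(bp))
        return -EBUSY;   /* or 0, where a silent no-op is the right result */
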
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index d0928334bdc8..6a3453f46d9a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1322,6 +1322,9 @@ static int bnxt_get_regs_len(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int reg_len;
+ if (!BNXT_PF(bp))
+ return -EOPNOTSUPP;
+
reg_len = BNXT_PXP_REG_LEN;
if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
@@ -1778,6 +1781,22 @@ static void bnxt_get_pauseparam(struct net_device *dev,
epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
+static void bnxt_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *epstat)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
+ epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
+}
+
static int bnxt_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1788,9 +1807,12 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (!BNXT_PHY_CFG_ABLE(bp))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
if (epause->autoneg) {
- if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
- return -EINVAL;
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ rc = -EINVAL;
+ goto pause_exit;
+ }
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
if (bp->hwrm_spec_code >= 0x10201)
@@ -1811,11 +1833,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- if (netif_running(dev)) {
- mutex_lock(&bp->link_lock);
+ if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
- mutex_unlock(&bp->link_lock);
- }
+
+pause_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -2552,8 +2574,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
struct bnxt *bp = netdev_priv(dev);
struct ethtool_eee *eee = &bp->eee;
struct bnxt_link_info *link_info = &bp->link_info;
- u32 advertising =
- _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ u32 advertising;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -2562,19 +2583,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
if (!(bp->flags & BNXT_FLAG_EEE_CAP))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
+ advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
if (!edata->eee_enabled)
goto eee_ok;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
netdev_warn(dev, "EEE requires autoneg\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
if (edata->tx_lpi_enabled) {
if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
bp->lpi_tmr_lo, bp->lpi_tmr_hi);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
} else if (!bp->lpi_tmr_hi) {
edata->tx_lpi_timer = eee->tx_lpi_timer;
}
@@ -2584,7 +2609,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
} else if (edata->advertised & ~advertising) {
netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
edata->advertised, advertising);
- return -EINVAL;
+ rc = -EINVAL;
+ goto eee_exit;
}
eee->advertised = edata->advertised;
@@ -2596,6 +2622,8 @@ eee_ok:
if (netif_running(dev))
rc = bnxt_hwrm_set_link_setting(bp, false, true);
+eee_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -3645,6 +3673,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
+ .get_pause_stats = bnxt_get_pause_stats,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
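
The pauseparam and EEE hunks above share one fix: link_lock used to be taken only around the final firmware call, leaving the earlier reads and writes of link_info racy against link-state updates; now the lock is held for the whole handler and every early failure exits through a single unlock label. A minimal sketch of that idiom (all names hypothetical):

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct my_dev {
        struct mutex link_lock;
        bool autoneg;
        bool pause_en;
    };

    static int my_set_pause(struct my_dev *dev, bool pause)
    {
        int rc = 0;

        mutex_lock(&dev->link_lock);
        if (!dev->autoneg) {
            rc = -EINVAL;          /* early failure still unlocks below */
            goto out;
        }
        dev->pause_en = pause;     /* link state touched only under the lock */
    out:
        mutex_unlock(&dev->link_lock);
        return rc;
    }
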
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 84536292b031..f7f10cfb3476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3009,10 +3009,10 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(unsigned long data)
+static void cnic_service_bnx2_msix(struct tasklet_struct *t)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3134,10 +3134,10 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(unsigned long data)
+static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -4458,8 +4458,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
- (unsigned long) dev);
+ tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4868,8 +4867,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
- (unsigned long) dev);
+ tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6761f404b8aa..9c8f40e8a721 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -647,8 +647,7 @@ static void macb_mac_link_up(struct phylink_config *config,
ctrl |= GEM_BIT(GBE);
}
- /* We do not support MLO_PAUSE_RX yet */
- if (tx_pause)
+ if (rx_pause)
ctrl |= MACB_BIT(PAE);
macb_set_tx_clk(bp->tx_clk, speed, ndev);
@@ -1466,9 +1465,9 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(unsigned long data)
+static void macb_hresp_error_task(struct tasklet_struct *t)
{
- struct macb *bp = (struct macb *)data;
+ struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -4560,8 +4559,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
- (unsigned long)bp);
+ tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 50b533ff58e6..cd5d5d6e7e5e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -26,6 +26,7 @@
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8e0ed01e7f03..9eac0d43b58d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -161,13 +161,13 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
-static void octeon_droq_bh(unsigned long pdev)
+static void octeon_droq_bh(struct tasklet_struct *t)
{
int q_no;
int reschedule = 0;
- struct octeon_device *oct = (struct octeon_device *)pdev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
+ droq_tasklet);
+ struct octeon_device *oct = oct_priv->dev;
for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
@@ -4193,8 +4193,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Initialize the tasklet that handles output queue packet processing.*/
dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
- tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
- (unsigned long)octeon_dev);
+ tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
/* Setup the interrupt handler and record the INT SUM register address
*/
@@ -4298,6 +4297,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
complete(&handshake[octeon_dev->octeon_id].init);
atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+ oct_priv->dev = octeon_dev;
return 0;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index ac32facaa427..fbde7c58c4db 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1324,7 +1324,7 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
u64 val64;
unsigned long flags;
- u32 val32, addrhi;
+ u32 addrhi;
spin_lock_irqsave(&oct->pci_win_lock, flags);
@@ -1339,10 +1339,10 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
/* Read back to preserve ordering of writes */
- val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+ readl(oct->reg_list.pci_win_rd_addr_hi);
writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
- val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+ readl(oct->reg_list.pci_win_rd_addr_lo);
val64 = readq(oct->reg_list.pci_win_rd_data);
@@ -1355,7 +1355,6 @@ void lio_pci_writeq(struct octeon_device *oct,
u64 val,
u64 addr)
{
- u32 val32;
unsigned long flags;
spin_lock_irqsave(&oct->pci_win_lock, flags);
@@ -1365,7 +1364,7 @@ void lio_pci_writeq(struct octeon_device *oct,
/* The write happens when the LSB is written. So write MSB first. */
writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
/* Read the MSB to ensure ordering of writes. */
- val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+ readl(oct->reg_list.pci_win_wr_data_hi);
writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
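
Dropping val32 above is safe because the readl() calls were never about the value: a read from the device flushes posted PCI writes and enforces ordering, so the read-back stays while the unused assignment goes. A minimal sketch of the idiom (regs_hi/regs_lo are illustrative iomem pointers):

    /* Write the MSB first; the readl() forces the posted write to
     * complete before the LSB write that triggers the operation.
     * The returned value is deliberately ignored. */
    writel(upper_32_bits(val), regs_hi);
    readl(regs_hi);               /* read-back: ordering barrier only */
    writel(lower_32_bits(val), regs_lo);
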
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 073d0647b439..5b4cb725f60f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -39,6 +39,7 @@ struct octeon_device_priv {
/** Tasklet structures for this device. */
struct tasklet_struct droq_tasklet;
unsigned long napi_mask;
+ struct octeon_device *dev;
};
/** This structure is used by NIC driver to store information required
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 4c85ae643b7b..7ccab36143c1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -22,6 +22,7 @@
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
+#include "octeon_mem_ops.h"
#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 3e17ce0d2314..e9d6a5b61046 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -315,9 +315,9 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
netif_wake_queue(p->netdev);
}
-static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
{
- struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+ struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
octeon_mgmt_clean_tx_buffers(p);
octeon_mgmt_enable_tx_irq(p);
}
@@ -1489,8 +1489,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
skb_queue_head_init(&p->tx_list);
skb_queue_head_init(&p->rx_list);
- tasklet_init(&p->tx_clean_tasklet,
- octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+ tasklet_setup(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet);
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 063e560d9c1b..0a94c396173b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -985,9 +985,9 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
*
* As of now only CQ errors are handled
*/
-static void nicvf_handle_qs_err(unsigned long data)
+static void nicvf_handle_qs_err(struct tasklet_struct *t)
{
- struct nicvf *nic = (struct nicvf *)data;
+ struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
struct queue_set *qs = nic->qs;
int qidx;
u64 status;
@@ -1493,12 +1493,10 @@ int nicvf_open(struct net_device *netdev)
}
/* Init tasklet for handling Qset err interrupt */
- tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
- (unsigned long)nic);
+ tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
/* Init RBDR tasklet which will refill RBDR */
- tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
- (unsigned long)nic);
+ tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
/* Configure CPI algorithm */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a45223f0cca5..7a141ce32e86 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -460,9 +460,9 @@ void nicvf_rbdr_work(struct work_struct *work)
}
/* In Softirq context, alloc rcv buffers in atomic mode */
-void nicvf_rbdr_task(unsigned long data)
+void nicvf_rbdr_task(struct tasklet_struct *t)
{
- struct nicvf *nic = (struct nicvf *)data;
+ struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
nicvf_refill_rbdr(nic, GFP_ATOMIC);
if (nic->rb_alloc_fail) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 2460451fc48f..8453defc296c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -348,7 +348,7 @@ void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
struct cqe_rx_t *cqe_rx, bool xdp);
-void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_task(struct tasklet_struct *t);
void nicvf_rbdr_work(struct work_struct *work);
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 21016de20b2d..2d9c2b5a690a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -239,8 +239,10 @@ struct sched {
unsigned int num; /* num skbs in per port queues */
struct sched_port p[MAX_NPORTS];
struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+ struct sge *sge;
};
-static void restart_sched(unsigned long);
+
+static void restart_sched(struct tasklet_struct *t);
/*
@@ -378,7 +380,8 @@ static int tx_sched_init(struct sge *sge)
return -ENOMEM;
pr_debug("tx_sched_init\n");
- tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+ tasklet_setup(&s->sched_tsk, restart_sched);
+ s->sge = sge;
sge->tx_sched = s;
for (i = 0; i < MAX_NPORTS; i++) {
@@ -1305,9 +1308,10 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
* Called from tasklet. Checks the scheduler for any
* pending skbs that can be sent.
*/
-static void restart_sched(unsigned long arg)
+static void restart_sched(struct tasklet_struct *t)
{
- struct sge *sge = (struct sge *) arg;
+ struct sched *s = from_tasklet(s, t, sched_tsk);
+ struct sge *sge = s->sge;
struct adapter *adapter = sge->adapter;
struct cmdQ *q = &sge->cmdQ[0];
struct sk_buff *skb;
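
Note the extra wrinkle in this conversion: from_tasklet() can only recover the structure that embeds the tasklet, so when the callback really needs a different object, the patch adds a back-pointer (struct sched gains an sge field here, just as octeon_device_priv gains a dev field in the liquidio hunks above) and fills it in next to tasklet_setup(). A minimal sketch of that shape, with illustrative names:

    #include <linux/interrupt.h>

    struct engine;                        /* the object the callback needs */
    void engine_run(struct engine *eng);  /* hypothetical worker */

    struct worker {
        struct tasklet_struct tsk;
        struct engine *eng;               /* back-pointer set at init time */
    };

    static void worker_fn(struct tasklet_struct *t)
    {
        struct worker *w = from_tasklet(w, t, tsk);

        engine_run(w->eng);               /* reach the real target object */
    }

    static void worker_init(struct worker *w, struct engine *eng)
    {
        tasklet_setup(&w->tsk, worker_fn);
        w->eng = eng;
    }
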
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
index dadf11e3dddb..9d591f0ddfc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -815,17 +815,12 @@ static const struct cphy_ops ael2020_ops = {
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
- int err;
-
cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_IRQ, "10GBASE-R");
msleep(125);
- err = set_phy_regs(phy, ael2020_reset_regs);
- if (err)
- return err;
- return 0;
+ return set_phy_regs(phy, ael2020_reset_regs);
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index ee6188dea705..a978a00acc1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1516,14 +1516,14 @@ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
/**
* restart_ctrlq - restart a suspended control queue
- * @qs: the queue set cotaining the control queue
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
{
struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
struct sge_txq *q = &qs->txq[TXQ_CTRL];
spin_lock(&q->lock);
@@ -1733,14 +1733,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
/**
* restart_offloadq - restart a suspended offload queue
- * @qs: the queue set cotaining the offload queue
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_offloadq(unsigned long data)
+static void restart_offloadq(struct tasklet_struct *t)
{
struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
@@ -3081,10 +3081,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
skb_queue_head_init(&q->txq[i].sendq);
}
- tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
- (unsigned long)q);
- tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
- (unsigned long)q);
+ tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
+ tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
q->fl[0].gen = q->fl[1].gen = 1;
q->fl[0].size = p->fl_size;
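
The cxgb3 variant shows that the same recovery works when the tasklet sits inside an array element: offsetof() accepts a full member designator, so from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk) subtracts the offset of that particular slot. A tiny sketch with invented types:

	#include <stddef.h>
	#include <stdio.h>

	struct tasklet_struct { int state; };
	struct sge_txq { struct tasklet_struct qresume_tsk; };
	struct sge_qset { struct sge_txq txq[3]; }; /* pretend TXQ_CTRL == 1 */

	int main(void)
	{
		/* offsetof() resolves through the array index, so container
		 * recovery still lands on the start of the queue set. */
		printf("offset of txq[1].qresume_tsk = %zu\n",
		       offsetof(struct sge_qset, txq[1].qresume_tsk));
		return 0;
	}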
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index f6c1ec140e09..6ec5f2f26f05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1908,13 +1908,16 @@ out:
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
struct filter_entry *f)
{
- if (f->fs.hitcnts)
+ if (f->fs.hitcnts) {
set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
- TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
+ TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
+ TCB_TIMESTAMP_V(0ULL),
+ 1);
+ set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
- TCB_TIMESTAMP_V(0ULL) |
TCB_RTT_TS_RECENT_AGE_V(0ULL),
1);
+ }
if (f->fs.newdmac)
set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
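
This hunk is a behavioural fix, not API churn: TCB_TIMESTAMP and TCB_RTT_TS_RECENT_AGE live in different words of the connection's TCB, but the old code OR-ed both masks and values into a single set_tcb_field() call aimed at TCB_TIMESTAMP_W, so the age field's own word was never updated. A toy sketch of the per-word masked-update rule the fix restores (word indices and masks invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t tcb[32];        /* stand-in for a per-connection TCB */

	static void set_field(unsigned int word, uint32_t mask, uint32_t val)
	{
		tcb[word] = (tcb[word] & ~mask) | (val & mask);
	}

	int main(void)
	{
		tcb[5] = 0xdeadbeef;    /* pretend TIMESTAMP word */
		tcb[6] = 0xdeadbeef;    /* pretend RTT_TS_RECENT_AGE word */

		/* Buggy shape: set_field(5, TS_MASK | AGE_MASK, 0) over-clears
		 * word 5 and never touches word 6.  The fix issues one masked
		 * update per word, mirroring the two set_tcb_field() calls above. */
		set_field(5, 0x0fffffff, 0);    /* clear the timestamp field */
		set_field(6, 0x0fffffff, 0);    /* clear the age field */

		printf("tcb[5]=%08x tcb[6]=%08x\n", tcb[5], tcb[6]);
		return 0;
	}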
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
index b1a073eea60b..a020e8490681 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c
@@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap)
{
struct mps_entries_ref *mps_entry, *tmp;
- if (!list_empty(&adap->mps_ref))
+ if (list_empty(&adap->mps_ref))
return;
spin_lock(&adap->mps_ref_lock);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 437c054ef749..2b942b4825d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2660,15 +2660,15 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
/**
* restart_ctrlq - restart a suspended control queue
- * @data: the control queue to restart
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
{
struct sk_buff *skb;
unsigned int written = 0;
- struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+ struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
spin_lock(&q->sendq.lock);
reclaim_completed_tx_imm(&q->q);
@@ -2961,13 +2961,13 @@ static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
/**
* restart_ofldq - restart a suspended offload queue
- * @data: the offload queue to restart
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_ofldq(unsigned long data)
+static void restart_ofldq(struct tasklet_struct *t)
{
- struct sge_uld_txq *q = (struct sge_uld_txq *)data;
+ struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
@@ -4580,7 +4580,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+ tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
txq->full = 0;
return 0;
}
@@ -4670,7 +4670,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
txq->q.q_type = CXGB4_TXQ_ULD;
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+ tasklet_setup(&txq->qresume_tsk, restart_ofldq);
txq->full = 0;
txq->mapping_err = 0;
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index e2fe78e2e242..2820a0bb971b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2017,33 +2017,14 @@ static void mboxlog_stop(struct seq_file *seq, void *v)
{
}
-static const struct seq_operations mboxlog_seq_ops = {
+static const struct seq_operations mboxlog_sops = {
.start = mboxlog_start,
.next = mboxlog_next,
.stop = mboxlog_stop,
.show = mboxlog_show
};
-static int mboxlog_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &mboxlog_seq_ops);
-
- if (!res) {
- struct seq_file *seq = file->private_data;
-
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations mboxlog_fops = {
- .owner = THIS_MODULE,
- .open = mboxlog_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
+DEFINE_SEQ_ATTRIBUTE(mboxlog);
/*
* Show SGE Queue Set information. We display QPL Queues Sets per line.
*/
@@ -2171,31 +2152,14 @@ static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
-static const struct seq_operations sge_qinfo_seq_ops = {
+static const struct seq_operations sge_qinfo_sops = {
.start = sge_queue_start,
.next = sge_queue_next,
.stop = sge_queue_stop,
.show = sge_qinfo_show
};
-static int sge_qinfo_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &sge_qinfo_seq_ops);
-
- if (!res) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations sge_qinfo_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sge_qinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(sge_qinfo);
/*
* Show SGE Queue Set statistics. We display QPL Queues Sets per line.
@@ -2317,31 +2281,14 @@ static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
-static const struct seq_operations sge_qstats_seq_ops = {
+static const struct seq_operations sge_qstats_sops = {
.start = sge_qstats_start,
.next = sge_qstats_next,
.stop = sge_qstats_stop,
.show = sge_qstats_show
};
-static int sge_qstats_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &sge_qstats_seq_ops);
-
- if (res == 0) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations sge_qstats_proc_fops = {
- .owner = THIS_MODULE,
- .open = sge_qstats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(sge_qstats);
/*
* Show PCI-E SR-IOV Virtual Function Resource Limits.
@@ -2415,31 +2362,14 @@ static void interfaces_stop(struct seq_file *seq, void *v)
{
}
-static const struct seq_operations interfaces_seq_ops = {
+static const struct seq_operations interfaces_sops = {
.start = interfaces_start,
.next = interfaces_next,
.stop = interfaces_stop,
.show = interfaces_show
};
-static int interfaces_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &interfaces_seq_ops);
-
- if (res == 0) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations interfaces_proc_fops = {
- .owner = THIS_MODULE,
- .open = interfaces_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(interfaces);
/*
* /sys/kernel/debugfs/cxgb4vf/ files list.
@@ -2452,10 +2382,10 @@ struct cxgb4vf_debugfs_entry {
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
- { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
- { "sge_qstats", 0444, &sge_qstats_proc_fops },
+ { "sge_qinfo", 0444, &sge_qinfo_fops },
+ { "sge_qstats", 0444, &sge_qstats_fops },
{ "resources", 0444, &resources_fops },
- { "interfaces", 0444, &interfaces_proc_fops },
+ { "interfaces", 0444, &interfaces_fops },
};
/*
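
The open/fops boilerplate removed in the four hunks above is exactly what DEFINE_SEQ_ATTRIBUTE() generates (it was added to <linux/seq_file.h> in v5.9). Quoted from memory, so verify against the target tree, its expansion is approximately:

	#define DEFINE_SEQ_ATTRIBUTE(__name)					\
	static int __name ## _open(struct inode *inode, struct file *file)	\
	{									\
		int ret = seq_open(file, &__name ## _sops);			\
		if (!ret && inode->i_private) {					\
			struct seq_file *seq_file = file->private_data;		\
			seq_file->private = inode->i_private;			\
		}								\
		return ret;							\
	}									\
										\
	static const struct file_operations __name ## _fops = {		\
		.owner		= THIS_MODULE,					\
		.open		= __name ## _open,				\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= seq_release,					\
	}

Note the naming contract: the macro expects a seq_operations table called <name>_sops and emits <name>_fops, which is why the tables are renamed from *_seq_ops and the debugfs_files[] entries switch to the generated symbols.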
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
index 1923e713b53a..7dfa57348d54 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
@@ -15,6 +15,7 @@ if CHELSIO_INLINE_CRYPTO
config CRYPTO_DEV_CHELSIO_TLS
tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
depends on TLS_TOE
help
Support Chelsio Inline TLS with Chelsio crypto accelerator.
@@ -25,6 +26,7 @@ config CRYPTO_DEV_CHELSIO_TLS
config CHELSIO_IPSEC_INLINE
tristate "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
help
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9dcf47f576c6..07e9dee03c98 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -509,7 +509,6 @@ static int gmac_init(struct net_device *netdev)
.rel_threshold = 0,
} };
union gmac_config0 tmp;
- u32 val;
config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
@@ -519,7 +518,7 @@ static int gmac_init(struct net_device *netdev)
writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
- val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+ readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
writel(hw_weigh.bits32,
@@ -2107,9 +2106,8 @@ static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
- union gmac_config0 config0;
- config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ readl(port->gmac_base + GMAC_CONFIG0);
rp->rx_max_pending = 1 << 15;
rp->rx_mini_max_pending = 0;
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index cb116b530f5e..d9f6c19940ef 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -85,7 +85,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi
#define DSL CONFIG_DE2104X_DSL
#endif
-#define DE_RX_RING_SIZE 64
+#define DE_RX_RING_SIZE 128
#define DE_TX_RING_SIZE 64
#define DE_RING_BYTES \
((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
@@ -443,21 +443,23 @@ static void de_rx (struct de_private *de)
}
if (!copying_skb) {
- pci_unmap_single(de->pdev, mapping,
- buflen, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&de->pdev->dev, mapping, buflen,
+ DMA_FROM_DEVICE);
skb_put(skb, len);
mapping =
de->rx_skb[rx_tail].mapping =
- pci_map_single(de->pdev, copy_skb->data,
- buflen, PCI_DMA_FROMDEVICE);
+ dma_map_single(&de->pdev->dev, copy_skb->data,
+ buflen, DMA_FROM_DEVICE);
de->rx_skb[rx_tail].skb = copy_skb;
} else {
- pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&de->pdev->dev, mapping, len,
+ DMA_FROM_DEVICE);
skb_reserve(copy_skb, RX_OFFSET);
skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
len);
- pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&de->pdev->dev, mapping,
+ len, DMA_FROM_DEVICE);
/* We'll reuse the original ring buffer. */
skb = copy_skb;
@@ -554,13 +556,15 @@ static void de_tx (struct de_private *de)
goto next;
if (unlikely(skb == DE_SETUP_SKB)) {
- pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
- sizeof(de->setup_frame), PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[tx_tail].mapping,
+ sizeof(de->setup_frame),
+ DMA_TO_DEVICE);
goto next;
}
- pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev, de->tx_skb[tx_tail].mapping,
+ skb->len, DMA_TO_DEVICE);
if (status & LastFrag) {
if (status & TxError) {
@@ -620,7 +624,8 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
txd = &de->tx_ring[entry];
len = skb->len;
- mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&de->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
if (entry == (DE_TX_RING_SIZE - 1))
flags |= RingEnd;
if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
@@ -763,8 +768,8 @@ static void __de_set_rx_mode (struct net_device *dev)
de->tx_skb[entry].skb = DE_SETUP_SKB;
de->tx_skb[entry].mapping = mapping =
- pci_map_single (de->pdev, de->setup_frame,
- sizeof (de->setup_frame), PCI_DMA_TODEVICE);
+ dma_map_single(&de->pdev->dev, de->setup_frame,
+ sizeof(de->setup_frame), DMA_TO_DEVICE);
/* Put the setup frame on the Tx list. */
txd = &de->tx_ring[entry];
@@ -1279,8 +1284,10 @@ static int de_refill_rx (struct de_private *de)
if (!skb)
goto err_out;
- de->rx_skb[i].mapping = pci_map_single(de->pdev,
- skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ de->rx_skb[i].mapping = dma_map_single(&de->pdev->dev,
+ skb->data,
+ de->rx_buf_sz,
+ DMA_FROM_DEVICE);
de->rx_skb[i].skb = skb;
de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
@@ -1313,7 +1320,8 @@ static int de_init_rings (struct de_private *de)
static int de_alloc_rings (struct de_private *de)
{
- de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
+ de->rx_ring = dma_alloc_coherent(&de->pdev->dev, DE_RING_BYTES,
+ &de->ring_dma, GFP_KERNEL);
if (!de->rx_ring)
return -ENOMEM;
de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
@@ -1333,8 +1341,9 @@ static void de_clean_rings (struct de_private *de)
for (i = 0; i < DE_RX_RING_SIZE; i++) {
if (de->rx_skb[i].skb) {
- pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
- de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->rx_skb[i].mapping, de->rx_buf_sz,
+ DMA_FROM_DEVICE);
dev_kfree_skb(de->rx_skb[i].skb);
}
}
@@ -1344,15 +1353,15 @@ static void de_clean_rings (struct de_private *de)
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
de->dev->stats.tx_dropped++;
- pci_unmap_single(de->pdev,
- de->tx_skb[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[i].mapping,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
} else {
- pci_unmap_single(de->pdev,
- de->tx_skb[i].mapping,
- sizeof(de->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[i].mapping,
+ sizeof(de->setup_frame),
+ DMA_TO_DEVICE);
}
}
}
@@ -1364,7 +1373,8 @@ static void de_clean_rings (struct de_private *de)
static void de_free_rings (struct de_private *de)
{
de_clean_rings(de);
- pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
+ dma_free_coherent(&de->pdev->dev, DE_RING_BYTES, de->rx_ring,
+ de->ring_dma);
de->rx_ring = NULL;
de->tx_ring = NULL;
}
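
The de2104x conversion above is one instance of the tree-wide move off the deprecated pci_* DMA wrappers onto the generic DMA API: the calls take &pdev->dev instead of the pci_dev, PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, and pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP_KERNEL (the old wrapper implied GFP_ATOMIC). A condensed sketch of the resulting idiom, assuming a bound struct pci_dev *pdev; a fragment for illustration, not a complete driver:

	#include <linux/errno.h>
	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int example_map(struct pci_dev *pdev, void *buf, size_t len,
			       dma_addr_t *out)
	{
		/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
		dma_addr_t mapping = dma_map_single(&pdev->dev, buf, len,
						    DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping))
			return -ENOMEM;

		*out = mapping;
		return 0;
	}

	static void example_unmap(struct pci_dev *pdev, dma_addr_t mapping,
				  size_t len)
	{
		/* was: pci_unmap_single(pdev, mapping, len, PCI_DMA_TODEVICE) */
		dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);
	}

The direction argument must match between map and unmap, and coherent allocations pair the same way: dma_alloc_coherent() and dma_free_coherent() with identical device, size, and handle, as every error path in these hunks demonstrates.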
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c3b4abff48b5..87a27fe2992d 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -380,7 +380,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
@@ -422,15 +422,17 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
db = netdev_priv(dev);
/* Allocate Tx/Rx descriptor memory */
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ &db->desc_pool_dma_ptr, GFP_KERNEL);
if (!db->desc_pool_ptr) {
err = -ENOMEM;
goto err_out_res;
}
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
- TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ &db->buf_pool_dma_ptr, GFP_KERNEL);
if (!db->buf_pool_ptr) {
err = -ENOMEM;
goto err_out_free_desc;
@@ -492,11 +494,12 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_unmap:
pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
- pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
- pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
- db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
pci_release_regions(pdev);
err_out_disable:
@@ -519,11 +522,12 @@ static void dmfe_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(db->pdev, db->ioaddr);
- pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
- db->desc_pool_dma_ptr);
- pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
}
@@ -955,8 +959,8 @@ static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
- RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
@@ -1329,8 +1333,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
- skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1544,8 +1548,8 @@ static void allocate_rx_buffer(struct net_device *dev)
if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
- RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index c1ca0765d56d..54560f9a1651 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -74,8 +74,8 @@ int tulip_refill_rx(struct net_device *dev)
if (skb == NULL)
break;
- mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
if (dma_mapping_error(&tp->pdev->dev, mapping)) {
dev_kfree_skb(skb);
tp->rx_buffers[entry].skb = NULL;
@@ -210,9 +210,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
@@ -222,9 +223,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
- pci_dma_sync_single_for_device(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
@@ -240,8 +242,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
}
#endif
- pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
@@ -436,9 +440,10 @@ static int tulip_rx(struct net_device *dev)
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
@@ -448,9 +453,10 @@ static int tulip_rx(struct net_device *dev)
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
- pci_dma_sync_single_for_device(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
@@ -466,8 +472,9 @@ static int tulip_rx(struct net_device *dev)
}
#endif
- pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
@@ -597,10 +604,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames not mapped */
if (tp->tx_buffers[entry].mapping)
- pci_unmap_single(tp->pdev,
- tp->tx_buffers[entry].mapping,
- sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ DMA_TO_DEVICE);
continue;
}
@@ -629,9 +636,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
dev->stats.tx_packets++;
}
- pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
tp->tx_buffers[entry].skb->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3a8659c5da06..e7b0d7de40fd 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -350,9 +350,9 @@ static void tulip_up(struct net_device *dev)
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
- mapping = pci_map_single(tp->pdev, tp->setup_frame,
+ mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tp->tx_buffers[tp->cur_tx].skb = NULL;
tp->tx_buffers[tp->cur_tx].mapping = mapping;
@@ -630,8 +630,8 @@ static void tulip_init_ring(struct net_device *dev)
tp->rx_buffers[i].skb = skb;
if (skb == NULL)
break;
- mapping = pci_map_single(tp->pdev, skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[i].mapping = mapping;
tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
@@ -664,8 +664,8 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tp->cur_tx % TX_RING_SIZE;
tp->tx_buffers[entry].skb = skb;
- mapping = pci_map_single(tp->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tp->tx_buffers[entry].mapping = mapping;
tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
@@ -716,16 +716,17 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames not mapped */
if (tp->tx_buffers[entry].mapping)
- pci_unmap_single(tp->pdev,
- tp->tx_buffers[entry].mapping,
- sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ DMA_TO_DEVICE);
continue;
}
- pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
- tp->tx_buffers[entry].skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
@@ -795,8 +796,8 @@ static void tulip_free_ring (struct net_device *dev)
/* An invalid address. */
tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
if (skb) {
- pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
dev_kfree_skb (skb);
}
}
@@ -805,8 +806,9 @@ static void tulip_free_ring (struct net_device *dev)
struct sk_buff *skb = tp->tx_buffers[i].skb;
if (skb != NULL) {
- pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[i].mapping, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb (skb);
}
tp->tx_buffers[i].skb = NULL;
@@ -1149,9 +1151,10 @@ static void set_rx_mode(struct net_device *dev)
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping =
- pci_map_single(tp->pdev, tp->setup_frame,
+ dma_map_single(&tp->pdev->dev,
+ tp->setup_frame,
sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Put the setup frame on the Tx list. */
if (entry == TX_RING_SIZE-1)
tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
@@ -1422,10 +1425,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = pci_alloc_consistent(pdev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma);
+ tp->rx_ring = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
goto err_out_mtable;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
@@ -1757,10 +1760,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_free_ring:
- pci_free_consistent (pdev,
- sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
err_out_mtable:
kfree (tp->mtable);
@@ -1878,10 +1881,10 @@ static void tulip_remove_one(struct pci_dev *pdev)
tp = netdev_priv(dev);
unregister_netdev(dev);
- pci_free_consistent (pdev,
- sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
kfree (tp->mtable);
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index f942399f0f32..13e73ed15ef0 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -282,7 +282,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
@@ -317,11 +317,15 @@ static int uli526x_init_one(struct pci_dev *pdev,
/* Allocate Tx/Rx descriptor memory */
err = -ENOMEM;
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ &db->desc_pool_dma_ptr, GFP_KERNEL);
if (!db->desc_pool_ptr)
goto err_out_release;
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ &db->buf_pool_dma_ptr, GFP_KERNEL);
if (!db->buf_pool_ptr)
goto err_out_free_tx_desc;
@@ -401,11 +405,12 @@ static int uli526x_init_one(struct pci_dev *pdev,
err_out_unmap:
pci_iounmap(pdev, db->ioaddr);
err_out_free_tx_buf:
- pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_tx_desc:
- pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
- db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_release:
pci_release_regions(pdev);
err_out_disable:
@@ -424,11 +429,11 @@ static void uli526x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(pdev, db->ioaddr);
- pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
- db->desc_pool_dma_ptr);
- pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
@@ -810,7 +815,8 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
/* reuse this SKB */
@@ -1234,10 +1240,8 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
- skb_tail_pointer(skb),
- RX_ALLOC_SIZE,
- PCI_DMA_FROMDEVICE));
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb_tail_pointer(skb),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1409,10 +1413,8 @@ static void allocate_rx_buffer(struct net_device *dev)
if (skb == NULL)
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
- skb_tail_pointer(skb),
- RX_ALLOC_SIZE,
- PCI_DMA_FROMDEVICE));
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb_tail_pointer(skb),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 5a43be327f58..89cbdc1f4857 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -364,7 +364,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("Device %s disabled due to DMA limitations\n",
pci_name(pdev));
return -EIO;
@@ -630,9 +630,10 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
+ netdev_dbg(dev, "%s() irq %d\n", __func__, irq);
- if((i=alloc_ringdesc(dev)))
+ i = alloc_ringdesc(dev);
+ if (i)
goto out_err;
spin_lock_irq(&np->lock);
@@ -642,7 +643,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- netdev_dbg(dev, "Done netdev_open()\n");
+ netdev_dbg(dev, "Done %s()\n", __func__);
/* Set the timer to check for link beat. */
timer_setup(&np->timer, netdev_timer, 0);
@@ -802,8 +803,9 @@ static void init_rxtx_rings(struct net_device *dev)
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
- np->rx_buf_sz,PCI_DMA_FROMDEVICE);
+ np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
np->rx_ring[i].buffer1 = np->rx_addr[i];
np->rx_ring[i].status = DescOwned;
@@ -833,20 +835,17 @@ static void free_rxtx_rings(struct netdev_private* np)
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_addr[i],
- np->rx_skbuff[i]->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
+ np->rx_skbuff[i]->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->tx_addr[i],
- np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
+ np->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
}
np->tx_skbuff[i] = NULL;
@@ -964,10 +963,10 @@ static int alloc_ringdesc(struct net_device *dev)
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- np->rx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct w840_rx_desc)*RX_RING_SIZE +
- sizeof(struct w840_tx_desc)*TX_RING_SIZE,
- &np->ring_dma_addr);
+ np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+ sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+ &np->ring_dma_addr, GFP_KERNEL);
if(!np->rx_ring)
return -ENOMEM;
init_rxtx_rings(dev);
@@ -976,10 +975,10 @@ static int alloc_ringdesc(struct net_device *dev)
static void free_ringdesc(struct netdev_private *np)
{
- pci_free_consistent(np->pci_dev,
- sizeof(struct w840_rx_desc)*RX_RING_SIZE +
- sizeof(struct w840_tx_desc)*TX_RING_SIZE,
- np->rx_ring, np->ring_dma_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+ sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+ np->rx_ring, np->ring_dma_addr);
}
@@ -994,8 +993,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
- np->tx_addr[entry] = pci_map_single(np->pci_dev,
- skb->data,skb->len, PCI_DMA_TODEVICE);
+ np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
np->tx_skbuff[entry] = skb;
np->tx_ring[entry].buffer1 = np->tx_addr[entry];
@@ -1078,9 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
np->stats.tx_packets++;
}
/* Free the original skb. */
- pci_unmap_single(np->pci_dev,np->tx_addr[entry],
- np->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
+ np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
np->tx_q_bytes -= np->tx_skbuff[entry]->len;
dev_kfree_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
@@ -1217,18 +1215,21 @@ static int netdev_rx(struct net_device *dev)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
@@ -1258,9 +1259,10 @@ static int netdev_rx(struct net_device *dev)
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- np->rx_addr[entry] = pci_map_single(np->pci_dev,
- skb->data,
- np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
np->rx_ring[entry].buffer1 = np->rx_addr[entry];
}
wmb();
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index e8e563d6e86b..734acb834c98 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -222,13 +222,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata (pdev, dev);
- ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_iounmap;
np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = ring_space;
@@ -279,9 +281,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_unmap_rx:
- pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
@@ -435,8 +439,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
- skb->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->rx_ring[i]),
+ skb->len, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
@@ -446,8 +451,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->tx_ring[i]),
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
}
@@ -504,9 +510,8 @@ static int alloc_list(struct net_device *dev)
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
- cpu_to_le64(pci_map_single(
- np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
@@ -672,9 +677,8 @@ rio_timer (struct timer_list *t)
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -728,9 +732,8 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
((u64)np->vlan << 32) |
((u64)skb->priority << 45);
}
- txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
- skb->len,
- PCI_DMA_TODEVICE));
+ txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
@@ -827,9 +830,9 @@ rio_free_tx (struct net_device *dev, int irq)
if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
break;
skb = np->tx_skbuff[entry];
- pci_unmap_single (np->pdev,
- desc_to_dma(&np->tx_ring[entry]),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->tx_ring[entry]), skb->len,
+ DMA_TO_DEVICE);
if (irq)
dev_consume_skb_irq(skb);
else
@@ -949,25 +952,25 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
- pci_unmap_single (np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
- pci_dma_sync_single_for_cpu(np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
pkt_len);
skb_put (skb, pkt_len);
- pci_dma_sync_single_for_device(np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
}
skb->protocol = eth_type_trans (skb, dev);
#if 0
@@ -1000,9 +1003,8 @@ receive_packet (struct net_device *dev)
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -1796,10 +1798,10 @@ rio_remove1 (struct pci_dev *pdev)
struct netdev_private *np = netdev_priv(dev);
unregister_netdev (dev);
- pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
- pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
#endif
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index b3f8597e77aa..e3a8858915b3 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -367,6 +367,7 @@ struct netdev_private {
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
struct timer_list timer; /* Media monitoring timer. */
+ struct net_device *ndev; /* backpointer */
/* ethtool extra stats */
struct {
u64 tx_multiple_collisions;
@@ -429,8 +430,8 @@ static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
-static void rx_poll(unsigned long data);
-static void tx_poll(unsigned long data);
+static void rx_poll(struct tasklet_struct *t);
+static void tx_poll(struct tasklet_struct *t);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void netdev_error(struct net_device *dev, int intr_status);
@@ -531,14 +532,15 @@ static int sundance_probe1(struct pci_dev *pdev,
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
np = netdev_priv(dev);
+ np->ndev = dev;
np->base = ioaddr;
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->msg_enable = (1 << debug) - 1;
spin_lock_init(&np->lock);
spin_lock_init(&np->statlock);
- tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
- tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
+ tasklet_setup(&np->rx_tasklet, rx_poll);
+ tasklet_setup(&np->tx_tasklet, tx_poll);
ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
&ring_dma, GFP_KERNEL);
@@ -1054,10 +1056,9 @@ static void init_ring(struct net_device *dev)
}
}
-static void tx_poll (unsigned long data)
+static void tx_poll(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
unsigned head = np->cur_task % TX_RING_SIZE;
struct netdev_desc *txdesc =
&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
@@ -1312,10 +1313,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
-static void rx_poll(unsigned long data)
+static void rx_poll(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
+ struct net_device *dev = np->ndev;
int entry = np->cur_rx % RX_RING_SIZE;
int boguscnt = np->budget;
void __iomem *ioaddr = np->base;
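
sundance.c shows the limit of the container_of approach: from_tasklet() can only recover the structure that embeds the tasklet (struct netdev_private here), and there is no generic inverse of netdev_priv(), so the patch adds an ndev back-pointer for handlers such as rx_poll() that also need the struct net_device.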
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 3143df9a398c..7f87b0f51404 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -507,12 +507,12 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
- u32 tx_status, irq_enable;
+ u32 irq_enable;
unsigned int i, tx_cmd, wrsz;
unsigned long flags;
unsigned int *bufp;
- tx_status = dnet_readl(bp, TX_STATUS);
+ dnet_readl(bp, TX_STATUS);
pr_debug("start_xmit: len %u head %p data %p\n",
skb->len, skb->head, skb->data);
@@ -520,7 +520,7 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irqsave(&bp->lock, flags);
- tx_status = dnet_readl(bp, TX_STATUS);
+ dnet_readl(bp, TX_STATUS);
bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
wrsz = (u32) skb->len + 3;
@@ -542,7 +542,7 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
netif_stop_queue(dev);
- tx_status = dnet_readl(bp, INTR_SRC);
+ dnet_readl(bp, INTR_SRC);
irq_enable = dnet_readl(bp, INTR_ENB);
irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
dnet_writel(bp, irq_enable, INTR_ENB);
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index feea797cde02..cfd369cf4c8c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -3,6 +3,7 @@ config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
select PHYLINK
+ select PCS_LYNX
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 56d9927fbfda..b87db0846e10 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -42,24 +42,7 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_cpu_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_cpu_ops = {
- .open = dpaa2_dbg_cpu_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_cpu);
static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
{
@@ -106,24 +89,7 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_fqs_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_fq_ops = {
- .open = dpaa2_dbg_fqs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_fqs);
static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
{
@@ -151,24 +117,7 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_ch_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_ch_ops = {
- .open = dpaa2_dbg_ch_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
@@ -179,13 +128,13 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
priv->dbg.dir = dir;
/* per-cpu stats file */
- debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_fops);
/* per-fq stats file */
- debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fqs_fops);
/* per-channel stats file */
- debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
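
These three conversions use DEFINE_SHOW_ATTRIBUTE(), the single_open() counterpart of the seq_file macro seen in the cxgb4vf hunks. From memory, and worth verifying against the target tree, it expands to roughly:

	#define DEFINE_SHOW_ATTRIBUTE(__name)					\
	static int __name ## _open(struct inode *inode, struct file *file)	\
	{									\
		return single_open(file, __name ## _show, inode->i_private);	\
	}									\
										\
	static const struct file_operations __name ## _fops = {		\
		.owner		= THIS_MODULE,					\
		.open		= __name ## _open,				\
		.read		= seq_read,					\
		.llseek		= seq_lseek,					\
		.release	= single_release,				\
	}

The per-driver netdev_err() on single_open() failure disappears with the boilerplate; debugfs callers are expected to cope with a failing open.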
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index ceaf76158e23..5bc965186f8c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -11,10 +11,11 @@
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
-#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/fsl/ptp_qoriq.h>
+#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
@@ -30,6 +31,9 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+struct ptp_qoriq *dpaa2_ptp;
+EXPORT_SYMBOL(dpaa2_ptp);
+
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
@@ -560,11 +564,57 @@ static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
return cleaned;
}
+static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
+ u8 *msgtype, u8 *twostep, u8 *udp,
+ u16 *correction_offset,
+ u16 *origintimestamp_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 ||
+ type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
/* Configure the egress frame annotation for timestamp update */
-static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fd *fd,
+ void *buf_start,
+ struct sk_buff *skb)
{
+ struct ptp_tstamp origin_timestamp;
+ struct dpni_single_step_cfg cfg;
+ u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
+ struct dpaa2_fas *fas;
+ struct timespec64 ts;
+ u16 offset1, offset2;
u32 ctrl, frc;
+ __le64 *ns;
+ u8 *data;
/* Mark the egress frame annotation area as valid */
frc = dpaa2_fd_get_frc(fd);
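
dpaa2_eth_ptp_parse() above reduces to computing two byte offsets, relative to the MAC header, that the hardware needs for one-step SYNC: the correctionField, and the originTimestamp that begins immediately after the fixed 34-byte PTPv2 common header. A standalone sketch of that arithmetic for an untagged layer-2 PTP frame, with the header layout spelled out per IEEE 1588-2008 (field types simplified; the kernel's struct ptp_header uses __be* types):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Fixed PTPv2 common header, 34 bytes on the wire. */
	struct ptp_header {
		uint8_t  tsmt;                  /* transportSpecific | messageType */
		uint8_t  ver;                   /* reserved | versionPTP */
		uint16_t message_length;
		uint8_t  domain_number;
		uint8_t  reserved1;
		uint8_t  flag_field[2];         /* bit 1 of byte 0 = twoStepFlag */
		uint64_t correction;
		uint32_t reserved2;
		uint8_t  source_port_identity[10];
		uint16_t sequence_id;
		uint8_t  control;
		int8_t   log_message_interval;
	} __attribute__((packed));

	int main(void)
	{
		const size_t mac_hdr_len = 14;  /* untagged Ethernet, ethertype 0x88F7 */

		printf("correction offset      = %zu\n",        /* 22 */
		       mac_hdr_len + offsetof(struct ptp_header, correction));
		printf("originTimestamp offset = %zu\n",        /* 48 */
		       mac_hdr_len + sizeof(struct ptp_header));
		return 0;
	}

For UDP transport the same arithmetic additionally spans the IP and UDP headers, which is why the kernel helper computes everything relative to skb_mac_header() instead of hard-coding constants.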
@@ -580,12 +630,52 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
faead = dpaa2_get_faead(buf_start, true);
faead->ctrl = cpu_to_le32(ctrl);
+
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2) ||
+ msgtype != 0 || twostep) {
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ return;
+ }
+
+ /* Mark the frame annotation status as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
+
+ /* Mark the PTP flag for one step timestamping */
+ fas = dpaa2_get_fas(buf_start, true);
+ fas->status = cpu_to_le32(DPAA2_FAS_PTP);
+
+ dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
+ ns = dpaa2_get_ts(buf_start, true);
+ *ns = cpu_to_le64(timespec64_to_ns(&ts) /
+ DPAA2_PTP_CLK_PERIOD_NS);
+
+ /* Update current time to PTP message originTimestamp field */
+ ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(origin_timestamp.sec_lsb);
+ *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset1;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
+ &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+ }
}
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
- struct dpaa2_fd *fd)
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
void *sgt_buf = NULL;
@@ -607,7 +697,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
return -EINVAL;
- scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
if (unlikely(!scl))
return -ENOMEM;
@@ -654,6 +744,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
* skb backpointer in the software annotation area. We'll need
* all of them on Tx Conf.
*/
+ *swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SG;
swa->sg.skb = skb;
@@ -673,9 +764,6 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
-
return 0;
dma_map_single_failed:
@@ -695,7 +783,8 @@ dma_map_sg_failed:
*/
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
- struct dpaa2_fd *fd)
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_sgt_cache *sgt_cache;
@@ -733,6 +822,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_sg_set_final(sgt, true);
/* Store the skb backpointer in the SGT buffer */
+ *swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
@@ -751,9 +841,6 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- dpaa2_eth_enable_tx_tstamp(fd, sgt_buf);
-
return 0;
sgt_map_failed:
@@ -770,14 +857,15 @@ data_map_failed:
/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
- struct dpaa2_fd *fd)
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
u8 *buffer_start, *aligned_start;
struct dpaa2_eth_swa *swa;
dma_addr_t addr;
- buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
/* If there's enough room to align the FD address, do it.
* It will help hardware optimize accesses.
@@ -791,6 +879,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
* (in the private data area) such that we can release it
* on Tx confirm
*/
+ *swa_addr = (void *)buffer_start;
swa = (struct dpaa2_eth_swa *)buffer_start;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
@@ -807,9 +896,6 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- dpaa2_eth_enable_tx_tstamp(fd, buffer_start);
-
return 0;
}
@@ -820,7 +906,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv,
+static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_fq *fq,
const struct dpaa2_fd *fd, bool in_napi)
{
@@ -893,7 +979,7 @@ static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (skb->cb[0] == TX_TSTAMP) {
struct skb_shared_hwtstamps shhwtstamps;
__le64 *ts = dpaa2_get_ts(buffer_start, true);
u64 ns;
@@ -903,6 +989,8 @@ static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv,
ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
}
/* Free SGT buffer allocated on tx */
@@ -922,7 +1010,8 @@ static void dpaa2_eth_free_tx_fd(const struct dpaa2_eth_priv *priv,
napi_consume_skb(skb, in_napi);
}
-static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_fd fd;
@@ -935,11 +1024,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
u32 fd_len;
u8 prio = 0;
int err, i;
+ void *swa;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+ needed_headroom = dpaa2_eth_needed_headroom(skb);
/* We'll be holding a back-reference to the skb until Tx Confirmation;
* we don't want that overwritten by a concurrent Tx with a cloned skb.
@@ -955,17 +1045,17 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
memset(&fd, 0, sizeof(fd));
if (skb_is_nonlinear(skb)) {
- err = dpaa2_eth_build_sg_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
} else if (skb_headroom(skb) < needed_headroom) {
- err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
} else {
- err = dpaa2_eth_build_single_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
}
if (unlikely(err)) {
@@ -973,6 +1063,9 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
goto err_build_fd;
}
+ if (skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+
/* Tracing point */
trace_dpaa2_tx_fd(net_dev, &fd);
@@ -1026,6 +1119,63 @@ err_build_fd:
return NETDEV_TX_OK;
}
+static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
+ tx_onestep_tstamp);
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (!skb)
+ return;
+
+ /* Lock just before transmitting the one-step timestamping
+ * packet, and release the lock in dpaa2_eth_free_tx_fd once
+ * the packet is confirmed as sent on hardware, or when
+ * cleaning up after a transmit failure.
+ */
+ mutex_lock(&priv->onestep_tstamp_lock);
+ __dpaa2_eth_tx(skb, priv->net_dev);
+ }
+}
+
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 msgtype, twostep, udp;
+ u16 offset1, offset2;
+
+ /* Use skb->cb[0] to record the per-skb timestamping request */
+ skb->cb[0] = 0;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
+ skb->cb[0] = TX_TSTAMP;
+ else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
+ }
+
+ /* TX for one-step timestamping PTP Sync packet */
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2))
+ if (msgtype == 0 && twostep == 0) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ queue_work(priv->dpaa2_ptp_wq,
+ &priv->tx_onestep_tstamp);
+ return NETDEV_TX_OK;
+ }
+ /* Fall back to two-step timestamping if this is not a
+ * one-step PTP Sync packet
+ */
+ skb->cb[0] = TX_TSTAMP;
+ }
+
+ /* TX for other packets */
+ return __dpaa2_eth_tx(skb, net_dev);
+}
+
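For context, drivers receive this one-step request through the standard SIOCSHWTSTAMP path handled in dpaa2_eth_ts_ioctl() below. A hedged userspace sketch of how an application would request it (not part of the patch; error handling trimmed):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_onestep(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ONESTEP_SYNC,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr); /* 0 on success */
	}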
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
@@ -1889,15 +2039,17 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->tx_tstamp = false;
- break;
case HWTSTAMP_TX_ON:
- priv->tx_tstamp = true;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->tx_tstamp_type = config.tx_type;
break;
default:
return -ERANGE;
@@ -2092,7 +2244,6 @@ static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
struct xdp_frame *xdpf,
struct dpaa2_fd *fd)
{
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
unsigned int needed_headroom;
struct dpaa2_eth_swa *swa;
@@ -2102,7 +2253,7 @@ static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
/* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it
*/
- needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+ needed_headroom = dpaa2_eth_needed_headroom(NULL);
if (xdpf->headroom < needed_headroom)
return -EINVAL;
@@ -2723,8 +2874,10 @@ static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
+ buf_layout.pass_frame_status = true;
buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
- DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
@@ -2733,7 +2886,8 @@ static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
}
/* tx-confirm buffer */
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
@@ -3958,6 +4112,19 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
+ priv->rx_tstamp = false;
+
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ if (!priv->dpaa2_ptp_wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
+
+ skb_queue_head_init(&priv->tx_skbs);
+
/* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
@@ -4096,6 +4263,8 @@ err_dpio_setup:
err_dpni_setup:
fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+err_wq_alloc:
dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7f3c41dc98f2..6436fa3b25cb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
+#include <linux/net_tstamp.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -194,6 +195,24 @@ struct dpaa2_faead {
#define DPAA2_FAEAD_EBDDV 0x00002000
#define DPAA2_FAEAD_UPD 0x00000010
+struct ptp_tstamp {
+ u16 sec_msb;
+ u32 sec_lsb;
+ u32 nsec;
+};
+
+static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
+{
+ u64 sec, nsec;
+
+ sec = ns;
+ nsec = do_div(sec, 1000000000);
+
+ tstamp->sec_lsb = sec & 0xFFFFFFFF;
+ tstamp->sec_msb = (sec >> 32) & 0xFFFF;
+ tstamp->nsec = nsec;
+}
+
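A quick worked example of the helper with a hypothetical timestamp (not from the patch): do_div() leaves the quotient in its first argument and returns the remainder, so the seconds/nanoseconds split falls out directly.

	struct ptp_tstamp t;

	ns_to_ptp_tstamp(&t, 1600000000123456789ULL);
	/* sec = 1600000000, nsec = 123456789, hence:
	 *   t.sec_msb = 0x0000
	 *   t.sec_lsb = 0x5F5E1000 (1600000000)
	 *   t.nsec    = 123456789
	 */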
/* Accessors for the hardware annotation fields that we use */
static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
{
@@ -433,8 +452,8 @@ struct dpaa2_eth_priv {
u16 bpid;
struct iommu_domain *iommu_domain;
- bool tx_tstamp; /* Tx timestamping enabled */
- bool rx_tstamp; /* Rx timestamping enabled */
+ enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
+ bool rx_tstamp; /* Rx timestamping enabled */
u16 tx_qdid;
struct fsl_mc_io *mc_io;
@@ -473,8 +492,22 @@ struct dpaa2_eth_priv {
#endif
struct dpaa2_mac *mac;
+ struct workqueue_struct *dpaa2_ptp_wq;
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
+ /* The one-step timestamping configuration in hardware
+ * registers can only be changed while no one-step
+ * timestamping frames are in flight. So we use a mutex
+ * here to make sure the previous one-step timestamping
+ * packet has passed through the Tx confirmation queue
+ * (releasing the lock) before the current packet is
+ * transmitted.
+ */
+ struct mutex onestep_tstamp_lock;
};
+#define TX_TSTAMP 0x1
+#define TX_TSTAMP_ONESTEP_SYNC 0x2
+
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
| RXH_L4_B_2_3)
@@ -491,6 +524,7 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
u16 ver_major, u16 ver_minor)
@@ -560,9 +594,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
return !!(link_options & DPNI_LINK_OPT_PAUSE);
}
-static inline
-unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb)
+static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
unsigned int headroom = DPAA2_ETH_SWA_SIZE;
@@ -579,7 +611,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
return 0;
/* If we have Tx timestamping, need 128B hardware annotation */
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ if (skb->cb[0])
headroom += DPAA2_ETH_TX_HWA_SIZE;
return headroom;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 26bd99b76765..11e0c047dbd2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#include <linux/net_tstamp.h>
@@ -763,6 +764,9 @@ EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
+ if (!dpaa2_ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -770,7 +774,8 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
info->phc_index = dpaa2_phc_index;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 3ee236c5fc37..6ff64dd1cf27 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -15,6 +15,18 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
case DPMAC_ETH_IF_RGMII:
*if_mode = PHY_INTERFACE_MODE_RGMII;
break;
+ case DPMAC_ETH_IF_USXGMII:
+ *if_mode = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case DPMAC_ETH_IF_QSGMII:
+ *if_mode = PHY_INTERFACE_MODE_QSGMII;
+ break;
+ case DPMAC_ETH_IF_SGMII:
+ *if_mode = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case DPMAC_ETH_IF_XFI:
+ *if_mode = PHY_INTERFACE_MODE_10GBASER;
+ break;
default:
return -EINVAL;
}
@@ -67,6 +79,10 @@ static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
phy_interface_t interface)
{
switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -95,6 +111,17 @@ static void dpaa2_mac_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ phylink_set(mask, 10000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_10GBASER)
+ break;
+ phylink_set(mask, 5000baseT_Full);
+ phylink_set(mask, 2500baseT_Full);
+ fallthrough;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -227,6 +254,52 @@ out:
return fixed;
}
+static int dpaa2_pcs_create(struct dpaa2_mac *mac,
+ struct device_node *dpmac_node, int id)
+{
+ struct mdio_device *mdiodev;
+ struct device_node *node;
+
+ node = of_parse_phandle(dpmac_node, "pcs-handle", 0);
+ if (!node) {
+ /* do not error out on old DTS files */
+ netdev_warn(mac->net_dev, "pcs-handle node not found\n");
+ return 0;
+ }
+
+ if (!of_device_is_available(node) ||
+ !of_device_is_available(node->parent)) {
+ netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ return -ENODEV;
+ }
+
+ mdiodev = of_mdio_find_device(node);
+ of_node_put(node);
+ if (!mdiodev)
+ return -EPROBE_DEFER;
+
+ mac->pcs = lynx_pcs_create(mdiodev);
+ if (!mac->pcs) {
+ netdev_err(mac->net_dev, "lynx_pcs_create() failed\n");
+ put_device(&mdiodev->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
+{
+ struct lynx_pcs *pcs = mac->pcs;
+ struct device *dev = &pcs->mdio->dev;
+
+ if (pcs) {
+ lynx_pcs_destroy(pcs);
+ put_device(dev);
+ mac->pcs = NULL;
+ }
+}
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
@@ -278,6 +351,13 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
+ if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ attr.eth_if != DPMAC_ETH_IF_RGMII) {
+ err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if (err)
+ goto err_put_node;
+ }
+
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
@@ -286,10 +366,13 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
&dpaa2_mac_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_put_node;
+ goto err_pcs_destroy;
}
mac->phylink = phylink;
+ if (mac->pcs)
+ phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
+
err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
@@ -302,6 +385,8 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
err_phylink_destroy:
phylink_destroy(mac->phylink);
+err_pcs_destroy:
+ dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
err_close_dpmac:
@@ -316,6 +401,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
+ dpaa2_pcs_destroy(mac);
+
dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 2130d9c7d40e..955a52856210 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,6 +7,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
+#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -21,6 +22,7 @@ struct dpaa2_mac {
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
+ struct lynx_pcs *pcs;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index cc1b7f85e433..32b5faa87bb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -2,6 +2,7 @@
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
+ * Copyright 2020 NXP
*/
#include <linux/module.h>
@@ -9,7 +10,6 @@
#include <linux/of_address.h>
#include <linux/msi.h>
#include <linux/fsl/mc.h>
-#include <linux/fsl/ptp_qoriq.h>
#include "dpaa2-ptp.h"
@@ -201,6 +201,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_free_threaded_irq;
dpaa2_phc_index = ptp_qoriq->phc_index;
+ dpaa2_ptp = ptp_qoriq;
dev_set_drvdata(dev, ptp_qoriq);
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
index df2458a5e9ef..e1023538b4c3 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -1,14 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 NXP
+ * Copyright 2020 NXP
*/
#ifndef __RTC_H
#define __RTC_H
+#include <linux/fsl/ptp_qoriq.h>
+
#include "dprtc.h"
#include "dprtc-cmd.h"
extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index 3ea51dd9374b..a24b20f76938 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -66,8 +66,8 @@ struct dpmac_cmd_get_counter {
};
struct dpmac_rsp_get_counter {
- u64 pad;
- u64 counter;
+ __le64 pad;
+ __le64 counter;
};
#endif /* _FSL_DPMAC_CMD_H */
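The __le64 change above is an annotation-only fix: the MC firmware returns the counter little-endian, and the sparse-checkable type documents that callers must byte-swap. A one-line hedged sketch of the caller-side swap (as the dpmac counter read path would do):

	*counter = le64_to_cpu(rsp_params->counter);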
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 593e3812af93..1222a4ea9c71 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#ifndef _FSL_DPNI_CMD_H
#define _FSL_DPNI_CMD_H
@@ -90,6 +91,9 @@
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -639,4 +643,21 @@ struct dpni_cmd_set_tx_shaping {
u8 coupled;
};
+#define DPNI_PTP_ENABLE_SHIFT 0
+#define DPNI_PTP_ENABLE_SIZE 1
+#define DPNI_PTP_CH_UPDATE_SHIFT 1
+#define DPNI_PTP_CH_UPDATE_SIZE 1
+
+struct dpni_cmd_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+};
+
+struct dpni_rsp_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 68ed4c41b282..6ea7db66a632 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1999,3 +2000,81 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpni_get_single_step_cfg() - return current configuration for
+ * single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ */
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_rsp_single_step_cfg *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* read command response */
+ rsp_params = (struct dpni_rsp_single_step_cfg *)cmd.params;
+ ptp_cfg->offset = le16_to_cpu(rsp_params->offset);
+ ptp_cfg->en = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_ENABLE) ? 1 : 0;
+ ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_CH_UPDATE) ? 1 : 0;
+ ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+
+ return err;
+}
+
+/**
+ * dpni_set_single_step_cfg() - enable/disable and configure single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * The function only takes effect when the dpni object is connected to a
+ * dpmac object. If the dpni is not connected to a dpmac, the configuration
+ * is stored internally and applied once the connection is made.
+ */
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_cmd_single_step_cfg *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ u16 flags;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ cmd_params = (struct dpni_cmd_single_step_cfg *)cmd.params;
+ cmd_params->offset = cpu_to_le16(ptp_cfg->offset);
+ cmd_params->peer_delay = cpu_to_le32(ptp_cfg->peer_delay);
+
+ flags = le16_to_cpu(cmd_params->flags);
+ dpni_set_field(flags, PTP_ENABLE, !!ptp_cfg->en);
+ dpni_set_field(flags, PTP_CH_UPDATE, !!ptp_cfg->ch_update);
+ cmd_params->flags = cpu_to_le16(flags);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
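A hedged sketch of reading the configuration back with the getter above; priv and dev are assumed to be a struct dpaa2_eth_priv and its device, as elsewhere in this series:

	struct dpni_single_step_cfg cfg;
	int err;

	err = dpni_get_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (!err)
		dev_dbg(dev, "one-step PTP: en=%u offset=%u ch_update=%u peer_delay=%u\n",
			cfg.en, cfg.offset, cfg.ch_update, cfg.peer_delay);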
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 39387991a1f9..74456a37a997 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#ifndef __FSL_DPNI_H
#define __FSL_DPNI_H
@@ -1079,4 +1080,34 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
int coupled);
+/**
+ * struct dpni_single_step_cfg - configure single step PTP (IEEE 1588)
+ * @en: enable single step PTP. When enabled, the PTPv1 functionality
+ * will not work. If the field is zero, the offset and ch_update
+ * parameters are ignored
+ * @offset: start offset from the beginning of the frame where the
+ * timestamp field is found. The offset must account for all MAC
+ * headers, VLAN tags and other protocol headers
+ * @ch_update: when set, the UDP checksum will be updated inside the packet
+ * @peer_delay: for peer-to-peer transparent clocks, add this value to the
+ * correction field in addition to the transient time update.
+ * The value is expressed in nanoseconds.
+ */
+struct dpni_single_step_cfg {
+ u8 en;
+ u8 ch_update;
+ u16 offset;
+ u32 peer_delay;
+};
+
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 26d5981b798f..177334f0adb1 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1053,7 +1053,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
err_reg_netdev:
enetc_teardown_serdes(priv);
- enetc_mdio_remove(pf);
enetc_free_msix(priv);
err_alloc_msix:
enetc_free_si_resources(priv);
@@ -1061,6 +1060,7 @@ err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
+ enetc_mdio_remove(pf);
enetc_of_put_phy(pf);
err_map_pf_space:
enetc_pci_remove(pdev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index f14576212a0e..7b5c82c7e4e5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,16 +78,11 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
- int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
- if (err)
- return err;
-
- return 0;
+ return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
}
static int enetc_vf_set_features(struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a0c1f4410306..0405a3975f3f 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -520,13 +520,12 @@ static void fec_time_keep(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
- u64 ns;
unsigned long flags;
mutex_lock(&fep->ptp_clk_mutex);
if (fep->ptp_clk_on) {
spin_lock_irqsave(&fep->tmreg_lock, flags);
- ns = timecounter_read(&fep->tc);
+ timecounter_read(&fep->tc);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
mutex_unlock(&fep->ptp_clk_mutex);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index ed3829ae4ef1..a769273b36f7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -334,7 +334,7 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @enable: false - request reset , true - drop reset
+ * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
@@ -357,7 +357,7 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @enable: false - request reset , true - drop reset
+ * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 4eb50296f653..14e60c9e491d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -463,8 +463,8 @@ static int __lb_clean_rings(struct hns_nic_priv *priv,
/**
* nic_run_loopback_test - run loopback test
- * @nic_dev: net device
- * @loopback_type: loopback type
+ * @ndev: net device
+ * @loop_mode: loopback mode
*/
static int __lb_run_test(struct net_device *ndev,
enum hnae_loop loop_mode)
@@ -572,7 +572,7 @@ static int __lb_down(struct net_device *ndev, enum hnae_loop loop)
/**
* hns_nic_self_test - self test
- * @dev: net device
+ * @ndev: net device
* @eth_test: test cmd
* @data: test result
*/
@@ -633,7 +633,7 @@ static void hns_nic_self_test(struct net_device *ndev,
/**
* hns_nic_get_drvinfo - get net driver info
- * @dev: net device
+ * @net_dev: net device
* @drvinfo: driver info
*/
static void hns_nic_get_drvinfo(struct net_device *net_dev,
@@ -658,7 +658,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
/**
* hns_get_ringparam - get ring parameter
- * @dev: net device
+ * @net_dev: net device
* @param: ethtool parameter
*/
static void hns_get_ringparam(struct net_device *net_dev,
@@ -683,7 +683,7 @@ static void hns_get_ringparam(struct net_device *net_dev,
/**
* hns_get_pauseparam - get pause parameter
- * @dev: net device
+ * @net_dev: net device
* @param: pause parameter
*/
static void hns_get_pauseparam(struct net_device *net_dev,
@@ -701,7 +701,7 @@ static void hns_get_pauseparam(struct net_device *net_dev,
/**
* hns_set_pauseparam - set pause parameter
- * @dev: net device
+ * @net_dev: net device
* @param: pause parameter
*
* Return 0 on success, negative on failure
@@ -725,7 +725,7 @@ static int hns_set_pauseparam(struct net_device *net_dev,
/**
* hns_get_coalesce - get coalesce info.
- * @dev: net device
+ * @net_dev: net device
* @ec: coalesce info.
*
* Return 0 on success, negative on failure.
@@ -769,7 +769,7 @@ static int hns_get_coalesce(struct net_device *net_dev,
/**
* hns_set_coalesce - set coalesce info.
- * @dev: net device
+ * @net_dev: net device
* @ec: coalesce info.
*
* Return 0 on success, negative on failure.
@@ -808,7 +808,7 @@ static int hns_set_coalesce(struct net_device *net_dev,
/**
* hns_get_channels - get channel info.
- * @dev: net device
+ * @net_dev: net device
* @ch: channel info.
*/
static void
@@ -825,7 +825,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
/**
* get_ethtool_stats - get detail statistics.
- * @dev: net device
+ * @netdev: net device
* @stats: statistics info.
* @data: statistics data.
*/
@@ -883,8 +883,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
/**
* get_strings: Return a set of strings that describe the requested objects
- * @dev: net device
- * @stats: string set ID.
+ * @netdev: net device
+ * @stringset: string set ID.
* @data: objects data.
*/
static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
@@ -972,7 +972,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
/**
* nic_get_sset_count - get the string set count which is returned by nic_get_strings.
- * @dev: net device
+ * @netdev: net device
* @stringset: string set index, 0: self test string; 1: statistics string.
*
* Return string set count.
@@ -1006,7 +1006,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset)
/**
* hns_phy_led_set - set phy LED status.
- * @dev: net device
+ * @netdev: net device
* @value: LED state.
*
* Return 0 on success, negative on failure.
@@ -1028,7 +1028,7 @@ static int hns_phy_led_set(struct net_device *netdev, int value)
/**
* nic_set_phys_id - set phy identify LED.
- * @dev: net device
+ * @netdev: net device
* @state: LED state.
*
* Return 0 on success, negative on failure.
@@ -1104,9 +1104,9 @@ hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
/**
* hns_get_regs - get net device register
- * @dev: net device
+ * @net_dev: net device
* @cmd: ethtool cmd
- * @date: register data
+ * @data: register data
*/
static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
void *data)
@@ -1126,7 +1126,7 @@ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
/**
* nic_get_regs_len - get total register len.
- * @dev: net device
+ * @net_dev: net device
*
* Return total register len.
*/
@@ -1151,7 +1151,7 @@ static int hns_get_regs_len(struct net_device *net_dev)
/**
* hns_nic_nway_reset - nway reset
- * @dev: net device
+ * @netdev: net device
*
* Return 0 on success, negative on failure
*/
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 088550db2de7..ad7257ebe51e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -42,8 +42,9 @@
#define HNAE3_DEV_ID_50GE_RDMA 0xA224
#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225
#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226
-#define HNAE3_DEV_ID_100G_VF 0xA22E
-#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F
+#define HNAE3_DEV_ID_200G_RDMA 0xA228
+#define HNAE3_DEV_ID_VF 0xA22E
+#define HNAE3_DEV_ID_RDMA_DCB_PFC_VF 0xA22F
#define HNAE3_CLASS_NAME_SIZE 16
@@ -152,6 +153,7 @@ enum hnae3_hw_error_type {
HNAE3_PPU_POISON_ERROR,
HNAE3_CMDQ_ECC_ERROR,
HNAE3_IMP_RD_POISON_ERROR,
+ HNAE3_ROCEE_AXI_RESP_ERROR,
};
enum hnae3_reset_type {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index fe7fb565da19..4fab82c95a02 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -19,7 +19,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
- u32 value, i = 0;
+ u32 value, i;
int cnt;
if (!priv->ring) {
@@ -264,6 +264,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump intr\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 93825a433915..00e400244809 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -81,8 +81,10 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
@@ -1254,7 +1256,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i = 0;
+ int i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -1383,6 +1385,27 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ bool doorbell)
+{
+ ring->pending_buf += num;
+
+ if (!doorbell) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_more++;
+ u64_stats_update_end(&ring->syncp);
+ return;
+ }
+
+ if (!ring->pending_buf)
+ return;
+
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ ring->pending_buf = 0;
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1391,11 +1414,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
int pre_ntu, next_to_use_head;
struct sk_buff *frag_skb;
int bd_num = 0;
+ bool doorbell;
int ret;
/* Hardware can only handle frames longer than 32 bytes; pad shorter frames */
- if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
return NETDEV_TX_OK;
+ }
/* Prefetch the data used later */
prefetch(skb->data);
@@ -1406,6 +1432,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
+ hns3_tx_doorbell(ring, 0, true);
return NETDEV_TX_BUSY;
} else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
@@ -1446,11 +1473,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
- netdev_tx_sent_queue(dev_queue, skb->len);
-
- wmb(); /* Commit all data before submit */
-
- hnae3_queue_xmit(ring->tqp, bd_num);
+ doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ netdev_xmit_more());
+ hns3_tx_doorbell(ring, bd_num, doorbell);
return NETDEV_TX_OK;
@@ -1459,6 +1484,7 @@ fill_err:
out_err_tx_ok:
dev_kfree_skb_any(skb);
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
return NETDEV_TX_OK;
}
@@ -1839,13 +1865,13 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
tx_ring->next_to_clean, napi->state);
netdev_info(ndev,
- "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
- tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+ tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
netdev_info(ndev,
- "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
- tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
/* When the mac receives many pause frames in a row, it's unable to send
@@ -2020,9 +2046,10 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_50GE_RDMA:
case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ case HNAE3_DEV_ID_200G_RDMA:
return true;
- case HNAE3_DEV_ID_100G_VF:
- case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+ case HNAE3_DEV_ID_VF:
+ case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
return false;
default:
dev_warn(&pdev->dev, "un-recognized pci device-id %u",
@@ -2302,17 +2329,19 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->buf = page_address(p);
cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
+ page_ref_add(p, USHRT_MAX - 1);
+ cb->pagecnt_bias = USHRT_MAX;
return 0;
}
static void hns3_free_buffer(struct hns3_enet_ring *ring,
- struct hns3_desc_cb *cb)
+ struct hns3_desc_cb *cb, int budget)
{
if (cb->type == DESC_TYPE_SKB)
- dev_kfree_skb_any((struct sk_buff *)cb->priv);
- else if (!HNAE3_IS_TX_RING(ring))
- put_page((struct page *)cb->priv);
+ napi_consume_skb(cb->priv, budget);
+ else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
memset(cb, 0, sizeof(*cb));
}
@@ -2344,7 +2373,8 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = 0;
}
-static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
+ int budget)
{
struct hns3_desc_cb *cb = &ring->desc_cb[i];
@@ -2352,7 +2382,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
return;
hns3_buffer_detach(ring, i);
- hns3_free_buffer(ring, cb);
+ hns3_free_buffer(ring, cb, budget);
}
static void hns3_free_buffers(struct hns3_enet_ring *ring)
@@ -2360,7 +2390,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
int i;
for (i = 0; i < ring->desc_num; i++)
- hns3_free_buffer_detach(ring, i);
+ hns3_free_buffer_detach(ring, i, 0);
}
/* free desc along with its attached buffer */
@@ -2405,7 +2435,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
return 0;
out_with_buf:
- hns3_free_buffer(ring, cb);
+ hns3_free_buffer(ring, cb, 0);
out:
return ret;
}
@@ -2437,7 +2467,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
out_buffer_fail:
for (j = i - 1; j >= 0; j--)
- hns3_free_buffer_detach(ring, j);
+ hns3_free_buffer_detach(ring, j, 0);
return ret;
}
@@ -2464,71 +2494,62 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
DMA_FROM_DEVICE);
}
-static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
- int *bytes, int *pkts)
+static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ int *bytes, int *pkts, int budget)
{
+ /* Pairs with the ring->last_to_use update in hns3_tx_doorbell().
+ * smp_store_release() is not used in hns3_tx_doorbell() because
+ * the doorbell operation already has the needed barrier.
+ */
+ int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
struct hns3_desc_cb *desc_cb;
+ bool reclaimed = false;
+ struct hns3_desc *desc;
+
+ while (ltu != ntc) {
+ desc = &ring->desc[ntc];
+
+ if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
+ BIT(HNS3_TXD_VLD_B))
+ break;
- while (head != ntc) {
desc_cb = &ring->desc_cb[ntc];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
- hns3_free_buffer_detach(ring, ntc);
+ hns3_free_buffer_detach(ring, ntc, budget);
if (++ntc == ring->desc_num)
ntc = 0;
/* Issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ntc]);
+ reclaimed = true;
}
+ if (unlikely(!reclaimed))
+ return false;
+
/* This smp_store_release() pairs with smp_load_acquire() in
* ring_space called by hns3_nic_net_xmit.
*/
smp_store_release(&ring->next_to_clean, ntc);
+ return true;
}
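The last_to_use/next_to_clean handshake is a release/acquire pairing: the xmit path publishes last_to_use (the doorbell MMIO provides the write barrier), and the clean path publishes next_to_clean with smp_store_release() for ring_space() readers. A generic sketch of the pattern with illustrative names, not driver code:

	/* Producer: order ring writes before publishing the index */
	smp_store_release(&ring->head, new_head);

	/* Consumer: read index with acquire, then touch descriptors */
	head = smp_load_acquire(&ring->head);
	while (tail != head)
		tail = clean_one_desc(ring, tail);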
-static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
-{
- int u = ring->next_to_use;
- int c = ring->next_to_clean;
-
- if (unlikely(h > ring->desc_num))
- return 0;
-
- return u > c ? (h > c && h <= u) : (h > c || h <= u);
-}
-
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
- int head;
-
- head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
-
- if (is_ring_empty(ring) || head == ring->next_to_clean)
- return; /* no data to poll */
-
- rmb(); /* Make sure head is ready before touch any data */
-
- if (unlikely(!is_valid_clean_head(ring, head))) {
- hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
- ring->next_to_use, ring->next_to_clean);
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.io_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- return;
- }
bytes = 0;
pkts = 0;
- hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
+
+ if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
+ return;
ring->tqp_vector->tx_group.total_bytes += bytes;
ring->tqp_vector->tx_group.total_packets += pkts;
@@ -2600,8 +2621,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring_ptr_move_fw(ring, next_to_use);
}
- wmb(); /* Make all data has been write before submit */
- writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
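This hunk also drops an explicit barrier: writel() already orders prior normal memory writes before the MMIO store, so wmb() + writel_relaxed() collapses into a single call. Sketch of the equivalence (illustrative):

	/* Before: explicit barrier plus relaxed MMIO write */
	wmb();
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);

	/* After: writel() provides the same ordering guarantee */
	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);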
static bool hns3_page_is_reusable(struct page *page)
@@ -2610,6 +2630,11 @@ static bool hns3_page_is_reusable(struct page *page)
!page_is_pfmemalloc(page);
}
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+ return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2618,6 +2643,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
+ desc_cb->pagecnt_bias--;
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2625,20 +2651,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
* when page_offset rollback to zero, flag default unreuse
*/
if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
- (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+ (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
return;
+ }
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
- /* Bump ref count on page before it is given */
- get_page(desc_cb->priv);
- } else if (page_count(desc_cb->priv) == 1) {
+ } else if (hns3_can_reuse_page(desc_cb)) {
desc_cb->reuse_flag = 1;
desc_cb->page_offset = 0;
- get_page(desc_cb->priv);
+ } else if (desc_cb->pagecnt_bias) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ return;
+ }
+
+ if (unlikely(!desc_cb->pagecnt_bias)) {
+ page_ref_add(desc_cb->priv, USHRT_MAX);
+ desc_cb->pagecnt_bias = USHRT_MAX;
}
}
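The pagecnt_bias scheme mirrors the page_frag allocator trick: pre-take USHRT_MAX page references, hand one out per RX fragment by decrementing the bias, and compare page_count() against the bias to tell whether the stack still holds the page. A sketch of the invariant (illustrative, not driver code):

	/* After handing one fragment to the stack:
	 *   page_count(page) - pagecnt_bias == 1
	 * means only that fragment holds an extra reference, so the
	 * driver may keep reusing the page. When the bias runs out,
	 * references are topped up in bulk:
	 */
	if (unlikely(!cb->pagecnt_bias)) {
		page_ref_add(cb->priv, USHRT_MAX);
		cb->pagecnt_bias = USHRT_MAX;
	}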
@@ -2814,6 +2847,16 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
+{
+ ring->desc[ring->next_to_clean].rx.bd_base_info &=
+ cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->next_to_clean += 1;
+
+ if (unlikely(ring->next_to_clean == ring->desc_num))
+ ring->next_to_clean = 0;
+}
+
static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
@@ -2846,9 +2889,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (likely(hns3_page_is_reusable(desc_cb->priv)))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ __page_frag_cache_drain(desc_cb->priv,
+ desc_cb->pagecnt_bias);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
return 0;
}
u64_stats_update_begin(&ring->syncp);
@@ -2859,7 +2903,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
return 0;
}
@@ -2914,7 +2958,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
trace_hns3_rx_desc(ring);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
ring->pending_buf++;
} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
@@ -3056,32 +3100,32 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
prefetch(desc);
- length = le16_to_cpu(desc->rx.size);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!skb) {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- /* Check valid BD */
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
- return -ENXIO;
+ /* Check valid BD */
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
+ return -ENXIO;
+
+ dma_rmb();
+ length = le16_to_cpu(desc->rx.size);
- if (!skb) {
ring->va = desc_cb->buf + desc_cb->page_offset;
dma_sync_single_for_cpu(ring_to_dev(ring),
desc_cb->dma + desc_cb->page_offset,
hns3_buf_size(ring),
DMA_FROM_DEVICE);
- }
- /* Prefetch first cache line of first page
- * Idea is to cache few bytes of the header of the packet. Our L1 Cache
- * line size is 64B so need to prefetch twice to make it 128B. But in
- * actual we can have greater size of caches with 128B Level 1 cache
- * lines. In such a case, single fetch would suffice to cache in the
- * relevant part of the header.
- */
- net_prefetch(ring->va);
+ /* Prefetch the first cache line of the first page.
+ * The idea is to cache a few bytes of the packet header.
+ * Our L1 cache line size is 64B, so we need to prefetch twice
+ * to cover 128B. But caches with 128B L1 cache lines do exist,
+ * and in such a case a single fetch would suffice to cache the
+ * relevant part of the header.
+ */
+ net_prefetch(ring->va);
- if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
skb = ring->skb;
@@ -3121,19 +3165,11 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
int recv_pkts = 0;
- int recv_bds = 0;
- int err, num;
+ int err;
- num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- num -= unused_count;
unused_count -= ring->pending_buf;
- if (num <= 0)
- goto out;
-
- rmb(); /* Make sure num taken effect before the other data is touched */
-
- while (recv_pkts < budget && recv_bds < num) {
+ while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns3_nic_alloc_rx_buffers(ring, unused_count);
@@ -3151,7 +3187,6 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
recv_pkts++;
}
- recv_bds += ring->pending_buf;
unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
@@ -3320,7 +3355,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
hns3_for_each_ring(ring, tqp_vector->tx_group)
- hns3_clean_tx_ring(ring);
+ hns3_clean_tx_ring(ring, budget);
/* make sure rx ring budget not smaller than 1 */
if (tqp_vector->num_tqps > 1)
@@ -3479,7 +3514,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
- int ret = 0;
+ int ret;
int i;
hns3_nic_set_cpumask(priv);
@@ -3673,6 +3708,7 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
+ ring->last_to_use = 0;
}
static void hns3_queue_to_ring(struct hnae3_queue *tqp,
@@ -3752,6 +3788,7 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->last_to_use = 0;
ring->pending_buf = 0;
if (ring->skb) {
dev_kfree_skb_any(ring->skb);
@@ -4162,9 +4199,11 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
ring_ptr_move_fw(ring, next_to_clean);
}
+
+ ring->pending_buf = 0;
}
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
@@ -4267,6 +4306,7 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
hns3_clear_tx_ring(&priv->ring[i]);
priv->ring[i].next_to_clean = 0;
priv->ring[i].next_to_use = 0;
+ priv->ring[i].last_to_use = 0;
rx_ring = &priv->ring[i + h->kinfo.num_tqps];
hns3_init_ring_hw(rx_ring);
@@ -4563,6 +4603,8 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
.msg = "IMP CMDQ error" },
{ .type = HNAE3_IMP_RD_POISON_ERROR,
.msg = "IMP RD poison" },
+ { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
+ .msg = "ROCEE AXI RESP error" },
};
static void hns3_process_hw_error(struct hnae3_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 98ca6ead0b7d..8a6c8bae8fa6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -287,6 +287,7 @@ struct hns3_desc_cb {
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
+ u16 pagecnt_bias;
};
enum hns3_pkt_l3type {
@@ -343,14 +344,13 @@ enum hns3_pkt_ol4type {
};
struct ring_stats {
- u64 io_err_cnt;
u64 sw_err_cnt;
u64 seg_pkt_cnt;
union {
struct {
u64 tx_pkts;
u64 tx_bytes;
- u64 tx_err_cnt;
+ u64 tx_more;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
@@ -396,8 +396,10 @@ struct hns3_enet_ring {
* next_to_use
*/
int next_to_clean;
-
- u32 pull_len; /* head length for current packet */
+ union {
+ int last_to_use; /* last idx used by xmit */
+ u32 pull_len; /* memcpy len for current rx packet */
+ };
u32 frag_num;
void *va; /* first buffer address for current packet */
@@ -512,11 +514,6 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
-static inline int is_ring_empty(struct hns3_enet_ring *ring)
-{
- return ring->next_to_use == ring->next_to_clean;
-}
-
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
@@ -542,9 +539,6 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
- (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-
#define ring_to_dev(ring) ((ring)->dev)
#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
@@ -582,7 +576,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 2622e04e8eed..52c780475d3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -27,12 +27,11 @@ struct hns3_sfp_type {
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
- HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("more", tx_more),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
@@ -46,7 +45,6 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", rx_pkts),
@@ -232,7 +230,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
for (i = start_ringid; i <= end_ringid; i++) {
struct hns3_enet_ring *ring = &priv->ring[i];
- hns3_clean_tx_ring(ring);
+ hns3_clean_tx_ring(ring, 0);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 1d6c328bd9fb..81aa67b555ad 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -261,7 +261,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
bool complete = false;
u32 timeout = 0;
int handle = 0;
- int retval = 0;
+ int retval;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 463f29151ef0..30983f068db3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -491,6 +491,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
+#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index d6c3952aba04..f990f6915226 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
+#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 26f6f068b01d..16df050e72cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -8,7 +8,7 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .reg_type = "bios common",
.dfx_msg = &hclge_dbg_bios_common_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
@@ -115,14 +115,14 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_reg_type_info *reg_info,
+ const struct hclge_dbg_reg_type_info *reg_info,
const char *cmd_buf)
{
#define IDX_OFFSET 1
const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
- struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
- struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
struct hclge_desc *desc;
int entries_per_desc;
@@ -399,7 +399,7 @@ err_dcb_cmd_send:
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
- struct hclge_dbg_reg_type_info *reg_info;
+ const struct hclge_dbg_reg_type_info *reg_info;
bool has_dump = false;
int i;
@@ -428,17 +428,13 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
}
}
-static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
- char *title_buf, char *true_buf,
- char *false_buf)
+static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
{
if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
- title_buf, index, true_buf,
- hdev->tm_info.pg_info[0].tc_dwrr[index]);
+ dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
+ index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
else
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- false_buf);
+ dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
}
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
@@ -469,8 +465,7 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
ets_weight->weight_offset);
for (i = 0; i < HNAE3_MAX_TC; i++)
- hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
- "tc", "no sp mode", "sp mode");
+ hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
}
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
@@ -1170,6 +1165,14 @@ static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
hdev->serv_processed_cnt);
}
+static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
+ dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
+}
+
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
struct hclge_desc *desc_src, *desc_tmp;
@@ -1494,6 +1497,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
#define DUMP_REG "dump reg"
#define DUMP_TM_MAP "dump tm map"
#define DUMP_LOOPBACK "dump loopback"
+#define DUMP_INTERRUPT "dump intr"
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1541,6 +1545,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_mac_list(hdev,
&cmd_buf[sizeof("dump mc mac list")],
false);
+ } else if (strncmp(cmd_buf, DUMP_INTERRUPT,
+ strlen(DUMP_INTERRUPT)) == 0) {
+ hclge_dbg_dump_interrupt(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 38b79321c4c4..a9066e6ff697 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -81,13 +81,13 @@ struct hclge_dbg_dfx_message {
#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
struct hclge_dbg_reg_type_info {
const char *reg_type;
- struct hclge_dbg_dfx_message *dfx_msg;
+ const struct hclge_dbg_dfx_message *dfx_msg;
struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
-static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
{true, "DFX_MSIX_INFO_NIC_0"},
@@ -103,7 +103,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
{true, "SSU_ETS_PORT_STATUS"},
{true, "SSU_ETS_TCG_STATUS"},
@@ -175,7 +175,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{true, "prt_id"},
{true, "PACKET_TC_CURR_BUFFER_CNT_0"},
{true, "PACKET_TC_CURR_BUFFER_CNT_1"},
@@ -282,7 +282,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{true, "OQ_INDEX"},
{true, "QUEUE_CNT"},
{false, "Reserved"},
@@ -291,7 +291,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "prt_id"},
{true, "IGU_RX_ERR_PKT"},
{true, "IGU_RX_NO_SOF_PKT"},
@@ -356,7 +356,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "tc_queue_num"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -365,7 +365,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "BUF_WAIT_TIMEOUT_QID"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
{true, "FIFO_DFX_ST0"},
{true, "FIFO_DFX_ST1"},
@@ -381,7 +381,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{false, "Reserved"},
{true, "NCSI_EGU_TX_FIFO_STS"},
{true, "NCSI_PAUSE_STATUS"},
@@ -453,7 +453,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{true, "NCSI_MAC_RX_PAUSE_FRAMES"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
{true, "LGE_IGU_AFIFO_DFX_0"},
{true, "LGE_IGU_AFIFO_DFX_1"},
@@ -483,7 +483,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
{true, "DROP_FROM_PRT_PKT_CNT"},
{true, "DROP_FROM_HOST_PKT_CNT"},
@@ -639,7 +639,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -711,7 +711,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "q_num"},
{true, "RCB_CFG_RX_RING_TAIL"},
{true, "RCB_CFG_RX_RING_HEAD"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 50d5ef71756b..39b7f71b32e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1507,6 +1507,8 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
reset_type = HNAE3_FUNC_RESET;
+ hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR);
+
ret = hclge_log_rocee_axi_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 40d68a4ff83d..9bdad645a01c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -84,6 +84,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
/* required last entry */
{0, }
};
@@ -622,7 +623,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
- int i = 0;
+ int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
@@ -965,6 +966,9 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
case 5:
*speed = HCLGE_MAC_SPEED_100G;
break;
+ case 8:
+ *speed = HCLGE_MAC_SPEED_200G;
+ break;
default:
return -EINVAL;
}
@@ -1004,6 +1008,9 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
case HCLGE_MAC_SPEED_100G:
speed_bit = HCLGE_SUPPORT_100G_BIT;
break;
+ case HCLGE_MAC_SPEED_200G:
+ speed_bit = HCLGE_SUPPORT_200G_BIT;
+ break;
default:
return -EINVAL;
}
@@ -1014,7 +1021,7 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
@@ -1031,9 +1038,12 @@ static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
@@ -1050,9 +1060,13 @@ static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
@@ -1069,9 +1083,12 @@ static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
@@ -1091,6 +1108,9 @@ static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1115,6 +1135,7 @@ static void hclge_convert_setting_fec(struct hclge_mac *mac)
BIT(HNAE3_FEC_AUTO);
break;
case HCLGE_MAC_SPEED_100G:
+ case HCLGE_MAC_SPEED_200G:
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
break;
@@ -1125,7 +1146,7 @@ static void hclge_convert_setting_fec(struct hclge_mac *mac)
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -1145,7 +1166,7 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -1158,7 +1179,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
unsigned long *supported = hdev->hw.mac.supported;
@@ -1188,7 +1209,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
-static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
u8 media_type = hdev->hw.mac.media_type;
@@ -1200,8 +1221,11 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
-static u32 hclge_get_max_speed(u8 speed_ability)
+static u32 hclge_get_max_speed(u16 speed_ability)
{
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ return HCLGE_MAC_SPEED_200G;
+
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
return HCLGE_MAC_SPEED_100G;
@@ -1231,8 +1255,11 @@ static u32 hclge_get_max_speed(u8 speed_ability)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
+#define SPEED_ABILITY_EXT_SHIFT 8
+
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
+ u16 speed_ability_ext;
u64 mac_addr_tmp;
unsigned int i;
@@ -1281,6 +1308,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
@@ -2422,6 +2454,10 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, 5);
break;
+ case HCLGE_MAC_SPEED_200G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 8);
+ break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
@@ -3211,7 +3247,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->roce_client;
- int ret = 0;
+ int ret;
u16 i;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
@@ -11093,7 +11129,7 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
- u8 tmp_flags = 0;
+ u8 tmp_flags;
int ret;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 3975332a1a10..64e6afdb61b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -199,6 +199,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_200G_BIT BIT(8)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -238,7 +239,8 @@ enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
- HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */
};
enum HCLGE_MAC_DUPLEX {
@@ -349,7 +351,7 @@ struct hclge_cfg {
u8 mac_addr[ETH_ALEN];
u8 default_speed;
u32 numa_node_map;
- u8 speed_ability;
+ u16 speed_ability;
u16 umv_space;
};
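With HCLGE_SUPPORT_200G_BIT at BIT(8), speed_ability no longer fits in a u8; the hclge_parse_cfg() hunk above rebuilds it from two firmware bitfields, taking bits 7:0 of the parameter and shifting bits 15:10 into bits 15:8 of the result. A runnable userspace sketch of that merge (GENMASK and the field helper are re-implemented locally for illustration):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, s, v)	(((v) & (m)) >> (s))

#define CFG_SPEED_ABILITY_M	GENMASK(7, 0)
#define CFG_SPEED_ABILITY_S	0
#define CFG_SPEED_ABILITY_EXT_M	GENMASK(15, 10)
#define CFG_SPEED_ABILITY_EXT_S	10
#define SPEED_ABILITY_EXT_SHIFT	8

int main(void)
{
	uint32_t param = 0x0000fcffu;	/* example firmware word */
	uint16_t ability;

	ability  = FIELD_GET(CFG_SPEED_ABILITY_M, CFG_SPEED_ABILITY_S, param);
	ability |= FIELD_GET(CFG_SPEED_ABILITY_EXT_M, CFG_SPEED_ABILITY_EXT_S,
			     param) << SPEED_ABILITY_EXT_SHIFT;

	/* BIT(8) of the merged value is the 200G capability bit */
	printf("speed_ability = 0x%04x, 200G = %d\n",
	       ability, !!(ability & (1u << 8)));
	return 0;
}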
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 28db13253a5e..19742f9b838a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1355,7 +1355,7 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
- int ret = 0;
+ int ret;
int i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 20dd50dd765a..307b18c9f50f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -19,8 +19,9 @@ static struct hnae3_ae_algo ae_algovf;
static struct workqueue_struct *hclgevf_wq;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -171,7 +172,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
- int i = 0;
+ int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
@@ -2554,13 +2555,7 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
- int ret;
-
- ret = hclgevf_set_alive(handle, true);
- if (ret)
- return ret;
-
- return 0;
+ return hclgevf_set_alive(handle, true);
}
static void hclgevf_client_stop(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 6bb65ade1d77..c340d9acba80 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -1654,6 +1654,7 @@ static void hinic_diag_test(struct net_device *netdev,
}
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
&test_index);
@@ -1662,9 +1663,12 @@ static void hinic_diag_test(struct net_device *netdev,
data[test_index] = 1;
}
+ netif_tx_wake_all_queues(netdev);
+
err = hinic_port_link_state(nic_dev, &link_state);
if (!err && link_state == HINIC_LINK_STATE_UP)
netif_carrier_on(netdev);
+
}
static int hinic_set_phys_id(struct net_device *netdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index e0eb294779ec..5a6bbee819cd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -784,7 +784,7 @@ static void free_cmdq(struct hinic_cmdq *cmdq)
* init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
* @hwdev: the NIC HW device
* @cmdqs: cmdqs to write the ctxts for
- * &db_area: db_area for all the cmdqs
+ * @db_area: db_area for all the cmdqs
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 239685152f6e..0c74f6674634 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -437,6 +437,8 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
/**
* hinic_hwdev_ifup - Preparing the HW for passing IO
* @hwdev: the NIC HW device
+ * @sq_depth: the send queue depth
+ * @rq_depth: the receive queue depth
*
* Return 0 - Success, negative - Failure
**/
@@ -582,6 +584,7 @@ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
/**
* nic_mgmt_msg_handler - nic mgmt event handler
* @handle: private data for the handler
+ * @cmd: message command
* @buf_in: input buffer
* @in_size: input size
* @buf_out: output buffer
@@ -909,6 +912,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
+ * @devlink: the pointer to the hinic devlink
*
* Return initialized NIC HW device
*
@@ -1121,7 +1125,7 @@ int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
* @msix_index: msix_index
* @pending_limit: the maximum pending interrupt events (unit 8)
* @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
* @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
* @resend_timer: maximum wait for resending msix (unit coalesc period)
*
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index b30733f4a59e..19942fef99d9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -188,6 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
/**
* eq_update_ci - update the HW cons idx of event queue
* @eq: the event queue to update the cons idx for
+ * @arm_state: the arm bit value of eq's interrupt
**/
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
@@ -368,11 +369,11 @@ static void eq_irq_work(struct work_struct *work)
/**
* ceq_tasklet - the tasklet of the EQ that received the event
- * @ceq_data: the eq
+ * @t: the tasklet struct pointer
**/
-static void ceq_tasklet(unsigned long ceq_data)
+static void ceq_tasklet(struct tasklet_struct *t)
{
- struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;
+ struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
eq_irq_handler(ceq);
}
@@ -782,8 +783,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
INIT_WORK(&aeq_work->work, eq_irq_work);
} else if (type == HINIC_CEQ) {
- tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
- (unsigned long)eq);
+ tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
}
/* set the attributes of the msix entry */
@@ -794,12 +794,15 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
- if (type == HINIC_AEQ)
- err = request_irq(entry.vector, aeq_interrupt, 0,
- "hinic_aeq", eq);
- else if (type == HINIC_CEQ)
- err = request_irq(entry.vector, ceq_interrupt, 0,
- "hinic_ceq", eq);
+ if (type == HINIC_AEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
+ } else if (type == HINIC_CEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
+ }
if (err) {
dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index 43065fc70869..2f3222174fc7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -186,6 +186,7 @@ struct hinic_eq {
int num_elem_in_pg;
struct msix_entry msix_entry;
+ char irq_name[64];
dma_addr_t *dma_addr;
void **virt_addr;
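The new irq_name[64] field exists because request_irq() keeps the name pointer for as long as the handler is registered (it is what /proc/interrupts prints), so the buffer must outlive the call; a stack-local string would dangle. A minimal kernel-context sketch of the pattern, with illustrative struct and function names:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_eq {
	char irq_name[64];	/* owned by the device, not the stack */
	int q_id;
};

static int my_request_eq_irq(struct my_eq *eq, struct pci_dev *pdev,
			     unsigned int vector,
			     irqreturn_t (*handler)(int, void *))
{
	/* the formatted name lives in the device struct from here on */
	snprintf(eq->irq_name, sizeof(eq->irq_name), "my_eq%d@pci:%s",
		 eq->q_id, pci_name(pdev));
	return request_irq(vector, handler, 0, eq->irq_name, eq);
}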
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index bc8925c0c982..efbaed389440 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -230,6 +230,7 @@ static int wait_hwif_ready(struct hinic_hwif *hwif)
* @hwif: the HW interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
**/
static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
u32 attr2)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index c6ce5966284c..819fa13034c0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -47,8 +47,12 @@
#define MGMT_MSG_TIMEOUT 5000
+#define SET_FUNC_PORT_MBOX_TIMEOUT 30000
+
#define SET_FUNC_PORT_MGMT_TIMEOUT 25000
+#define UPDATE_FW_MGMT_TIMEOUT 20000
+
#define mgmt_to_pfhwdev(pf_mgmt) \
container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
@@ -234,6 +238,7 @@ static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
* @out_size: response length
* @direction: the direction of the original message
* @resp_msg_id: msg id to response for
+ * @timeout: timeout period to wait for the response
*
* Return 0 - Success, negative - Failure
**/
@@ -361,16 +366,22 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
return -EINVAL;
}
- if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
- timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+ if (HINIC_IS_VF(hwif)) {
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MBOX_TIMEOUT;
- if (HINIC_IS_VF(hwif))
return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in,
- in_size, buf_out, out_size, 0);
- else
+ in_size, buf_out, out_size, timeout);
+ } else {
+ if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE)
+ timeout = SET_FUNC_PORT_MGMT_TIMEOUT;
+ else if (cmd == HINIC_PORT_CMD_UPDATE_FW)
+ timeout = UPDATE_FW_MGMT_TIMEOUT;
+
return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
buf_out, out_size, MGMT_DIRECT_SEND,
MSG_NOT_RESP, timeout);
+ }
}
static void recv_mgmt_msg_work_handler(struct work_struct *work)
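The restructured hinic_msg_to_mgmt() above picks the timeout per command before dispatching: VF commands go over the mailbox, PF commands through the synchronous management channel. The selection itself is just a lookup; a self-contained sketch with illustrative command names (the millisecond values are the PF constants defined earlier in this hunk):

/* Sketch: per-command management timeout; names are illustrative. */
enum { CMD_SET_FUNC_STATE, CMD_UPDATE_FW, CMD_OTHER };

static unsigned int pf_cmd_timeout(int cmd)
{
	switch (cmd) {
	case CMD_SET_FUNC_STATE:
		return 25000;	/* SET_FUNC_PORT_MGMT_TIMEOUT */
	case CMD_UPDATE_FW:
		return 20000;	/* UPDATE_FW_MGMT_TIMEOUT */
	default:
		return 0;	/* 0 = fall back to the default timeout */
	}
}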
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 797c55a1d9c6..350225bbe0be 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -191,6 +191,24 @@ err_init_txq:
return err;
}
+static void enable_txqs_napi(struct hinic_dev *nic_dev)
+{
+ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ int i;
+
+ for (i = 0; i < num_txqs; i++)
+ napi_enable(&nic_dev->txqs[i].napi);
+}
+
+static void disable_txqs_napi(struct hinic_dev *nic_dev)
+{
+ int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
+ int i;
+
+ for (i = 0; i < num_txqs; i++)
+ napi_disable(&nic_dev->txqs[i].napi);
+}
+
/**
* free_txqs - Free the Logical Tx Queues of specific NIC device
* @nic_dev: the specific NIC device
@@ -440,6 +458,8 @@ int hinic_open(struct net_device *netdev)
goto err_create_txqs;
}
+ enable_txqs_napi(nic_dev);
+
err = create_rxqs(nic_dev);
if (err) {
netif_err(nic_dev, drv, netdev,
@@ -524,6 +544,7 @@ err_port_state:
}
err_create_rxqs:
+ disable_txqs_napi(nic_dev);
free_txqs(nic_dev);
err_create_txqs:
@@ -537,6 +558,9 @@ int hinic_close(struct net_device *netdev)
struct hinic_dev *nic_dev = netdev_priv(netdev);
unsigned int flags;
+ /* Disable txq NAPI first to avoid re-waking the txq in free_tx_poll */
+ disable_txqs_napi(nic_dev);
+
down(&nic_dev->mgmt_lock);
flags = nic_dev->flags;
@@ -929,11 +953,16 @@ static void netdev_features_init(struct net_device *netdev)
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->vlan_features = netdev->hw_features;
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL;
}
static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
@@ -961,7 +990,7 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @handle: nic device for the handler
* @buf_in: input buffer
* @in_size: input size
- * @buf_in: output buffer
+ * @buf_out: output buffer
* @out_size: returned output size
*
* Return 0 - Success, negative - Failure
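The open/close rework above separates NAPI registration from enabling: netif_napi_add() happens at queue setup, napi_enable() once the queues exist, and napi_disable() runs before the TX resources go away so free_tx_poll() cannot re-wake a dead queue. A condensed kernel-context sketch of that split lifecycle (struct and function names illustrative):

#include <linux/netdevice.h>

struct my_txq {
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget);

static void my_setup_txq(struct net_device *netdev, struct my_txq *txq,
			 int weight)
{
	/* register only; the queue stays dormant until open() */
	netif_napi_add(netdev, &txq->napi, my_poll, weight);
}

static void my_open_txq(struct my_txq *txq)
{
	napi_enable(&txq->napi);	/* polling may run from here on */
}

static void my_close_txq(struct my_txq *txq)
{
	napi_disable(&txq->napi);	/* waits out an in-flight poll */
	netif_napi_del(&txq->napi);
}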
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 5bee951fe9d4..070a7cc6392e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -543,18 +543,25 @@ static int rx_request_irq(struct hinic_rxq *rxq)
if (err) {
netif_err(nic_dev, drv, rxq->netdev,
"Failed to set RX interrupt coalescing attribute\n");
- rx_del_napi(rxq);
- return err;
+ goto err_req_irq;
}
err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
- if (err) {
- rx_del_napi(rxq);
- return err;
- }
+ if (err)
+ goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ if (err)
+ goto err_irq_affinity;
+
+ return 0;
+
+err_irq_affinity:
+ free_irq(rq->irq, rxq);
+err_req_irq:
+ rx_del_napi(rxq);
+ return err;
}
static void rx_free_irq(struct hinic_rxq *rxq)
@@ -588,7 +595,7 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rxq_stats_init(rxq);
rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
- "hinic_rxq%d", qp->q_id);
+ "%s_rxq%d", netdev->name, qp->q_id);
if (!rxq->irq_name)
return -ENOMEM;
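The rx_request_irq() rework above replaces repeated inline cleanup with a single unwind path: each later failure jumps to a label that undoes everything acquired before it, in reverse order. The shape of the pattern as a self-contained, runnable sketch (the acquire/release helpers stand in for the napi/irq/affinity steps):

#include <stdio.h>

static int  acquire_a(void) { return 0; }	/* 0 on success */
static int  acquire_b(void) { return 0; }
static int  acquire_c(void) { return -1; }	/* pretend this one fails */
static void release_a(void) { puts("release a"); }
static void release_b(void) { puts("release b"); }

static int setup(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_b;

	err = acquire_c();
	if (err)
		goto err_c;

	return 0;

err_c:
	release_b();	/* undo in reverse acquisition order */
err_b:
	release_a();
	return err;
}

int main(void) { return setup() ? 1 : 0; }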
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index a97498ee6914..8da7d46363b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -357,6 +357,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
enum hinic_l4_offload_type l4_offload;
u32 offset, l4_len, network_hdr_len;
enum hinic_l3_offload_type l3_type;
+ u32 tunnel_type = NOT_TUNNEL;
union hinic_l3 ip;
union hinic_l4 l4;
u8 l4_proto;
@@ -367,27 +368,55 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 l4_tunnel_len;
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
ip.hdr = skb_network_header(skb);
- if (ip.v4->version == 4)
+ if (ip.v4->version == 4) {
l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ip.v4->version == 6)
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ unsigned char *exthdr;
+ __be16 frag_off;
l3_type = IPV6_PKT;
- else
+ tunnel_type = TUNNEL_UDP_CSUM;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
l3_type = L3TYPE_UNKNOWN;
+ l4_proto = IPPROTO_RAW;
+ }
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
- l4_tunnel_len = skb_inner_network_offset(skb) -
- skb_transport_offset(skb);
-
- hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
- l4_tunnel_len);
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ tunnel_type = NOT_TUNNEL;
+ l4_tunnel_len = 0;
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ break;
+ default:
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
- ip.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- network_hdr_len = skb_inner_network_header_len(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
@@ -717,8 +746,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);
__netif_tx_lock(netdev_txq, smp_processor_id());
-
- netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+ if (!netif_testing(nic_dev->netdev))
+ netif_wake_subqueue(nic_dev->netdev, qp->q_id);
__netif_tx_unlock(netdev_txq);
@@ -745,18 +774,6 @@ static int free_tx_poll(struct napi_struct *napi, int budget)
return budget;
}
-static void tx_napi_add(struct hinic_txq *txq, int weight)
-{
- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight);
- napi_enable(&txq->napi);
-}
-
-static void tx_napi_del(struct hinic_txq *txq)
-{
- napi_disable(&txq->napi);
- netif_napi_del(&txq->napi);
-}
-
static irqreturn_t tx_irq(int irq, void *data)
{
struct hinic_txq *txq = data;
@@ -790,7 +807,7 @@ static int tx_request_irq(struct hinic_txq *txq)
qp = container_of(sq, struct hinic_qp, sq);
- tx_napi_add(txq, nic_dev->tx_weight);
+ netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
@@ -807,14 +824,14 @@ static int tx_request_irq(struct hinic_txq *txq)
if (err) {
netif_err(nic_dev, drv, txq->netdev,
"Failed to set TX interrupt coalescing attribute\n");
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
return err;
}
err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
if (err) {
dev_err(&pdev->dev, "Failed to request Tx irq\n");
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
return err;
}
@@ -826,7 +843,7 @@ static void tx_free_irq(struct hinic_txq *txq)
struct hinic_sq *sq = txq->sq;
free_irq(sq->irq, txq);
- tx_napi_del(txq);
+ netif_napi_del(&txq->napi);
}
/**
@@ -865,14 +882,14 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
goto err_alloc_free_sges;
}
- irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
+ irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
if (!txq->irq_name) {
err = -ENOMEM;
goto err_alloc_irqname;
}
- sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);
+ sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
CI_UPDATE_NO_COALESC);
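The tunnel-offload rework in hinic_tx.c first resolves the outer L4 protocol, walking IPv6 extension headers when present, and only then chooses between UDP-tunnel offload, IP-in-IP passthrough, or the skb_checksum_help() software fallback. A condensed kernel-context sketch of the protocol-resolution step (helper name illustrative):

#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

/* Sketch: find the outer L4 protocol of an encapsulated IPv6 skb. */
static u8 outer_l4_proto(struct sk_buff *skb)
{
	struct ipv6hdr *ip6 = ipv6_hdr(skb);
	unsigned char *exthdr = (unsigned char *)(ip6 + 1);
	__be16 frag_off;
	u8 proto = ip6->nexthdr;

	if (skb_transport_header(skb) != exthdr)
		/* extension headers present: skip to the real L4 header */
		ipv6_skip_exthdr(skb, exthdr - skb->data, &proto, &frag_off);

	return proto;
}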
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3153d62cc73e..c2e740475786 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1212,9 +1212,9 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
}
}
-static void ehea_neq_tasklet(unsigned long data)
+static void ehea_neq_tasklet(struct tasklet_struct *t)
{
- struct ehea_adapter *adapter = (struct ehea_adapter *)data;
+ struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
struct ehea_eqe *eqe;
u64 event_mask;
@@ -3417,8 +3417,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
goto out_free_ad;
}
- tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
- (unsigned long)adapter);
+ tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);
ret = ehea_create_device_sysfs(dev);
if (ret)
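The tasklet conversions in this series (hinic, ehea, ibmvnic, jme) all follow the same recipe: tasklet_setup() registers a callback that receives the tasklet itself, and from_tasklet() — a container_of() wrapper — recovers the enclosing object, removing the old unsigned long data cast. A minimal kernel-context sketch (struct and field names illustrative):

#include <linux/interrupt.h>

struct my_adapter {
	struct tasklet_struct neq_tasklet;
	/* ... */
};

static void my_neq_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, member) == container_of(t, typeof(*var), member) */
	struct my_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);

	/* process events for adapter ... */
}

static void my_init(struct my_adapter *adapter)
{
	tasklet_setup(&adapter->neq_tasklet, my_neq_tasklet);
}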
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6b619c190239..a151ff37fda2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2019,16 +2019,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
} else {
rc = reset_tx_pools(adapter);
- if (rc)
+ if (rc) {
netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
rc);
goto out;
+ }
rc = reset_rx_pools(adapter);
- if (rc)
+ if (rc) {
netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
rc);
goto out;
+ }
}
ibmvnic_disable_irqs(adapter);
}
@@ -4842,9 +4844,9 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
return IRQ_HANDLED;
}
-static void ibmvnic_tasklet(void *data)
+static void ibmvnic_tasklet(struct tasklet_struct *t)
{
- struct ibmvnic_adapter *adapter = data;
+ struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
@@ -4979,8 +4981,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
retrc = 0;
- tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
- (unsigned long)adapter);
+ tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
@@ -5048,6 +5049,12 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
return adapter->init_done_rc;
}
+ if (adapter->from_passive_init) {
+ adapter->state = VNIC_OPEN;
+ adapter->from_passive_init = false;
+ return -1;
+ }
+
if (reset &&
test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4e7a0810eaeb..f1dbd7b8ee32 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -129,7 +129,6 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
*/
static void e1000_phy_init_script(struct e1000_hw *hw)
{
- u32 ret_val;
u16 phy_saved_data;
if (hw->phy_init_script) {
@@ -138,7 +137,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
/* Save off the current value of register 0x2F5B to be restored
* at the end of this routine.
*/
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a7e212d1caa2..ada0e93c38f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -90,7 +90,7 @@
#define I40E_OEM_RELEASE_MASK 0x0000ffff
#define I40E_RX_DESC(R, i) \
- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d3ad2e3aa838..d7c13ca9be7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -604,10 +604,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
- rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.hdr_addr);
}
}
} else if (cnt == 3) {
@@ -625,10 +624,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
}
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 05c6d3ea11e6..07207e21874f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3321,8 +3321,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- /* use 32 byte descriptors */
- rx_ctx.dsize = 1;
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
/* descriptor type is always zero
* rx_ctx.dtype = 0;
@@ -11186,11 +11186,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
- * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -11223,7 +11222,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int err, v_idx, num_q_vectors, current_cpu;
+ int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -11233,15 +11232,10 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- current_cpu = cpumask_first(cpu_online_mask);
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
- if (unlikely(current_cpu >= nr_cpu_ids))
- current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 424f02077e2e..983f8b98b275 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
i40e_rx_template,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 91ab824926b9..1606ba5318f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -533,11 +533,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
- struct i40e_32b_rx_wb_qw0 *qw0;
+ struct i40e_16b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
@@ -1418,7 +1418,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1953,7 +1953,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- prefetchw(rx_buffer->page);
+ prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -2296,6 +2296,19 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
+ * i40e_inc_ntc: Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
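i40e_inc_ntc() is duplicated into the .c files above as part of dropping the shared inline; the logic is the canonical ring-index advance: increment, wrap to zero at ring->count, and (in this copy) prefetch the descriptor that will be read next. A runnable sketch of the wrap step itself:

#include <assert.h>
#include <stdint.h>

/* Advance a ring index with wrap-around; count need not be a power of two. */
static uint32_t ring_advance(uint32_t idx, uint32_t count)
{
	uint32_t next = idx + 1;

	return (next < count) ? next : 0;
}

int main(void)
{
	assert(ring_advance(0, 512) == 1);
	assert(ring_advance(511, 512) == 0);	/* wraps at count */
	assert(ring_advance(2, 3) == 0);	/* non-power-of-two also wraps */
	return 0;
}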
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 703b644fd71f..66c2b92c0d10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -110,7 +110,7 @@ enum i40e_dyn_idx_t {
*/
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
+#define i40e_rx_desc i40e_16byte_rx_desc
#define I40E_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 667c4dc4b39f..1397dd3c1c57 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -99,19 +99,6 @@ static inline bool i40e_rx_is_programming_status(u64 qword1)
return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 52410d609ba1..97d29df65f9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -628,7 +628,7 @@ union i40e_16byte_rx_desc {
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
- struct {
+ struct i40e_16b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -647,6 +647,9 @@ union i40e_16byte_rx_desc {
__le64 status_error_len;
} qword1;
} wb; /* writeback */
+ struct {
+ u64 qword[2];
+ } raw;
};
union i40e_32byte_rx_desc {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8e133d6545bd..47bfb2e95e2d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1115,7 +1115,7 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
- int num_vlans = 0, bkt;
+ u16 num_vlans = 0, bkt;
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
@@ -1134,8 +1134,8 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
*
* Called to get number of VLANs and VLAN list present in mac_filter_hash.
**/
-static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
- s16 **vlan_list)
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
+ s16 **vlan_list)
{
struct i40e_mac_filter *f;
int i = 0;
@@ -1169,11 +1169,11 @@ err:
**/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
- bool unicast_enable, s16 *vl, int num_vlans)
+ bool unicast_enable, s16 *vl, u16 num_vlans)
{
+ i40e_status aq_ret, aq_tmp = 0;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
int i;
/* No VLAN to set promisc on, set on VSI */
@@ -1222,6 +1222,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
}
aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
@@ -1235,8 +1238,15 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
vf->vf_id,
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw, aq_err));
+
+ if (!aq_tmp)
+ aq_tmp = aq_ret;
}
}
+
+ if (aq_tmp)
+ aq_ret = aq_tmp;
+
return aq_ret;
}
@@ -1258,7 +1268,7 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
i40e_status aq_ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi;
- int num_vlans;
+ u16 num_vlans;
s16 *vl;
vsi = i40e_find_vsi_from_id(pf, vsi_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 2a1153d8957b..6acede0acdca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -258,6 +258,18 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
}
/**
+ * i40e_inc_ntc: Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+}
+
+/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -269,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
- bool failure = false;
struct sk_buff *skb;
+ bool failure;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -278,13 +290,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- !i40e_alloc_rx_buffers_zc(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
-
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -359,6 +364,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE)
+ failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
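In the zero-copy hunk above, the buffer refill is hoisted out of the per-descriptor loop: allocation now happens at most once per poll, after the batch is consumed, instead of every 32 descriptors mid-loop — which is also why failure becomes a plain assignment rather than an OR-accumulation. A self-contained sketch of the resulting control flow, with illustrative names:

#include <stdbool.h>

#define REFILL_THRESHOLD 32

static bool refill_buffers(int n) { (void)n; return true; /* stub */ }

static int clean_ring_zc(int budget)
{
	int cleaned = 0;
	bool failure = false;

	while (cleaned < budget) {
		/* consume one descriptor ... */
		cleaned++;
	}

	/* one batched refill per poll, after the work loop */
	if (cleaned >= REFILL_THRESHOLD)
		failure = !refill_buffers(cleaned);

	return failure ? budget : cleaned;
}

int main(void) { return clean_ring_zc(64) == 64 ? 0 : 1; }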
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 111d6bfe4222..67d1190cb164 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -401,6 +401,7 @@ void ice_devlink_destroy_port(struct ice_pf *pf)
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -413,6 +414,7 @@ void ice_devlink_destroy_port(struct ice_pf *pf)
* error code on failure.
*/
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
@@ -456,6 +458,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -468,6 +471,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
*/
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index deaefe00c9c0..292d87b996ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -608,14 +608,9 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
- int err;
/* Finally, notify firmware to activate the written NVM banks */
- err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
- if (err)
- return err;
-
- return 0;
+ return ice_switch_flash_banks(pf, priv->activate_flags, extack);
}
static const struct pldmfw_ops ice_fwu_ops = {
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 3070dfdb7eb4..2d566f3c827b 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -299,18 +299,14 @@ extern char igc_driver_name[];
#define IGC_RX_HDR_LEN IGC_RXBUFFER_256
/* Transmit and receive latency (for PTP timestamps) */
-/* FIXME: These values were estimated using the ones that i225 has as
- * basis, they seem to provide good numbers with ptp4l/phc2sys, but we
- * need to confirm them.
- */
-#define IGC_I225_TX_LATENCY_10 9542
-#define IGC_I225_TX_LATENCY_100 1024
-#define IGC_I225_TX_LATENCY_1000 178
-#define IGC_I225_TX_LATENCY_2500 64
-#define IGC_I225_RX_LATENCY_10 20662
-#define IGC_I225_RX_LATENCY_100 2213
-#define IGC_I225_RX_LATENCY_1000 448
-#define IGC_I225_RX_LATENCY_2500 160
+#define IGC_I225_TX_LATENCY_10 240
+#define IGC_I225_TX_LATENCY_100 58
+#define IGC_I225_TX_LATENCY_1000 80
+#define IGC_I225_TX_LATENCY_2500 1325
+#define IGC_I225_RX_LATENCY_10 6450
+#define IGC_I225_RX_LATENCY_100 185
+#define IGC_I225_RX_LATENCY_1000 300
+#define IGC_I225_RX_LATENCY_2500 1485
/* RX and TX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 36c999250fcc..6a9b5102aa55 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -364,6 +364,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
struct sk_buff *skb = adapter->ptp_tx_skb;
struct skb_shared_hwtstamps shhwtstamps;
struct igc_hw *hw = &adapter->hw;
+ int adjust = 0;
u64 regval;
if (WARN_ON_ONCE(!skb))
@@ -373,6 +374,24 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGC_I225_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGC_I225_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGC_I225_TX_LATENCY_1000;
+ break;
+ case SPEED_2500:
+ adjust = IGC_I225_TX_LATENCY_2500;
+ break;
+ }
+
+ shhwtstamps.hwtstamp =
+ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
+
/* Clear the lock early before calling skb_tstamp_tx so that
* applications are not woken up before the lock bit is clear. We use
* a copy of the skb pointer to ensure other threads can't change it
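The igc change above compensates the raw MAC timestamp for link-speed-dependent TX latency before handing it to applications; ktime_add_ns() applies the per-speed constant. A kernel-context sketch of the adjustment, reusing the TX values from the igc.h hunk earlier (helper name illustrative):

#include <linux/ktime.h>

/* Sketch: add a per-link-speed TX latency (ns) to a hardware timestamp. */
static ktime_t adjust_tx_tstamp(ktime_t hw, int link_speed_mbps)
{
	int adjust;

	switch (link_speed_mbps) {
	case 10:   adjust = 240;  break;
	case 100:  adjust = 58;   break;
	case 1000: adjust = 80;   break;
	case 2500: adjust = 1325; break;
	default:   adjust = 0;    break;
	}

	return ktime_add_ns(hw, adjust);
}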
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 71ec908266a6..a280aa34ca1d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -531,6 +531,16 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
return err;
}
+static void ixgbe_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+
+ stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
+ stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
+}
+
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -3546,6 +3556,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam,
.get_msglevel = ixgbe_get_msglevel,
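ixgbe's new callback plugs into the generic ethtool pause-statistics plumbing: the core pre-fills struct ethtool_pause_stats with ETHTOOL_STAT_NOT_SET sentinels and reports only the fields a driver overwrites. A minimal sketch of the same pattern for a hypothetical driver (the myhw names are placeholders):

static void myhw_get_pause_stats(struct net_device *netdev,
				 struct ethtool_pause_stats *stats)
{
	struct myhw_priv *priv = netdev_priv(netdev);

	/* Untouched fields stay ETHTOOL_STAT_NOT_SET and are omitted
	 * from the netlink reply.
	 */
	stats->tx_pause_frames = priv->hw_counters.tx_pause;
	stats->rx_pause_frames = priv->hw_counters.rx_pause;
}

Userspace can then read the counters via the ethtool netlink pause request (with recent ethtool, something like 'ethtool --include-statistics -a <dev>'; exact CLI support depends on the ethtool version).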
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index ddc757680089..e9efe074edc1 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1187,9 +1187,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
}
static void
-jme_pcc_tasklet(unsigned long arg)
+jme_pcc_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
struct net_device *netdev = jme->dev;
if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1265,10 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
jwrite32f(jme, JME_APMC, apmc);
}
-static void
-jme_link_change_tasklet(unsigned long arg)
+static void jme_link_change_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
struct net_device *netdev = jme->dev;
int rc;
@@ -1345,9 +1344,9 @@ out:
}
static void
-jme_rx_clean_tasklet(unsigned long arg)
+jme_rx_clean_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
struct dynpcc_info *dpi = &(jme->dpi);
jme_process_receive(jme, jme->rx_ring_size);
@@ -1380,9 +1379,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
}
static void
-jme_rx_empty_tasklet(unsigned long arg)
+jme_rx_empty_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
if (unlikely(atomic_read(&jme->link_changing) != 1))
return;
@@ -1392,7 +1391,7 @@ jme_rx_empty_tasklet(unsigned long arg)
netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
- jme_rx_clean_tasklet(arg);
+ jme_rx_clean_tasklet(&jme->rxclean_task);
while (atomic_read(&jme->rx_empty) > 0) {
atomic_dec(&jme->rx_empty);
@@ -1416,10 +1415,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
}
-static void
-jme_tx_clean_tasklet(unsigned long arg)
+static void jme_tx_clean_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1834,14 +1832,10 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
- (unsigned long) jme);
+ tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
+ tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
+ tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
+ tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
rc = jme_request_irq(jme);
if (rc)
@@ -3040,9 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->tx_cleaning, 1);
atomic_set(&jme->rx_empty, 1);
- tasklet_init(&jme->pcc_task,
- jme_pcc_tasklet,
- (unsigned long) jme);
+ tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
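The jme changes are part of the tree-wide tasklet API conversion: tasklet_setup() registers the tasklet itself as the callback argument, and from_tasklet() (a container_of() wrapper) recovers the enclosing structure, eliminating the unsigned long casts. The pattern in isolation, with mydrv as a stand-in:

struct mydrv {
	struct tasklet_struct task;
	/* ... driver state ... */
};

static void mydrv_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, field) is container_of() in disguise */
	struct mydrv *drv = from_tasklet(drv, t, task);

	/* ... bottom-half work on drv ... */
}

static void mydrv_init(struct mydrv *drv)
{
	tasklet_setup(&drv->task, mydrv_tasklet);
}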
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 1645e4e7ebdb..635ff3a5dcfb 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
}
if (rx < budget) {
- napi_complete(&ch->napi);
- ltq_dma_enable_irq(&ch->dma);
+ if (napi_complete_done(&ch->napi, rx))
+ ltq_dma_enable_irq(&ch->dma);
}
return rx;
@@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
net_dev->stats.tx_bytes += bytes;
netdev_completed_queue(ch->priv->net_dev, pkts, bytes);
+ if (netif_queue_stopped(net_dev))
+ netif_wake_queue(net_dev);
+
if (pkts < budget) {
- napi_complete(&ch->napi);
- ltq_dma_enable_irq(&ch->dma);
+ if (napi_complete_done(&ch->napi, pkts))
+ ltq_dma_enable_irq(&ch->dma);
}
return pkts;
@@ -342,10 +345,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
struct xrx200_chan *ch = ptr;
- ltq_dma_disable_irq(&ch->dma);
- ltq_dma_ack_irq(&ch->dma);
+ if (napi_schedule_prep(&ch->napi)) {
+ __napi_schedule(&ch->napi);
+ ltq_dma_disable_irq(&ch->dma);
+ }
- napi_schedule(&ch->napi);
+ ltq_dma_ack_irq(&ch->dma);
return IRQ_HANDLED;
}
@@ -499,7 +504,7 @@ static int xrx200_probe(struct platform_device *pdev)
/* setup NAPI */
netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
- netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
platform_set_drvdata(pdev, priv);
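Three coordinated fixes in xrx200: device interrupts are re-enabled only when napi_complete_done() confirms polling really finished (it returns false if NAPI was rescheduled in the meantime), the IRQ handler disables device interrupts only when napi_schedule_prep() successfully claims the NAPI instance, and a stopped TX queue is woken once housekeeping has freed descriptors. The poll/IRQ contract, sketched with hypothetical mydev_* helpers:

static irqreturn_t mydev_irq(int irq, void *ptr)
{
	struct mychan *ch = ptr;

	/* Disable HW interrupts only if we really scheduled NAPI;
	 * otherwise a running poller already owns the instance.
	 */
	if (napi_schedule_prep(&ch->napi)) {
		__napi_schedule(&ch->napi);
		mydev_irq_disable(ch);
	}
	mydev_irq_ack(ch);

	return IRQ_HANDLED;
}

static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mychan *ch = container_of(napi, struct mychan, napi);
	int done = mydev_process(ch, budget);

	if (done < budget && napi_complete_done(napi, done))
		mydev_irq_enable(ch);	/* safe: nobody rescheduled us */

	return done;
}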
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a599e44a36a8..41815b609569 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -178,5 +178,6 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 89dea7284d5b..9f88fe822555 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeontx2/
+obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f75e05e899bb..14df3aec285d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -751,13 +751,12 @@ static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
int i;
- u32 dummy;
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
- dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
- dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
- dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+ mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -2028,11 +2027,11 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
- page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
- sync_len, napi);
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), napi);
+ page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
+ sync_len, napi);
}
static int
@@ -2305,11 +2304,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i, num_frags = sinfo->nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
struct sk_buff *skb;
- memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
-
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2321,12 +2317,12 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_rx_csum(pp, desc_status, skb);
for (i = 0; i < num_frags; i++) {
- struct page *page = skb_frag_page(&frags[i]);
+ skb_frag_t *frag = &sinfo->frags[i];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, skb_frag_off(&frags[i]),
- skb_frag_size(&frags[i]), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, page);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), PAGE_SIZE);
+ page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
}
return skb;
@@ -2381,8 +2377,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
&size, page);
} else {
- if (unlikely(!xdp_buf.data_hard_start))
+ if (unlikely(!xdp_buf.data_hard_start)) {
+ rx_desc->buf_phys_addr = 0;
+ page_pool_put_full_page(rxq->page_pool, page,
+ true);
continue;
+ }
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
&size, page);
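The reordering in mvneta_xdp_put_buff() is a use-after-free fix: the skb_shared_info that describes the fragments lives inside the head page of the XDP buffer, so the fragments must be returned to the page pool before the head page, not after. The same observation lets mvneta_swbm_build_skb() drop its frags[] snapshot and walk sinfo->frags directly. The safe ordering, restated as a sketch:

static void put_xdp_buff(struct page_pool *pool, struct xdp_buff *xdp,
			 int sync_len, bool napi)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	int i;

	/* sinfo lives in the head page: release the fragments first... */
	for (i = 0; i < sinfo->nr_frags; i++)
		page_pool_put_full_page(pool,
					skb_frag_page(&sinfo->frags[i]), napi);

	/* ...then the head page that holds sinfo itself. */
	page_pool_put_page(pool, virt_to_head_page(xdp->data), sync_len, napi);
}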
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 87b1c9cfdc77..d11d33cf3443 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3701,6 +3701,8 @@ static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
if (!hdr)
return false;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
MVPP22_PTP_ACTION_CAPTURE;
queue = &port->tx_hwtstamp_queue[0];
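Setting SKBTX_IN_PROGRESS here follows the kernel's TX timestamping contract: the flag records that a hardware timestamp is pending, so the stack waits for skb_tstamp_tx() at completion time instead of falling back to a software stamp. The two halves of the contract, sketched with hypothetical mydev_* functions:

static netdev_tx_t mydev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* transmit path: mark the skb before handing it to the MAC */
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* ... queue descriptor, arm hardware timestamp capture ... */
	return NETDEV_TX_OK;
}

static void mydev_tx_tstamp_done(struct sk_buff *skb, u64 ns)
{
	/* completion path: deliver the stamp the hardware captured */
	struct skb_shared_hwtstamps hwts = {};

	hwts.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &hwts);	/* queues it on the socket error queue */
}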
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 0bc2410c8949..2f7a861d0c7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -3,9 +3,10 @@
# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
#
+ccflags-y += -I$(src)
obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
-octeontx2_mbox-y := mbox.o
+octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 387e33fa417a..4b4cf7dac77f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -14,6 +14,7 @@
#include "rvu_reg.h"
#include "mbox.h"
+#include "rvu_trace.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -199,6 +200,9 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
*/
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+
spin_unlock(&mdev->mbox_lock);
/* The interrupt should be fired after num_msgs is written
@@ -295,10 +299,15 @@ int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
struct mbox_msghdr *preq = mdev->mbase + ireq;
struct mbox_msghdr *prsp = mdev->mbase + irsp;
- if (preq->id != prsp->id)
+ if (preq->id != prsp->id) {
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
+ }
if (prsp->rc) {
rc = prsp->rc;
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4aaef0a2b51c..aa3bda3f34be 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -625,6 +625,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index c3ef73ae782c..e1f918960730 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -20,6 +20,8 @@
#include "rvu_reg.h"
#include "ptp.h"
+#include "rvu_trace.h"
+
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -1549,6 +1551,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
+ trace_otx2_msg_process(mbox->pdev, _id, err); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -1881,6 +1884,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
/* Sync with mbox memory region */
rmb();
@@ -1898,6 +1903,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index fe3389c144b5..fa9152ff5e2a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -15,6 +15,7 @@
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
+#include "rvu_trace.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -34,6 +35,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
return req; \
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 08181fc5f5d4..4bdc4baa3c59 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2509,6 +2509,14 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_VLAN:
+ field->lid = NPC_LID_LB;
+ field->hdr_offset = 2; /* Skip TPID (2 bytes) */
+ field->bytesm1 = 1; /* 2 bytes (actually 12 bits) */
+ field->ltype_match = NPC_LT_LB_CTAG;
+ field->ltype_mask = 0xF;
+ field->fn_mask = 1; /* Mask out the first nibble */
+ break;
}
field->ena = 1;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
new file mode 100644
index 000000000000..56f90cf9c4c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "rvu_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
new file mode 100644
index 000000000000..e6609068e81b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvu
+
+#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVU_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+TRACE_EVENT(otx2_msg_alloc,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
+ TP_ARGS(pdev, id, size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(u64, size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->size = size;
+ ),
+ TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
+ __entry->id, __entry->size)
+);
+
+TRACE_EVENT(otx2_msg_send,
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
+ TP_ARGS(pdev, num_msgs, msg_size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, num_msgs)
+ __field(u64, msg_size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->num_msgs = num_msgs;
+ __entry->msg_size = msg_size;
+ ),
+ TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
+ __entry->num_msgs, __entry->msg_size)
+);
+
+TRACE_EVENT(otx2_msg_check,
+ TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc),
+ TP_ARGS(pdev, reqid, rspid, rc),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, reqid)
+ __field(u16, rspid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->reqid = reqid;
+ __entry->rspid = rspid;
+ __entry->rc = rc;
+ ),
+ TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n",
+ __get_str(dev), __entry->reqid,
+ __entry->rspid, __entry->rc)
+);
+
+TRACE_EVENT(otx2_msg_interrupt,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr),
+ TP_ARGS(pdev, msg, intr),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, intr)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __assign_str(str, msg)
+ __entry->intr = intr;
+ ),
+ TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
+ __get_str(str), __entry->intr)
+);
+
+TRACE_EVENT(otx2_msg_process,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
+ TP_ARGS(pdev, id, err),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->err = err;
+ ),
+ TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
+ __entry->id, __entry->err)
+);
+
+#endif /* __RVU_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE rvu_trace
+
+#include <trace/define_trace.h>
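rvu_trace.h follows the standard self-including tracepoint recipe: ordinary includers see only the trace_otx2_msg_*() static-inline hooks, while the one translation unit that defines CREATE_TRACE_POINTS (rvu_trace.c above) makes define_trace.h re-read this header, via TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE, and emit the real event code. A call site then costs a patched-out static branch until the event is enabled at runtime. Sketch of a caller (the demo function is hypothetical):

#include "rvu_trace.h"

static void demo_msg_alloc(struct pci_dev *pdev, u16 id, u64 size)
{
	/* NOP'd static branch unless enabled, e.g. through
	 * /sys/kernel/tracing/events/rvu/otx2_msg_alloc/enable
	 */
	trace_otx2_msg_alloc(pdev, id, size);
}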
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 820fc660de66..d2581090f9a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -355,7 +355,7 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index ac47762cce9b..d6253f2a414d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -20,6 +20,7 @@
#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
@@ -523,6 +524,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
return req; \
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 0341d9694e8b..662fb80dbb9d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -428,6 +428,8 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
/* Minimum is IPv4 and IPv6, SIP/DIP */
nfc->data = RXH_IP_SRC | RXH_IP_DST;
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
+ nfc->data |= RXH_VLAN;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
@@ -477,6 +479,11 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
return -EINVAL;
+ if (nfc->data & RXH_VLAN)
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
+ else
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
+
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
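With these two hunks the VLAN contribution to the RSS hash becomes user-controllable through the standard ETHTOOL_GRXFH/SRXFH path: RXH_VLAN maps one-to-one onto the new NIX_FLOW_KEY_TYPE_VLAN flowkey bit. The set/clear idiom, generalized (helper name hypothetical):

static void demo_map_rxh_bit(u32 *flowkey, u64 nfc_data, u64 rxh_flag,
			     u32 hw_bit)
{
	if (nfc_data & rxh_flag)
		*flowkey |= hw_bit;	/* e.g. RXH_VLAN -> ..._TYPE_VLAN */
	else
		*flowkey &= ~hw_bit;
}

From userspace this typically corresponds to the 'v' token in 'ethtool -N <dev> rx-flow-hash <flow> ...' (subject to ethtool version support).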
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index aac2845c1fb1..265e4d1b4e64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -22,6 +22,7 @@
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
+#include <rvu_trace.h>
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@@ -558,6 +559,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
return IRQ_HANDLED;
}
@@ -940,6 +943,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
mbox = &pf->mbox;
+
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+
otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 70e0d4ca6688..32daa3e0f296 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -187,6 +187,8 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
+ trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.num_msgs = hdr->num_msgs;
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
new file mode 100644
index 000000000000..b1fcc44f566a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell Prestera drivers configuration
+#
+
+config PRESTERA
+ tristate "Marvell Prestera Switch ASICs support"
+ depends on NET_SWITCHDEV && VLAN_8021Q
+ select NET_DEVLINK
+ help
+ This driver supports the Marvell Prestera Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera.
+
+config PRESTERA_PCI
+ tristate "PCI interface driver for Marvell Prestera Switch ASICs family"
+ depends on PCI && HAS_IOMEM && PRESTERA
+ default PRESTERA
+ help
+ This is the implementation of PCI interface support for the Marvell
+ Prestera Switch ASICs family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera_pci.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
new file mode 100644
index 000000000000..93129e32ebc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PRESTERA) += prestera.o
+prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
+ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
+ prestera_switchdev.o
+
+obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
new file mode 100644
index 000000000000..55aa4bf8a27c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_H_
+#define _PRESTERA_H_
+
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
+#include <uapi/linux/if_ether.h>
+
+#define PRESTERA_DRV_NAME "prestera"
+
+#define PRESTERA_DEFAULT_VID 1
+
+struct prestera_fw_rev {
+ u16 maj;
+ u16 min;
+ u16 sub;
+};
+
+struct prestera_port_stats {
+ u64 good_octets_received;
+ u64 bad_octets_received;
+ u64 mac_trans_error;
+ u64 broadcast_frames_received;
+ u64 multicast_frames_received;
+ u64 frames_64_octets;
+ u64 frames_65_to_127_octets;
+ u64 frames_128_to_255_octets;
+ u64 frames_256_to_511_octets;
+ u64 frames_512_to_1023_octets;
+ u64 frames_1024_to_max_octets;
+ u64 excessive_collision;
+ u64 multicast_frames_sent;
+ u64 broadcast_frames_sent;
+ u64 fc_sent;
+ u64 fc_received;
+ u64 buffer_overrun;
+ u64 undersize;
+ u64 fragments;
+ u64 oversize;
+ u64 jabber;
+ u64 rx_error_frame_received;
+ u64 bad_crc;
+ u64 collisions;
+ u64 late_collision;
+ u64 unicast_frames_received;
+ u64 unicast_frames_sent;
+ u64 sent_multiple;
+ u64 sent_deferred;
+ u64 good_octets_sent;
+};
+
+struct prestera_port_caps {
+ u64 supp_link_modes;
+ u8 supp_fec;
+ u8 type;
+ u8 transceiver;
+};
+
+struct prestera_port {
+ struct net_device *dev;
+ struct prestera_switch *sw;
+ struct devlink_port dl_port;
+ u32 id;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+ u16 pvid;
+ bool autoneg;
+ u64 adver_link_modes;
+ u8 adver_fec;
+ struct prestera_port_caps caps;
+ struct list_head list;
+ struct list_head vlans_list;
+ struct {
+ struct prestera_port_stats stats;
+ struct delayed_work caching_dw;
+ } cached_hw_stats;
+};
+
+struct prestera_device {
+ struct device *dev;
+ u8 __iomem *ctl_regs;
+ u8 __iomem *pp_regs;
+ struct prestera_fw_rev fw_rev;
+ void *priv;
+
+ /* called by device driver to handle received packets */
+ void (*recv_pkt)(struct prestera_device *dev);
+
+ /* called by device driver to pass event up to the higher layer */
+ int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
+
+ /* called by higher layer to send request to the firmware */
+ int (*send_req)(struct prestera_device *dev, void *in_msg,
+ size_t in_size, void *out_msg, size_t out_size,
+ unsigned int wait);
+};
+
+enum prestera_event_type {
+ PRESTERA_EVENT_TYPE_UNSPEC,
+
+ PRESTERA_EVENT_TYPE_PORT,
+ PRESTERA_EVENT_TYPE_FDB,
+ PRESTERA_EVENT_TYPE_RXTX,
+
+ PRESTERA_EVENT_TYPE_MAX
+};
+
+enum prestera_rxtx_event_id {
+ PRESTERA_RXTX_EVENT_UNSPEC,
+ PRESTERA_RXTX_EVENT_RCV_PKT,
+};
+
+enum prestera_port_event_id {
+ PRESTERA_PORT_EVENT_UNSPEC,
+ PRESTERA_PORT_EVENT_STATE_CHANGED,
+};
+
+struct prestera_port_event {
+ u32 port_id;
+ union {
+ u32 oper_state;
+ } data;
+};
+
+enum prestera_fdb_event_id {
+ PRESTERA_FDB_EVENT_UNSPEC,
+ PRESTERA_FDB_EVENT_LEARNED,
+ PRESTERA_FDB_EVENT_AGED,
+};
+
+struct prestera_fdb_event {
+ u32 port_id;
+ u32 vid;
+ union {
+ u8 mac[ETH_ALEN];
+ } data;
+};
+
+struct prestera_event {
+ u16 id;
+ union {
+ struct prestera_port_event port_evt;
+ struct prestera_fdb_event fdb_evt;
+ };
+};
+
+struct prestera_switchdev;
+struct prestera_rxtx;
+
+struct prestera_switch {
+ struct prestera_device *dev;
+ struct prestera_switchdev *swdev;
+ struct prestera_rxtx *rxtx;
+ struct list_head event_handlers;
+ struct notifier_block netdev_nb;
+ char base_mac[ETH_ALEN];
+ struct list_head port_list;
+ rwlock_t port_list_lock;
+ u32 port_count;
+ u32 mtu_min;
+ u32 mtu_max;
+ u8 id;
+};
+
+struct prestera_rxtx_params {
+ bool use_sdma;
+ u32 map_addr;
+};
+
+#define prestera_dev(sw) ((sw)->dev->dev)
+
+static inline void prestera_write(const struct prestera_switch *sw,
+ unsigned int reg, u32 val)
+{
+ writel(val, sw->dev->pp_regs + reg);
+}
+
+static inline u32 prestera_read(const struct prestera_switch *sw,
+ unsigned int reg)
+{
+ return readl(sw->dev->pp_regs + reg);
+}
+
+int prestera_device_register(struct prestera_device *dev);
+void prestera_device_unregister(struct prestera_device *dev);
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id);
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec);
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
+
+bool prestera_netdev_check(const struct net_device *dev);
+
+#endif /* _PRESTERA_H_ */
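struct prestera_device is the contract between the bus-specific driver (prestera_pci) and the hardware-agnostic core: the bus side provides the mapped register windows and the send_req transport, then hands the structure to prestera_device_register(); recv_msg/recv_pkt are filled in by the core so events and packets can flow back up. The bus-driver side, sketched with demo_* placeholders for the real PCI plumbing:

static int demo_bus_probe(struct device *parent)
{
	struct prestera_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->dev = parent;
	dev->ctl_regs = demo_map_ctl_bar();	/* placeholder BAR mappings */
	dev->pp_regs = demo_map_pp_bar();
	dev->send_req = demo_fw_send_req;	/* firmware request transport */

	/* the core sets dev->recv_msg / dev->recv_pkt when it attaches */
	return prestera_device_register(dev);
}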
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
new file mode 100644
index 000000000000..94c185a0e2b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <net/devlink.h>
+
+#include "prestera_devlink.h"
+
+static int prestera_dl_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_switch *sw = devlink_priv(dl);
+ char buf[16];
+ int err;
+
+ err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static const struct devlink_ops prestera_dl_ops = {
+ .info_get = prestera_dl_info_get,
+};
+
+struct prestera_switch *prestera_devlink_alloc(void)
+{
+ struct devlink *dl;
+
+ dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch));
+
+ return devlink_priv(dl);
+}
+
+void prestera_devlink_free(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_free(dl);
+}
+
+int prestera_devlink_register(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+ int err;
+
+ err = devlink_register(dl, sw->dev->dev);
+ if (err)
+ dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
+
+ return err;
+}
+
+void prestera_devlink_unregister(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_unregister(dl);
+}
+
+int prestera_devlink_port_register(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct devlink *dl = priv_to_devlink(sw);
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->fp_id;
+ attrs.switch_id.id_len = sizeof(sw->id);
+ memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len);
+
+ devlink_port_attrs_set(&port->dl_port, &attrs);
+
+ err = devlink_port_register(dl, &port->dl_port, port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void prestera_devlink_port_unregister(struct prestera_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
+
+void prestera_devlink_port_set(struct prestera_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->dev);
+}
+
+void prestera_devlink_port_clear(struct prestera_port *port)
+{
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->dl_port;
+}
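Together these wrappers encapsulate the devlink life cycle, with the driver-private struct prestera_switch carved out of the devlink allocation itself (devlink_priv() and priv_to_devlink() convert between the two views), so teardown must mirror setup exactly. The expected call order from switch init code, sketched with error handling abbreviated:

static int demo_switch_init(struct prestera_device *dev)
{
	struct prestera_switch *sw;
	int err;

	sw = prestera_devlink_alloc();		/* devlink + priv in one shot */
	if (!sw)
		return -ENOMEM;

	sw->dev = dev;

	err = prestera_devlink_register(sw);	/* shows up in 'devlink dev' */
	if (err) {
		prestera_devlink_free(sw);
		return err;
	}

	/* per port: prestera_devlink_port_register(), then once the
	 * netdev exists, prestera_devlink_port_set() to bind the two
	 */
	return 0;
}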
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
new file mode 100644
index 000000000000..51bee9f75415
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_DEVLINK_H_
+#define _PRESTERA_DEVLINK_H_
+
+#include "prestera.h"
+
+struct prestera_switch *prestera_devlink_alloc(void);
+void prestera_devlink_free(struct prestera_switch *sw);
+
+int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_unregister(struct prestera_switch *sw);
+
+int prestera_devlink_port_register(struct prestera_port *port);
+void prestera_devlink_port_unregister(struct prestera_port *port);
+
+void prestera_devlink_port_set(struct prestera_port *port);
+void prestera_devlink_port_clear(struct prestera_port *port);
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
+
+#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
new file mode 100644
index 000000000000..a5e01c7a307b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include "prestera_dsa.h"
+
+#define PRESTERA_DSA_W0_CMD GENMASK(31, 30)
+#define PRESTERA_DSA_W0_IS_TAGGED BIT(29)
+#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24)
+#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19)
+#define PRESTERA_DSA_W0_VPT GENMASK(15, 13)
+#define PRESTERA_DSA_W0_EXT_BIT BIT(12)
+#define PRESTERA_DSA_W0_VID GENMASK(11, 0)
+
+#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
+#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+
+#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
+
+#define PRESTERA_DSA_W3_VID GENMASK(30, 27)
+#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7)
+#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0)
+
+#define PRESTERA_DSA_VID GENMASK(15, 12)
+#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5)
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ enum prestera_dsa_cmd cmd;
+ u32 words[4];
+ u32 field;
+
+ words[0] = ntohl(dsa_words[0]);
+ words[1] = ntohl(dsa_words[1]);
+ words[2] = ntohl(dsa_words[2]);
+ words[3] = ntohl(dsa_words[3]);
+
+ /* set the common parameters */
+ cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]);
+
+ /* only to CPU is supported */
+ if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU))
+ return -EINVAL;
+
+ if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0)
+ return -EINVAL;
+
+ field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]);
+
+ dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]);
+ dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]);
+ dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]);
+ dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]);
+ dsa->vlan.vid &= ~PRESTERA_DSA_VID;
+ dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field);
+
+ field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]);
+
+ dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]);
+ dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field);
+
+ dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) |
+ (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
+ (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+
+ return 0;
+}
+
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ u32 dev_num = dsa->hw_dev_num;
+ u32 words[4] = { 0 };
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num);
+ dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num);
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num);
+
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1);
+ words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1);
+ words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1);
+
+ dsa_words[0] = htonl(words[0]);
+ dsa_words[1] = htonl(words[1]);
+ dsa_words[2] = htonl(words[2]);
+ dsa_words[3] = htonl(words[3]);
+
+ return 0;
+}
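Both DSA codecs are a direct application of the <linux/bitfield.h> helpers: a GENMASK()/BIT() mask encodes both position and width, FIELD_GET() extracts and right-shifts, and FIELD_PREP() shifts a value into mask position. Note how the VID's upper bits are stitched in from word 3 while the low 12 bits come from word 0. The idiom in isolation, with demo masks:

#include <linux/bitfield.h>

#define DEMO_W0_CMD	GENMASK(31, 30)	/* 2-bit command field */
#define DEMO_W0_VID	GENMASK(11, 0)	/* 12-bit VLAN id */

static u32 demo_pack(u32 cmd, u32 vid)
{
	return FIELD_PREP(DEMO_W0_CMD, cmd) | FIELD_PREP(DEMO_W0_VID, vid);
}

static void demo_unpack(u32 word, u32 *cmd, u32 *vid)
{
	*cmd = FIELD_GET(DEMO_W0_CMD, word);	/* shift undone for you */
	*vid = FIELD_GET(DEMO_W0_VID, word);
}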
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
new file mode 100644
index 000000000000..67018629bdd2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_DSA_H_
+#define __PRESTERA_DSA_H_
+
+#include <linux/types.h>
+
+#define PRESTERA_DSA_HLEN 16
+
+enum prestera_dsa_cmd {
+ /* DSA command is "To CPU" */
+ PRESTERA_DSA_CMD_TO_CPU = 0,
+
+ /* DSA command is "From CPU" */
+ PRESTERA_DSA_CMD_FROM_CPU,
+};
+
+struct prestera_dsa_vlan {
+ u16 vid;
+ u8 vpt;
+ u8 cfi_bit;
+ bool is_tagged;
+};
+
+struct prestera_dsa {
+ struct prestera_dsa_vlan vlan;
+ u32 hw_dev_num;
+ u32 port_num;
+};
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf);
+
+#endif /* __PRESTERA_DSA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
new file mode 100644
index 000000000000..93a5e2baf808
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "prestera_ethtool.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_STATS_CNT \
+ (sizeof(struct prestera_port_stats) / sizeof(u64))
+#define PRESTERA_STATS_IDX(name) \
+ (offsetof(struct prestera_port_stats, name) / sizeof(u64))
+#define PRESTERA_STATS_FIELD(name) \
+ [PRESTERA_STATS_IDX(name)] = __stringify(name)
+
+static const char driver_kind[] = "prestera";
+
+static const struct prestera_link_mode {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u32 speed;
+ u64 pr_mask;
+ u8 duplex;
+ u8 port_type;
+} port_link_modes[PRESTERA_LINK_MODE_MAX] = {
+ [PRESTERA_LINK_MODE_10baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_1000baseKX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_2500baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .speed = 2500,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ },
+ [PRESTERA_LINK_MODE_10GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_10GbaseLR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_20GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = 20000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseCR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_25GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_40GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_40GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_40GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_50GbaseCR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_50GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_50GbaseSR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ }
+};
+
+static const struct prestera_fec {
+ u32 eth_fec;
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 pr_fec;
+} port_fec_caps[PRESTERA_PORT_FEC_MAX] = {
+ [PRESTERA_PORT_FEC_OFF] = {
+ .eth_fec = ETHTOOL_FEC_OFF,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_OFF,
+ },
+ [PRESTERA_PORT_FEC_BASER] = {
+ .eth_fec = ETHTOOL_FEC_BASER,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_BASER,
+ },
+ [PRESTERA_PORT_FEC_RS] = {
+ .eth_fec = ETHTOOL_FEC_RS,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_RS,
+ }
+};
+
+static const struct prestera_port_type {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 eth_type;
+} port_types[PRESTERA_PORT_TYPE_MAX] = {
+ [PRESTERA_PORT_TYPE_NONE] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_NONE,
+ },
+ [PRESTERA_PORT_TYPE_TP] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_AUI] = {
+ .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT,
+ .eth_type = PORT_AUI,
+ },
+ [PRESTERA_PORT_TYPE_MII] = {
+ .eth_mode = ETHTOOL_LINK_MODE_MII_BIT,
+ .eth_type = PORT_MII,
+ },
+ [PRESTERA_PORT_TYPE_FIBRE] = {
+ .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT,
+ .eth_type = PORT_FIBRE,
+ },
+ [PRESTERA_PORT_TYPE_BNC] = {
+ .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT,
+ .eth_type = PORT_BNC,
+ },
+ [PRESTERA_PORT_TYPE_DA] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_OTHER] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_OTHER,
+ }
+};
+
+static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = {
+ PRESTERA_STATS_FIELD(good_octets_received),
+ PRESTERA_STATS_FIELD(bad_octets_received),
+ PRESTERA_STATS_FIELD(mac_trans_error),
+ PRESTERA_STATS_FIELD(broadcast_frames_received),
+ PRESTERA_STATS_FIELD(multicast_frames_received),
+ PRESTERA_STATS_FIELD(frames_64_octets),
+ PRESTERA_STATS_FIELD(frames_65_to_127_octets),
+ PRESTERA_STATS_FIELD(frames_128_to_255_octets),
+ PRESTERA_STATS_FIELD(frames_256_to_511_octets),
+ PRESTERA_STATS_FIELD(frames_512_to_1023_octets),
+ PRESTERA_STATS_FIELD(frames_1024_to_max_octets),
+ PRESTERA_STATS_FIELD(excessive_collision),
+ PRESTERA_STATS_FIELD(multicast_frames_sent),
+ PRESTERA_STATS_FIELD(broadcast_frames_sent),
+ PRESTERA_STATS_FIELD(fc_sent),
+ PRESTERA_STATS_FIELD(fc_received),
+ PRESTERA_STATS_FIELD(buffer_overrun),
+ PRESTERA_STATS_FIELD(undersize),
+ PRESTERA_STATS_FIELD(fragments),
+ PRESTERA_STATS_FIELD(oversize),
+ PRESTERA_STATS_FIELD(jabber),
+ PRESTERA_STATS_FIELD(rx_error_frame_received),
+ PRESTERA_STATS_FIELD(bad_crc),
+ PRESTERA_STATS_FIELD(collisions),
+ PRESTERA_STATS_FIELD(late_collision),
+ PRESTERA_STATS_FIELD(unicast_frames_received),
+ PRESTERA_STATS_FIELD(unicast_frames_sent),
+ PRESTERA_STATS_FIELD(sent_multiple),
+ PRESTERA_STATS_FIELD(sent_deferred),
+ PRESTERA_STATS_FIELD(good_octets_sent),
+};
+
+static void prestera_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_switch *sw = port->sw;
+
+ strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+}
+
+static u8 prestera_port_type_get(struct prestera_port *port)
+{
+ if (port->caps.type < PRESTERA_PORT_TYPE_MAX)
+ return port_types[port->caps.type].eth_type;
+
+ return PORT_OTHER;
+}
+
+static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 type, mode;
+ int err;
+
+ for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
+ if (port_types[type].eth_type == ecmd->base.port &&
+ test_bit(port_types[type].eth_mode,
+ ecmd->link_modes.supported)) {
+ break;
+ }
+ }
+
+ if (type == port->caps.type)
+ return 0;
+ if (type != port->caps.type && ecmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+ if (type == PRESTERA_PORT_TYPE_MAX)
+ return -EOPNOTSUPP;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) &&
+ type == port_link_modes[mode].port_type) {
+ new_mode = mode;
+ }
+ }
+
+ if (new_mode < PRESTERA_LINK_MODE_MAX)
+ err = prestera_hw_port_link_mode_set(port, new_mode);
+ else
+ err = -EINVAL;
+
+ if (err)
+ return err;
+
+ port->caps.type = type;
+ port->autoneg = false;
+
+ return 0;
+}
+
+static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes,
+ u8 fec, u8 type)
+{
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask & link_modes) == 0)
+ continue;
+
+ if (type != PRESTERA_PORT_TYPE_NONE &&
+ port_link_modes[mode].port_type != type)
+ continue;
+
+ __set_bit(port_link_modes[mode].eth_mode, eth_modes);
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & fec) == 0)
+ continue;
+
+ __set_bit(port_fec_caps[mode].eth_mode, eth_modes);
+ }
+}
+
+static void prestera_modes_from_eth(const unsigned long *eth_modes,
+ u64 *link_modes, u8 *fec, u8 type)
+{
+ u64 adver_modes = 0;
+ u32 fec_modes = 0;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (!test_bit(port_link_modes[mode].eth_mode, eth_modes))
+ continue;
+
+ if (port_link_modes[mode].port_type != type)
+ continue;
+
+ adver_modes |= port_link_modes[mode].pr_mask;
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes))
+ continue;
+
+ fec_modes |= port_fec_caps[mode].pr_fec;
+ }
+
+ *link_modes = adver_modes;
+ *fec = fec_modes;
+}
+
+static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 mode;
+ u8 ptype;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) == 0)
+ continue;
+
+ ptype = port_link_modes[mode].port_type;
+ __set_bit(port_types[ptype].eth_mode,
+ ecmd->link_modes.supported);
+ }
+}
+
+static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ bool asym_pause;
+ bool pause;
+ u64 bitmap;
+ int err;
+
+ err = prestera_hw_port_remote_cap_get(port, &bitmap);
+ if (!err) {
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
+ }
+ }
+
+ err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause);
+ if (err)
+ return;
+
+ if (pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Pause);
+ if (asym_pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Asym_Pause);
+}
+
+static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_speed_get(port, &speed);
+ ecmd->base.speed = err ? SPEED_UNKNOWN : speed;
+}
+
+static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u8 duplex;
+ int err;
+
+ err = prestera_hw_port_duplex_get(port, &duplex);
+ if (err) {
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ return;
+ }
+
+ ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+}
+
+static int
+prestera_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+
+ ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg);
+
+ if (netif_running(dev) &&
+ (port->autoneg ||
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER))
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ Autoneg);
+ }
+
+ prestera_modes_to_eth(ecmd->link_modes.supported,
+ port->caps.supp_link_modes,
+ port->caps.supp_fec,
+ port->caps.type);
+
+ prestera_port_supp_types_get(ecmd, port);
+
+ if (netif_carrier_ok(dev)) {
+ prestera_port_speed_get(ecmd, port);
+ prestera_port_duplex_get(ecmd, port);
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ecmd->base.port = prestera_port_type_get(port);
+
+ if (port->autoneg) {
+ if (netif_running(dev))
+ prestera_modes_to_eth(ecmd->link_modes.advertising,
+ port->adver_link_modes,
+ port->adver_fec,
+ port->caps.type);
+
+ if (netif_carrier_ok(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_port_remote_cap_get(ecmd, port);
+ }
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix,
+ &ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_mdix_set(port,
+ ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_link_mode_set(struct prestera_port *port,
+ u32 speed, u8 duplex, u8 type)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (speed != port_link_modes[mode].speed)
+ continue;
+
+ if (duplex != port_link_modes[mode].duplex)
+ continue;
+
+ if (!(port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes))
+ continue;
+
+ if (type != port_link_modes[mode].port_type)
+ continue;
+
+ new_mode = mode;
+ break;
+ }
+
+ if (new_mode == PRESTERA_LINK_MODE_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_link_mode_set(port, new_mode);
+}
+
+static int
+prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 curr_mode;
+ u8 duplex;
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_link_mode_get(port, &curr_mode);
+ if (err)
+ return err;
+ if (curr_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
+
+ if (ecmd->base.duplex != DUPLEX_UNKNOWN)
+ duplex = ecmd->base.duplex == DUPLEX_FULL ?
+ PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
+ else
+ duplex = port_link_modes[curr_mode].duplex;
+
+ if (ecmd->base.speed != SPEED_UNKNOWN)
+ speed = ecmd->base.speed;
+ else
+ speed = port_link_modes[curr_mode].speed;
+
+ return prestera_port_link_mode_set(port, speed, duplex,
+ port->caps.type);
+}
+
+static int
+prestera_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u64 adver_modes;
+ u8 adver_fec;
+ int err;
+
+ err = prestera_port_type_set(ecmd, port);
+ if (err)
+ return err;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) {
+ err = prestera_port_mdix_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
+ &adver_fec, port->caps.type);
+
+ err = prestera_port_autoneg_set(port,
+ ecmd->base.autoneg == AUTONEG_ENABLE,
+ adver_modes, adver_fec);
+ if (err)
+ return err;
+
+ if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ err = prestera_port_speed_duplex_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_ethtool_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 active;
+ u32 mode;
+ int err;
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fecparam->fec = 0;
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0)
+ continue;
+
+ fecparam->fec |= port_fec_caps[mode].eth_fec;
+ }
+
+ if (active < PRESTERA_PORT_FEC_MAX)
+ fecparam->active_fec = port_fec_caps[active].eth_fec;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
+static int prestera_ethtool_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 fec, active;
+ u32 mode;
+ int err;
+
+ if (port->autoneg) {
+ netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
+ return -EINVAL;
+ }
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fec = PRESTERA_PORT_FEC_MAX;
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
+ (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) {
+ fec = mode;
+ break;
+ }
+ }
+
+ if (fec == active)
+ return 0;
+
+ if (fec == PRESTERA_PORT_FEC_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_fec_set(port, fec);
+}
+
+static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PRESTERA_STATS_CNT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name));
+}
+
+static void prestera_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats;
+
+ port_stats = &port->cached_hw_stats.stats;
+
+ memcpy(data, port_stats, sizeof(*port_stats));
+}
+
+static int prestera_ethtool_nway_reset(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ if (netif_running(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_autoneg_restart(port);
+
+ return -EINVAL;
+}
+
+const struct ethtool_ops prestera_ethtool_ops = {
+ .get_drvinfo = prestera_ethtool_get_drvinfo,
+ .get_link_ksettings = prestera_ethtool_get_link_ksettings,
+ .set_link_ksettings = prestera_ethtool_set_link_ksettings,
+ .get_fecparam = prestera_ethtool_get_fecparam,
+ .set_fecparam = prestera_ethtool_set_fecparam,
+ .get_sset_count = prestera_ethtool_get_sset_count,
+ .get_strings = prestera_ethtool_get_strings,
+ .get_ethtool_stats = prestera_ethtool_get_stats,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = prestera_ethtool_nway_reset,
+};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
new file mode 100644
index 000000000000..523ef1f592ce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_ETHTOOL_H_
+#define __PRESTERA_ETHTOOL_H_
+
+#include <linux/ethtool.h>
+
+extern const struct ethtool_ops prestera_ethtool_ops;
+
+#endif /* __PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
new file mode 100644
index 000000000000..0424718d5998
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
+
+#define PRESTERA_MIN_MTU 64
+
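+
+/* Command opcodes shared with the firmware; the explicit values are part
+ * of the device ABI and must stay in sync with the firmware side.
+ */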
+enum prestera_cmd_type_t {
+ PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
+ PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
+
+ PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100,
+ PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101,
+ PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110,
+
+ PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200,
+ PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201,
+ PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202,
+ PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203,
+
+ PRESTERA_CMD_TYPE_FDB_ADD = 0x300,
+ PRESTERA_CMD_TYPE_FDB_DELETE = 0x301,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312,
+
+ PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400,
+ PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+
+ PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
+ PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
+
+ PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+
+ PRESTERA_CMD_TYPE_ACK = 0x10000,
+ PRESTERA_CMD_TYPE_MAX
+};
+
+enum {
+ PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1,
+ PRESTERA_CMD_PORT_ATTR_MTU = 3,
+ PRESTERA_CMD_PORT_ATTR_MAC = 4,
+ PRESTERA_CMD_PORT_ATTR_SPEED = 5,
+ PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6,
+ PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
+ PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
+ PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11,
+ PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_TYPE = 13,
+ PRESTERA_CMD_PORT_ATTR_FEC = 14,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG = 15,
+ PRESTERA_CMD_PORT_ATTR_DUPLEX = 16,
+ PRESTERA_CMD_PORT_ATTR_STATS = 17,
+ PRESTERA_CMD_PORT_ATTR_MDIX = 18,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19,
+};
+
+enum {
+ PRESTERA_CMD_SWITCH_ATTR_MAC = 1,
+ PRESTERA_CMD_SWITCH_ATTR_AGEING = 2,
+};
+
+enum {
+ PRESTERA_CMD_ACK_OK,
+ PRESTERA_CMD_ACK_FAILED,
+
+ PRESTERA_CMD_ACK_MAX
+};
+
+enum {
+ PRESTERA_PORT_TP_NA,
+ PRESTERA_PORT_TP_MDI,
+ PRESTERA_PORT_TP_MDIX,
+ PRESTERA_PORT_TP_AUTO,
+};
+
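+
+/* Port counters, in the order the firmware reports them in
+ * prestera_msg_port_stats_resp::stats (see prestera_hw_port_stats_get()).
+ */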
+enum {
+ PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
+ PRESTERA_PORT_BRDC_PKTS_RCV_CNT,
+ PRESTERA_PORT_MC_PKTS_RCV_CNT,
+ PRESTERA_PORT_PKTS_64L_CNT,
+ PRESTERA_PORT_PKTS_65TO127L_CNT,
+ PRESTERA_PORT_PKTS_128TO255L_CNT,
+ PRESTERA_PORT_PKTS_256TO511L_CNT,
+ PRESTERA_PORT_PKTS_512TO1023L_CNT,
+ PRESTERA_PORT_PKTS_1024TOMAXL_CNT,
+ PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT,
+ PRESTERA_PORT_MC_PKTS_SENT_CNT,
+ PRESTERA_PORT_BRDC_PKTS_SENT_CNT,
+ PRESTERA_PORT_FC_SENT_CNT,
+ PRESTERA_PORT_GOOD_FC_RCV_CNT,
+ PRESTERA_PORT_DROP_EVENTS_CNT,
+ PRESTERA_PORT_UNDERSIZE_PKTS_CNT,
+ PRESTERA_PORT_FRAGMENTS_PKTS_CNT,
+ PRESTERA_PORT_OVERSIZE_PKTS_CNT,
+ PRESTERA_PORT_JABBER_PKTS_CNT,
+ PRESTERA_PORT_MAC_RCV_ERROR_CNT,
+ PRESTERA_PORT_BAD_CRC_CNT,
+ PRESTERA_PORT_COLLISIONS_CNT,
+ PRESTERA_PORT_LATE_COLLISIONS_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT,
+ PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT,
+ PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT,
+ PRESTERA_PORT_GOOD_OCTETS_SENT_CNT,
+
+ PRESTERA_PORT_CNT_MAX
+};
+
+enum {
+ PRESTERA_FC_NONE,
+ PRESTERA_FC_SYMMETRIC,
+ PRESTERA_FC_ASYMMETRIC,
+ PRESTERA_FC_SYMM_ASYMM,
+};
+
+struct prestera_fw_event_handler {
+ struct list_head list;
+ struct rcu_head rcu;
+ enum prestera_event_type type;
+ prestera_event_cb_t func;
+ void *arg;
+};
+
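+
+/* Every request to the firmware starts with a prestera_msg_cmd header and
+ * every reply starts with a prestera_msg_ret header, which lets the
+ * helpers below issue commands and check their status generically.
+ */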
+struct prestera_msg_cmd {
+ u32 type;
+};
+
+struct prestera_msg_ret {
+ struct prestera_msg_cmd cmd;
+ u32 status;
+};
+
+struct prestera_msg_common_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_common_resp {
+ struct prestera_msg_ret ret;
+};
+
+union prestera_msg_switch_param {
+ u8 mac[ETH_ALEN];
+ u32 ageing_timeout_ms;
+};
+
+struct prestera_msg_switch_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ union prestera_msg_switch_param param;
+};
+
+struct prestera_msg_switch_init_resp {
+ struct prestera_msg_ret ret;
+ u32 port_count;
+ u32 mtu_max;
+ u8 switch_id;
+};
+
+struct prestera_msg_port_autoneg_param {
+ u64 link_mode;
+ u8 enable;
+ u8 fec;
+};
+
+struct prestera_msg_port_cap_param {
+ u64 link_mode;
+ u8 type;
+ u8 fec;
+ u8 transceiver;
+};
+
+struct prestera_msg_port_mdix_param {
+ u8 status;
+ u8 admin_mode;
+};
+
+union prestera_msg_port_param {
+ u8 admin_state;
+ u8 oper_state;
+ u32 mtu;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ u32 speed;
+ u8 learning;
+ u8 flood;
+ u32 link_mode;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+ struct prestera_msg_port_mdix_param mdix;
+ struct prestera_msg_port_autoneg_param autoneg;
+ struct prestera_msg_port_cap_param cap;
+};
+
+struct prestera_msg_port_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ u32 port;
+ u32 dev;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_attr_resp {
+ struct prestera_msg_ret ret;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_stats_resp {
+ struct prestera_msg_ret ret;
+ u64 stats[PRESTERA_PORT_CNT_MAX];
+};
+
+struct prestera_msg_port_info_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+};
+
+struct prestera_msg_port_info_resp {
+ struct prestera_msg_ret ret;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+};
+
+struct prestera_msg_vlan_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 is_member;
+ u8 is_tagged;
+};
+
+struct prestera_msg_fdb_req {
+ struct prestera_msg_cmd cmd;
+ u8 dest_type;
+ u32 port;
+ u32 dev;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 dynamic;
+ u32 flush_mode;
+};
+
+struct prestera_msg_bridge_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 bridge;
+};
+
+struct prestera_msg_bridge_resp {
+ struct prestera_msg_ret ret;
+ u16 bridge;
+};
+
+struct prestera_msg_stp_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 state;
+};
+
+struct prestera_msg_rxtx_req {
+ struct prestera_msg_cmd cmd;
+ u8 use_sdma;
+};
+
+struct prestera_msg_rxtx_resp {
+ struct prestera_msg_ret ret;
+ u32 map_addr;
+};
+
+struct prestera_msg_rxtx_port_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+};
+
+struct prestera_msg_event {
+ u16 type;
+ u16 id;
+};
+
+union prestera_msg_event_port_param {
+ u32 oper_state;
+};
+
+struct prestera_msg_event_port {
+ struct prestera_msg_event id;
+ u32 port_id;
+ union prestera_msg_event_port_param param;
+};
+
+union prestera_msg_event_fdb_param {
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_event_fdb {
+ struct prestera_msg_event id;
+ u8 dest_type;
+ u32 port_id;
+ u32 vid;
+ union prestera_msg_event_fdb_param param;
+};
+
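+
+/* Send a command to the firmware and validate the reply: the firmware must
+ * answer with an ACK-typed message carrying an OK status, otherwise the
+ * exchange fails (-EBADE for an unexpected reply type, -EINVAL for a
+ * reported failure).
+ */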
+static int __prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ struct prestera_device *dev = sw->dev;
+ int err;
+
+ cmd->type = type;
+
+ err = dev->send_req(dev, cmd, clen, ret, rlen, waitms);
+ if (err)
+ return err;
+
+ if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK)
+ return -EBADE;
+ if (ret->status != PRESTERA_CMD_ACK_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0);
+}
+
+static int prestera_cmd_ret_wait(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms);
+}
+
+static int prestera_cmd(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen)
+{
+ struct prestera_msg_common_resp resp;
+
+ return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp));
+}
+
+static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_port *hw_evt = msg;
+
+ if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED)
+ return -EINVAL;
+
+ evt->port_evt.data.oper_state = hw_evt->param.oper_state;
+ evt->port_evt.port_id = hw_evt->port_id;
+
+ return 0;
+}
+
+static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_fdb *hw_evt = msg;
+
+ evt->fdb_evt.port_id = hw_evt->port_id;
+ evt->fdb_evt.vid = hw_evt->vid;
+
+ ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
+
+ return 0;
+}
+
+static struct prestera_fw_evt_parser {
+ int (*func)(void *msg, struct prestera_event *evt);
+} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = {
+ [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt },
+ [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt },
+};
+
+static struct prestera_fw_event_handler *
+__find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type)
+{
+ struct prestera_fw_event_handler *eh;
+
+ list_for_each_entry_rcu(eh, &sw->event_handlers, list) {
+ if (eh->type == type)
+ return eh;
+ }
+
+ return NULL;
+}
+
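+
+/* Find a handler of the given type and copy it out under rcu_read_lock(),
+ * so the caller can invoke the callback after dropping the RCU read lock
+ * even if the handler list is modified concurrently.
+ */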
+static int prestera_find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type,
+ struct prestera_fw_event_handler *eh)
+{
+ struct prestera_fw_event_handler *tmp;
+ int err = 0;
+
+ rcu_read_lock();
+ tmp = __find_event_handler(sw, type);
+ if (tmp)
+ *eh = *tmp;
+ else
+ err = -ENOENT;
+ rcu_read_unlock();
+
+ return err;
+}
+
+static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_msg_event *msg = buf;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event evt;
+ int err;
+
+ if (msg->type >= PRESTERA_EVENT_TYPE_MAX)
+ return -EINVAL;
+ if (!fw_event_parsers[msg->type].func)
+ return -ENOENT;
+
+ err = prestera_find_event_handler(sw, msg->type, &eh);
+ if (err)
+ return err;
+
+ evt.id = msg->id;
+
+ err = fw_event_parsers[msg->type].func(buf, &evt);
+ if (err)
+ return err;
+
+ eh.func(sw, &evt, eh.arg);
+
+ return 0;
+}
+
+static void prestera_pkt_recv(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event ev;
+ int err;
+
+ ev.id = PRESTERA_RXTX_EVENT_RCV_PKT;
+
+ err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh);
+ if (err)
+ return;
+
+ eh.func(sw, &ev, eh.arg);
+}
+
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id)
+{
+ struct prestera_msg_port_info_req req = {
+ .port = port->id,
+ };
+ struct prestera_msg_port_info_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *dev_id = resp.dev_id;
+ *hw_id = resp.hw_id;
+ *fp_id = resp.fp_id;
+
+ return 0;
+}
+
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_MAC,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_msg_switch_init_resp resp;
+ struct prestera_msg_common_req req;
+ int err;
+
+ INIT_LIST_HEAD(&sw->event_handlers);
+
+ err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp),
+ PRESTERA_SWITCH_INIT_TIMEOUT_MS);
+ if (err)
+ return err;
+
+ sw->dev->recv_msg = prestera_evt_recv;
+ sw->dev->recv_pkt = prestera_pkt_recv;
+ sw->port_count = resp.port_count;
+ sw->mtu_min = PRESTERA_MIN_MTU;
+ sw->mtu_max = resp.mtu_max;
+ sw->id = resp.switch_id;
+
+ return 0;
+}
+
+void prestera_hw_switch_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->event_handlers));
+}
+
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING,
+ .param = {
+ .ageing_timeout_ms = ageing_ms,
+ },
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .admin_state = admin_state,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MTU,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .mtu = mtu,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MAC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .accept_frm_type = type,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ caps->supp_link_modes = resp.param.cap.link_mode;
+ caps->transceiver = resp.param.cap.transceiver;
+ caps->supp_fec = resp.param.cap.fec;
+ caps->type = resp.param.cap.type;
+
+ return 0;
+}
+
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *link_mode_bitmap = resp.param.cap.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ switch (resp.param.fc) {
+ case PRESTERA_FC_SYMMETRIC:
+ *pause = true;
+ *asym_pause = false;
+ break;
+ case PRESTERA_FC_ASYMMETRIC:
+ *pause = false;
+ *asym_pause = true;
+ break;
+ case PRESTERA_FC_SYMM_ASYMM:
+ *pause = true;
+ *asym_pause = true;
+ break;
+ default:
+ *pause = false;
+ *asym_pause = false;
+ }
+
+ return 0;
+}
+
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *type = resp.param.type;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *fec = resp.param.fec;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .fec = fec,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *status = prestera_hw_mdix_to_eth(resp.param.mdix.status);
+ *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode);
+
+ return 0;
+}
+
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .link_mode = mode,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *mode = resp.param.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_SPEED,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *speed = resp.param.speed;
+
+ return 0;
+}
+
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .autoneg = {
+ .link_mode = link_modes,
+ .enable = autoneg,
+ .fec = fec,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_autoneg_restart(struct prestera_port *port)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *duplex = resp.param.duplex;
+
+ return 0;
+}
+
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *st)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_STATS,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_stats_resp resp;
+ u64 *hw = resp.stats;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT];
+ st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT];
+ st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT];
+ st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT];
+ st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT];
+ st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT];
+ st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT];
+ st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT];
+ st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT];
+ st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT];
+ st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT];
+ st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT];
+ st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT];
+ st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT];
+ st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT];
+ st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT];
+ st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT];
+ st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT];
+ st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT];
+ st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT];
+ st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT];
+ st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT];
+ st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT];
+ st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT];
+ st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT];
+ st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT];
+ st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT];
+ st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT];
+ st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT];
+ st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT];
+
+ return 0;
+}
+
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LEARNING,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .learning = enable,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood = flood,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .is_member = is_member,
+ .is_tagged = !untagged,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
+{
+ struct prestera_msg_stp_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .state = state,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
+{
+ struct prestera_msg_bridge_resp resp;
+ struct prestera_msg_bridge_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *bridge_id = resp.bridge;
+
+ return 0;
+}
+
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params)
+{
+ struct prestera_msg_rxtx_resp resp;
+ struct prestera_msg_rxtx_req req;
+ int err;
+
+ req.use_sdma = params->use_sdma;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ params->map_addr = resp.map_addr;
+
+ return 0;
+}
+
+int prestera_hw_rxtx_port_init(struct prestera_port *port)
+{
+ struct prestera_msg_rxtx_port_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (eh)
+ return -EEXIST;
+
+ eh = kmalloc(sizeof(*eh), GFP_KERNEL);
+ if (!eh)
+ return -ENOMEM;
+
+ eh->type = type;
+ eh->func = fn;
+ eh->arg = arg;
+
+ INIT_LIST_HEAD(&eh->list);
+
+ list_add_rcu(&eh->list, &sw->event_handlers);
+
+ return 0;
+}
+
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (!eh)
+ return;
+
+ list_del_rcu(&eh->list);
+ kfree_rcu(eh, rcu);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
new file mode 100644
index 000000000000..b2b5ac95b4e3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_HW_H_
+#define _PRESTERA_HW_H_
+
+#include <linux/types.h>
+
+enum prestera_accept_frm_type {
+ PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_ALL,
+};
+
+enum prestera_fdb_flush_mode {
+ PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0),
+ PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1),
+ PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC
+ | PRESTERA_FDB_FLUSH_MODE_STATIC,
+};
+
+enum {
+ PRESTERA_LINK_MODE_10baseT_Half,
+ PRESTERA_LINK_MODE_10baseT_Full,
+ PRESTERA_LINK_MODE_100baseT_Half,
+ PRESTERA_LINK_MODE_100baseT_Full,
+ PRESTERA_LINK_MODE_1000baseT_Half,
+ PRESTERA_LINK_MODE_1000baseT_Full,
+ PRESTERA_LINK_MODE_1000baseX_Full,
+ PRESTERA_LINK_MODE_1000baseKX_Full,
+ PRESTERA_LINK_MODE_2500baseX_Full,
+ PRESTERA_LINK_MODE_10GbaseKR_Full,
+ PRESTERA_LINK_MODE_10GbaseSR_Full,
+ PRESTERA_LINK_MODE_10GbaseLR_Full,
+ PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ PRESTERA_LINK_MODE_25GbaseCR_Full,
+ PRESTERA_LINK_MODE_25GbaseKR_Full,
+ PRESTERA_LINK_MODE_25GbaseSR_Full,
+ PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ PRESTERA_LINK_MODE_100GbaseCR4_Full,
+
+ PRESTERA_LINK_MODE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TYPE_NONE,
+ PRESTERA_PORT_TYPE_TP,
+ PRESTERA_PORT_TYPE_AUI,
+ PRESTERA_PORT_TYPE_MII,
+ PRESTERA_PORT_TYPE_FIBRE,
+ PRESTERA_PORT_TYPE_BNC,
+ PRESTERA_PORT_TYPE_DA,
+ PRESTERA_PORT_TYPE_OTHER,
+
+ PRESTERA_PORT_TYPE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TCVR_COPPER,
+ PRESTERA_PORT_TCVR_SFP,
+
+ PRESTERA_PORT_TCVR_MAX
+};
+
+enum {
+ PRESTERA_PORT_FEC_OFF,
+ PRESTERA_PORT_FEC_BASER,
+ PRESTERA_PORT_FEC_RS,
+
+ PRESTERA_PORT_FEC_MAX
+};
+
+enum {
+ PRESTERA_PORT_DUPLEX_HALF,
+ PRESTERA_PORT_DUPLEX_FULL,
+};
+
+enum {
+ PRESTERA_STP_DISABLED,
+ PRESTERA_STP_BLOCK_LISTEN,
+ PRESTERA_STP_LEARN,
+ PRESTERA_STP_FORWARD,
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_port_stats;
+struct prestera_port_caps;
+enum prestera_event_type;
+struct prestera_event;
+
+typedef void (*prestera_event_cb_t)
+ (struct prestera_switch *sw, struct prestera_event *evt, void *arg);
+
+struct prestera_rxtx_params;
+
+/* Switch API */
+int prestera_hw_switch_init(struct prestera_switch *sw);
+void prestera_hw_switch_fini(struct prestera_switch *sw);
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms);
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
+
+/* Port API */
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id);
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state);
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
+int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
+int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps);
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap);
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause);
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec);
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec);
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec);
+int prestera_hw_port_autoneg_restart(struct prestera_port *port);
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex);
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *stats);
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode);
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode);
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode);
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood);
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type);
+/* VLAN API */
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged);
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid);
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state);
+
+/* FDB API */
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic);
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid);
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode);
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode);
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode);
+
+/* Bridge API */
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id);
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+
+/* Event handlers */
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg);
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn);
+
+/* RX/TX */
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params);
+int prestera_hw_rxtx_port_init(struct prestera_port *port);
+
+#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
new file mode 100644
index 000000000000..9bd57b89d1d0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdev_features.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
+#include "prestera_ethtool.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_MTU_DEFAULT 1536
+
+#define PRESTERA_STATS_DELAY_MS 1000
+
+#define PRESTERA_MAC_ADDR_NUM_MAX 255
+
+static struct workqueue_struct *prestera_wq;
+
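+
+/* A non-zero PVID makes the port accept all frame types, a zero PVID
+ * restricts it to tagged frames; on failure the previous PVID is
+ * restored in hardware.
+ */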
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
+{
+ enum prestera_accept_frm_type frm_type;
+ int err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED;
+
+ if (vid) {
+ err = prestera_hw_vlan_port_vid_set(port, vid);
+ if (err)
+ return err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL;
+ }
+
+ err = prestera_hw_port_accept_frm_type(port, frm_type);
+ if (err) {
+ if (frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL)
+ prestera_hw_vlan_port_vid_set(port, port->pvid);
+ return err;
+ }
+
+ port->pvid = vid;
+ return 0;
+}
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id)
+{
+ struct prestera_port *port = NULL;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(port, &sw->port_list, list) {
+ if (port->dev_id == dev_id && port->hw_id == hw_id)
+ break;
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port = NULL;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(port, &sw->port_list, list) {
+ if (port->id == id)
+ break;
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_state_set(port, true);
+ if (err)
+ return err;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int prestera_port_close(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ netif_stop_queue(dev);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return prestera_rxtx_xmit(netdev_priv(dev), skb);
+}
+
+static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+{
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+ /* firmware requires that the port's MAC address contain the first
+ * 5 bytes of the base MAC address
+ */
+ if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, addr->sa_data);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_mac_set(port, addr->sa_data);
+ if (err)
+ return err;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static int prestera_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_mtu_set(port, mtu);
+ if (err)
+ return err;
+
+ dev->mtu = mtu;
+
+ return 0;
+}
+
+static void prestera_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats;
+
+ stats->rx_packets = port_stats->broadcast_frames_received +
+ port_stats->multicast_frames_received +
+ port_stats->unicast_frames_received;
+
+ stats->tx_packets = port_stats->broadcast_frames_sent +
+ port_stats->multicast_frames_sent +
+ port_stats->unicast_frames_sent;
+
+ stats->rx_bytes = port_stats->good_octets_received;
+
+ stats->tx_bytes = port_stats->good_octets_sent;
+
+ stats->rx_errors = port_stats->rx_error_frame_received;
+ stats->tx_errors = port_stats->mac_trans_error;
+
+ stats->rx_dropped = port_stats->buffer_overrun;
+ stats->tx_dropped = 0;
+
+ stats->multicast = port_stats->multicast_frames_received;
+ stats->collisions = port_stats->excessive_collision;
+
+ stats->rx_crc_errors = port_stats->bad_crc;
+}
+
+static void prestera_port_get_hw_stats(struct prestera_port *port)
+{
+ prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats);
+}
+
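+
+/* Refresh the cached port counters; the work requeues itself every
+ * PRESTERA_STATS_DELAY_MS while the link is up (see
+ * prestera_port_handle_event()).
+ */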
+static void prestera_port_stats_update(struct work_struct *work)
+{
+ struct prestera_port *port =
+ container_of(work, struct prestera_port,
+ cached_hw_stats.caching_dw.work);
+
+ prestera_port_get_hw_stats(port);
+
+ queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw,
+ msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
+}
+
+static const struct net_device_ops prestera_netdev_ops = {
+ .ndo_open = prestera_port_open,
+ .ndo_stop = prestera_port_close,
+ .ndo_start_xmit = prestera_port_xmit,
+ .ndo_change_mtu = prestera_port_change_mtu,
+ .ndo_get_stats64 = prestera_port_get_stats64,
+ .ndo_set_mac_address = prestera_port_set_mac_address,
+ .ndo_get_devlink_port = prestera_devlink_get_port,
+};
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec)
+{
+ bool refresh = false;
+ u64 link_modes;
+ int err;
+ u8 fec;
+
+ if (port->caps.type != PRESTERA_PORT_TYPE_TP)
+ return enable ? -EINVAL : 0;
+
+ if (!enable)
+ goto set_autoneg;
+
+ link_modes = port->caps.supp_link_modes & adver_link_modes;
+ fec = port->caps.supp_fec & adver_fec;
+
+ if (!link_modes && !fec)
+ return -EOPNOTSUPP;
+
+ if (link_modes && port->adver_link_modes != link_modes) {
+ port->adver_link_modes = link_modes;
+ refresh = true;
+ }
+
+ if (fec && port->adver_fec != fec) {
+ port->adver_fec = fec;
+ refresh = true;
+ }
+
+set_autoneg:
+ if (port->autoneg == enable && !refresh)
+ return 0;
+
+ err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes,
+ port->adver_fec);
+ if (err)
+ return err;
+
+ port->autoneg = enable;
+
+ return 0;
+}
+
+static void prestera_port_list_add(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_add(&port->list, &port->sw->port_list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static void prestera_port_list_del(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_del(&port->list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static int prestera_port_create(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(*port));
+ if (!dev)
+ return -ENOMEM;
+
+ port = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&port->vlans_list);
+ port->pvid = PRESTERA_DEFAULT_VID;
+ port->dev = dev;
+ port->id = id;
+ port->sw = sw;
+
+ err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
+ &port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id);
+ goto err_port_info_get;
+ }
+
+ err = prestera_devlink_port_register(port);
+ if (err)
+ goto err_dl_port_register;
+
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netdev_ops = &prestera_netdev_ops;
+ dev->ethtool_ops = &prestera_ethtool_ops;
+
+ netif_carrier_off(dev);
+
+ dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
+ dev->min_mtu = sw->mtu_min;
+ dev->max_mtu = sw->mtu_max;
+
+ err = prestera_hw_port_mtu_set(port, dev->mtu);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n",
+ id, dev->mtu);
+ goto err_port_init;
+ }
+
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
+ goto err_port_init;
+ }
+
+ /* firmware requires that the port's MAC address start with the
+ * first 5 bytes of the base MAC address
+ */
+ memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
+ dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+
+ err = prestera_hw_port_mac_set(port, dev->dev_addr);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_hw_port_cap_get(port, &port->caps);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id);
+ goto err_port_init;
+ }
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ prestera_port_autoneg_set(port, true, port->caps.supp_link_modes,
+ port->caps.supp_fec);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_rxtx_port_init(port);
+ if (err)
+ goto err_port_init;
+
+ INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw,
+ &prestera_port_stats_update);
+
+ prestera_port_list_add(port);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_register_netdev;
+
+ prestera_devlink_port_set(port);
+
+ return 0;
+
+err_register_netdev:
+ prestera_port_list_del(port);
+err_port_init:
+ prestera_devlink_port_unregister(port);
+err_dl_port_register:
+err_port_info_get:
+ free_netdev(dev);
+ return err;
+}
+
+static void prestera_port_destroy(struct prestera_port *port)
+{
+ struct net_device *dev = port->dev;
+
+ cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
+ prestera_devlink_port_clear(port);
+ unregister_netdev(dev);
+ prestera_port_list_del(port);
+ prestera_devlink_port_unregister(port);
+ free_netdev(dev);
+}
+
+static void prestera_destroy_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+}
+
+static int prestera_create_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+ u32 port_idx;
+ int err;
+
+ for (port_idx = 0; port_idx < sw->port_count; port_idx++) {
+ err = prestera_port_create(sw, port_idx);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+
+ return err;
+}
+
+static void prestera_port_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct delayed_work *caching_dw;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->port_evt.port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) {
+ if (evt->port_evt.data.oper_state) {
+ netif_carrier_on(port->dev);
+ if (!delayed_work_pending(caching_dw))
+ queue_delayed_work(prestera_wq, caching_dw, 0);
+ } else {
+ netif_carrier_off(port->dev);
+ if (delayed_work_pending(caching_dw))
+ cancel_delayed_work(caching_dw);
+ }
+ }
+}
+
+static int prestera_event_handlers_register(struct prestera_switch *sw)
+{
+ return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event,
+ NULL);
+}
+
+static void prestera_event_handlers_unregister(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event);
+}
+
+static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
+{
+ struct device_node *base_mac_np;
+ struct device_node *np;
+ const char *base_mac;
+
+ np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+ base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+ of_node_put(np);
+
+ base_mac = of_get_mac_address(base_mac_np);
+ of_node_put(base_mac_np);
+ if (!IS_ERR(base_mac))
+ ether_addr_copy(sw->base_mac, base_mac);
+
+ if (!is_valid_ether_addr(sw->base_mac)) {
+ eth_random_addr(sw->base_mac);
+ dev_info(prestera_dev(sw), "using random base mac address\n");
+ }
+
+ return prestera_hw_switch_mac_set(sw, sw->base_mac);
+}
+
+bool prestera_netdev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &prestera_netdev_ops;
+}
+
+static int prestera_lower_dev_walk(struct net_device *dev, void *data)
+{
+ struct prestera_port **pport = data;
+
+ if (prestera_netdev_check(dev)) {
+ *pport = netdev_priv(dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
+{
+ struct prestera_port *port = NULL;
+
+ if (prestera_netdev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &port);
+
+ return port;
+}
+
+static int prestera_netdev_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ case NETDEV_CHANGEUPPER:
+ return prestera_bridge_port_event(dev, event, ptr);
+ default:
+ return 0;
+ }
+}
+
+static int prestera_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ if (prestera_netdev_check(dev))
+ err = prestera_netdev_port_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
+}
+
+static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
+{
+ sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
+
+ return register_netdevice_notifier(&sw->netdev_nb);
+}
+
+static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw)
+{
+ unregister_netdevice_notifier(&sw->netdev_nb);
+}
+
+static int prestera_switch_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_switch_init(sw);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to init Switch device\n");
+ return err;
+ }
+
+ rwlock_init(&sw->port_list_lock);
+ INIT_LIST_HEAD(&sw->port_list);
+
+ err = prestera_switch_set_base_mac_addr(sw);
+ if (err)
+ return err;
+
+ err = prestera_netdev_event_handler_register(sw);
+ if (err)
+ return err;
+
+ err = prestera_switchdev_init(sw);
+ if (err)
+ goto err_swdev_register;
+
+ err = prestera_rxtx_switch_init(sw);
+ if (err)
+ goto err_rxtx_register;
+
+ err = prestera_event_handlers_register(sw);
+ if (err)
+ goto err_handlers_register;
+
+ err = prestera_devlink_register(sw);
+ if (err)
+ goto err_dl_register;
+
+ err = prestera_create_ports(sw);
+ if (err)
+ goto err_ports_create;
+
+ return 0;
+
+err_ports_create:
+ prestera_devlink_unregister(sw);
+err_dl_register:
+ prestera_event_handlers_unregister(sw);
+err_handlers_register:
+ prestera_rxtx_switch_fini(sw);
+err_rxtx_register:
+ prestera_switchdev_fini(sw);
+err_swdev_register:
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+
+ return err;
+}
+
+static void prestera_switch_fini(struct prestera_switch *sw)
+{
+ prestera_destroy_ports(sw);
+ prestera_devlink_unregister(sw);
+ prestera_event_handlers_unregister(sw);
+ prestera_rxtx_switch_fini(sw);
+ prestera_switchdev_fini(sw);
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+}
+
+int prestera_device_register(struct prestera_device *dev)
+{
+ struct prestera_switch *sw;
+ int err;
+
+ sw = prestera_devlink_alloc();
+ if (!sw)
+ return -ENOMEM;
+
+ dev->priv = sw;
+ sw->dev = dev;
+
+ err = prestera_switch_init(sw);
+ if (err) {
+ prestera_devlink_free(sw);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(prestera_device_register);
+
+void prestera_device_unregister(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+
+ prestera_switch_fini(sw);
+ prestera_devlink_free(sw);
+}
+EXPORT_SYMBOL(prestera_device_unregister);
+
+static int __init prestera_module_init(void)
+{
+ prestera_wq = alloc_workqueue("prestera", 0, 0);
+ if (!prestera_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit prestera_module_exit(void)
+{
+ destroy_workqueue(prestera_wq);
+}
+
+module_init(prestera_module_init);
+module_exit(prestera_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch driver");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
new file mode 100644
index 000000000000..1b97adae542e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "prestera.h"
+
+#define PRESTERA_MSG_MAX_SIZE 1500
+
+#define PRESTERA_SUPP_FW_MAJ_VER 2
+#define PRESTERA_SUPP_FW_MIN_VER 0
+
+#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
+
+#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
+#define PRESTERA_FW_DL_TIMEOUT_MS 50000
+#define PRESTERA_FW_BLK_SZ 1024
+
+#define PRESTERA_FW_VER_MAJ_MUL 1000000
+#define PRESTERA_FW_VER_MIN_MUL 1000
+
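+/* FW version word encodes: maj * 1000000 + min * 1000 + patch */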
+#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL)
+
+#define PRESTERA_FW_VER_MIN(v) \
+ (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \
+ PRESTERA_FW_VER_MIN_MUL)
+
+#define PRESTERA_FW_VER_PATCH(v) \
+ ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \
+ (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL))
+
+enum prestera_pci_bar_t {
+ PRESTERA_PCI_BAR_FW = 2,
+ PRESTERA_PCI_BAR_PP = 4,
+};
+
+struct prestera_fw_header {
+ __be32 magic_number;
+ __be32 version_value;
+ u8 reserved[8];
+};
+
+struct prestera_ldr_regs {
+ u32 ldr_ready;
+ u32 pad1;
+
+ u32 ldr_img_size;
+ u32 ldr_ctl_flags;
+
+ u32 ldr_buf_offs;
+ u32 ldr_buf_size;
+
+ u32 ldr_buf_rd;
+ u32 pad2;
+ u32 ldr_buf_wr;
+
+ u32 ldr_status;
+};
+
+#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f)
+
+#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed
+
+#define PRESTERA_LDR_STATUS_IMG_DL BIT(0)
+#define PRESTERA_LDR_STATUS_START_FW BIT(1)
+#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2)
+#define PRESTERA_LDR_STATUS_NOMEM BIT(3)
+
+#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs)
+#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg))
+
+/* fw loader registers */
+#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready)
+#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size)
+#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags)
+#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size)
+#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs)
+#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd)
+#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr)
+#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status)
+
+#define PRESTERA_LDR_CTL_DL_START BIT(0)
+
+#define PRESTERA_EVT_QNUM_MAX 4
+
+struct prestera_fw_evtq_regs {
+ u32 rd_idx;
+ u32 pad1;
+ u32 wr_idx;
+ u32 pad2;
+ u32 offs;
+ u32 len;
+};
+
+struct prestera_fw_regs {
+ u32 fw_ready;
+ u32 pad;
+ u32 cmd_offs;
+ u32 cmd_len;
+ u32 evt_offs;
+ u32 evt_qnum;
+
+ u32 cmd_req_ctl;
+ u32 cmd_req_len;
+ u32 cmd_rcv_ctl;
+ u32 cmd_rcv_len;
+
+ u32 fw_status;
+ u32 rx_status;
+
+ struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+};
+
+#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
+
+#define PRESTERA_FW_READY_MAGIC 0xcafebabe
+
+/* fw registers */
+#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready)
+
+#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
+#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
+#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
+
+#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl)
+#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len)
+
+#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl)
+#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len)
+#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
+#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
+
+/* PRESTERA_CMD_REQ_CTL_REG flags */
+#define PRESTERA_CMD_F_REQ_SENT BIT(0)
+#define PRESTERA_CMD_F_REPL_RCVD BIT(1)
+
+/* PRESTERA_CMD_RCV_CTL_REG flags */
+#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+
+#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(evtq_list) + \
+ (q) * sizeof(struct prestera_fw_evtq_regs) + \
+ offsetof(struct prestera_fw_evtq_regs, f))
+
+#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx)
+#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx)
+#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs)
+#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len)
+
+#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs)
+#define PRESTERA_FW_REG_ADDR(fw, reg) (PRESTERA_FW_REG_BASE(fw) + (reg))
+
+#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
+#define PRESTERA_FW_READY_WAIT_MS 20000
+
+struct prestera_fw_evtq {
+ u8 __iomem *addr;
+ size_t len;
+};
+
+struct prestera_fw {
+ struct workqueue_struct *wq;
+ struct prestera_device dev;
+ u8 __iomem *ldr_regs;
+ u8 __iomem *ldr_ring_buf;
+ u32 ldr_buf_len;
+ u32 ldr_wr_idx;
+ struct mutex cmd_mtx; /* serialize access to dev->send_req */
+ size_t cmd_mbox_len;
+ u8 __iomem *cmd_mbox;
+ struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
+ u8 evt_qnum;
+ struct work_struct evt_work;
+ u8 __iomem *evt_buf;
+ u8 *evt_msg;
+};
+
+static int prestera_fw_load(struct prestera_fw *fw);
+
+static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].len;
+}
+
+static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)
+{
+ u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));
+}
+
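+/* the event queue length must be a power of two for this wrap math */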
+static void prestera_fw_evtq_rd_set(struct prestera_fw *fw,
+ u8 qid, u32 idx)
+{
+ u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);
+
+ prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);
+}
+
+static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].addr;
+}
+
+static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid)
+{
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u32 val;
+
+ val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx);
+ prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4);
+ return val;
+}
+
+static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw,
+ u8 qid, void *buf, size_t len)
+{
+ u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid);
+ u32 *buf32 = buf;
+ int i;
+
+ for (i = 0; i < len / 4; buf32++, i++) {
+ *buf32 = readl_relaxed(evtq_addr + idx);
+ idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1);
+ }
+
+ prestera_fw_evtq_rd_set(fw, qid, idx);
+
+ return i;
+}
+
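+/* pick the first event queue with at least one 32-bit word pending */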
+static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
+{
+ int qid;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ if (prestera_fw_evtq_avail(fw, qid) >= 4)
+ return qid;
+ }
+
+ return PRESTERA_EVT_QNUM_MAX;
+}
+
+static void prestera_fw_evt_work_fn(struct work_struct *work)
+{
+ struct prestera_fw *fw;
+ void *msg;
+ u8 qid;
+
+ fw = container_of(work, struct prestera_fw, evt_work);
+ msg = fw->evt_msg;
+
+ while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
+ u32 idx;
+ u32 len;
+
+ len = prestera_fw_evtq_read32(fw, qid);
+ idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ WARN_ON(prestera_fw_evtq_avail(fw, qid) < len);
+
+ if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) {
+ prestera_fw_evtq_rd_set(fw, qid, idx + len);
+ continue;
+ }
+
+ prestera_fw_evtq_read_buf(fw, qid, msg, len);
+
+ if (fw->dev.recv_msg)
+ fw->dev.recv_msg(&fw->dev, msg, len);
+ }
+}
+
+static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
+ unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
+static int prestera_fw_cmd_send(struct prestera_fw *fw,
+ void *in_msg, size_t in_size,
+ void *out_msg, size_t out_size,
+ unsigned int waitms)
+{
+ u32 ret_size;
+ int err;
+
+ if (!waitms)
+ waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
+
+ if (ALIGN(in_size, 4) > fw->cmd_mbox_len)
+ return -EMSGSIZE;
+
+ /* wait for the previous reply from the FW to be processed */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30);
+ if (err) {
+ dev_err(fw->dev.dev, "finish reply from FW is timed out\n");
+ return err;
+ }
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size);
+ memcpy_toio(fw->cmd_mbox, in_msg, in_size);
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT);
+
+ /* wait for reply from FW */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG,
+ PRESTERA_CMD_F_REPL_SENT, waitms);
+ if (err) {
+ dev_err(fw->dev.dev, "reply from FW is timed out\n");
+ goto cmd_exit;
+ }
+
+ ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG);
+ if (ret_size > out_size) {
+ dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
+ ret_size, out_size);
+ err = -EMSGSIZE;
+ goto cmd_exit;
+ }
+
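+ /* the FW writes its reply into the mailbox right after the request */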
+ memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size);
+
+cmd_exit:
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD);
+ return err;
+}
+
+static int prestera_fw_send_req(struct prestera_device *dev,
+ void *in_msg, size_t in_size, void *out_msg,
+ size_t out_size, unsigned int waitms)
+{
+ struct prestera_fw *fw;
+ int err;
+
+ fw = container_of(dev, struct prestera_fw, dev);
+
+ mutex_lock(&fw->cmd_mtx);
+ err = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms);
+ mutex_unlock(&fw->cmd_mtx);
+
+ return err;
+}
+
+static int prestera_fw_init(struct prestera_fw *fw)
+{
+ u8 __iomem *base;
+ int err;
+ u8 qid;
+
+ fw->dev.send_req = prestera_fw_send_req;
+ fw->ldr_regs = fw->dev.ctl_regs;
+
+ err = prestera_fw_load(fw);
+ if (err)
+ return err;
+
+ err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG,
+ PRESTERA_FW_READY_MAGIC,
+ PRESTERA_FW_READY_WAIT_MS);
+ if (err) {
+ dev_err(fw->dev.dev, "FW failed to start\n");
+ return err;
+ }
+
+ base = fw->dev.ctl_regs;
+
+ fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
+ fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
+ mutex_init(&fw->cmd_mtx);
+
+ fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
+ fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
+ fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!fw->evt_msg)
+ return -ENOMEM;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid));
+ struct prestera_fw_evtq *evtq = &fw->evt_queue[qid];
+
+ evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid));
+ evtq->addr = fw->evt_buf + offs;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_uninit(struct prestera_fw *fw)
+{
+ kfree(fw->evt_msg);
+}
+
+static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id)
+{
+ struct prestera_fw *fw = dev_id;
+
+ if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) {
+ prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0);
+
+ if (fw->dev.recv_pkt)
+ fw->dev.recv_pkt(&fw->dev);
+ }
+
+ queue_work(fw->wq, &fw->evt_work);
+
+ return IRQ_HANDLED;
+}
+
+static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
+ u32 reg, u32 cmp, unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
+static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
+ u32 buf_len = fw->ldr_buf_len;
+ u32 wr_idx = fw->ldr_wr_idx;
+ u32 rd_idx;
+
+ return readl_poll_timeout(addr, rd_idx,
+ CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len,
+ 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+}
+
+static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG);
+ unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL);
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC,
+ PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]",
+ prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG));
+ return err;
+ }
+
+ return 0;
+}
+
+static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n)
+{
+ fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1);
+}
+
+static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw)
+{
+ prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx);
+}
+
+static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw)
+{
+ return fw->ldr_ring_buf + fw->ldr_wr_idx;
+}
+
+static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len)
+{
+ int err;
+ int i;
+
+ err = prestera_ldr_wait_buf(fw, len);
+ if (err) {
+ dev_err(fw->dev.dev, "failed wait for sending firmware\n");
+ return err;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw));
+ prestera_ldr_wr_idx_move(fw, 4);
+ }
+
+ prestera_ldr_wr_idx_commit(fw);
+ return 0;
+}
+
+static int prestera_ldr_fw_send(struct prestera_fw *fw,
+ const char *img, u32 fw_size)
+{
+ u32 status;
+ u32 pos;
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG,
+ PRESTERA_LDR_STATUS_IMG_DL,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Loader is not ready to load image\n");
+ return err;
+ }
+
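+ /* send the image in full blocks first, then the remaining tail below */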
+ for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) {
+ if (pos + PRESTERA_FW_BLK_SZ > fw_size)
+ break;
+
+ err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ);
+ if (err)
+ return err;
+ }
+
+ if (pos < fw_size) {
+ err = prestera_ldr_send(fw, img + pos, fw_size - pos);
+ if (err)
+ return err;
+ }
+
+ err = prestera_ldr_wait_dl_finish(fw);
+ if (err)
+ return err;
+
+ status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG);
+
+ switch (status) {
+ case PRESTERA_LDR_STATUS_INVALID_IMG:
+ dev_err(fw->dev.dev, "FW img has bad CRC\n");
+ return -EINVAL;
+ case PRESTERA_LDR_STATUS_NOMEM:
+ dev_err(fw->dev.dev, "Loader has no enough mem\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
+ struct prestera_fw_rev *rev)
+{
+ u32 version = be32_to_cpu(hdr->version_value);
+
+ rev->maj = PRESTERA_FW_VER_MAJ(version);
+ rev->min = PRESTERA_FW_VER_MIN(version);
+ rev->sub = PRESTERA_FW_VER_PATCH(version);
+}
+
+static int prestera_fw_rev_check(struct prestera_fw *fw)
+{
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER;
+ u16 min_supp = PRESTERA_SUPP_FW_MIN_VER;
+
+ if (rev->maj == maj_supp && rev->min >= min_supp)
+ return 0;
+
+ dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ return -EINVAL;
+}
+
+static int prestera_fw_hdr_parse(struct prestera_fw *fw,
+ const struct firmware *img)
+{
+ struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u32 magic;
+
+ magic = be32_to_cpu(hdr->magic_number);
+ if (magic != PRESTERA_FW_HDR_MAGIC) {
+ dev_err(fw->dev.dev, "FW img hdr magic is invalid");
+ return -EINVAL;
+ }
+
+ prestera_fw_rev_parse(hdr, rev);
+
+ dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n",
+ rev->maj, rev->min, rev->sub);
+
+ return prestera_fw_rev_check(fw);
+}
+
+static int prestera_fw_load(struct prestera_fw *fw)
+{
+ size_t hlen = sizeof(struct prestera_fw_header);
+ const struct firmware *f;
+ char fw_path[128];
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
+ PRESTERA_LDR_READY_MAGIC,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "waiting for FW loader is timed out");
+ return err;
+ }
+
+ fw->ldr_ring_buf = fw->ldr_regs +
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG);
+
+ fw->ldr_buf_len =
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG);
+
+ fw->ldr_wr_idx = 0;
+
+ snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ err = request_firmware_direct(&f, fw_path, fw->dev.dev);
+ if (err) {
+ dev_err(fw->dev.dev, "failed to request firmware file\n");
+ return err;
+ }
+
+ err = prestera_fw_hdr_parse(fw, f);
+ if (err) {
+ dev_err(fw->dev.dev, "FW image header is invalid\n");
+ goto out_release;
+ }
+
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+
+out_release:
+ release_firmware(f);
+ return err;
+}
+
+static int prestera_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const char *driver_name = pdev->driver->name;
+ struct prestera_fw *fw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) |
+ BIT(PRESTERA_PCI_BAR_PP),
+ pci_name(pdev));
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_dma_mask;
+ }
+
+ pci_set_master(pdev);
+
+ fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ err = -ENOMEM;
+ goto err_pci_dev_alloc;
+ }
+
+ fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW];
+ fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP];
+ fw->dev.dev = &pdev->dev;
+
+ pci_set_drvdata(pdev, fw);
+
+ err = prestera_fw_init(fw);
+ if (err)
+ goto err_prestera_fw_init;
+
+ dev_info(fw->dev.dev, "Prestera FW is ready\n");
+
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ if (!fw->wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI IRQ init failed\n");
+ goto err_irq_alloc;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler,
+ 0, driver_name, fw);
+ if (err) {
+ dev_err(&pdev->dev, "fail to request IRQ\n");
+ goto err_request_irq;
+ }
+
+ err = prestera_device_register(&fw->dev);
+ if (err)
+ goto err_prestera_dev_register;
+
+ return 0;
+
+err_prestera_dev_register:
+ free_irq(pci_irq_vector(pdev, 0), fw);
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_alloc:
+ destroy_workqueue(fw->wq);
+err_wq_alloc:
+ prestera_fw_uninit(fw);
+err_prestera_fw_init:
+err_pci_dev_alloc:
+err_dma_mask:
+ return err;
+}
+
+static void prestera_pci_remove(struct pci_dev *pdev)
+{
+ struct prestera_fw *fw = pci_get_drvdata(pdev);
+
+ prestera_device_unregister(&fw->dev);
+ free_irq(pci_irq_vector(pdev, 0), fw);
+ pci_free_irq_vectors(pdev);
+ destroy_workqueue(fw->wq);
+ prestera_fw_uninit(fw);
+}
+
+static const struct pci_device_id prestera_pci_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
+
+static struct pci_driver prestera_pci_driver = {
+ .name = "Prestera DX",
+ .id_table = prestera_pci_devices,
+ .probe = prestera_pci_probe,
+ .remove = prestera_pci_remove,
+};
+module_pci_driver(prestera_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch PCI interface");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
new file mode 100644
index 000000000000..2a13c318048c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "prestera_dsa.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+
+#define PRESTERA_SDMA_WAIT_MUL 10
+
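+/* HW SDMA descriptor; bit 31 of word1 holds the CPU/DMA ownership flag */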
+struct prestera_sdma_desc {
+ __le32 word1;
+ __le32 word2;
+ __le32 buff;
+ __le32 next;
+} __packed __aligned(16);
+
+#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544
+
+#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
+ ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))
+
+#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
+ (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1
+
+#define PRESTERA_SDMA_RX_QUEUE_NUM 8
+
+#define PRESTERA_SDMA_RX_DESC_PER_Q 1000
+
+#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
+#define PRESTERA_SDMA_TX_MAX_BURST 64
+
+#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U
+
+#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
+ (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_TX_DESC_LAST BIT(20)
+#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21)
+#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12)
+
+#define PRESTERA_SDMA_TX_DESC_SINGLE \
+ (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)
+
+#define PRESTERA_SDMA_TX_DESC_INIT \
+ (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)
+
+#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814
+#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680
+#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16)
+
+#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0
+#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868
+
+struct prestera_sdma_buf {
+ struct prestera_sdma_desc *desc;
+ dma_addr_t desc_dma;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ bool is_used;
+};
+
+struct prestera_rx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_rx;
+};
+
+struct prestera_tx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_tx;
+ int max_burst;
+ int burst;
+};
+
+struct prestera_sdma {
+ struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
+ struct prestera_tx_ring tx_ring;
+ struct prestera_switch *sw;
+ struct dma_pool *desc_pool;
+ struct work_struct tx_work;
+ struct napi_struct rx_napi;
+ struct net_device napi_dev;
+ u32 map_addr;
+ u64 dma_mask;
+ /* protect SDMA from concurrent access by multiple CPUs */
+ spinlock_t tx_lock;
+};
+
+struct prestera_rxtx {
+ struct prestera_sdma sdma;
+};
+
+static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct prestera_sdma_desc *desc;
+ dma_addr_t dma;
+
+ desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
+ if (!desc)
+ return -ENOMEM;
+
+ buf->buf_dma = DMA_MAPPING_ERROR;
+ buf->desc_dma = dma;
+ buf->desc = desc;
+ buf->skb = NULL;
+
+ return 0;
+}
+
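+/* convert a host DMA address to the SDMA engine's address space */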
+static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
+{
+ return sdma->map_addr + pa;
+}
+
+static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
+ desc->word2 = cpu_to_le32(word);
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+
+ /* make sure the buffer is set before resetting the descriptor */
+ wmb();
+
+ desc->word1 = cpu_to_le32(0xA0000000);
+}
+
+static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dev = sdma->sw->dev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto err_dma_map;
+
+ if (buf->skb)
+ dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+
+err_dma_map:
+ kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ dma_addr_t buf_dma = buf->buf_dma;
+ struct sk_buff *skb = buf->skb;
+ u32 len = skb->len;
+ int err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, buf);
+ if (err) {
+ buf->buf_dma = buf_dma;
+ buf->skb = skb;
+
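+ /* keep the old mapped buffer and pass a copy of the frame up the stack */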
+ skb = alloc_skb(skb->len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ skb_copy_from_linear_data(buf->skb, skb->data, len);
+ }
+ }
+
+ prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);
+
+ return skb;
+}
+
+static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ const struct prestera_port *port;
+ struct prestera_dsa dsa;
+ u32 hw_port, dev_id;
+ int err;
+
+ skb_pull(skb, ETH_HLEN);
+
+ /* the ethertype field is part of the DSA header */
+ err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
+ if (err)
+ return err;
+
+ dev_id = dsa.hw_dev_num;
+ hw_port = dsa.port_num;
+
+ port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
+ if (unlikely(!port)) {
+ dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port (%u, %u)\n",
+ dev_id, hw_port);
+ return -ENOENT;
+ }
+
+ if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
+ return -EINVAL;
+
+ /* remove DSA tag and update checksum */
+ skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);
+
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
+ ETH_ALEN * 2);
+
+ skb_push(skb, ETH_HLEN);
+
+ skb->protocol = eth_type_trans(skb, port->dev);
+
+ if (dsa.vlan.is_tagged) {
+ u16 tci = dsa.vlan.vid & VLAN_VID_MASK;
+
+ tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
+ if (dsa.vlan.cfi_bit)
+ tci |= VLAN_CFI_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
+ }
+
+ return 0;
+}
+
+static int prestera_sdma_next_rx_buf_idx(int buf_idx)
+{
+ return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
+}
+
+static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ unsigned int rxq_done_map = 0;
+ struct prestera_sdma *sdma;
+ struct list_head rx_list;
+ unsigned int qmask;
+ int pkts_done = 0;
+ int q;
+
+ qmask = GENMASK(qnum - 1, 0);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ sdma = container_of(napi, struct prestera_sdma, rx_napi);
+
+ while (pkts_done < budget && rxq_done_map != qmask) {
+ for (q = 0; q < qnum && pkts_done < budget; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+ struct prestera_sdma_desc *desc;
+ struct prestera_sdma_buf *buf;
+ int buf_idx = ring->next_rx;
+ struct sk_buff *skb;
+
+ buf = &ring->bufs[buf_idx];
+ desc = buf->desc;
+
+ if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
+ rxq_done_map &= ~BIT(q);
+ } else {
+ rxq_done_map |= BIT(q);
+ continue;
+ }
+
+ pkts_done++;
+
+ __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));
+
+ skb = prestera_sdma_rx_skb_get(sdma, buf);
+ if (!skb)
+ goto rx_next_buf;
+
+ if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
+ goto rx_next_buf;
+
+ list_add_tail(&skb->list, &rx_list);
+rx_next_buf:
+ ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
+ }
+ }
+
+ if (pkts_done < budget && napi_complete_done(napi, pkts_done))
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
+ GENMASK(9, 2));
+
+ netif_receive_skb_list(&rx_list);
+
+ return pkts_done;
+}
+
+static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int q, b;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ if (!ring->bufs)
+ break;
+
+ for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc_dma)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ if (buf->buf_dma != DMA_MAPPING_ERROR)
+ dma_unmap_single(sdma->sw->dev->dev,
+ buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(buf->skb);
+ }
+ }
+}
+
+static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
+{
+ int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int err;
+ int q;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!ring->bufs)
+ return -ENOMEM;
+
+ ring->next_rx = 0;
+
+ tail = &ring->bufs[bnum - 1];
+ head = &ring->bufs[0];
+ next = head;
+ prev = next;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, next);
+ if (err)
+ return err;
+
+ prestera_sdma_rx_desc_init(sdma, next->desc,
+ next->buf_dma);
+
+ prestera_sdma_rx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
+ prestera_sdma_map(sdma, head->desc_dma));
+ }
+
+ /* make sure all rx descs are filled before enabling all rx queues */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(7, 0));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc)
+{
+ desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
+ desc->word2 = 0;
+}
+
+static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf, size_t len)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+ desc->word2 = cpu_to_le32(word);
+}
+
+static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
+{
+ u32 word = le32_to_cpu(desc->word1);
+
+ word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;
+
+ /* make sure everything is written before enabling xmit */
+ wmb();
+
+ desc->word1 = cpu_to_le32(word);
+}
+
+static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -ENOMEM;
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+}
+
+static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+
+ dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
+}
+
+static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
+{
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma *sdma;
+ int b;
+
+ sdma = container_of(work, struct prestera_sdma, tx_work);
+
+ tx_ring = &sdma->tx_ring;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &tx_ring->bufs[b];
+
+ if (!buf->is_used)
+ continue;
+
+ if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
+ continue;
+
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+ dev_consume_skb_any(buf->skb);
+ buf->skb = NULL;
+
+ /* make sure everything is cleaned up */
+ wmb();
+
+ buf->is_used = false;
+ }
+}
+
+static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
+{
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int err;
+
+ INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
+ spin_lock_init(&sdma->tx_lock);
+
+ tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!tx_ring->bufs)
+ return -ENOMEM;
+
+ tail = &tx_ring->bufs[bnum - 1];
+ head = &tx_ring->bufs[0];
+ next = head;
+ prev = next;
+
+ tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
+ tx_ring->burst = tx_ring->max_burst;
+ tx_ring->next_tx = 0;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ next->is_used = false;
+
+ prestera_sdma_tx_desc_init(sdma, next->desc);
+
+ prestera_sdma_tx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ /* make sure descriptors are written */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
+ prestera_sdma_map(sdma, head->desc_dma));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
+{
+ struct prestera_tx_ring *ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int b;
+
+ cancel_work_sync(&sdma->tx_work);
+
+ if (!ring->bufs)
+ return;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
+ buf->skb->len, DMA_TO_DEVICE);
+
+ dev_consume_skb_any(buf->skb);
+ }
+}
+
+static void prestera_rxtx_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt,
+ void *arg)
+{
+ struct prestera_sdma *sdma = arg;
+
+ if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
+ return;
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
+ napi_schedule(&sdma->rx_napi);
+}
+
+static int prestera_sdma_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+ struct device *dev = sw->dev->dev;
+ struct prestera_rxtx_params p;
+ int err;
+
+ p.use_sdma = true;
+
+ err = prestera_hw_rxtx_init(sw, &p);
+ if (err) {
+ dev_err(dev, "failed to init rxtx by hw\n");
+ return err;
+ }
+
+ sdma->dma_mask = dma_get_mask(dev);
+ sdma->map_addr = p.map_addr;
+ sdma->sw = sw;
+
+ sdma->desc_pool = dma_pool_create("desc_pool", dev,
+ sizeof(struct prestera_sdma_desc),
+ 16, 0);
+ if (!sdma->desc_pool)
+ return -ENOMEM;
+
+ err = prestera_sdma_rx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init rx ring\n");
+ goto err_rx_init;
+ }
+
+ err = prestera_sdma_tx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init tx ring\n");
+ goto err_tx_init;
+ }
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event,
+ sdma);
+ if (err)
+ goto err_evt_register;
+
+ init_dummy_netdev(&sdma->napi_dev);
+
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ napi_enable(&sdma->rx_napi);
+
+ return 0;
+
+err_evt_register:
+err_tx_init:
+ prestera_sdma_tx_fini(sdma);
+err_rx_init:
+ prestera_sdma_rx_fini(sdma);
+
+ dma_pool_destroy(sdma->desc_pool);
+ return err;
+}
+
+static void prestera_sdma_switch_fini(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+
+ napi_disable(&sdma->rx_napi);
+ netif_napi_del(&sdma->rx_napi);
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
+ prestera_sdma_tx_fini(sdma);
+ prestera_sdma_rx_fini(sdma);
+ dma_pool_destroy(sdma->desc_pool);
+}
+
+static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
+{
+ return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
+}
+
+static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
+ struct prestera_tx_ring *tx_ring)
+{
+ int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;
+
+ do {
+ if (prestera_sdma_is_ready(sdma))
+ return 0;
+
+ udelay(1);
+ } while (--tx_wait_num);
+
+ return -EBUSY;
+}
+
+static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
+{
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
+ schedule_work(&sdma->tx_work);
+}
+
+static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ struct net_device *dev = skb->dev;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma_buf *buf;
+ int err;
+
+ spin_lock(&sdma->tx_lock);
+
+ tx_ring = &sdma->tx_ring;
+
+ buf = &tx_ring->bufs[tx_ring->next_tx];
+ if (buf->is_used) {
+ schedule_work(&sdma->tx_work);
+ goto drop_skb;
+ }
+
+ if (unlikely(eth_skb_pad(skb)))
+ goto drop_skb_nofree;
+
+ err = prestera_sdma_tx_buf_map(sdma, buf, skb);
+ if (err)
+ goto drop_skb;
+
+ prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);
+
+ dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
+ DMA_TO_DEVICE);
+
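+ /* poll the SDMA engine for TX readiness only once per max_burst frames */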
+ if (tx_ring->burst) {
+ tx_ring->burst--;
+ } else {
+ tx_ring->burst = tx_ring->max_burst;
+
+ err = prestera_sdma_tx_wait(sdma, tx_ring);
+ if (err)
+ goto drop_skb_unmap;
+ }
+
+ tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
+ prestera_sdma_tx_desc_xmit(buf->desc);
+ buf->is_used = true;
+
+ prestera_sdma_tx_start(sdma);
+
+ goto tx_done;
+
+drop_skb_unmap:
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+drop_skb:
+ dev_consume_skb_any(skb);
+drop_skb_nofree:
+ dev->stats.tx_dropped++;
+tx_done:
+ spin_unlock(&sdma->tx_lock);
+ return NETDEV_TX_OK;
+}
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_rxtx *rxtx;
+
+ rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
+ if (!rxtx)
+ return -ENOMEM;
+
+ sw->rxtx = rxtx;
+
+ return prestera_sdma_switch_init(sw);
+}
+
+void prestera_rxtx_switch_fini(struct prestera_switch *sw)
+{
+ prestera_sdma_switch_fini(sw);
+ kfree(sw->rxtx);
+}
+
+int prestera_rxtx_port_init(struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_hw_rxtx_port_init(port);
+ if (err)
+ return err;
+
+ port->dev->needed_headroom = PRESTERA_DSA_HLEN;
+
+ return 0;
+}
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
+{
+ struct prestera_dsa dsa;
+
+ dsa.hw_dev_num = port->dev_id;
+ dsa.port_num = port->hw_id;
+
+ if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
+ return NET_XMIT_DROP;
+
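+ /* make room for the DSA tag between the MAC addresses and the ethertype */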
+ skb_push(skb, PRESTERA_DSA_HLEN);
+ memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);
+
+ if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
+ return NET_XMIT_DROP;
+
+ return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
new file mode 100644
index 000000000000..882a1225c323
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_RXTX_H_
+#define _PRESTERA_RXTX_H_
+
+#include <linux/netdevice.h>
+
+struct prestera_switch;
+struct prestera_port;
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw);
+void prestera_rxtx_switch_fini(struct prestera_switch *sw);
+
+int prestera_rxtx_port_init(struct prestera_port *port);
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb);
+
+#endif /* _PRESTERA_RXTX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
new file mode 100644
index 000000000000..7d83e1f91ef1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_VID_ALL (0xffff)
+
+#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000
+#define PRESTERA_MAX_AGEING_TIME_MS 1000000000
+#define PRESTERA_MIN_AGEING_TIME_MS 32000
+
+struct prestera_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct prestera_switchdev {
+ struct prestera_switch *sw;
+ struct list_head bridge_list;
+ bool bridge_8021q_exists;
+ struct notifier_block swdev_nb_blk;
+ struct notifier_block swdev_nb;
+};
+
+struct prestera_bridge {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_switchdev *swdev;
+ struct list_head port_list;
+ bool vlan_enabled;
+ u16 bridge_id;
+};
+
+struct prestera_bridge_port {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_bridge *bridge;
+ struct list_head vlan_list;
+ refcount_t ref_count;
+ unsigned long flags;
+ u8 stp_state;
+};
+
+struct prestera_bridge_vlan {
+ struct list_head head;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct prestera_port_vlan {
+ struct list_head br_vlan_head;
+ struct list_head port_head;
+ struct prestera_port *port;
+ struct prestera_bridge_port *br_port;
+ u16 vid;
+};
+
+static struct workqueue_struct *swdev_wq;
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state);
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL);
+ if (!br_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&br_vlan->port_vlan_list);
+ br_vlan->vid = vid;
+ list_add(&br_vlan->head, &br_port->vlan_list);
+
+ return br_vlan;
+}
+
+static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan)
+{
+ list_del(&br_vlan->head);
+ WARN_ON(!list_empty(&br_vlan->port_vlan_list));
+ kfree(br_vlan);
+}
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid)
+ return br_vlan;
+ }
+
+ return NULL;
+}
+
+static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge,
+ u16 vid)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int count = 0;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid) {
+ count += 1;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan)
+{
+ if (list_empty(&br_vlan->port_vlan_list))
+ prestera_bridge_vlan_destroy(br_vlan);
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &port->vlans_list, port_head) {
+ if (port_vlan->vid == vid)
+ return port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged)
+{
+ struct prestera_port_vlan *port_vlan;
+ int err;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan)
+ return ERR_PTR(-EEXIST);
+
+ err = prestera_hw_vlan_port_set(port, vid, true, untagged);
+ if (err)
+ return ERR_PTR(err);
+
+ port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL);
+ if (!port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
+ }
+
+ port_vlan->port = port;
+ port_vlan->vid = vid;
+
+ list_add(&port_vlan->port_head, &port->vlans_list);
+
+ return port_vlan;
+
+err_port_vlan_alloc:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ return ERR_PTR(err);
+}
+
+static void
+prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
+{
+ u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ struct prestera_bridge_port *br_port;
+ bool last_port, last_vlan;
+ u16 vid = port_vlan->vid;
+ int port_count;
+
+ br_port = port_vlan->br_port;
+ port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid);
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+
+ last_vlan = list_is_singular(&br_port->vlan_list);
+ last_port = port_count == 1;
+
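+ /* pick FDB flush scope: whole port, whole VLAN, or this port+VLAN */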
+ if (last_vlan)
+ prestera_hw_fdb_flush_port(port, fdb_flush_mode);
+ else if (last_port)
+ prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
+ else
+ prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+
+ list_del(&port_vlan->br_vlan_head);
+ prestera_bridge_vlan_put(br_vlan);
+ prestera_bridge_port_put(br_port);
+ port_vlan->br_port = NULL;
+}
+
+static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan)
+{
+ struct prestera_port *port = port_vlan->port;
+ u16 vid = port_vlan->vid;
+
+ if (port_vlan->br_port)
+ prestera_port_vlan_bridge_leave(port_vlan);
+
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ list_del(&port_vlan->port_head);
+ kfree(port_vlan);
+}
+
+static struct prestera_bridge *
+prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
+{
+ bool vlan_enabled = br_vlan_enabled(dev);
+ struct prestera_bridge *bridge;
+ u16 bridge_id;
+ int err;
+
+ if (vlan_enabled && swdev->bridge_8021q_exists) {
+ netdev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ if (vlan_enabled) {
+ swdev->bridge_8021q_exists = true;
+ } else {
+ err = prestera_hw_bridge_create(swdev->sw, &bridge_id);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ bridge->bridge_id = bridge_id;
+ }
+
+ bridge->vlan_enabled = vlan_enabled;
+ bridge->swdev = swdev;
+ bridge->dev = dev;
+
+ INIT_LIST_HEAD(&bridge->port_list);
+
+ list_add(&bridge->head, &swdev->bridge_list);
+
+ return bridge;
+}
+
+static void prestera_bridge_destroy(struct prestera_bridge *bridge)
+{
+ struct prestera_switchdev *swdev = bridge->swdev;
+
+ list_del(&bridge->head);
+
+ if (bridge->vlan_enabled)
+ swdev->bridge_8021q_exists = false;
+ else
+ prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+
+ WARN_ON(!list_empty(&bridge->port_list));
+ kfree(bridge);
+}
+
+static void prestera_bridge_put(struct prestera_bridge *bridge)
+{
+ if (list_empty(&bridge->port_list))
+ prestera_bridge_destroy(bridge);
+}
+
+static
+struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev,
+ const struct net_device *dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &swdev->bridge_list, head)
+ if (bridge->dev == dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ if (br_port->dev == dev)
+ return br_port;
+ }
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
+ struct net_device *dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_by_dev(bridge, dev);
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_create(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return NULL;
+
+ br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
+ br_port->stp_state = BR_STATE_DISABLED;
+ refcount_set(&br_port->ref_count, 1);
+ br_port->bridge = bridge;
+ br_port->dev = dev;
+
+ INIT_LIST_HEAD(&br_port->vlan_list);
+ list_add(&br_port->head, &bridge->port_list);
+
+ return br_port;
+}
+
+static void
+prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
+{
+ list_del(&br_port->head);
+ WARN_ON(!list_empty(&br_port->vlan_list));
+ kfree(br_port);
+}
+
+static void prestera_bridge_port_get(struct prestera_bridge_port *br_port)
+{
+ refcount_inc(&br_port->ref_count);
+}
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port)
+{
+ struct prestera_bridge *bridge = br_port->bridge;
+
+ if (refcount_dec_and_test(&br_port->ref_count)) {
+ prestera_bridge_port_destroy(br_port);
+ prestera_bridge_put(bridge);
+ }
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, dev);
+ if (br_port) {
+ prestera_bridge_port_get(br_port);
+ return br_port;
+ }
+
+ br_port = prestera_bridge_port_create(bridge, dev);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ return br_port;
+}
+
+static int
+prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+ struct prestera_bridge *bridge = br_port->bridge;
+ int err;
+
+ err = prestera_hw_bridge_port_add(port, bridge->bridge_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_port_flood_set;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ return 0;
+
+err_port_learning_set:
+ prestera_hw_port_flood_set(port, false);
+err_port_flood_set:
+ prestera_hw_bridge_port_delete(port, bridge->bridge_id);
+
+ return err;
+}
+
+static int prestera_port_bridge_join(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge) {
+ bridge = prestera_bridge_create(swdev, upper);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ br_port = prestera_bridge_port_add(bridge, port->dev);
+ if (IS_ERR(br_port)) {
+ err = PTR_ERR(br_port);
+ goto err_brport_create;
+ }
+
+ if (bridge->vlan_enabled)
+ return 0;
+
+ err = prestera_bridge_1d_port_join(br_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ prestera_bridge_port_put(br_port);
+err_brport_create:
+ prestera_bridge_put(bridge);
+ return err;
+}
+
+static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID);
+}
+
+static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id);
+}
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state)
+{
+ u8 hw_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PRESTERA_STP_DISABLED;
+ break;
+
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ hw_state = PRESTERA_STP_BLOCK_LISTEN;
+ break;
+
+ case BR_STATE_LEARNING:
+ hw_state = PRESTERA_STP_LEARN;
+ break;
+
+ case BR_STATE_FORWARDING:
+ hw_state = PRESTERA_STP_FORWARD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
+}
+
+static void prestera_port_bridge_leave(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge)
+ return;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, port->dev);
+ if (!br_port)
+ return;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ prestera_bridge_1q_port_leave(br_port);
+ else
+ prestera_bridge_1d_port_leave(br_port);
+
+ prestera_hw_port_learning_set(port, false);
+ prestera_hw_port_flood_set(port, false);
+ prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
+ prestera_bridge_port_put(br_port);
+}
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct prestera_port *port;
+ struct net_device *upper;
+ int err;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ port = netdev_priv(dev);
+ upper = info->upper_dev;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ if (!netif_is_bridge_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ if (!netif_is_bridge_master(upper))
+ break;
+
+ if (info->linking) {
+ err = prestera_port_bridge_join(port, upper);
+ if (err)
+ return err;
+ } else {
+ prestera_port_bridge_leave(port, upper);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_flags_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ unsigned long flags)
+{
+ struct prestera_bridge_port *br_port;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
+ if (err)
+ return err;
+
+ br_port->flags = flags;
+
+ return 0;
+}
+
+static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
+ struct prestera_switch *sw = port->sw;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
+
+ return 0;
+ }
+
+ return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
+}
+
+static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ bool vlan_enabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge = prestera_bridge_by_dev(sw->swdev, dev);
+ if (WARN_ON(!bridge))
+ return -EINVAL;
+
+ if (bridge->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n");
+
+ return -EINVAL;
+}
+
+static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
+ struct prestera_bridge_vlan *br_vlan,
+ u8 state)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) {
+ if (port_vlan->port != port)
+ continue;
+
+ return prestera_port_vid_stp_set(port, br_vlan->vid, state);
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ u8 state)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int err;
+ u16 vid;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ if (!br_port->bridge->vlan_enabled) {
+ vid = br_port->bridge->bridge_id;
+ err = prestera_port_vid_stp_set(port, vid, state);
+ if (err)
+ goto err_port_stp_set;
+ } else {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ err = prestera_port_bridge_vlan_stp_set(port, br_vlan,
+ state);
+ if (err)
+ goto err_port_vlan_stp_set;
+ }
+ }
+
+ br_port->stp_state = state;
+
+ return 0;
+
+err_port_vlan_stp_set:
+ list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head)
+ prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state);
+ return err;
+
+err_port_stp_set:
+ prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+
+ return err;
+}
+
+static int prestera_port_obj_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = prestera_port_attr_stp_state_set(port, trans,
+ attr->orig_dev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags &
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ err = -EINVAL;
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = prestera_port_attr_br_flags_set(port, trans,
+ attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = prestera_port_attr_br_ageing_set(port, trans,
+ attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = prestera_port_attr_br_vlan_set(port, trans,
+ attr->orig_dev,
+ attr->u.vlan_filtering);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static void
+prestera_fdb_offload_notify(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *info)
+{
+ struct switchdev_notifier_fdb_info send_info;
+
+ send_info.addr = info->addr;
+ send_info.vid = info->vid;
+ send_info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev,
+ &send_info.info, NULL);
+}
+
+static int prestera_port_fdb_set(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *fdb_info,
+ bool adding)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+ u16 vid;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (!br_port)
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ vid = fdb_info->vid;
+ else
+ vid = bridge->bridge_id;
+
+ if (adding)
+ err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false);
+ else
+ err = prestera_hw_fdb_del(port, fdb_info->addr, vid);
+
+ return err;
+}
+
+static void prestera_fdb_event_work(struct work_struct *work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct prestera_fdb_event_work *swdev_work;
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ swdev_work = container_of(work, struct prestera_fdb_event_work, work);
+ dev = swdev_work->dev;
+
+ rtnl_lock();
+
+ port = prestera_port_dev_lower_find(dev);
+ if (!port)
+ goto out_unlock;
+
+ switch (swdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
+
+ err = prestera_port_fdb_set(port, fdb_info, true);
+ if (err)
+ break;
+
+ prestera_fdb_offload_notify(port, fdb_info);
+ break;
+
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ prestera_port_fdb_set(port, fdb_info, false);
+ break;
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+ kfree(swdev_work->fdb_info.addr);
+ kfree(swdev_work);
+ dev_put(dev);
+}
+
+static int prestera_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct prestera_fdb_event_work *swdev_work;
+ struct net_device *upper;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!prestera_netdev_check(dev))
+ return NOTIFY_DONE;
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
+ swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC);
+ if (!swdev_work)
+ return NOTIFY_BAD;
+
+ swdev_work->event = event;
+ swdev_work->dev = dev;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
+ memcpy(&swdev_work->fdb_info, ptr,
+ sizeof(swdev_work->fdb_info));
+
+ swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!swdev_work->fdb_info.addr)
+ goto out_bad;
+
+ ether_addr_copy((u8 *)swdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+ break;
+
+ default:
+ kfree(swdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(swdev_wq, &swdev_work->work);
+ return NOTIFY_DONE;
+
+out_bad:
+ kfree(swdev_work);
+ return NOTIFY_BAD;
+}
+
+static int
+prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ u16 vid = port_vlan->vid;
+ int err;
+
+ if (port_vlan->br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+ if (!br_vlan) {
+ br_vlan = prestera_bridge_vlan_create(br_port, vid);
+ if (!br_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+ }
+
+ list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list);
+
+ prestera_bridge_port_get(br_port);
+ port_vlan->br_port = br_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
+err_port_vid_stp_set:
+ prestera_hw_port_learning_set(port, false);
+err_port_learning_set:
+ return err;
+}
+
+static int
+prestera_bridge_port_vlan_add(struct prestera_port *port,
+ struct prestera_bridge_port *br_port,
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port_vlan *port_vlan;
+ u16 old_pvid = port->pvid;
+ u16 pvid;
+ int err;
+
+ if (is_pvid)
+ pvid = vid;
+ else
+ pvid = port->pvid == vid ? 0 : port->pvid;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan && port_vlan->br_port != br_port)
+ return -EEXIST;
+
+ if (!port_vlan) {
+ port_vlan = prestera_port_vlan_create(port, vid, is_untagged);
+ if (IS_ERR(port_vlan))
+ return PTR_ERR(port_vlan);
+ } else {
+ err = prestera_hw_vlan_port_set(port, vid, true, is_untagged);
+ if (err)
+ goto err_port_vlan_set;
+ }
+
+ err = prestera_port_pvid_set(port, pvid);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = prestera_port_vlan_bridge_join(port_vlan, br_port);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ return 0;
+
+err_port_vlan_bridge_join:
+ prestera_port_pvid_set(port, old_pvid);
+err_port_pvid_set:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+err_port_vlan_set:
+ prestera_port_vlan_destroy(port_vlan);
+
+ return err;
+}
+
+static void
+prestera_bridge_port_vlan_del(struct prestera_port *port,
+ struct prestera_bridge_port *br_port, u16 vid)
+{
+ u16 pvid = port->pvid == vid ? 0 : port->pvid;
+ struct prestera_port_vlan *port_vlan;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (WARN_ON(!port_vlan))
+ return;
+
+ prestera_port_vlan_bridge_leave(port_vlan);
+ prestera_port_pvid_set(port, pvid);
+ prestera_port_vlan_destroy(port_vlan);
+}
+
+static int prestera_port_vlans_add(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return 0;
+
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+ if (!bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = prestera_bridge_port_vlan_add(port, br_port,
+ vid, flag_untagged,
+ flag_pvid, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ return prestera_port_vlans_add(port, vlan, trans, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_port_vlans_del(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return -EOPNOTSUPP;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ if (!br_port->bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ prestera_bridge_port_vlan_del(port, br_port, vid);
+
+ return 0;
+}
+
+static int prestera_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_switchdev_blk_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void prestera_fdb_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->fdb_evt.port_id);
+ if (!port)
+ return;
+
+ info.addr = evt->fdb_evt.data.mac;
+ info.vid = evt->fdb_evt.vid;
+ info.offloaded = true;
+
+ rtnl_lock();
+
+ switch (evt->id) {
+ case PRESTERA_FDB_EVENT_LEARNED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ case PRESTERA_FDB_EVENT_AGED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ }
+
+ rtnl_unlock();
+}
+
+static int prestera_fdb_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event, NULL);
+ if (err)
+ return err;
+
+ err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS);
+ if (err)
+ goto err_ageing_set;
+
+ return 0;
+
+err_ageing_set:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+ return err;
+}
+
+static void prestera_fdb_fini(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+}
+
+static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev)
+{
+ int err;
+
+ swdev->swdev_nb.notifier_call = prestera_switchdev_event;
+ err = register_switchdev_notifier(&swdev->swdev_nb);
+ if (err)
+ goto err_register_swdev_notifier;
+
+ swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event;
+ err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ if (err)
+ goto err_register_blk_swdev_notifier;
+
+ return 0;
+
+err_register_blk_swdev_notifier:
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+err_register_swdev_notifier:
+ /* swdev_wq is created and destroyed by prestera_switchdev_init() */
+ return err;
+}
+
+static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev)
+{
+ unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+}
+
+int prestera_switchdev_init(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev;
+ int err;
+
+ swdev = kzalloc(sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return -ENOMEM;
+
+ sw->swdev = swdev;
+ swdev->sw = sw;
+
+ INIT_LIST_HEAD(&swdev->bridge_list);
+
+ swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br");
+ if (!swdev_wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
+ err = prestera_switchdev_handler_init(swdev);
+ if (err)
+ goto err_swdev_init;
+
+ err = prestera_fdb_init(sw);
+ if (err)
+ goto err_fdb_init;
+
+ return 0;
+
+err_fdb_init:
+err_swdev_init:
+ destroy_workqueue(swdev_wq);
+err_alloc_wq:
+ kfree(swdev);
+
+ return err;
+}
+
+void prestera_switchdev_fini(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev = sw->swdev;
+
+ prestera_fdb_fini(sw);
+ prestera_switchdev_handler_fini(swdev);
+ destroy_workqueue(swdev_wq);
+ kfree(swdev);
+}
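
The attr handlers above all branch on switchdev_trans_ph_prepare(). As a
hedged sketch of that two-phase contract (example_port, example_hw_write
and EXAMPLE_MAX are illustrative names, not part of this driver):

	/* Phase 1 (prepare) only validates and must leave no state behind;
	 * phase 2 (commit) applies the change and should not fail for
	 * reasons that prepare could have rejected.
	 */
	static int example_attr_set(struct example_port *port,
				    struct switchdev_trans *trans, u32 value)
	{
		if (switchdev_trans_ph_prepare(trans)) {
			if (value > EXAMPLE_MAX)
				return -ERANGE;	/* reject early, nothing to undo */
			return 0;
		}

		return example_hw_write(port, value);	/* commit phase */
	}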
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
new file mode 100644
index 000000000000..606e21d2355b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SWITCHDEV_H_
+#define _PRESTERA_SWITCHDEV_H_
+
+int prestera_switchdev_init(struct prestera_switch *sw);
+void prestera_switchdev_fini(struct prestera_switch *sw);
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr);
+
+#endif /* _PRESTERA_SWITCHDEV_H_ */
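
prestera_bridge_port_event() above is meant to be called from the driver's
netdevice notifier; a hedged sketch of such a dispatch (the handler name
and wiring here are illustrative, not quoted from this patch):

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
		int err;

		switch (event) {
		case NETDEV_PRECHANGEUPPER:
		case NETDEV_CHANGEUPPER:
			/* Validate and apply bridge enslavement changes. */
			err = prestera_bridge_port_event(dev, event, ptr);
			return notifier_from_errno(err);
		default:
			return NOTIFY_DONE;
		}
	}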
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index faac94b2d5ff..d1e4d42e497d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1187,11 +1187,10 @@ static int pxa168_eth_stop(struct net_device *dev)
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
- int retval;
struct pxa168_eth_private *pep = netdev_priv(dev);
dev->mtu = mtu;
- retval = set_port_config_ext(pep);
+ set_port_config_ext(pep);
if (!netif_running(dev))
return 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6a930351cb23..8a9c0f490bfb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3338,9 +3338,9 @@ static void skge_error_irq(struct skge_hw *hw)
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(unsigned long arg)
+static void skge_extirq(struct tasklet_struct *t)
{
- struct skge_hw *hw = (struct skge_hw *) arg;
+ struct skge_hw *hw = from_tasklet(hw, t, phy_task);
int port;
for (port = 0; port < hw->ports; port++) {
@@ -3927,7 +3927,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
spin_lock_init(&hw->phy_lock);
- tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
+ tasklet_setup(&hw->phy_task, skge_extirq);
hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
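
The skge hunks above follow the kernel-wide tasklet API conversion: the
callback now receives the tasklet pointer and recovers its context with
from_tasklet() instead of an unsigned long cast. A minimal self-contained
sketch of the pattern (my_dev and its members are made-up names):

	struct my_dev {
		struct tasklet_struct task;
		int pending;
	};

	static void my_dev_tasklet(struct tasklet_struct *t)
	{
		/* container_of() under the hood: map the tasklet pointer
		 * back to the structure that embeds it.
		 */
		struct my_dev *dev = from_tasklet(dev, t, task);

		dev->pending = 0;
	}

	static void my_dev_init(struct my_dev *dev)
	{
		/* No context argument; the callback derives it itself. */
		tasklet_setup(&dev->task, my_dev_tasklet);
	}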
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 65f8a4b6ed0c..3b8576b9c2f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -55,11 +55,11 @@
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-void mlx4_cq_tasklet_cb(unsigned long data)
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
- struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+ struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx4_cq *mcq, *temp;
spin_lock_irqsave(&ctx->lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index b816154bc79a..23849f2b9c25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1106,6 +1106,24 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
return err;
}
+static void mlx4_en_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct bitmap_iterator it;
+
+ bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
+
+ spin_lock_bh(&priv->stats_lock);
+ if (test_bit(FLOW_PRIORITY_STATS_IDX_TX_FRAMES,
+ priv->stats_bitmap.bitmap))
+ stats->tx_pause_frames = priv->tx_flowstats.tx_pause;
+ if (test_bit(FLOW_PRIORITY_STATS_IDX_RX_FRAMES,
+ priv->stats_bitmap.bitmap))
+ stats->rx_pause_frames = priv->rx_flowstats.rx_pause;
+ spin_unlock_bh(&priv->stats_lock);
+}
+
static void mlx4_en_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
@@ -2138,6 +2156,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_msglevel = mlx4_en_set_msglevel,
.get_coalesce = mlx4_en_get_coalesce,
.set_coalesce = mlx4_en_set_coalesce,
+ .get_pause_stats = mlx4_en_get_pause_stats,
.get_pauseparam = mlx4_en_get_pauseparam,
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
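
The new .get_pause_stats hook above plugs into the ethtool pause-stats
netlink API; a hedged minimal sketch for a hypothetical driver (my_priv
and its counters are stand-ins):

	static void my_get_pause_stats(struct net_device *dev,
				       struct ethtool_pause_stats *stats)
	{
		struct my_priv *priv = netdev_priv(dev);

		/* Fields left untouched are reported as unsupported, so
		 * only fill in what the hardware actually counts.
		 */
		stats->tx_pause_frames = priv->hw.tx_pause;
		stats->rx_pause_frames = priv->hw.rx_pause;
	}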
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9dff7b086c9f..d55434449c0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -1075,7 +1075,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index ae305c2e9225..9e48509ed3b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1057,8 +1057,7 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 527b52e48276..64bed7ac3836 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1217,7 +1217,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev);
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
u16 op, unsigned long timeout);
-void mlx4_cq_tasklet_cb(unsigned long data);
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 86b6051da8ec..51d4eaab6a2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -84,6 +84,11 @@ struct mlx4_en_flow_stats_rx {
MLX4_NUM_PRIORITIES)
};
+#define FLOW_PRIORITY_STATS_IDX_RX_FRAMES (NUM_MAIN_STATS + \
+ NUM_PORT_STATS + \
+ NUM_PF_STATS + \
+ NUM_FLOW_PRIORITY_STATS_RX)
+
struct mlx4_en_flow_stats_tx {
u64 tx_pause;
u64 tx_pause_duration;
@@ -93,6 +98,13 @@ struct mlx4_en_flow_stats_tx {
MLX4_NUM_PRIORITIES)
};
+#define FLOW_PRIORITY_STATS_IDX_TX_FRAMES (NUM_MAIN_STATS + \
+ NUM_PORT_STATS + \
+ NUM_PF_STATS + \
+ NUM_FLOW_PRIORITY_STATS_RX + \
+ NUM_FLOW_STATS_RX + \
+ NUM_FLOW_PRIORITY_STATS_TX)
+
#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_RX)
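
The *_IDX_* macros above compute a counter's flat position in the stats
bitmap by summing the sizes of every stat group laid out before it; a toy
illustration with made-up group sizes:

	#define NUM_GROUP_A		10
	#define NUM_GROUP_B		4
	/* group C starts after all of A and B */
	#define GROUP_C_FIRST_IDX	(NUM_GROUP_A + NUM_GROUP_B)	/* == 14 */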
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 0b3eaa102751..9826a041e407 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
- en/mapping.o esw/chains.o en/tc_tun.o \
+ en/mapping.o lib/fs_chains.o en/tc_tun.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 8379b24cb838..df3e4938ecdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -42,11 +42,11 @@
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-void mlx5_cq_tasklet_cb(unsigned long data)
+void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
- struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+ struct mlx5_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx5_core_cq *mcq;
struct mlx5_core_cq *temp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index a894ea98c95a..3dc9dd3f24dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -43,19 +43,13 @@ static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
int mlx5_ec_init(struct mlx5_core_dev *dev)
{
- int err = 0;
-
if (!mlx5_core_is_ecpf(dev))
return 0;
/* ECPF shall enable HCA for peer PF in the same way a PF
* does this for its VFs.
*/
- err = mlx5_peer_pf_init(dev);
- if (err)
- return err;
-
- return 0;
+ return mlx5_peer_pf_init(dev);
}
void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 4f33658da25a..5368e06cd71c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -221,6 +221,7 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_STRIDING_RQ,
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5E_PFLAG_SKB_TX_MPWQE,
MLX5E_NUM_PFLAGS, /* Keep last */
};
@@ -265,6 +266,7 @@ enum {
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
};
struct mlx5e_cq {
@@ -304,6 +306,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_MPWQE,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
@@ -312,26 +315,40 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
+struct mlx5e_tx_mpwqe {
+ /* Current MPWQE session */
+ struct mlx5e_tx_wqe *wqe;
+ u32 bytes_count;
+ u8 ds_count;
+ u8 pkt_count;
+ u8 inline_on;
+};
+
struct mlx5e_txqsq {
/* data path */
/* dirtied @completion */
u16 cc;
+ u16 skb_fifo_cc;
u32 dma_fifo_cc;
struct dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
+ u16 skb_fifo_pc;
u32 dma_fifo_pc;
+ struct mlx5e_tx_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
+ u16 skb_fifo_mask;
struct mlx5e_sq_stats *stats;
struct {
struct mlx5e_sq_dma *dma_fifo;
+ struct sk_buff **skb_fifo;
struct mlx5e_tx_wqe_info *wqe_info;
} db;
void __iomem *uar_map;
@@ -398,7 +415,7 @@ struct mlx5e_xdp_info {
};
};
-struct mlx5e_xdp_xmit_data {
+struct mlx5e_xmit_data {
dma_addr_t dma_addr;
void *data;
u32 len;
@@ -411,18 +428,10 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_mpwqe {
- /* Current MPWQE session */
- struct mlx5e_tx_wqe *wqe;
- u8 ds_count;
- u8 pkt_count;
- u8 inline_on;
-};
-
struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
- struct mlx5e_xdp_xmit_data *,
+ struct mlx5e_xmit_data *,
struct mlx5e_xdp_info *,
int);
@@ -437,7 +446,7 @@ struct mlx5e_xdpsq {
u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
u16 pc;
struct mlx5_wqe_ctrl_seg *doorbell_cseg;
- struct mlx5e_xdp_mpwqe mpwqe;
+ struct mlx5e_tx_mpwqe mpwqe;
struct mlx5e_cq cq;
@@ -600,7 +609,7 @@ struct mlx5e_rq {
struct dim dim; /* Dynamic Interrupt Moderation */
/* XDP */
- struct bpf_prog *xdp_prog;
+ struct bpf_prog __rcu *xdp_prog;
struct mlx5e_xdpsq *xdpsq;
DECLARE_BITMAP(flags, 8);
struct page_pool *page_pool;
@@ -1006,7 +1015,6 @@ int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6fdcd5e69476..6a97452dc60e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -12,9 +12,12 @@ enum {
};
struct mlx5e_tc_table {
- /* protects flow table */
+ /* Protects the dynamic assignment of the t member,
+ * which is the NIC tc root table.
+ */
struct mutex t_lock;
struct mlx5_flow_table *t;
+ struct mlx5_fs_chains *chains;
struct rhashtable ht;
@@ -24,6 +27,8 @@ struct mlx5e_tc_table {
struct notifier_block netdevice_nb;
struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
};
struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3dc200bcfabd..69a05da0e3e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
{
u32 data_size;
+ int err = 0;
u32 offset;
- int err;
for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
index 8fe8b4d6ad1c..254c84739046 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -51,7 +51,7 @@ static void mlx5e_monitor_counters_work(struct work_struct *work)
monitor_counters_work);
mutex_lock(&priv->state_lock);
- mlx5e_update_ndo_stats(priv);
+ mlx5e_stats_update_ndo_stats(priv);
mutex_unlock(&priv->state_lock);
mlx5e_monitor_counter_arm(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 5de1cb9f5330..96608dbb9314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -490,11 +490,8 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
int err;
int i;
- if (!MLX5_CAP_GEN(dev, pcam_reg))
- return -EOPNOTSUPP;
-
- if (!MLX5_CAP_PCAM_REG(dev, pplm))
- return -EOPNOTSUPP;
+ if (!MLX5_CAP_GEN(dev, pcam_reg) || !MLX5_CAP_PCAM_REG(dev, pplm))
+ return false;
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
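
The hunk above fixes a bool-returning function that returned -EOPNOTSUPP:
any nonzero value converts to true, so callers saw the missing capability
as present. A standalone userspace illustration of the bug class (names
made up):

	#include <stdbool.h>
	#include <stdio.h>

	static bool cap_supported_buggy(void)
	{
		return -95;	/* -EOPNOTSUPP: nonzero, becomes "true" */
	}

	int main(void)
	{
		/* Prints 1: the error silently turned into "supported". */
		printf("%d\n", cap_supported_buggy());
		return 0;
	}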
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 79cc42d88eec..e36e505d38ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -12,7 +12,7 @@
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
@@ -191,7 +191,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
memcpy(&tmp, f, sizeof(*f));
- if (!mlx5_esw_chains_prios_supported(esw))
+ if (!mlx5_chains_prios_supported(esw_chains(esw)))
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
@@ -203,12 +203,12 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
*
* We only support chain 0 of FT offload.
*/
- if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
return -EOPNOTSUPP;
if (tmp.common.chain_index != 0)
return -EOPNOTSUPP;
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++;
err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -378,12 +378,12 @@ static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
*
* We only support chain 0 of FT offload.
*/
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
+ tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
tmp.common.chain_index)
return -EOPNOTSUPP;
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++;
err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -612,7 +612,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct tc_skb_ext *tc_skb_ext;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- int tunnel_moffset;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
@@ -626,7 +625,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
- err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find chain for chain tag: %d, err: %d\n",
@@ -647,13 +646,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb,
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
zone_restore_id))
return false;
}
- tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
- tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG);
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index c6bc9224c3b1..b5f8ed30047b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -14,7 +14,7 @@
#include <linux/workqueue.h>
#include <linux/xarray.h>
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
@@ -39,8 +39,9 @@
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
struct mlx5_tc_ct_priv {
- struct mlx5_eswitch *esw;
+ struct mlx5_core_dev *dev;
const struct net_device *netdev;
+ struct mod_hdr_tbl *mod_hdr_tbl;
struct idr fte_ids;
struct xarray tuple_ids;
struct rhashtable zone_ht;
@@ -50,13 +51,16 @@ struct mlx5_tc_ct_priv {
struct mlx5_flow_table *ct_nat;
struct mlx5_flow_table *post_ct;
struct mutex control_lock; /* guards parallel adds/dels */
+ struct mutex shared_counter_lock;
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_fs_chains *chains;
};
struct mlx5_ct_flow {
- struct mlx5_esw_flow_attr pre_ct_attr;
- struct mlx5_esw_flow_attr post_ct_attr;
+ struct mlx5_flow_attr *pre_ct_attr;
+ struct mlx5_flow_attr *post_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_flow_handle *post_ct_rule;
struct mlx5_ct_ft *ft;
@@ -67,12 +71,12 @@ struct mlx5_ct_flow {
struct mlx5_ct_zone_rule {
struct mlx5_flow_handle *rule;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_esw_flow_attr attr;
+ struct mlx5_flow_attr *attr;
bool nat;
};
struct mlx5_tc_ct_pre {
- struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *ft;
struct mlx5_flow_group *flow_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *flow_rule;
@@ -114,11 +118,16 @@ struct mlx5_ct_tuple {
u16 zone;
};
+struct mlx5_ct_shared_counter {
+ struct mlx5_fc *counter;
+ refcount_t refcount;
+};
+
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_fc *counter;
+ struct mlx5_ct_shared_counter *shared_counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -157,18 +166,6 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
-static struct mlx5_tc_ct_priv *
-mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- return uplink_priv->ct_priv;
-}
-
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -395,20 +392,30 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
+mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+{
+ if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ return;
+
+ mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
+ kfree(entry->shared_counter);
+}
+
+static void
mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry,
bool nat)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
- struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_attr *attr = zone_rule->attr;
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
- mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
- mlx5e_mod_hdr_detach(ct_priv->esw->dev,
- &esw->offloads.mod_hdr, zone_rule->mh);
+ mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ mlx5e_mod_hdr_detach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl, zone_rule->mh);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ kfree(attr);
}
static void
@@ -417,8 +424,6 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
-
- mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
}
static struct flow_action_entry *
@@ -444,29 +449,40 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
u32 labels_id,
u8 zone_restore_id)
{
- struct mlx5_eswitch *esw = ct_priv->esw;
+ enum mlx5_flow_namespace_type ns = ct_priv->ns_type;
+ struct mlx5_core_dev *dev = ct_priv->dev;
int err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
CTSTATE_TO_REG, ct_state);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
MARK_TO_REG, mark);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
LABELS_TO_REG, labels_id);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
ZONE_RESTORE_TO_REG, zone_restore_id);
if (err)
return err;
+ /* Make another copy of zone id in reg_b for
+ * NIC rx flows since we don't copy reg_c1 to
+ * reg_b upon miss.
+ */
+ if (ns != MLX5_FLOW_NAMESPACE_FDB) {
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
+ NIC_ZONE_RESTORE_TO_REG, zone_restore_id);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -547,7 +563,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct flow_action *flow_action = &flow_rule->action;
- struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+ struct mlx5_core_dev *mdev = ct_priv->dev;
struct flow_action_entry *act;
size_t action_size;
char *modact;
@@ -558,8 +574,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_MANGLE: {
- err = alloc_mod_hdr_actions(mdev,
- MLX5_FLOW_NAMESPACE_FDB,
+ err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
mod_acts);
if (err)
return err;
@@ -588,7 +603,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
static int
mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule,
struct mlx5e_mod_hdr_handle **mh,
u8 zone_restore_id, bool nat)
@@ -624,9 +639,9 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- *mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev,
- &ct_priv->esw->offloads.mod_hdr,
- MLX5_FLOW_NAMESPACE_FDB,
+ *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl,
+ ct_priv->ns_type,
&mod_acts);
if (IS_ERR(*mh)) {
err = PTR_ERR(*mh);
@@ -650,9 +665,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
- struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5_flow_spec *spec = NULL;
+ struct mlx5_flow_attr *attr;
int err;
zone_rule->nat = nat;
@@ -661,6 +676,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
if (!spec)
return -ENOMEM;
+ attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
&zone_rule->mh,
zone_restore_id, nat);
@@ -674,9 +695,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = 0;
attr->dest_ft = ct_priv->post_ct;
- attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->counter;
+ attr->counter = entry->shared_counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -684,38 +705,98 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
entry->tuple.zone & MLX5_CT_ZONE_MASK,
MLX5_CT_ZONE_MASK);
- zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
err = PTR_ERR(zone_rule->rule);
ct_dbg("Failed to add ct entry rule, nat: %d", nat);
goto err_rule;
}
+ zone_rule->attr = attr;
+
kfree(spec);
ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
return 0;
err_rule:
- mlx5e_mod_hdr_detach(ct_priv->esw->dev,
- &esw->offloads.mod_hdr, zone_rule->mh);
+ mlx5e_mod_hdr_detach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ kfree(attr);
+err_attr:
kfree(spec);
return err;
}
+static struct mlx5_ct_shared_counter *
+mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry)
+{
+ struct mlx5_ct_tuple rev_tuple = entry->tuple;
+ struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_entry *rev_entry;
+ __be16 tmp_port;
+
+ /* get the reversed tuple */
+ tmp_port = rev_tuple.port.src;
+ rev_tuple.port.src = rev_tuple.port.dst;
+ rev_tuple.port.dst = tmp_port;
+
+ if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ __be32 tmp_addr = rev_tuple.ip.src_v4;
+
+ rev_tuple.ip.src_v4 = rev_tuple.ip.dst_v4;
+ rev_tuple.ip.dst_v4 = tmp_addr;
+ } else if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct in6_addr tmp_addr = rev_tuple.ip.src_v6;
+
+ rev_tuple.ip.src_v6 = rev_tuple.ip.dst_v6;
+ rev_tuple.ip.dst_v6 = tmp_addr;
+ } else {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* Use the same counter as the reverse direction */
+ mutex_lock(&ct_priv->shared_counter_lock);
+ rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
+ tuples_ht_params);
+ if (rev_entry) {
+ if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ mutex_unlock(&ct_priv->shared_counter_lock);
+ return rev_entry->shared_counter;
+ }
+ }
+ mutex_unlock(&ct_priv->shared_counter_lock);
+
+ shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
+ if (!shared_counter)
+ return ERR_PTR(-ENOMEM);
+
+ shared_counter->counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(shared_counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ kfree(shared_counter);
+ return ERR_CAST(shared_counter->counter);
+ }
+
+ refcount_set(&shared_counter->refcount, 1);
+ return shared_counter;
+}
+
static int
mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule,
struct mlx5_ct_entry *entry,
u8 zone_restore_id)
{
- struct mlx5_eswitch *esw = ct_priv->esw;
int err;
- entry->counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(entry->counter)) {
- err = PTR_ERR(entry->counter);
+ entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+ if (IS_ERR(entry->shared_counter)) {
+ err = PTR_ERR(entry->shared_counter);
ct_dbg("Failed to create counter for ct entry");
return err;
}
@@ -735,7 +816,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_fc_destroy(esw->dev, entry->counter);
+ mlx5_tc_ct_shared_counter_put(ct_priv, entry);
return err;
}
@@ -825,12 +906,16 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ mutex_lock(&ct_priv->shared_counter_lock);
if (entry->tuple_node.next)
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
+ mutex_unlock(&ct_priv->shared_counter_lock);
+ mlx5_tc_ct_shared_counter_put(ct_priv, entry);
}
static int
@@ -867,7 +952,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
@@ -940,9 +1025,7 @@ out:
return false;
}
-int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
{
u32 ctstate = 0, ctstate_mask = 0;
@@ -958,14 +1041,21 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
return 0;
}
+void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr)
+{
+ if (!priv || !ct_attr->ct_labels_id)
+ return;
+
+ mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+}
+
int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct mlx5_ct_attr *ct_attr,
- struct netlink_ext_ack *extack)
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct mlx5_ct_attr *ct_attr,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector_key_ct *mask, *key;
bool trk, est, untrk, unest, new;
@@ -978,7 +1068,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
return 0;
- if (!ct_priv) {
+ if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct matching isn't available");
return -EOPNOTSUPP;
@@ -1034,7 +1124,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
- if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+ if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
return -EOPNOTSUPP;
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
MLX5_CT_LABELS_MASK);
@@ -1044,14 +1134,12 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
}
int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
- if (!ct_priv) {
+ if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
return -EOPNOTSUPP;
@@ -1070,8 +1158,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
{
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
- struct mlx5_flow_table *fdb = pre_ct->fdb;
+ struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_flow_table *ft = pre_ct->ft;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *mod_hdr;
@@ -1086,14 +1174,14 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
return -ENOMEM;
zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
- err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone);
+ err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ct_priv->ns_type,
+ ZONE_TO_REG, zone);
if (err) {
ct_dbg("Failed to set zone register mapping");
goto err_mapping;
}
- mod_hdr = mlx5_modify_header_alloc(dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(dev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
@@ -1119,7 +1207,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
dest.ft = ct_priv->post_ct;
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct flow rule zone %d", zone);
@@ -1130,7 +1218,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
/* add miss rule */
memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
@@ -1157,7 +1245,7 @@ tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
struct mlx5_tc_ct_pre *pre_ct)
{
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_core_dev *dev = ct_priv->dev;
mlx5_del_flow_rules(pre_ct->flow_rule);
mlx5_del_flow_rules(pre_ct->miss_rule);
@@ -1171,7 +1259,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_core_dev *dev = ct_priv->dev;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -1181,10 +1269,10 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
void *misc;
int err;
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ ns = mlx5_get_flow_namespace(dev, ct_priv->ns_type);
if (!ns) {
err = -EOPNOTSUPP;
- ct_dbg("Failed to get FDB flow namespace");
+ ct_dbg("Failed to get flow namespace");
return err;
}
@@ -1193,7 +1281,8 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
return -ENOMEM;
ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.prio = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB ?
+ FDB_TC_OFFLOAD : MLX5E_TC_PRIO;
ft_attr.max_fte = 2;
ft_attr.level = 1;
ft = mlx5_create_flow_table(ns, &ft_attr);
@@ -1202,7 +1291,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
ct_dbg("Failed to create pre ct table");
goto out_free;
}
- pre_ct->fdb = ft;
+ pre_ct->ft = ft;
/* create flow group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1266,7 +1355,7 @@ mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
mlx5_destroy_flow_group(pre_ct->miss_grp);
mlx5_destroy_flow_group(pre_ct->flow_grp);
- mlx5_destroy_flow_table(pre_ct->fdb);
+ mlx5_destroy_flow_table(pre_ct->ft);
}
static int
@@ -1385,7 +1474,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + fdb prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
@@ -1415,17 +1504,17 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* +--------------+
*/
static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *orig_spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
struct mlx5_flow_spec *post_ct_spec = NULL;
- struct mlx5_eswitch *esw = ct_priv->esw;
- struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
@@ -1460,10 +1549,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
}
ct_flow->fte_id = fte_id;
- /* Base esw attributes of both rules on original rule attribute */
- pre_ct_attr = &ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, sizeof(*attr));
- memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+ /* Base flow attributes of both rules on original rule attribute */
+ ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!ct_flow->pre_ct_attr) {
+ err = -ENOMEM;
+ goto err_alloc_pre;
+ }
+
+ ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!ct_flow->post_ct_attr) {
+ err = -ENOMEM;
+ goto err_alloc_post;
+ }
+
+ pre_ct_attr = ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, attr_sz);
+ memcpy(ct_flow->post_ct_attr, attr, attr_sz);
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1474,22 +1575,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
* don't go though all prios of this chain as normal tc rules
* miss.
*/
- err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
- &chain_mapping);
+ err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
+ &chain_mapping);
if (err) {
ct_dbg("Failed to get chain register mapping for chain");
goto err_get_chain;
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
FTEID_TO_REG, fte_id);
if (err) {
ct_dbg("Failed to set fte_id register mapping");
@@ -1503,7 +1604,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
attr->chain == 0) {
u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ ct_priv->ns_type,
TUNNEL_TO_REG,
tun_id);
if (err) {
@@ -1512,8 +1614,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
}
}
- mod_hdr = mlx5_modify_header_alloc(esw->dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
if (IS_ERR(mod_hdr)) {
@@ -1529,16 +1630,16 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
fte_id, MLX5_FTE_ID_MASK);
- /* Put post_ct rule on post_ct fdb */
- ct_flow->post_ct_attr.chain = 0;
- ct_flow->post_ct_attr.prio = 0;
- ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+ /* Put post_ct rule on post_ct flow table */
+ ct_flow->post_ct_attr->chain = 0;
+ ct_flow->post_ct_attr->prio = 0;
+ ct_flow->post_ct_attr->ft = ct_priv->post_ct;
- ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
- rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
- &ct_flow->post_ct_attr);
+ ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ rule = mlx5_tc_rule_insert(priv, post_ct_spec,
+ ct_flow->post_ct_attr);
ct_flow->post_ct_rule = rule;
if (IS_ERR(ct_flow->post_ct_rule)) {
err = PTR_ERR(ct_flow->post_ct_rule);
@@ -1548,10 +1649,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
/* Change original rule point to ct table */
pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb;
- ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
- orig_spec,
- pre_ct_attr);
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+ ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
+ pre_ct_attr);
if (IS_ERR(ct_flow->pre_ct_rule)) {
err = PTR_ERR(ct_flow->pre_ct_rule);
ct_dbg("Failed to add pre ct rule");
@@ -1565,14 +1665,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
return rule;
err_insert_orig:
- mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
- &ct_flow->post_ct_attr);
+ mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+ ct_flow->post_ct_attr);
err_insert_post_ct:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
dealloc_mod_hdr_actions(&pre_mod_acts);
- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
+ kfree(ct_flow->post_ct_attr);
+err_alloc_post:
+ kfree(ct_flow->pre_ct_attr);
+err_alloc_pre:
idr_remove(&ct_priv->fte_ids, fte_id);
err_idr:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -1584,14 +1688,14 @@ err_ft:
}
static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_spec *orig_spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
- struct mlx5_eswitch *esw = ct_priv->esw;
- struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
+ u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
+ struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
@@ -1602,8 +1706,13 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
return ERR_PTR(-ENOMEM);
/* Base esw attributes on original rule attribute */
- pre_ct_attr = &ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, sizeof(*attr));
+ pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!pre_ct_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+
+ memcpy(pre_ct_attr, attr, attr_sz);
err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
if (err) {
@@ -1611,8 +1720,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
goto err_set_registers;
}
- mod_hdr = mlx5_modify_header_alloc(esw->dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
mod_acts->num_actions,
mod_acts->actions);
if (IS_ERR(mod_hdr)) {
@@ -1625,7 +1733,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
pre_ct_attr->modify_hdr = mod_hdr;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+ rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add ct clear rule");
@@ -1633,6 +1741,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
}
attr->ct_attr.ct_flow = ct_flow;
+ ct_flow->pre_ct_attr = pre_ct_attr;
ct_flow->pre_ct_rule = rule;
return rule;
@@ -1641,61 +1750,67 @@ err_insert:
err_set_registers:
netdev_warn(priv->netdev,
"Failed to offload ct clear flow, err %d\n", err);
+ kfree(pre_ct_attr);
+err_attr:
+ kfree(ct_flow);
+
return ERR_PTR(err);
}
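
The clear path above exercises the new namespace-sized attribute allocation: ns_to_attr_sz() picks the size for the FDB or NIC tail and mlx5_alloc_flow_attr() zero-allocates it, replacing the old embedded mlx5_esw_flow_attr. A minimal standalone sketch of that pattern, with hypothetical esw_tail/nic_tail payloads standing in for the real per-namespace attribute data:

#include <stdlib.h>

/* Hypothetical sketch: one attribute struct sized per flow namespace. */
enum ns_type { NS_FDB, NS_KERNEL };

struct esw_tail { int out_vport; };
struct nic_tail { int tirn; };

struct flow_attr {
	int action;		/* common fields */
	enum ns_type ns_type;
	char tail[];		/* namespace-specific payload */
};

static size_t ns_to_attr_sz(enum ns_type type)
{
	return sizeof(struct flow_attr) +
	       (type == NS_FDB ? sizeof(struct esw_tail)
			       : sizeof(struct nic_tail));
}

static struct flow_attr *alloc_flow_attr(enum ns_type type)
{
	return calloc(1, ns_to_attr_sz(type));	/* kzalloc in the kernel */
}

Copying an existing attribute, as the memcpy(pre_ct_attr, attr, attr_sz) above does, then needs only the same attr_sz on both sides.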
struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct mlx5_flow_handle *rule;
- if (!ct_priv)
+ if (!priv)
return ERR_PTR(-EOPNOTSUPP);
- mutex_lock(&ct_priv->control_lock);
+ mutex_lock(&priv->control_lock);
if (clear_action)
rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
else
rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
- mutex_unlock(&ct_priv->control_lock);
+ mutex_unlock(&priv->control_lock);
return rule;
}
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_ct_flow *ct_flow)
{
- struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
- pre_ct_attr);
- mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+ mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
+ pre_ct_attr);
+ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
if (ct_flow->post_ct_rule) {
- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
- &ct_flow->post_ct_attr);
- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+ ct_flow->post_ct_attr);
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
}
+ kfree(ct_flow->pre_ct_attr);
+ kfree(ct_flow->post_ct_attr);
kfree(ct_flow);
}
void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
/* We are called on error to clean up stuff from parsing
@@ -1704,22 +1819,15 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
if (!ct_flow)
return;
- mutex_lock(&ct_priv->control_lock);
- __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
- mutex_unlock(&ct_priv->control_lock);
+ mutex_lock(&priv->control_lock);
+ __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+ mutex_unlock(&priv->control_lock);
}
static int
-mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
- const char **err_msg)
+mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
+ const char **err_msg)
{
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- /* cannot restore chain ID on HW miss */
-
- *err_msg = "tc skb extension missing";
- return -EOPNOTSUPP;
-#endif
-
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
*err_msg = "firmware level support is missing";
return -EOPNOTSUPP;
@@ -1753,44 +1861,61 @@ mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
return 0;
}
-static void
-mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+static int
+mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv,
+ const char **err_msg)
+{
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ *err_msg = "firmware level support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ const char **err_msg)
{
- if (msg)
- netdev_warn(rpriv->netdev,
- "tc ct offload not supported, %s, err: %d\n",
- msg, err);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ /* cannot restore chain ID on HW miss */
+
+ *err_msg = "tc skb extension missing";
+ return -EOPNOTSUPP;
+#endif
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
else
- netdev_warn(rpriv->netdev,
- "tc ct offload not supported, err: %d\n",
- err);
+ return mlx5_tc_ct_init_check_nic_support(priv, err_msg);
}
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+#define INIT_ERR_PREFIX "tc ct offload init failed"
+
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_tc_ct_priv *ct_priv;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5e_priv *priv;
+ struct mlx5_core_dev *dev;
const char *msg;
int err;
- rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
- priv = netdev_priv(rpriv->netdev);
- esw = priv->mdev->priv.eswitch;
-
- err = mlx5_tc_ct_init_check_support(esw, &msg);
+ dev = priv->mdev;
+ err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg);
if (err) {
- mlx5_tc_ct_init_err(rpriv, msg, err);
+ mlx5_core_warn(dev,
+ "tc ct offload not supported, %s\n",
+ msg);
goto err_support;
}
ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
- if (!ct_priv) {
- mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+ if (!ct_priv)
goto err_alloc;
- }
ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
@@ -1804,46 +1929,51 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_mapping_labels;
}
- ct_priv->esw = esw;
- ct_priv->netdev = rpriv->netdev;
- ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->ns_type = ns_type;
+ ct_priv->chains = chains;
+ ct_priv->netdev = priv->netdev;
+ ct_priv->dev = priv->mdev;
+ ct_priv->mod_hdr_tbl = mod_hdr;
+ ct_priv->ct = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->ct)) {
err = PTR_ERR(ct_priv->ct);
- mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+ mlx5_core_warn(dev,
+ "%s, failed to create ct table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_ct_tbl;
}
- ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->ct_nat = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->ct_nat)) {
err = PTR_ERR(ct_priv->ct_nat);
- mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
- err);
+ mlx5_core_warn(dev,
+ "%s, failed to create ct nat table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_ct_nat_tbl;
}
- ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->post_ct = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->post_ct)) {
err = PTR_ERR(ct_priv->post_ct);
- mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
- err);
+ mlx5_core_warn(dev,
+ "%s, failed to create post ct table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_post_ct_tbl;
}
idr_init(&ct_priv->fte_ids);
mutex_init(&ct_priv->control_lock);
+ mutex_init(&ct_priv->shared_counter_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
- /* Done, set ct_priv to know it initializted */
- uplink_priv->ct_priv = ct_priv;
-
- return 0;
+ return ct_priv;
err_post_ct_tbl:
- mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
- mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
mapping_destroy(ct_priv->labels_mapping);
err_mapping_labels:
@@ -1853,20 +1983,22 @@ err_mapping_zone:
err_alloc:
err_support:
- return 0;
+ return NULL;
}
void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
{
- struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_fs_chains *chains;
if (!ct_priv)
return;
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+ chains = ct_priv->chains;
+
+ mlx5_chains_destroy_global_table(chains, ct_priv->post_ct);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
mapping_destroy(ct_priv->labels_mapping);
@@ -1874,17 +2006,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
+ mutex_destroy(&ct_priv->shared_counter_lock);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
-
- uplink_priv->ct_priv = NULL;
}
bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id)
{
- struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
struct mlx5_ct_tuple tuple = {};
struct mlx5_ct_entry *entry;
u16 zone;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 3baef917a677..6503b614337c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -10,12 +10,14 @@
#include "en.h"
-struct mlx5_esw_flow_attr;
+struct mlx5_flow_attr;
struct mlx5e_tc_mod_hdr_acts;
struct mlx5_rep_uplink_priv;
struct mlx5e_tc_flow;
struct mlx5e_priv;
+struct mlx5_fs_chains;
+struct mlx5_tc_ct_priv;
struct mlx5_ct_flow;
struct nf_flowtable;
@@ -76,66 +78,82 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_1) + 3,\
}
+#define nic_zone_restore_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
+ .moffset = 2,\
+ .mlen = 1,\
+}
+
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
+#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
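
REG_MAPPING_MOFFSET/REG_MAPPING_SHIFT make the byte-offset convention explicit: moffset counts bytes within the 32-bit register, so the nic mapping above (moffset 2, mlen 1 in reg_b) places the zone-restore id in bits 16-23, and an mlen of one byte yields an 8-bit id space (ZONE_RESTORE_MAX == 0xff). A standalone sketch of the pack/extract arithmetic:

#include <stdint.h>

/* Per the nic mapping above: 1 byte at byte offset 2 of a 32-bit reg. */
#define MOFFSET	2
#define MLEN	1
#define SHIFT	(MOFFSET * 8)					/* 16 */
#define MASK	((((uint32_t)1 << (MLEN * 8)) - 1) << SHIFT)	/* 0x00ff0000 */

static uint32_t pack_zone_restore(uint32_t reg_b, uint8_t id)
{
	return (reg_b & ~MASK) | ((uint32_t)id << SHIFT);
}

static uint8_t extract_zone_restore(uint32_t reg_b)
{
	return (reg_b & MASK) >> SHIFT;
}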
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type);
void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv);
+
+void
+mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr);
int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct mlx5_ct_attr *ct_attr,
- struct netlink_ext_ack *extack);
-int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec);
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct mlx5_ct_attr *ct_attr,
+ struct netlink_ext_ack *extack);
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
#else /* CONFIG_MLX5_TC_CT */
-static inline int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+static inline struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type)
{
- return 0;
+ return NULL;
}
static inline void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
{
}
+static inline void
+mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr) {}
+
static inline int
-mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec,
- struct flow_cls_offload *f,
- struct mlx5_ct_attr *ct_attr,
- struct netlink_ext_ack *extack)
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct flow_cls_offload *f,
+ struct mlx5_ct_attr *ct_attr,
+ struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -143,47 +161,44 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
return 0;
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
- netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
return -EOPNOTSUPP;
}
static inline int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
+mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
{
return 0;
}
static inline int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
- netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
return -EOPNOTSUPP;
}
static inline struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
}
static inline bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id)
{
if (!zone_restore_id)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 9334c9c3e208..07ee1d236ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -7,6 +7,21 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
+
+/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
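
The bound is easiest to see numerically. Assuming the standard mlx5 sizes (a 64-byte WQEBB holds four 16-byte data segments, and a WQE spans at most 16 WQEBBs), the full WQE would need a DS count of 64, which overflows the 6-bit field; dropping one or two WQEBBs keeps the count representable and the session cache-aligned:

#include <assert.h>

/* Assumed mlx5 constants: 16-WQEBB max WQE, 4 DS per 64-byte WQEBB. */
#define SEND_WQE_MAX_WQEBBS	16
#define SEND_WQEBB_NUM_DS	4
#define DS_CNT_MAX		((1 << 6) - 1)	/* 6-bit field: 63 */

static_assert(SEND_WQE_MAX_WQEBBS * SEND_WQEBB_NUM_DS > DS_CNT_MAX,
	      "a full-size WQE cannot be described by ds_cnt");
static_assert((SEND_WQE_MAX_WQEBBS - 1) * SEND_WQEBB_NUM_DS <= DS_CNT_MAX,
	      "64B cache lines: back off one WQEBB (15 * 4 == 60)");
static_assert((SEND_WQE_MAX_WQEBBS - 2) * SEND_WQEBB_NUM_DS <= DS_CNT_MAX,
	      "128B cache lines: back off two WQEBBs (14 * 4 == 56)");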
+
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
enum mlx5e_icosq_wqe_type {
@@ -20,6 +35,11 @@ enum mlx5e_icosq_wqe_type {
};
/* General */
+static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb)
+{
+ return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST;
+}
+
void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -41,8 +61,6 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
@@ -105,6 +123,7 @@ struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
u8 num_dma;
+ u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
struct page *resync_dump_frag_page;
#endif
@@ -189,23 +208,6 @@ static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
}
static inline void
-mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
- sq->stats->nop += nnops;
-}
-
-static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
@@ -223,29 +225,6 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
mlx5_write64((__be32 *)ctrl, uar_map);
}
-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
-{
- return cseg && !!cseg->tis_tir_num;
-}
-
-static inline u8
-mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct sk_buff *skb)
-{
- u8 mode;
-
- if (mlx5e_transport_inline_tx_wqe(cseg))
- return MLX5_INLINE_MODE_TCP_UDP;
-
- mode = sq->min_inline_mode;
-
- if (skb_vlan_tag_present(skb) &&
- test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
- mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
-
- return mode;
-}
-
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
struct mlx5_core_cq *mcq;
@@ -271,6 +250,23 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
dma->type = map_type;
}
+static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i)
+{
+ return &sq->db.skb_fifo[i & sq->skb_fifo_mask];
+}
+
+static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+{
+ struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++);
+
+ *skb_item = skb;
+}
+
+static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq)
+{
+ return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++);
+}
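
The skb FIFO follows the usual power-of-two ring discipline: skb_fifo_pc and skb_fifo_cc run freely and are masked only on access, so push and pop stay branch-free and wraparound comes out right. A condensed standalone sketch of the same idea:

#include <stdint.h>

struct skb_fifo_sketch {
	void	*slots[8];	/* capacity must be a power of two */
	uint16_t mask;		/* capacity - 1 */
	uint16_t pc;		/* producer counter, free-running */
	uint16_t cc;		/* consumer counter, free-running */
};

static void fifo_push(struct skb_fifo_sketch *f, void *skb)
{
	/* Caller guarantees room, as the SQ does via its stop room. */
	f->slots[f->pc++ & f->mask] = skb;
}

static void *fifo_pop(struct skb_fifo_sketch *f)
{
	return f->slots[f->cc++ & f->mask];
}

Because the counters are unsigned and free-running, pc - cc gives the fill level even after the counters wrap.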
+
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
@@ -286,6 +282,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
}
}
+void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
+void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
+
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+{
+ return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+}
+
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 145592788de5..ae90d533a350 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -59,7 +59,7 @@ static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
struct mlx5e_dma_info *di, struct xdp_buff *xdp)
{
- struct mlx5e_xdp_xmit_data xdptxd;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
@@ -122,7 +122,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
u32 *len, struct xdp_buff *xdp)
{
- struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
u32 act;
int err;
@@ -194,18 +194,22 @@ static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
- session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ net_prefetchw(wqe->data);
- net_prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
- session->pkt_count = 0;
-
- mlx5e_xdp_update_inline_state(sq);
+ *session = (struct mlx5e_tx_mpwqe) {
+ .wqe = wqe,
+ .bytes_count = 0,
+ .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .pkt_count = 0,
+ .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
+ };
stats->mpwqe++;
}
@@ -213,7 +217,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
u16 ds_count = session->ds_count;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -258,10 +262,10 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
}
INDIRECT_CALLABLE_SCOPE bool
-mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
if (unlikely(xdptxd->len > sq->hw_mtu)) {
@@ -284,8 +288,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *x
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
- session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
+ if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
@@ -306,7 +309,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
}
INDIRECT_CALLABLE_SCOPE bool
-mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -503,7 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
- struct mlx5e_xdp_xmit_data xdptxd;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
bool ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e806c13d491f..d487e5e37162 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -38,27 +38,12 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
- (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
-
-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
- DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
-
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
-#define MLX5E_XDP_MPW_MAX_NUM_DS \
- (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
+ (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
+ sizeof(struct mlx5_wqe_inline_seg))
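
The rewritten threshold is value-preserving. Assuming the usual sizes (MLX5_SEND_WQE_DS is 16 bytes and the inline segment header, mlx5_wqe_inline_seg's byte_count, is 4 bytes), both formulations come out to 252 bytes; the new one just derives it from the DS budget instead of hard-coding 256:

#include <assert.h>

/* Assumed sizes: 16-byte DS, 4-byte inline segment header. */
#define SEND_WQE_DS		16
#define INLINE_SEG_HDR		4
#define INLINE_WQE_MAX_DS_CNT	16

#define THRSD_NEW (INLINE_WQE_MAX_DS_CNT * SEND_WQE_DS - INLINE_SEG_HDR)
#define THRSD_OLD (256 - INLINE_SEG_HDR)

static_assert(THRSD_NEW == THRSD_OLD && THRSD_NEW == 252,
	      "same 252-byte inline threshold either way");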
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
@@ -73,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
@@ -122,30 +107,28 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
/* Enable inline WQEs to shift some load from a congested HCA (HW) to
* a less congested cpu (SW).
*/
-static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
+static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
#define MLX5E_XDP_INLINE_WATERMARK_LOW 10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128
- if (session->inline_on) {
- if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
- session->inline_on = 0;
- return;
- }
+ if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+ return false;
+
+ if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+ return true;
- /* inline is false */
- if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
- session->inline_on = 1;
+ return cur;
}
-static inline bool
-mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->inline_on &&
- session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+ if (session->inline_on)
+ return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
+ MLX5E_TX_MPW_MAX_NUM_DS;
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
@@ -155,15 +138,16 @@ struct mlx5e_xdp_wqe_info {
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5_wqe_data_seg *dseg =
(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
u32 dma_len = xdptxd->len;
session->pkt_count++;
+ session->bytes_count += dma_len;
if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
struct mlx5_wqe_inline_seg *inline_dseg =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index bb6669d2a916..8e7b877d8a12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
{
struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk;
u32 cqe_bcnt32 = cqe_bcnt;
- bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -51,10 +50,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
net_prefetch(xdp->data);
- rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp);
- rcu_read_unlock();
-
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
* The page is owned by the userspace from now.
@@ -70,7 +65,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
* allocated first from the Reuse Ring, so it has enough space.
*/
- if (likely(consumed)) {
+ if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -88,7 +83,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
u32 cqe_bcnt)
{
struct xdp_buff *xdp = wi->di->xsk;
- bool consumed;
/* wi->offset is not used in this function, because xdp->data and the
* DMA address point directly to the necessary place. Furthermore, the
@@ -107,11 +101,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
return NULL;
}
- rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp);
- rcu_read_unlock();
-
- if (likely(consumed))
+ if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 662a1dafeaad..4e574ac73019 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -106,8 +106,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- napi_synchronize(&c->napi);
- synchronize_rcu(); /* Sync with the XSK wakeup. */
+ synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index aa91cbdfe969..fb671a457129 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -67,8 +67,8 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
struct xsk_buff_pool *pool = sq->xsk_pool;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
- struct mlx5e_xdp_xmit_data xdptxd;
bool work_done = true;
bool flush = false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 110476bdeffb..2ea1cdc1ca54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -128,26 +128,38 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
return true;
}
-static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe,
- struct mlx5e_accel_tx_state *state)
-{
-#ifdef CONFIG_MLX5_EN_TLS
- mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
-#endif
+/* Part of the eseg touched by TX offloads */
+#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
+static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ struct mlx5e_txqsq *sq,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
#ifdef CONFIG_MLX5_EN_IPSEC
if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, eseg, skb)))
return false;
}
#endif
+#if IS_ENABLED(CONFIG_GENEVE)
+ if (skb->encapsulation)
+ mlx5e_tx_tunnel_accel(skb, eseg);
+#endif
+
return true;
}
+static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+ mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+}
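
MLX5E_ACCEL_ESEG_LEN names the eseg prefix (everything before mss) that the offload hooks are allowed to write, so a caller that builds the eth segment out of line can commit just those bytes with one memcpy. A minimal sketch of the offsetof() idiom, with a hypothetical segment layout:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical layout: offload hooks write only the fields before mss. */
struct eth_seg_sketch {
	uint32_t flow_table_metadata;
	uint16_t csum_flags;
	uint16_t mss;
	uint16_t inline_hdr_sz;
};

#define ACCEL_ESEG_LEN offsetof(struct eth_seg_sketch, mss)

static void commit_accel_prefix(struct eth_seg_sketch *wqe_eseg,
				const struct eth_seg_sketch *scratch)
{
	/* Copy only the offload-written prefix; the remaining fields
	 * stay owned by the regular xmit path.
	 */
	memcpy(wqe_eseg, scratch, ACCEL_ESEG_LEN);
}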
+
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
return mlx5e_ktls_init_rx(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4cdd9eac647d..97f1594cee11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -191,7 +191,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
- kvfree(ft->g);
+ kfree(ft->g);
kvfree(in);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 429428bbc903..b974f3cd1005 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -228,8 +228,8 @@ static int rx_fs_create(struct mlx5e_priv *priv,
fs_prot->miss_rule = miss_rule;
out:
- kfree(flow_group_in);
- kfree(spec);
+ kvfree(flow_group_in);
+ kvfree(spec);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index acf6d80a6bb7..6bbfcf18107d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -234,7 +234,7 @@ mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
/* Re-sync */
/* Runs in work context */
-static struct mlx5_wqe_ctrl_seg *
+static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
struct mlx5e_ktls_offload_context_rx *priv_rx)
{
@@ -258,15 +258,19 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
err = -ENOMEM;
- goto err_out;
+ goto err_free;
}
buf->priv_rx = priv_rx;
BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+
+ spin_lock(&sq->channel->async_icosq_lock);
+
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+ spin_unlock(&sq->channel->async_icosq_lock);
err = -ENOSPC;
- goto err_out;
+ goto err_dma_unmap;
}
pi = mlx5e_icosq_get_next_pi(sq, 1);
@@ -294,12 +298,18 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
};
icosq_fill_wi(sq, pi, &wi);
sq->pc++;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ spin_unlock(&sq->channel->async_icosq_lock);
- return cseg;
+ return 0;
+err_dma_unmap:
+ dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+err_free:
+ kfree(buf);
err_out:
priv_rx->stats->tls_resync_req_skip++;
- return ERR_PTR(err);
+ return err;
}
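
The relabeled exit path is the kernel's standard reverse-order goto unwind: each acquisition adds one label, and every failure jumps to the deepest label whose resource is already held, so the buffer is freed and the DMA mapping undone exactly when they exist. A generic sketch of the shape, with stub acquire/release helpers:

static int acquire_buf(void)  { return 0; }
static void release_buf(void) { }
static int acquire_map(void)  { return 0; }	/* e.g. the DMA mapping */
static void release_map(void) { }
static int acquire_slot(void) { return 0; }	/* e.g. a WQE under the lock */

static int setup(void)
{
	int err;

	err = acquire_buf();
	if (err)
		goto err_out;
	err = acquire_map();
	if (err)
		goto err_free_buf;
	err = acquire_slot();
	if (err)
		goto err_unmap;
	return 0;

err_unmap:
	release_map();
err_free_buf:
	release_buf();
err_out:
	return err;
}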
/* Function is called with elevated refcount.
@@ -309,10 +319,8 @@ static void resync_handle_work(struct work_struct *work)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
- struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5e_channel *c;
struct mlx5e_icosq *sq;
- struct mlx5_wq_cyc *wq;
resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
@@ -324,18 +332,9 @@ static void resync_handle_work(struct work_struct *work)
c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;
- wq = &sq->wq;
-
- spin_lock(&c->async_icosq_lock);
- cseg = resync_post_get_progress_params(sq, priv_rx);
- if (IS_ERR(cseg)) {
+ if (resync_post_get_progress_params(sq, priv_rx))
refcount_dec(&resync->refcnt);
- goto unlock;
- }
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
-unlock:
- spin_unlock(&c->async_icosq_lock);
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -386,16 +385,17 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
u8 tracker_state, auth_state, *ctx;
+ struct device *dev;
u32 hw_seq;
priv_rx = buf->priv_rx;
resync = &priv_rx->resync;
-
+ dev = resync->priv->mdev->device;
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
- dma_sync_single_for_cpu(resync->priv->mdev->device, buf->dma_addr,
- PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
+ DMA_FROM_DEVICE);
ctx = buf->progress.ctx;
tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
@@ -411,6 +411,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
priv_rx->stats->tls_resync_req_end++;
out:
refcount_dec(&resync->refcnt);
+ dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
kfree(buf);
}
@@ -659,7 +660,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
- napi_synchronize(&priv->channels.c[priv_rx->rxq]->napi);
+ synchronize_rcu(); /* Sync with NAPI */
if (!cancel_work_sync(&priv_rx->rule.work))
/* completion is needed, as the priv_rx in the add flow
* is maintained on the wqe info (wi), not on the socket.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f4861545b236..b140e13fdcc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -345,9 +345,6 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_sq_stats *stats;
struct mlx5e_sq_dma *dma;
- if (!wi->resync_dump_frag_page)
- return;
-
dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
stats = sq->stats;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index ff4c740af10b..7521c9be735b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,12 +29,24 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+static inline bool
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
+{
+ if (unlikely(wi->resync_dump_frag_page)) {
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma_fifo_cc);
+ return true;
+ }
+ return false;
+}
#else
-static inline void
-mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
- struct mlx5e_tx_wqe_info *wi,
- u32 *dma_fifo_cc)
+static inline bool
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
{
+ return false;
}
#endif /* CONFIG_MLX5_EN_TLS */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index b0c31d49ff8d..6982b193ee8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -189,12 +189,10 @@ static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
- u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -246,9 +244,7 @@ static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+ mlx5e_sq_xmit_simple(sq, nskb, true);
return true;
@@ -274,6 +270,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (!datalen)
return true;
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
tls_ctx = tls_get_ctx(skb->sk);
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
index 01468ec27446..b949b9a7538b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -35,7 +35,6 @@
#include <net/sock.h>
#include "en.h"
-#include "accel/tls.h"
#include "fpga/sdk.h"
#include "en_accel/tls.h"
@@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
+static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+{
+ return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+}
+
int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
- if (!priv->tls)
+ if (!is_tls_atomic_stats(priv))
return 0;
return NUM_TLS_SW_COUNTERS;
@@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
{
unsigned int i, idx = 0;
- if (!priv->tls)
+ if (!is_tls_atomic_stats(priv))
return 0;
for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
@@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
{
int i, idx = 0;
- if (!priv->tls)
+ if (!is_tls_atomic_stats(priv))
return 0;
for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5cb1e4839eb7..d25a56ec6876 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -243,7 +243,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
- fallthrough;
default:
return -EOPNOTSUPP;
}
@@ -1341,6 +1340,14 @@ static int mlx5e_set_tunable(struct net_device *dev,
return err;
}
+static void mlx5e_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_pause_get(priv, pause_stats);
+}
+
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam)
{
@@ -1901,7 +1908,7 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
return 0;
}
-static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1913,7 +1920,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+ MLX5E_SET_PFLAG(&new_channels.params, flag, enable);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1924,6 +1931,16 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
return err;
}
+static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+}
+
+static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable);
+}
+
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
{ "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
@@ -1931,6 +1948,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_striding_rq", set_pflag_rx_striding_rq },
{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
{ "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
+ { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe },
};
static int mlx5e_handle_pflag(struct net_device *netdev,
@@ -2033,6 +2051,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
+ .get_pause_stats = mlx5e_get_pause_stats,
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 26834625556d..961cdce37cc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -158,16 +158,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--)
- if (mlx5e_nic_stats_grps[i]->update_stats_mask &
- MLX5E_NDO_UPDATE_STATS)
- mlx5e_nic_stats_grps[i]->update_stats(priv);
-}
-
static void mlx5e_update_stats_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
@@ -399,7 +389,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (params->xdp_prog)
bpf_prog_inc(params->xdp_prog);
- rq->xdp_prog = params->xdp_prog;
+ RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog);
rq_xdp_ix = rq->ix;
if (xsk)
@@ -408,7 +398,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (err < 0)
goto err_rq_wq_destroy;
- rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk);
pool_size = 1 << params->log_rq_mtu_frames;
@@ -564,8 +554,8 @@ err_free:
}
err_rq_wq_destroy:
- if (rq->xdp_prog)
- bpf_prog_put(rq->xdp_prog);
+ if (params->xdp_prog)
+ bpf_prog_put(params->xdp_prog);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
@@ -575,10 +565,16 @@ err_rq_wq_destroy:
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
+ struct mlx5e_channel *c = rq->channel;
+ struct bpf_prog *old_prog = NULL;
int i;
- if (rq->xdp_prog)
- bpf_prog_put(rq->xdp_prog);
+ /* drop_rq has neither channel nor xdp_prog. */
+ if (c)
+ old_prog = rcu_dereference_protected(rq->xdp_prog,
+ lockdep_is_held(&c->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -848,6 +844,13 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ /* For CQE compression on striding RQ, use stride index provided by
+ * HW if capability is supported.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
+ MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+
return 0;
err_destroy_rq:
@@ -867,7 +870,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+ synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}
void mlx5e_close_rq(struct mlx5e_rq *rq)
@@ -1040,6 +1043,7 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
kvfree(sq->db.wqe_info);
+ kvfree(sq->db.skb_fifo);
kvfree(sq->db.dma_fifo);
}
@@ -1051,15 +1055,19 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.dma_fifo)),
GFP_KERNEL, numa);
+ sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.skb_fifo)),
+ GFP_KERNEL, numa);
sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.wqe_info)),
GFP_KERNEL, numa);
- if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+ if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
}
sq->dma_fifo_mask = df_sz - 1;
+ sq->skb_fifo_mask = df_sz - 1;
return 0;
}
@@ -1070,6 +1078,12 @@ static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
sq->stop_room = mlx5e_tls_get_stop_room(sq);
sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
+		/* An MPWQE can take up to the maximum-sized WQE; on top of
+		 * that, all the normal stop room can be taken if a new packet
+		 * breaks the active MPWQE session and allocates its WQEs
+		 * right away.
+ */
+ sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
if (WARN_ON(sq->stop_room >= sq_size)) {
netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
@@ -1111,6 +1125,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
+ if (param->is_mpw)
+ set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
if (err)
return err;
@@ -1312,12 +1328,10 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
- struct mlx5e_channel *c = sq->channel;
struct mlx5_wq_cyc *wq = &sq->wq;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- /* prevent netif_tx_wake_queue */
- napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
mlx5e_tx_disable_queue(sq->txq);
@@ -1392,10 +1406,8 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
- struct mlx5e_channel *c = icosq->channel;
-
clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
- napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with NAPI. */
}
void mlx5e_close_icosq(struct mlx5e_icosq *sq)
@@ -1474,7 +1486,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
struct mlx5e_channel *c = sq->channel;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- napi_synchronize(&c->napi);
+ synchronize_rcu(); /* Sync with NAPI. */
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_xdpsq_descs(sq);
@@ -2163,6 +2175,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
mlx5e_build_tx_cq_param(priv, params, &param->cqp);
}
@@ -2182,6 +2195,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ bool hw_stridx = false;
void *cqc = param->cqc;
u8 log_cq_size;
@@ -2189,6 +2203,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -2196,7 +2211,8 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
@@ -3567,6 +3583,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->rx_packets += rq_stats->packets + xskrq_stats->packets;
s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes;
+ s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets;
for (j = 0; j < priv->max_opened_tc; j++) {
struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
@@ -3582,7 +3599,6 @@ void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
/* In switchdev mode, monitor counters doesn't monitor
@@ -3617,12 +3633,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
stats->rx_frame_errors;
stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
-
- /* vport multicast also counts packets that are dropped due to steering
- * or rx out of buffer
- */
- stats->multicast =
- VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -4331,6 +4341,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return 0;
}
+static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog)
+{
+ struct bpf_prog *old_prog;
+
+ old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
+ lockdep_is_held(&rq->channel->priv->state_lock));
+ if (old_prog)
+ bpf_prog_put(old_prog);
+}
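
mlx5e_rq_replace_xdp_prog() pairs with the datapath change earlier in this series (rcu_dereference(rq->xdp_prog) inside NAPI): updaters swap the pointer under the state lock and drop the old reference, while the BPF core defers the actual prog free past a grace period. A condensed kernel-context sketch of the updater side, with hypothetical names:

#include <linux/bpf.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/* Hypothetical updater for an RCU-managed prog pointer. */
struct rq_sketch {
	struct bpf_prog __rcu *xdp_prog;
	struct mutex *state_lock;	/* serializes updaters */
};

static void replace_prog(struct rq_sketch *rq, struct bpf_prog *prog)
{
	struct bpf_prog *old;

	/* lockdep_is_held() documents and checks updater serialization */
	old = rcu_replace_pointer(rq->xdp_prog, prog,
				  lockdep_is_held(rq->state_lock));
	if (old)
		bpf_prog_put(old);	/* freeing is deferred by the BPF core */
}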
+
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4389,29 +4409,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
*/
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
- bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
-
- clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
- if (xsk_open)
- clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
- napi_synchronize(&c->napi);
- /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
-
- old_prog = xchg(&c->rq.xdp_prog, prog);
- if (old_prog)
- bpf_prog_put(old_prog);
-
- if (xsk_open) {
- old_prog = xchg(&c->xskrq.xdp_prog, prog);
- if (old_prog)
- bpf_prog_put(old_prog);
- }
- set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
- if (xsk_open)
- set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
- /* napi_schedule in case we have missed anything */
- napi_schedule(&c->napi);
+ mlx5e_rq_replace_xdp_prog(&c->rq, prog);
+ if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+ mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
}
unlock:
@@ -4716,6 +4717,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
/* XDP SQ */
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
@@ -5201,7 +5204,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.enable = mlx5e_nic_enable,
.disable = mlx5e_nic_disable,
.update_rx = mlx5e_update_nic_rx,
- .update_stats = mlx5e_update_ndo_stats,
+ .update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_nic,
.max_tc = MLX5E_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 135ee26881c9..9f5c97d22af4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -39,7 +39,6 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
-#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
#include "en/txrx.h"
@@ -288,6 +287,14 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
+static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_pause_get(priv, stats);
+}
+
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pauseparam)
{
@@ -362,6 +369,7 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_pause_stats = mlx5e_uplink_rep_get_pause_stats,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
@@ -1171,7 +1179,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_update_ndo_stats,
+ .update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
@@ -1189,7 +1197,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.enable = mlx5e_uplink_rep_enable,
.disable = mlx5e_uplink_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_update_ndo_stats,
+ .update_stats = mlx5e_stats_update_ndo_stats,
.update_carrier = mlx5e_update_carrier,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = MLX5E_MAX_NUM_TC,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 7aab69e991a5..599f5b5ebc97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,6 +52,7 @@
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
+#include "en/txrx.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -137,8 +138,17 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
title->check_sum = mini_cqe->checksum;
title->op_own &= 0xf0;
title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
- title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
+ /* state bit set implies linked-list striding RQ wq type and
+ * HW stride index capability supported
+ */
+ if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
+ title->wqe_counter = mini_cqe->stridx;
+ return;
+ }
+
+ /* HW stride index capability not supported */
+ title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
else
@@ -1079,6 +1089,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_enable_ecn(rq, skb);
skb->protocol = eth_type_trans(skb, netdev);
+
+ if (unlikely(mlx5e_skb_is_multicast(skb)))
+ stats->mcast_packets++;
}
static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
@@ -1131,7 +1144,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct xdp_buff xdp;
struct sk_buff *skb;
void *va, *data;
- bool consumed;
u32 frag_size;
va = page_address(di->page) + wi->offset;
@@ -1143,11 +1155,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- rcu_read_lock();
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp);
- rcu_read_unlock();
- if (consumed)
+ if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
return NULL; /* page/packet was consumed by XDP */
rx_headroom = xdp.data - xdp.data_hard_start;
@@ -1251,6 +1260,11 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (mlx5e_cqe_regb_chain(cqe))
+ if (!mlx5e_tc_update_skb(cqe, skb))
+ goto free_wqe;
+
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
@@ -1437,7 +1451,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
- bool consumed;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1454,11 +1467,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(data);
- rcu_read_lock();
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
- consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp);
- rcu_read_unlock();
- if (consumed) {
+ if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -1516,6 +1526,11 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto mpwrq_cqe_out;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (mlx5e_cqe_regb_chain(cqe))
+ if (!mlx5e_tc_update_skb(cqe, skb))
+ goto mpwrq_cqe_out;
+
napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e3b2f59408e6..78f6a6f0a7e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -54,6 +54,18 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
return total;
}
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
+ const unsigned int num_stats_grps = stats_grps_num(priv);
+ int i;
+
+ for (i = num_stats_grps - 1; i >= 0; i--)
+ if (stats_grps[i]->update_stats &&
+ stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
+ stats_grps[i]->update_stats(priv);
+}
+
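The new mlx5e_stats_update_ndo_stats() walks the profile's stats groups in reverse and calls only those whose update_stats_mask carries MLX5E_NDO_UPDATE_STATS, so ndo_get_stats64 no longer refreshes every ethtool group. The shape is a plain ops table filtered by an event mask; a compact userspace illustration with made-up group names and masks:

#include <stdio.h>

#define EV_NDO  (1u << 0)    /* refresh for ndo_get_stats64 */
#define EV_FULL (1u << 1)    /* refresh for a full ethtool dump */

struct stats_grp {
    const char *name;
    unsigned int mask;         /* events this group serves */
    void (*update)(void);      /* may be NULL for static groups */
};

static void update_sw(void)  { puts("fold sw counters"); }
static void update_phy(void) { puts("query phy counters"); }

static const struct stats_grp grps[] = {
    { "sw",  EV_NDO | EV_FULL, update_sw  },
    { "phy", EV_FULL,          update_phy },
};

/* Run only the groups subscribed to 'event'; like the driver, walk
 * the table in reverse so later-registered groups update first.
 */
static void stats_update(unsigned int event)
{
    int i;

    for (i = (int)(sizeof(grps) / sizeof(grps[0])) - 1; i >= 0; i--)
        if (grps[i].update && (grps[i].mask & event))
            grps[i].update();
}

int main(void)
{
    stats_update(EV_NDO);    /* only the "sw" group runs */
    return 0;
}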
void mlx5e_stats_update(struct mlx5e_priv *priv)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
@@ -98,6 +110,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
@@ -353,6 +367,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
s->tx_nop += sq_stats->nop;
+ s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
+ s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
@@ -677,6 +693,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+#define MLX5E_READ_CTR64_BE_F(ptr, c) \
+ be64_to_cpu(*(__be64 *)((char *)ptr + \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ pause_stats->tx_pause_frames =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ a_pause_mac_ctrl_frames_transmitted);
+ pause_stats->rx_pause_frames =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ a_pause_mac_ctrl_frames_received);
+}
+
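MLX5E_READ_CTR64_BE_F() extracts one 64-bit big-endian counter from the raw PPCNT register dump by byte offset and byte-swaps it once. The same technique in standalone form (the offset and dump layout below are invented; be64toh() is the glibc/musl byte-order helper):

#include <endian.h>    /* be64toh() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TX_PAUSE_OFF 0x18    /* invented offset into the dump */

static uint64_t read_ctr64_be(const void *dump, size_t off)
{
    uint64_t be;

    /* memcpy sidesteps the alignment/aliasing concerns of a direct
     * (__be64 *) cast; the kernel macro can cast because the PPCNT
     * buffer is naturally aligned.
     */
    memcpy(&be, (const char *)dump + off, sizeof(be));
    return be64toh(be);
}

int main(void)
{
    unsigned char dump[0x20] = { 0 };

    dump[TX_PAUSE_OFF + 7] = 0x2a;    /* 42, stored big-endian */
    printf("tx_pause = %llu\n",
           (unsigned long long)read_ctr64_be(dump, TX_PAUSE_OFF));
    return 0;
}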
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -1527,6 +1572,8 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 2e1cca1923b9..162daaadb0d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -103,6 +103,10 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv);
void mlx5e_stats_update(struct mlx5e_priv *priv);
void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
+void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats);
/* Concrete NIC Stats */
@@ -117,8 +121,11 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
u64 tx_nop;
+ u64 tx_mpwqe_blks;
+ u64 tx_mpwqe_pkts;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
u64 rx_csum_unnecessary;
@@ -298,6 +305,7 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
u64 xdp_drop;
@@ -345,6 +353,8 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 mpwqe_blks;
+ u64 mpwqe_pkts;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_encrypted_packets;
u64 tls_encrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fd53d101d8fd..f815b0c60a6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -57,7 +57,6 @@
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -66,20 +65,11 @@
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
+#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-
-struct mlx5_nic_flow_attr {
- u32 action;
- u32 flow_tag;
- struct mlx5_modify_hdr *modify_hdr;
- u32 hairpin_tirn;
- u8 match_level;
- struct mlx5_flow_table *hairpin_ft;
- struct mlx5_fc *counter;
-};
-
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
@@ -153,11 +143,7 @@ struct mlx5e_tc_flow {
struct rcu_head rcu_head;
struct completion init_done;
int tunnel_id; /* the mapped tunnel id of this flow */
-
- union {
- struct mlx5_esw_flow_attr esw_attr[0];
- struct mlx5_nic_flow_attr nic_attr[0];
- };
+ struct mlx5_flow_attr *attr;
};
struct mlx5e_tc_flow_parse_attr {
@@ -170,7 +156,7 @@ struct mlx5e_tc_flow_parse_attr {
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
@@ -191,6 +177,16 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
+ /* For NIC rules we store the restore metadata directly
+ * into reg_b, which is passed to SW, since we don't
+ * jump between steering domains.
+ */
+ [NIC_CHAIN_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
+ .moffset = 0,
+ .mlen = 2,
+ },
+ [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
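With moffset 0 and mlen 2, the NIC chain mapping occupies the low 16 bits of metadata register B, leaving the upper bits free for other restore data such as the zone written via NIC_ZONE_RESTORE_TO_REG. A toy pack/extract pair makes that layout concrete (the helper names and test values are illustrative):

#include <assert.h>
#include <stdint.h>

#define CHAIN_MASK 0xffffu    /* mlen = 2 bytes at moffset = 0 */

static uint32_t regb_pack_chain(uint32_t regb, uint16_t chain)
{
    /* keep the upper bits (e.g. zone restore data) intact */
    return (regb & ~CHAIN_MASK) | chain;
}

static uint16_t regb_get_chain(uint32_t regb)
{
    return regb & CHAIN_MASK;
}

int main(void)
{
    uint32_t regb = regb_pack_chain(0xabcd0000u, 42);

    assert(regb_get_chain(regb) == 42);
    assert((regb >> 16) == 0xabcd);    /* upper bits untouched */
    return 0;
}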
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
@@ -244,6 +240,7 @@ mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data)
{
@@ -253,8 +250,7 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
char *modact;
int err;
- err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_hdr_acts);
+ err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
if (err)
return err;
@@ -275,6 +271,54 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
return 0;
}
+#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+
+static struct mlx5_tc_ct_priv *
+get_ct_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (esw_offloads_mode(esw)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->ct_priv;
+ }
+
+ return priv->fs.tc.ct;
+}
+
+struct mlx5_flow_handle *
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw_offloads_mode(esw))
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
+}
+
+void
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw_offloads_mode(esw)) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ return;
+ }
+
+ mlx5e_del_offloaded_nic_rule(priv, rule, attr);
+}
+
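mlx5_tc_rule_insert() and mlx5_tc_rule_delete() give shared code (the CT offload path in particular) one entry point that lands in either the FDB or the NIC RX tables, branching on the eswitch mode at runtime. Reduced to its skeleton, with stand-in functions instead of the real steering calls:

#include <stdbool.h>
#include <stdio.h>

static bool esw_offloads;    /* stand-in for the eswitch-mode check */

static void fdb_add(void) { puts("add FDB rule"); }
static void fdb_del(void) { puts("del FDB rule"); }
static void nic_add(void) { puts("add NIC RX rule"); }
static void nic_del(void) { puts("del NIC RX rule"); }

/* Insert and delete branch on the same predicate, so a rule is
 * always removed from the table family it was added to.
 */
static void rule_insert(void)
{
    if (esw_offloads)
        fdb_add();
    else
        nic_add();
}

static void rule_delete(void)
{
    if (esw_offloads)
        fdb_del();
    else
        nic_del();
}

int main(void)
{
    rule_insert();    /* esw_offloads is false: NIC RX path */
    rule_delete();
    return 0;
}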
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -370,7 +414,7 @@ static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
MLX5E_TC_FLOW_FLAG_##flag)
-static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
}
@@ -415,10 +459,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
return PTR_ERR(mh);
modify_hdr = mlx5e_mod_hdr_get(mh);
- if (mlx5e_is_eswitch_flow(flow))
- flow->esw_attr->modify_hdr = modify_hdr;
- else
- flow->nic_attr->modify_hdr = modify_hdr;
+ flow->attr->modify_hdr = modify_hdr;
flow->mh = mh;
return 0;
@@ -858,9 +899,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
attach_flow:
if (hpe->hp->num_channels > 1) {
flow_flag_set(flow, HAIRPIN_RSS);
- flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
- flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
flow->hpe = hpe;
@@ -890,129 +931,212 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
flow->hpe = NULL;
}
-static int
-mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
};
- struct mlx5_fc *counter = NULL;
- int err, dest_ix = 0;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
- flow_context->flow_tag = attr->flow_tag;
+ flow_context->flow_tag = nic_attr->flow_tag;
- if (flow_flag_test(flow, HAIRPIN)) {
- err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- if (err)
- return err;
-
- if (flow_flag_test(flow, HAIRPIN_RSS)) {
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dest_ix].ft = attr->hairpin_ft;
- } else {
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest[dest_ix].tir_num = attr->hairpin_tirn;
- }
+ if (attr->dest_ft) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->dest_ft;
+ dest_ix++;
+ } else if (nic_attr->hairpin_ft) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = nic_attr->hairpin_ft;
+ dest_ix++;
+ } else if (nic_attr->hairpin_tirn) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
dest_ix++;
} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ if (attr->dest_chain) {
+ dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
+ attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
+ if (IS_ERR(dest[dest_ix].ft))
+ return ERR_CAST(dest[dest_ix].ft);
+ } else {
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ }
+ dest_ix++;
+ }
+
+ if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
dest_ix++;
}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ flow_act.modify_hdr = attr->modify_hdr;
+
+ mutex_lock(&tc->t_lock);
+ if (IS_ERR_OR_NULL(tc->t)) {
+ /* Create the root table here if it doesn't exist yet */
+ tc->t =
+ mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
+
+ if (IS_ERR(tc->t)) {
+ mutex_unlock(&tc->t_lock);
+ netdev_err(priv->netdev,
+ "Failed to create tc offload table\n");
+ rule = ERR_CAST(priv->fs.tc.t);
+ goto err_ft_get;
+ }
+ }
+ mutex_unlock(&tc->t_lock);
+
+ if (attr->chain || attr->prio)
+ ft = mlx5_chains_get_table(nic_chains,
+ attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+ else
+ ft = attr->ft;
+
+ if (IS_ERR(ft)) {
+ rule = ERR_CAST(ft);
+ goto err_ft_get;
+ }
+
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act, dest, dest_ix);
+ if (IS_ERR(rule))
+ goto err_rule;
+
+ return rule;
+
+err_rule:
+ if (attr->chain || attr->prio)
+ mlx5_chains_put_table(nic_chains,
+ attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+err_ft_get:
+ if (attr->dest_chain)
+ mlx5_chains_put_table(nic_chains,
+ attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
+
+ return ERR_CAST(rule);
+}
+
+static int
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_fc *counter = NULL;
+ int err;
+
+ if (flow_flag_test(flow, HAIRPIN)) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
+ if (err)
+ return err;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(counter);
- dest_ix++;
attr->counter = counter;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- flow_act.modify_hdr = attr->modify_hdr;
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
- mutex_lock(&priv->fs.tc.t_lock);
- if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
- struct mlx5_flow_table_attr ft_attr = {};
- int tc_grp_size, tc_tbl_size, tc_num_grps;
- u32 max_flow_counter;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-
- tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
-
- tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
- BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
- tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
-
- ft_attr.prio = MLX5E_TC_PRIO;
- ft_attr.max_fte = tc_tbl_size;
- ft_attr.level = MLX5E_TC_FT_LEVEL;
- ft_attr.autogroup.max_num_groups = tc_num_grps;
- priv->fs.tc.t =
- mlx5_create_auto_grouped_flow_table(priv->fs.ns,
- &ft_attr);
- if (IS_ERR(priv->fs.tc.t)) {
- mutex_unlock(&priv->fs.tc.t_lock);
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table");
- netdev_err(priv->netdev,
- "Failed to create tc offload table\n");
- return PTR_ERR(priv->fs.tc.t);
- }
- }
+ if (flow_flag_test(flow, CT))
+ flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
+ attr, &parse_attr->mod_hdr_acts);
+ else
+ flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
+ attr);
- if (attr->match_level != MLX5_MATCH_NONE)
- parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ return PTR_ERR_OR_ZERO(flow->rule[0]);
+}
- flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, dest, dest_ix);
- mutex_unlock(&priv->fs.tc.t_lock);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_fs_chains *nic_chains = nic_chains(priv);
- return PTR_ERR_OR_ZERO(flow->rule[0]);
+ mlx5_del_flow_rules(rule);
+
+ if (attr->chain || attr->prio)
+ mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+
+ if (attr->dest_chain)
+ mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
- counter = attr->counter;
- if (!IS_ERR_OR_NULL(flow->rule[0]))
- mlx5_del_flow_rules(flow->rule[0]);
- mlx5_fc_destroy(priv->mdev, counter);
+ flow_flag_clear(flow, OFFLOADED);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else if (!IS_ERR_OR_NULL(flow->rule[0]))
+ mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
+
+ /* Remove root table if no rules are left to avoid
+ * extra steering hops.
+ */
mutex_lock(&priv->fs.tc.t_lock);
- if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
+ if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
+ !IS_ERR_OR_NULL(tc->t)) {
+ mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
+ kvfree(attr->parse_attr);
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+ mlx5_fc_destroy(priv->mdev, attr->counter);
+
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+
+ kfree(flow->attr);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -1035,7 +1159,7 @@ static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
@@ -1043,7 +1167,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
+ return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+ flow, spec, attr,
mod_hdr_acts);
}
@@ -1051,7 +1176,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
return rule;
- if (attr->split_count) {
+ if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
@@ -1065,16 +1190,16 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
if (flow_flag_test(flow, CT)) {
- mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
}
- if (attr->split_count)
+ if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
@@ -1085,18 +1210,24 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
{
- struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_attr *slow_attr;
struct mlx5_flow_handle *rule;
- memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
- slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr.split_count = 0;
- slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!slow_attr)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->esw_attr->split_count = 0;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
+ kfree(slow_attr);
+
return rule;
}
@@ -1104,14 +1235,19 @@ static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_attr *slow_attr;
+
+ slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!slow_attr) {
+ mlx5_core_warn(flow->priv->mdev, "Unable to unoffload slow path rule\n");
+ return;
+ }
- memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
- slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr.split_count = 0;
- slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
+ memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->esw_attr->split_count = 0;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
+ kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
@@ -1169,9 +1305,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct net_device *out_dev, *encap_dev = NULL;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
@@ -1180,7 +1317,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
- if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
+ if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
NL_SET_ERR_MSG_MOD(extack,
"E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
@@ -1191,14 +1328,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
- max_chain = mlx5_esw_chains_get_chain_range(esw);
+ max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
return -EOPNOTSUPP;
}
- max_prio = mlx5_esw_chains_get_prio_range(esw);
+ max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
@@ -1211,10 +1348,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return err;
}
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
int mirred_ifindex;
- if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
@@ -1227,8 +1367,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- attr->dests[out_index].rep = rpriv->rep;
- attr->dests[out_index].mdev = out_priv->mdev;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -1244,7 +1384,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(attr->counter_dev, true);
+ counter = mlx5_fc_create(esw_attr->counter_dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
@@ -1270,7 +1410,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
- struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
+ struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -1285,16 +1425,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
int out_index;
mlx5e_put_flow_tunnel_id(flow);
- if (flow_flag_test(flow, NOT_READY)) {
+ if (flow_flag_test(flow, NOT_READY))
remove_unready_flow(flow);
- kvfree(attr->parse_attr);
- return;
- }
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
@@ -1309,20 +1446,24 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
+ if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
kvfree(attr->parse_attr);
+ mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(attr->counter_dev, attr->counter);
+ mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+
+ kfree(flow->attr);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1332,6 +1473,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
@@ -1354,8 +1496,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (!mlx5e_is_offloaded_flow(flow))
continue;
- esw_attr = flow->esw_attr;
- spec = &esw_attr->parse_attr->spec;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
@@ -1375,7 +1518,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (!all_flow_encaps_valid)
continue;
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -1395,7 +1538,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
@@ -1403,12 +1548,14 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow))
continue;
- spec = &flow->esw_attr->parse_attr->spec;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
- flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1417,7 +1564,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1430,10 +1577,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- if (mlx5e_is_eswitch_flow(flow))
- return flow->esw_attr->counter;
- else
- return flow->nic_attr->counter;
+ return flow->attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
@@ -1799,11 +1943,11 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
bool enc_opts_is_dont_care = true;
@@ -1867,7 +2011,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
} else {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
err = mlx5e_tc_match_to_reg_set(priv->mdev,
- mod_hdr_acts,
+ mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
TUNNEL_TO_REG, value);
if (err)
goto err_set;
@@ -1953,8 +2097,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if (!mlx5e_is_eswitch_flow(flow))
return -EOPNOTSUPP;
- needs_mapping = !!flow->esw_attr->chain;
- sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
+ needs_mapping = !!flow->attr->chain;
+ sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
*match_inner = !needs_mapping;
if ((needs_mapping || sets_mapping) &&
@@ -1966,7 +2110,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow->esw_attr->chain) {
+ if (!flow->attr->chain) {
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
match_level);
if (err) {
@@ -1981,7 +2125,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
* object
*/
if (!netif_is_bareudp(filter_dev))
- flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
if (!needs_mapping && !sets_mapping)
@@ -2484,12 +2628,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (is_eswitch_flow) {
- flow->esw_attr->inner_match_level = inner_match_level;
- flow->esw_attr->outer_match_level = outer_match_level;
- } else {
- flow->nic_attr->match_level = non_tunnel_match_level;
- }
+ flow->attr->inner_match_level = inner_match_level;
+ flow->attr->outer_match_level = outer_match_level;
+
return err;
}
@@ -2615,6 +2756,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -2625,6 +2767,22 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
+static unsigned long mask_to_le(unsigned long mask, int size)
+{
+ __be32 mask_be32;
+ __be16 mask_be16;
+
+ if (size == 32) {
+ mask_be32 = (__force __be32)(mask);
+ mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
+ } else if (size == 16) {
+ mask_be32 = (__force __be32)(mask);
+ mask_be16 = *(__be16 *)&mask_be32;
+ mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
+ }
+
+ return mask;
+}
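mask_to_le() replaces the open-coded byte swapping further down in offload_pedit_fields(): pedit masks arrive in network (big-endian) order, while find_first_bit()/find_next_zero_bit() operate on CPU-order longs, so on little-endian hosts the set bits would otherwise be scanned at the wrong offsets. A userspace rendering of the 32-bit case, with ffs() standing in for find_first_bit():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Equivalent of mask_to_le() for size == 32: a portable way to
 * write an unconditional byte swap from big- to little-endian.
 */
static uint32_t mask32_to_le(uint32_t mask_be)
{
    return htole32(be32toh(mask_be));
}

int main(void)
{
    /* A 16-bit all-ones field mask as it appears when the __be32
     * word is loaded verbatim on a little-endian machine.
     */
    uint32_t loaded = 0xffff0000u;
    uint32_t le = mask32_to_le(loaded);

    /* The bit scan now reports the field at bits 0..15, which is
     * what the offset math in offload_pedit_fields() expects.
     */
    printf("first bit before=%d after=%d\n",
           ffs((int)loaded) - 1, ffs((int)le) - 1);
    return 0;
}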
static int offload_pedit_fields(struct mlx5e_priv *priv,
int namespace,
struct pedit_headers_action *hdrs,
@@ -2638,9 +2796,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5_fields *f;
- unsigned long mask;
- __be32 mask_be32;
- __be16 mask_be16;
+ unsigned long mask, field_mask;
int err;
u8 cmd;
@@ -2706,14 +2862,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
if (skip)
continue;
- if (f->field_bsize == 32) {
- mask_be32 = (__force __be32)(mask);
- mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
- } else if (f->field_bsize == 16) {
- mask_be32 = (__force __be32)(mask);
- mask_be16 = *(__be16 *)&mask_be32;
- mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
- }
+ mask = mask_to_le(mask, f->field_bsize);
first = find_first_bit(&mask, f->field_bsize);
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
@@ -2744,9 +2893,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
if (cmd == MLX5_ACTION_TYPE_SET) {
int start;
+ field_mask = mask_to_le(f->field_mask, f->field_bsize);
+
/* if field is bit sized it can start not from first bit */
- start = find_first_bit((unsigned long *)&f->field_mask,
- f->field_bsize);
+ start = find_first_bit(&field_mask, f->field_bsize);
MLX5_SET(set_action_in, action, offset, first - start);
/* length is num of bits to be written, zero means length of 32 */
@@ -3083,7 +3233,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
* we can't restore ct state
*/
if (!ct_clear && modify_tuple &&
- mlx5_tc_ct_add_no_trk_match(priv, spec)) {
+ mlx5_tc_ct_add_no_trk_match(spec)) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload tuple modify header with ct matches");
netdev_info(priv->netdev,
@@ -3114,12 +3264,13 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
bool ct_flow = false, ct_clear = false;
u32 actions;
+ ct_clear = flow->attr->ct_attr.ct_action &
+ TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+ actions = flow->attr->action;
+
if (mlx5e_is_eswitch_flow(flow)) {
- actions = flow->esw_attr->action;
- ct_clear = flow->esw_attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
- ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- if (flow->esw_attr->split_count && ct_flow) {
+ if (flow->attr->esw_attr->split_count && ct_flow) {
/* All registers used by ct are cleared when using
* split rules.
*/
@@ -3127,8 +3278,6 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
"Can't offload mirroring with action ct");
return false;
}
- } else {
- actions = flow->nic_attr->action;
}
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -3226,15 +3375,67 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
extack);
}
+static int validate_goto_chain(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+{
+ bool is_esw = mlx5e_is_eswitch_flow(flow);
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_eswitch *esw;
+ u32 reformat_and_fwd;
+ u32 max_chain;
+
+ esw = priv->mdev->priv.eswitch;
+ chains = is_esw ? esw_chains(esw) : nic_chains(priv);
+ max_chain = mlx5_chains_get_chain_range(chains);
+ reformat_and_fwd = is_esw ?
+ MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_chains_backwards_supported(chains) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !reformat_and_fwd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
const struct flow_action_entry *act;
+ struct mlx5_nic_flow_attr *nic_attr;
u32 action = 0;
int err, i;
@@ -3245,7 +3446,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
- attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ nic_attr = attr->nic_attr;
+
+ nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -3266,8 +3469,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
@@ -3312,10 +3514,26 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
- attr->flow_tag = mark;
+ nic_attr->flow_tag = mark;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
break;
+ case FLOW_ACTION_GOTO:
+ err = validate_goto_chain(priv, flow, act, action,
+ extack);
+ if (err)
+ return err;
+
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = act->chain_index;
+ break;
+ case FLOW_ACTION_CT:
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+ if (err)
+ return err;
+
+ flow_flag_set(flow, CT);
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
@@ -3338,6 +3556,18 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
attr->action = action;
+
+ if (attr->dest_chain) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3469,8 +3699,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
bool *encap_valid)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
struct encap_key key;
struct mlx5e_encap_entry *e;
@@ -3556,8 +3786,8 @@ attach_flow:
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->dests[out_index].pkt_reformat = e->pkt_reformat;
- attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
+ attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
*encap_valid = true;
} else {
*encap_valid = false;
@@ -3584,14 +3814,14 @@ static int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
uintptr_t hash_key;
int err = 0;
- parse_attr = attr->parse_attr;
+ parse_attr = flow->attr->parse_attr;
if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
NL_SET_ERR_MSG_MOD(extack,
"encap header larger than max supported");
@@ -3733,7 +3963,7 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
}
static int add_vlan_push_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct net_device **out_dev,
u32 *action)
{
@@ -3746,7 +3976,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
if (err)
return err;
@@ -3759,7 +3989,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
u32 *action)
{
struct flow_action_entry vlan_act = {
@@ -3770,7 +4000,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
if (err)
return err;
}
@@ -3831,59 +4061,20 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
-static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- const struct flow_action_entry *act,
- u32 actions,
- struct netlink_ext_ack *extack)
-{
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool ft_flow = mlx5e_is_ft_flow(flow);
- u32 dest_chain = act->chain_index;
-
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!mlx5_esw_chains_backwards_supported(esw) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto lower numbered chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
-
- if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto chain is not allowed if action has reformat or decap");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int verify_uplink_forwarding(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rep_priv;
/* Forwarding non encapsulated traffic between
* uplink ports is allowed only if
* termination_table_raw_traffic cap is set.
*
- * Input vport was stored esw_attr->in_rep.
+ * Input vport was stored in attr->in_rep.
* In LAG case, *priv* is the private data of
* uplink which may be not the input vport.
*/
@@ -3918,13 +4109,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ struct mlx5_flow_attr *attr = flow->attr;
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ struct mlx5_esw_flow_attr *esw_attr;
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
@@ -3937,12 +4129,25 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
+ case FLOW_ACTION_TRAP:
+ if (!flow_offload_has_one_action(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "action trap is supported as a sole action only");
+ return -EOPNOTSUPP;
+ }
+ action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT);
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ break;
case FLOW_ACTION_MPLS_PUSH:
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
reformat_l2_to_l3_tunnel) ||
@@ -3983,7 +4188,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
}
break;
case FLOW_ACTION_CSUM:
@@ -4020,27 +4225,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
- attr->out_count);
+ esw_attr->out_count);
return -EOPNOTSUPP;
}
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (encap) {
- parse_attr->mirred_ifindex[attr->out_count] =
+ parse_attr->mirred_ifindex[esw_attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
- if (!parse_attr->tun_info[attr->out_count])
+ parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
encap = false;
- attr->dests[attr->out_count].flags |=
+ esw_attr->dests[esw_attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
- attr->out_count++;
+ esw_attr->out_count++;
/* attr->dests[].rep is resolved when we
* handle encap
*/
@@ -4089,9 +4294,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- attr->dests[attr->out_count].rep = rpriv->rep;
- attr->dests[attr->out_count].mdev = out_priv->mdev;
- attr->out_count++;
+ esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+ esw_attr->out_count++;
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -4129,12 +4334,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action);
}
if (err)
return err;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
@@ -4144,14 +4349,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
decap = true;
break;
case FLOW_ACTION_GOTO:
- err = mlx5_validate_goto_chain(esw, flow, act, action,
- extack);
+ err = validate_goto_chain(priv, flow, act, action,
+ extack);
if (err)
return err;
@@ -4159,7 +4364,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
if (err)
return err;
@@ -4198,7 +4403,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- attr->split_count = 0;
+ esw_attr->split_count = 0;
}
}
@@ -4238,7 +4443,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
@@ -4289,25 +4494,37 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
- bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
+ bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
MLX5_DEVCOM_ESW_OFFLOADS);
if (!esw_paired)
return false;
- if ((mlx5_lag_is_sriov(attr->in_mdev) ||
- mlx5_lag_is_multipath(attr->in_mdev)) &&
+ if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
+ mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
(is_rep_ingress || act_is_encap))
return true;
return false;
}
+struct mlx5_flow_attr *
+mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
+{
+ u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
+ sizeof(struct mlx5_esw_flow_attr) :
+ sizeof(struct mlx5_nic_flow_attr);
+ struct mlx5_flow_attr *attr;
+
+ return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+}
+
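mlx5_alloc_flow_attr() makes one allocation sized as the common mlx5_flow_attr header plus whichever namespace-specific tail the flow needs, which is what lets flow->attr be handled generically while the FDB and NIC paths keep their private fields. A generic sketch of the header-plus-variant-tail idiom, with invented types:

#include <stdio.h>
#include <stdlib.h>

enum ns { NS_FDB, NS_NIC };

struct esw_tail { int vport; };
struct nic_tail { int flow_tag; };

struct attr {
    enum ns type;
    int action;
    /* variant part follows the header in the same allocation */
};

/* One allocation: header plus whichever tail this namespace needs. */
static struct attr *alloc_attr(enum ns type)
{
    size_t tail = (type == NS_FDB) ? sizeof(struct esw_tail)
                                   : sizeof(struct nic_tail);
    struct attr *a = calloc(1, sizeof(*a) + tail);

    if (a)
        a->type = type;
    return a;
}

/* An accessor keeps the tail casting in one place. */
static struct nic_tail *nic_of(struct attr *a)
{
    return (struct nic_tail *)(a + 1);
}

int main(void)
{
    struct attr *a = alloc_attr(NS_NIC);

    if (!a)
        return 1;
    nic_of(a)->flow_tag = 0xcafe;
    printf("tag=%#x\n", nic_of(a)->flow_tag);
    free(a);
    return 0;
}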
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct flow_cls_offload *f, unsigned long flow_flags,
@@ -4315,19 +4532,24 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = NULL;
struct mlx5e_tc_flow *flow;
int out_index, err;
- flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
- if (!parse_attr || !flow) {
+ if (!parse_attr || !flow) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ flow->flags = flow_flags;
+ flow->cookie = f->cookie;
+ flow->priv = priv;
+
+ attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
+ if (!attr) {
err = -ENOMEM;
goto err_free;
}
+ flow->attr = attr;
- flow->cookie = f->cookie;
- flow->flags = flow_flags;
- flow->priv = priv;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
@@ -4343,11 +4565,22 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
err_free:
kfree(flow);
kvfree(parse_attr);
+ kfree(attr);
return err;
}
static void
-mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
+mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct flow_cls_offload *f)
+{
+ attr->parse_attr = parse_attr;
+ attr->chain = f->common.chain_index;
+ attr->prio = f->common.prio;
+}
+
+static void
+mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct flow_cls_offload *f,
@@ -4355,10 +4588,9 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
struct mlx5_core_dev *in_mdev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- esw_attr->parse_attr = parse_attr;
- esw_attr->chain = f->common.chain_index;
- esw_attr->prio = f->common.prio;
+ mlx5e_flow_attr_init(attr, parse_attr, f);
esw_attr->in_rep = in_rep;
esw_attr->in_mdev = in_mdev;
@@ -4392,7 +4624,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
- mlx5e_flow_esw_attr_init(flow->esw_attr,
+ mlx5e_flow_esw_attr_init(flow->attr,
priv, parse_attr,
f, in_rep, in_mdev);
@@ -4402,8 +4634,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
/* actions validation depends on parsing the ct matches first */
- err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f,
- &flow->esw_attr->ct_attr, extack);
+ err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
+ &flow->attr->ct_attr, extack);
if (err)
goto err_free;
@@ -4434,6 +4666,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *peer_urpriv;
@@ -4453,15 +4686,15 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
* original flow and packets redirected from uplink use the
* peer mdev.
*/
- if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
- parse_attr = flow->esw_attr->parse_attr;
+ parse_attr = flow->attr->parse_attr;
peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
parse_attr->filter_dev,
- flow->esw_attr->in_rep, in_mdev);
+ attr->in_rep, in_mdev);
if (IS_ERR(peer_flow)) {
err = PTR_ERR(peer_flow);
goto out;
@@ -4525,9 +4758,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int attr_size, err;
- /* multi-chain not supported for NIC rules */
- if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ return -EOPNOTSUPP;
+ } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
return -EOPNOTSUPP;
+ }
flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
@@ -4537,11 +4773,18 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
+ mlx5e_flow_attr_init(flow->attr, parse_attr, f);
+
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;
+ err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
+ &flow->attr->ct_attr, extack);
+ if (err)
+ goto err_free;
+
err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -4551,14 +4794,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto err_free;
flow_flag_set(flow, OFFLOADED);
- kvfree(parse_attr);
*__flow = flow;
return 0;
err_free:
mlx5e_flow_put(priv, flow);
- kvfree(parse_attr);
out:
return err;
}
@@ -4933,9 +5174,27 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
+{
+ int tc_grp_size, tc_tbl_size;
+ u32 max_flow_counter;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
+
+ tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
+ BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+
+ return tc_tbl_size;
+}
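
The helper above clamps the NIC TC table size by two independent limits: the flow-counter budget per group and the firmware's log_max_ft_size. A worked example with assumed values (NUM_GROUPS taken as 4 and MAX_GROUP_SIZE as 1 << 18; treat all numbers as illustrative):

/* max_flow_counter = 1 << 23   (the two GEN caps combined)
 *   tc_grp_size = min(1 << 23, 1 << 18)       = 1 << 18
 *   tc_tbl_size = min(4 * (1 << 18), 1 << 20) = 1 << 20
 *                  ^ assumes log_max_ft_size = 20
 * Here the firmware flow-table limit, not the counter budget,
 * bounds the table.
 */
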
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_chains_attr attr = {};
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
@@ -4947,6 +5206,27 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err)
return err;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+ attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ }
+ attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
+ attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
+ attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
+ attr.default_ft = priv->fs.vlan.ft.t;
+
+ tc->chains = mlx5_chains_create(dev, &attr);
+ if (IS_ERR(tc->chains)) {
+ err = PTR_ERR(tc->chains);
+ goto err_chains;
+ }
+
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+ if (IS_ERR(tc->ct))
+ goto err_ct;
+
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -4954,8 +5234,17 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ goto err_reg;
}
+ return 0;
+
+err_reg:
+ mlx5_tc_ct_clean(tc->ct);
+err_ct:
+ mlx5_chains_destroy(tc->chains);
+err_chains:
+ rhashtable_destroy(&tc->ht);
return err;
}
@@ -4980,28 +5269,38 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
mutex_destroy(&tc->hairpin_tbl_lock);
- rhashtable_destroy(&tc->ht);
+ rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
- mlx5_destroy_flow_table(tc->t);
+ mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
tc->t = NULL;
}
mutex_destroy(&tc->t_lock);
+
+ mlx5_tc_ct_clean(tc->ct);
+ mlx5_chains_destroy(tc->chains);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *priv;
+ struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
- int err;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int err = 0;
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
- priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
- err = mlx5_tc_ct_init(uplink_priv);
- if (err)
+ uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
+ esw_chains(esw),
+ &esw->offloads.mod_hdr,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(uplink_priv->ct_priv))
goto err_ct;
mapping = mapping_create(sizeof(struct tunnel_match_key),
@@ -5030,7 +5329,7 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
- mlx5_tc_ct_clean(uplink_priv);
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
err_ct:
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5044,10 +5343,11 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
- mlx5_tc_ct_clean(uplink_priv);
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -5112,3 +5412,44 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EOPNOTSUPP;
}
}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, chain_tag, reg_b, zone_restore_id;
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct tc_skb_ext *tc_skb_ext;
+ int err;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+
+ chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+
+ err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ chain_tag, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (WARN_ON(!tc_skb_ext))
+ return false;
+
+ tc_skb_ext->chain = chain;
+
+ zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
+ ZONE_RESTORE_MAX;
+
+ if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
+ zone_restore_id))
+ return false;
+ }
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
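
mlx5e_tc_update_skb() above recovers software state from reg_b in the CQE: the low MLX5E_TC_TABLE_CHAIN_TAG_BITS carry the chain mapping tag, and higher bits carry the CT zone restore id. A minimal sketch of the unpacking; the zone field's shift and width are assumptions here, since in the driver they come from the NIC_ZONE_RESTORE_TO_REG mapping:

#include <stdint.h>

#define CHAIN_TAG_BITS	16
#define CHAIN_TAG_MASK	((1u << CHAIN_TAG_BITS) - 1)	/* GENMASK(15, 0) */
#define ZONE_SHIFT	CHAIN_TAG_BITS			/* assumed placement */
#define ZONE_MASK	0xffu				/* assumed width */

static void unpack_reg_b(uint32_t reg_b, uint32_t *chain_tag, uint32_t *zone)
{
	*chain_tag = reg_b & CHAIN_TAG_MASK;		/* -> chain mapping */
	*zone = (reg_b >> ZONE_SHIFT) & ZONE_MASK;	/* -> CT zone restore */
}
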
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 437f680728fd..3b979008143d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -35,17 +35,57 @@
#include <net/pkt_cls.h>
#include "en.h"
+#include "eswitch.h"
+#include "en/tc_ct.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
+#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
+ sizeof(struct mlx5_nic_flow_attr))
+#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
+ sizeof(struct mlx5_esw_flow_attr))
+#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
+ ESW_FLOW_ATTR_SZ :\
+ NIC_FLOW_ATTR_SZ)
+
+
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
struct net_device *tun_dev;
};
+struct mlx5_nic_flow_attr {
+ u32 flow_tag;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
+};
+
+struct mlx5_flow_attr {
+ u32 action;
+ struct mlx5_fc *counter;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_ct_attr ct_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 chain;
+ u16 prio;
+ u32 dest_chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *dest_ft;
+ u8 inner_match_level;
+ u8 outer_match_level;
+ u32 flags;
+ union {
+ struct mlx5_esw_flow_attr esw_attr[0];
+ struct mlx5_nic_flow_attr nic_attr[0];
+ };
+};
+
+#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
+#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
+
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct tunnel_match_key {
@@ -90,6 +130,7 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -133,6 +174,8 @@ enum mlx5e_tc_attr_to_reg {
MARK_TO_REG,
LABELS_TO_REG,
FTEID_TO_REG,
+ NIC_CHAIN_TO_REG,
+ NIC_ZONE_RESTORE_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -150,6 +193,7 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data);
@@ -181,14 +225,42 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5_flow_handle *
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
+
#endif /* CONFIG_MLX5_CLS_ACT */
+struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -203,4 +275,29 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
#endif
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain, reg_b;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+
+ chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ if (chain)
+ return true;
+#endif
+
+ return false;
+}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
+{ return false; }
+static inline bool
+mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+{ return true; }
+#endif
+
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index da596de3abba..13bd4f254ed7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -232,131 +232,180 @@ dma_unmap_wqe_err:
return -ENOMEM;
}
+struct mlx5e_tx_attr {
+ u32 num_bytes;
+ u16 headlen;
+ u16 ihs;
+ __be16 mss;
+ u8 opcode;
+};
+
+struct mlx5e_tx_wqe_attr {
+ u16 ds_cnt;
+ u16 ds_cnt_inl;
+ u8 num_wqebbs;
+};
+
+static u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel)
+{
+ u8 mode;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ if (accel && accel->tls.tls_tisn)
+ return MLX5_INLINE_MODE_TCP_UDP;
+#endif
+
+ mode = sq->min_inline_mode;
+
+ if (skb_vlan_tag_present(skb) &&
+ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+ return mode;
+}
+
+static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
+ struct mlx5e_tx_attr *attr)
+{
+ struct mlx5e_sq_stats *stats = sq->stats;
+
+ if (skb_is_gso(skb)) {
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+
+ *attr = (struct mlx5e_tx_attr) {
+ .opcode = MLX5_OPCODE_LSO,
+ .mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
+ .ihs = ihs,
+ .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
+ .headlen = skb_headlen(skb) - ihs,
+ };
+
+ stats->packets += skb_shinfo(skb)->gso_segs;
+ } else {
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
+ u16 ihs = mlx5e_calc_min_inline(mode, skb);
+
+ *attr = (struct mlx5e_tx_attr) {
+ .opcode = MLX5_OPCODE_SEND,
+ .mss = cpu_to_be16(0),
+ .ihs = ihs,
+ .num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
+ .headlen = skb_headlen(skb) - ihs,
+ };
+
+ stats->packets++;
+ }
+
+ stats->bytes += attr->num_bytes;
+}
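
The GSO branch above charges the stats and num_bytes with what actually hits the wire: every segment repeats the ihs header bytes, so (gso_segs - 1) extra header copies are added on top of skb->len. A worked example with illustrative numbers:

/* TCP over IPv4 on Ethernet: ihs = 14 + 20 + 20 = 54, gso_size = 1448,
 * 10 segments of payload:
 *   skb->len  = 10 * 1448 + 54        = 14534
 *   num_bytes = 14534 + (10 - 1) * 54 = 15020
 * which matches 10 wire packets of 1448 + 54 = 1502 bytes each.
 */
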
+
+static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
+ struct mlx5e_tx_wqe_attr *wqe_attr)
+{
+ u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
+ u16 ds_cnt_inl = 0;
+
+ ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;
+
+ if (attr->ihs) {
+ u16 inl = attr->ihs - INL_HDR_START_SZ;
+
+ if (skb_vlan_tag_present(skb))
+ inl += VLAN_HLEN;
+
+ ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ *wqe_attr = (struct mlx5e_tx_wqe_attr) {
+ .ds_cnt = ds_cnt,
+ .ds_cnt_inl = ds_cnt_inl,
+ .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
+ };
+}
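
The DS accounting above sizes the WQE in 16-byte data segments: the empty ctrl+eth part, one DS for the linear headlen (if any), one per page fragment, plus the inline header rounded up to whole DS units; WQEBBs are then 64-byte blocks of four DS. A worked example under assumed constants (EMPTY_DS_COUNT = 2, INL_HDR_START_SZ = 2):

/* skb with ihs = 54, non-zero headlen, 2 page frags, no VLAN tag:
 *   inl        = 54 - 2                          = 52
 *   ds_cnt_inl = DIV_ROUND_UP(52, 16)            = 4
 *   ds_cnt     = 2 + 1 (headlen) + 2 (frags) + 4 = 9
 *   num_wqebbs = DIV_ROUND_UP(9, 4)              = 3  (3 x 64B WQEBBs)
 */
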
+
+static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+{
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
+{
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats->stopped++;
+ }
+}
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
+ const struct mlx5e_tx_attr *attr,
+ const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
bool send_doorbell;
- wi->num_bytes = num_bytes;
- wi->num_dma = num_dma;
- wi->num_wqebbs = num_wqebbs;
- wi->skb = skb;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .skb = skb,
+ .num_bytes = attr->num_bytes,
+ .num_dma = num_dma,
+ .num_wqebbs = wqe_attr->num_wqebbs,
+ .num_fifo_pkts = 0,
+ };
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ mlx5e_tx_skb_update_hwts_flags(skb);
sq->pc += wi->num_wqebbs;
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
- netif_tx_stop_queue(sq->txq);
- sq->stats->stopped++;
- }
- send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
- xmit_more);
+ mlx5e_tx_check_stop(sq);
+
+ send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+static void
+mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, contig_wqebbs_room;
- u16 ds_cnt, ds_cnt_inl = 0;
- u8 num_wqebbs, opcode;
- u32 num_bytes;
int num_dma;
- __be16 mss;
-
- /* Calc ihs and ds cnt, no writes to wqe yet */
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- ihs = mlx5e_tx_get_gso_ihs(sq, skb);
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- stats->packets += skb_shinfo(skb)->gso_segs;
- } else {
- u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
-
- opcode = MLX5_OPCODE_SEND;
- mss = 0;
- ihs = mlx5e_calc_min_inline(mode, skb);
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- stats->packets++;
- }
- stats->bytes += num_bytes;
stats->xmit_more += xmit_more;
- headlen = skb->len - ihs - skb->data_len;
- ds_cnt += !!headlen;
- ds_cnt += skb_shinfo(skb)->nr_frags;
-
- if (ihs) {
- ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
-
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- }
-
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < num_wqebbs)) {
-#ifdef CONFIG_MLX5_EN_IPSEC
- struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
-#endif
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-#ifdef CONFIG_MLX5_EN_IPSEC
- wqe->eth = cur_eth;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- wqe->ctrl = cur_ctrl;
-#endif
- }
-
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
eseg = &wqe->eth;
dseg = wqe->data;
-#if IS_ENABLED(CONFIG_GENEVE)
- if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
-#endif
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
-
- eseg->mss = mss;
+ eseg->mss = attr->mss;
- if (ihs) {
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ if (attr->ihs) {
if (skb_vlan_tag_present(skb)) {
- ihs -= VLAN_HLEN;
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(attr->ihs + VLAN_HLEN);
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
stats->added_vlan_packets++;
} else {
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(attr->ihs);
+ memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
}
- dseg += ds_cnt_inl;
+ dseg += wqe_attr->ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
@@ -365,12 +414,12 @@ void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->added_vlan_packets++;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
+ attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
return;
@@ -379,10 +428,172 @@ err_drop:
dev_kfree_skb_any(skb);
}
+static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
+{
+ return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs;
+}
+
+static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+
+ /* Assumes the session is already running and has at least one packet. */
+ return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
+}
+
+static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ prefetchw(wqe->data);
+
+ *session = (struct mlx5e_tx_mpwqe) {
+ .wqe = wqe,
+ .bytes_count = 0,
+ .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .pkt_count = 0,
+ .inline_on = 0,
+ };
+
+ memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
+
+ sq->stats->mpwqe_blks++;
+}
+
+static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
+{
+ return sq->mpwqe.wqe;
+}
+
+static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+
+ session->pkt_count++;
+ session->bytes_count += txd->len;
+
+ dseg->addr = cpu_to_be64(txd->dma_addr);
+ dseg->byte_count = cpu_to_be32(txd->len);
+ dseg->lkey = sq->mkey_be;
+ session->ds_count++;
+
+ sq->stats->mpwqe_pkts++;
+}
+
+static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ u8 ds_count = session->ds_count;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_tx_wqe_info *wi;
+ u16 pi;
+
+ cseg = &session->wqe->ctrl;
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wi = &sq->db.wqe_info[pi];
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .skb = NULL,
+ .num_bytes = session->bytes_count,
+ .num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
+ .num_dma = session->pkt_count,
+ .num_fifo_pkts = session->pkt_count,
+ };
+
+ sq->pc += wi->num_wqebbs;
+
+ session->wqe = NULL;
+
+ mlx5e_tx_check_stop(sq);
+
+ return cseg;
+}
+
+static void
+mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
+{
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_xmit_data txd;
+
+ if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
+ mlx5e_tx_mpwqe_session_start(sq, eseg);
+ } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
+ mlx5e_tx_mpwqe_session_complete(sq);
+ mlx5e_tx_mpwqe_session_start(sq, eseg);
+ }
+
+ sq->stats->xmit_more += xmit_more;
+
+ txd.data = skb->data;
+ txd.len = skb->len;
+
+ txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+ goto err_unmap;
+ mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
+
+ mlx5e_skb_fifo_push(sq, skb);
+
+ mlx5e_tx_mpwqe_add_dseg(sq, &txd);
+
+ mlx5e_tx_skb_update_hwts_flags(skb);
+
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
+ cseg = mlx5e_tx_mpwqe_session_complete(sq);
+
+ if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
+ /* Might stop the queue, but we were asked to ring the doorbell anyway. */
+ cseg = mlx5e_tx_mpwqe_session_complete(sq);
+
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ }
+
+ return;
+
+err_unmap:
+ mlx5e_dma_unmap_wqe_err(sq, 1);
+ sq->stats->dropped++;
+ dev_kfree_skb_any(skb);
+}
+
+void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
+{
+ /* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
+ if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
+ mlx5e_tx_mpwqe_session_complete(sq);
+}
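
Taken together, the MPWQE helpers above implement a small per-SQ session state machine: packets whose eth segments compare equal share one ENHANCED_MPSW WQE, each contributing a single data segment, and the session closes when the WQE fills, when a differing eseg arrives, or when the doorbell must ring. In outline:

/* per packet:
 *   no session open       -> session_start(eseg)
 *   eseg differs          -> session_complete(); session_start(eseg)
 *   map skb, fifo-push it, append one dseg (add_dseg)
 *   WQE now full          -> session_complete(); doorbell if needed
 *   doorbell requested    -> session_complete(); ring doorbell
 *
 * mlx5e_tx_mpwqe_ensure_complete() flushes a leftover open session so
 * that a following non-MPWQE WQE cannot be posted into the middle of one.
 */
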
+
+static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, sq, skb, eseg)))
+ return false;
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ return true;
+}
+
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_accel_tx_state accel = {};
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
@@ -391,21 +602,91 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May send SKBs and WQEs. */
if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
- goto out;
+ return NETDEV_TX_OK;
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
+
+ if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
+ if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
+ struct mlx5_wqe_eth_seg eseg = {};
+
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
+ return NETDEV_TX_OK;
+
+ mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
+ return NETDEV_TX_OK;
+ }
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+ }
+
+ mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
/* May update the WQE, but may not post other WQEs. */
- if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
- goto out;
+ mlx5e_accel_tx_finish(sq, wqe, &accel);
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
+ return NETDEV_TX_OK;
- mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+ mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
-out:
return NETDEV_TX_OK;
}
+void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
+{
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
+ mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
+}
+
+static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
+{
+ int i;
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+}
+
+static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, int napi_budget)
+{
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct skb_shared_hwtstamps hwts = {};
+ u64 ts = get_cqe_ts(cqe);
+
+ hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
+ skb_tstamp_tx(skb, &hwts);
+ }
+
+ napi_consume_skb(skb, napi_budget);
+}
+
+static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
+ struct mlx5_cqe64 *cqe, int napi_budget)
+{
+ int i;
+
+ for (i = 0; i < wi->num_fifo_pkts; i++) {
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(sq);
+
+ mlx5e_consume_skb(sq, skb, cqe, napi_budget);
+ }
+}
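
Because an MPWQE carries several skbs but only one wqe_info (wi->skb == NULL, wi->num_fifo_pkts set), the skbs themselves travel through a per-SQ FIFO: pushed at xmit time, popped in submission order at completion time. A minimal sketch of such a counter-based ring, assuming a power-of-two size as in the driver:

#include <stdint.h>

struct sk_buff;				/* opaque here */

struct skb_fifo {
	struct sk_buff **fifo;		/* ring storage */
	uint16_t pc, cc;		/* producer / consumer counters */
	uint16_t mask;			/* size - 1, size a power of two */
};

static void skb_fifo_push(struct skb_fifo *f, struct sk_buff *skb)
{
	f->fifo[f->pc++ & f->mask] = skb;	/* xmit side */
}

static struct sk_buff *skb_fifo_pop(struct skb_fifo *f)
{
	return f->fifo[f->cc++ & f->mask];	/* completion side */
}
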
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq_stats *stats;
@@ -451,42 +732,33 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct sk_buff *skb;
- int j;
-
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
- skb = wi->skb;
- if (unlikely(!skb)) {
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
- sqcc += wi->num_wqebbs;
- continue;
- }
+ sqcc += wi->num_wqebbs;
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP)) {
- struct skb_shared_hwtstamps hwts = {};
+ if (likely(wi->skb)) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);
- hwts.hwtstamp =
- mlx5_timecounter_cyc2time(sq->clock,
- get_cqe_ts(cqe));
- skb_tstamp_tx(skb, &hwts);
+ npkts++;
+ nbytes += wi->num_bytes;
+ continue;
}
- for (j = 0; j < wi->num_dma; j++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
+ if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
+ &dma_fifo_cc)))
+ continue;
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
+ if (wi->num_fifo_pkts) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);
- npkts++;
- nbytes += wi->num_bytes;
- sqcc += wi->num_wqebbs;
- napi_consume_skb(skb, napi_budget);
+ npkts += wi->num_fifo_pkts;
+ nbytes += wi->num_bytes;
+ }
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
@@ -525,13 +797,19 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
+{
+ int i;
+
+ for (i = 0; i < wi->num_fifo_pkts; i++)
+ dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq));
+}
+
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
u32 dma_fifo_cc, nbytes = 0;
u16 ci, sqcc, npkts = 0;
- struct sk_buff *skb;
- int i;
sqcc = sq->cc;
dma_fifo_cc = sq->dma_fifo_cc;
@@ -539,25 +817,28 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
while (sqcc != sq->pc) {
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
- skb = wi->skb;
- if (!skb) {
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
- sqcc += wi->num_wqebbs;
+ sqcc += wi->num_wqebbs;
+
+ if (likely(wi->skb)) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ dev_kfree_skb_any(wi->skb);
+
+ npkts++;
+ nbytes += wi->num_bytes;
continue;
}
- for (i = 0; i < wi->num_dma; i++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
+ if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
+ continue;
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
+ if (wi->num_fifo_pkts) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);
- dev_kfree_skb_any(skb);
- npkts++;
- nbytes += wi->num_bytes;
- sqcc += wi->num_wqebbs;
+ npkts += wi->num_fifo_pkts;
+ nbytes += wi->num_bytes;
+ }
}
sq->dma_fifo_cc = dma_fifo_cc;
@@ -576,9 +857,34 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
+static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr,
+ struct mlx5e_tx_wqe_attr *wqe_attr)
+{
+ u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
+ u16 ds_cnt_inl = 0;
+
+ ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;
+
+ if (attr->ihs) {
+ u16 inl = attr->ihs - INL_HDR_START_SZ;
+
+ ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ *wqe_attr = (struct mlx5e_tx_wqe_attr) {
+ .ds_cnt = ds_cnt,
+ .ds_cnt_inl = ds_cnt_inl,
+ .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
+ };
+}
+
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
struct mlx5_wqe_datagram_seg *datagram;
@@ -588,47 +894,17 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 ds_cnt, ds_cnt_inl = 0;
- u8 num_wqebbs, opcode;
- u16 headlen, ihs, pi;
- u32 num_bytes;
int num_dma;
- __be16 mss;
+ u16 pi;
- /* Calc ihs and ds cnt, no writes to wqe yet */
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- ihs = mlx5e_tx_get_gso_ihs(sq, skb);
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- stats->packets += skb_shinfo(skb)->gso_segs;
- } else {
- u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+ mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
- opcode = MLX5_OPCODE_SEND;
- mss = 0;
- ihs = mlx5e_calc_min_inline(mode, skb);
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- stats->packets++;
- }
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
+ wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
- stats->bytes += num_bytes;
stats->xmit_more += xmit_more;
- headlen = skb->len - ihs - skb->data_len;
- ds_cnt += !!headlen;
- ds_cnt += skb_shinfo(skb)->nr_frags;
-
- if (ihs) {
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- }
-
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
- wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
-
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
@@ -640,20 +916,20 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
- eseg->mss = mss;
+ eseg->mss = attr.mss;
- if (ihs) {
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- dseg += ds_cnt_inl;
+ if (attr.ihs) {
+ memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
+ dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
+ attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index de10b06bade5..d5868670f8a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -121,13 +121,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_xdpsq *xsksq = &c->xsksq;
struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
- bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
bool aff_change = false;
bool busy_xsk = false;
bool busy = false;
int work_done = 0;
+ bool xsk_open;
int i;
+ rcu_read_lock();
+
+ xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
+
ch_stats->poll++;
for (i = 0; i < c->num_tc; i++)
@@ -167,8 +171,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
busy |= busy_xsk;
if (busy) {
- if (likely(mlx5e_channel_no_affinity_change(c)))
- return budget;
+ if (likely(mlx5e_channel_no_affinity_change(c))) {
+ work_done = budget;
+ goto out;
+ }
ch_stats->aff_change++;
aff_change = true;
if (budget && work_done == budget)
@@ -176,7 +182,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
}
if (unlikely(!napi_complete_done(napi, work_done)))
- return work_done;
+ goto out;
ch_stats->arm++;
@@ -203,6 +209,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
ch_stats->force_irq++;
}
+out:
+ rcu_read_unlock();
+
return work_done;
}
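
The hunk above wraps the whole NAPI poll in an RCU read-side section, so state observed at the top (the XSK bit and the objects it guards) stays valid for the duration, and it converts the early returns into jumps to a single unlock point:

/* rcu_read_lock();
 *   ...
 *   busy && no affinity change -> work_done = budget; goto out;
 *   !napi_complete_done(...)   -> goto out;
 *   ...
 * out:
 *   rcu_read_unlock();
 *   return work_done;
 */
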
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 31ef9f8420c8..ec38405d467f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -788,8 +788,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
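
tasklet_setup() supersedes the tasklet_init(func, unsigned long data) style: the callback now receives the tasklet pointer itself and recovers its container via from_tasklet() (a container_of() wrapper), removing the cast through unsigned long. The conversion in miniature, with a hypothetical context struct:

#include <linux/interrupt.h>

struct my_eq_ctx {
	struct tasklet_struct task;
	/* ... */
};

/* old: static void my_cb(unsigned long data)
 *      { struct my_eq_ctx *ctx = (struct my_eq_ctx *)data; ... }
 * new:
 */
static void my_cb(struct tasklet_struct *t)
{
	struct my_eq_ctx *ctx = from_tasklet(ctx, t, task);

	/* ... use ctx ... */
}

/* registration: tasklet_setup(&ctx->task, my_cb); */
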
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
deleted file mode 100644
index d5bf908dfecd..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
+++ /dev/null
@@ -1,944 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2020 Mellanox Technologies.
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/mlx5_ifc.h>
-#include <linux/mlx5/fs.h>
-
-#include "esw/chains.h"
-#include "en/mapping.h"
-#include "mlx5_core.h"
-#include "fs_core.h"
-#include "eswitch.h"
-#include "en.h"
-#include "en_tc.h"
-
-#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
-#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
-#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
-#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
-#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
-#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
-#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
-#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
-#define fdb_ignore_flow_level_supported(esw) \
- (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
-#define fdb_modify_header_fwd_to_table_supported(esw) \
- (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
-
-/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS) and a
- * virtual memory region of 16M (ESW_SIZE); this region is duplicated
- * for each flow table pool. We can allocate up to 16M from each pool,
- * and we keep track of how much we have used via
- * get_next_avail_sz_from_pool. Firmware doesn't report any of this
- * for now. ESW_POOLS is expected to be sorted from large to small and
- * to match the firmware pools.
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
- 1 * 1024 * 1024,
- 64 * 1024,
- 128 };
-#define ESW_FT_TBL_SZ (64 * 1024)
-
-struct mlx5_esw_chains_priv {
- struct rhashtable chains_ht;
- struct rhashtable prios_ht;
- /* Protects above chains_ht and prios_ht */
- struct mutex lock;
-
- struct mlx5_flow_table *tc_end_fdb;
- struct mapping_ctx *chains_mapping;
-
- int fdb_left[ARRAY_SIZE(ESW_POOLS)];
-};
-
-struct fdb_chain {
- struct rhash_head node;
-
- u32 chain;
-
- int ref;
- int id;
-
- struct mlx5_eswitch *esw;
- struct list_head prios_list;
- struct mlx5_flow_handle *restore_rule;
- struct mlx5_modify_hdr *miss_modify_hdr;
-};
-
-struct fdb_prio_key {
- u32 chain;
- u32 prio;
- u32 level;
-};
-
-struct fdb_prio {
- struct rhash_head node;
- struct list_head list;
-
- struct fdb_prio_key key;
-
- int ref;
-
- struct fdb_chain *fdb_chain;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_table *next_fdb;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
-};
-
-static const struct rhashtable_params chain_params = {
- .head_offset = offsetof(struct fdb_chain, node),
- .key_offset = offsetof(struct fdb_chain, chain),
- .key_len = sizeof_field(struct fdb_chain, chain),
- .automatic_shrinking = true,
-};
-
-static const struct rhashtable_params prio_params = {
- .head_offset = offsetof(struct fdb_prio, node),
- .key_offset = offsetof(struct fdb_prio, key),
- .key_len = sizeof_field(struct fdb_prio, key),
- .automatic_shrinking = true,
-};
-
-bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
-{
- return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
-}
-
-bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
-{
- return mlx5_esw_chains_prios_supported(esw) &&
- fdb_ignore_flow_level_supported(esw);
-}
-
-u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- return 1;
-
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX - 1;
-
- return FDB_TC_MAX_CHAIN;
-}
-
-u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
-{
- return mlx5_esw_chains_get_chain_range(esw) + 1;
-}
-
-u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- return 1;
-
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX;
-
- return FDB_TC_MAX_PRIO;
-}
-
-static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
-{
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX;
-
- return FDB_TC_LEVELS_PER_PRIO;
-}
-
-#define POOL_NEXT_SIZE 0
-static int
-mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
- int desired_size)
-{
- int i, found_i = -1;
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
- if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
- found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
- break;
- }
- }
-
- if (found_i != -1) {
- --fdb_pool_left(esw)[found_i];
- return ESW_POOLS[found_i];
- }
-
- return 0;
-}
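
A note on the selection loop above: it scans ESW_POOLS from the smallest entry upwards and requires strictly-greater size, so a sized request gets the smallest pool larger than it, while POOL_NEXT_SIZE (0) keeps overwriting found_i and ends up taking the largest pool that still has budget. For example:

/* ESW_POOLS = { 4M, 1M, 64K, 128 }, all with budget remaining:
 *   desired_size = 64K            -> 128 and 64K fail the '>' test,
 *                                    1M is the first to pass -> 1M
 *   desired_size = POOL_NEXT_SIZE -> every pool passes, the last hit
 *                                    (largest) wins -> 4M
 */
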
-
-static void
-mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
- int i;
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
- if (sz == ESW_POOLS[i]) {
- ++fdb_pool_left(esw)[i];
- return;
- }
- }
-
- WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
-}
-
-static void
-mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
-{
- u32 fdb_max;
- int i;
-
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
- fdb_pool_left(esw)[i] =
- ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-}
-
-static struct mlx5_flow_table *
-mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
- u32 chain, u32 prio, u32 level)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *fdb;
- int sz;
-
- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
- sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
- mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
- mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
- if (!sz)
- return ERR_PTR(-ENOSPC);
- ft_attr.max_fte = sz;
-
-	/* We use tc_slow_fdb(esw) as the table's next_ft until
-	 * ignore_flow_level is allowed on FT creation, and not just for FTEs.
-	 * Instead, the caller should add an explicit miss rule if needed.
- */
- ft_attr.next_ft = tc_slow_fdb(esw);
-
- /* The root table(chain 0, prio 1, level 0) is required to be
- * connected to the previous prio (FDB_BYPASS_PATH if exists).
- * We always create it, as a managed table, in order to align with
- * fs_core logic.
- */
- if (!fdb_ignore_flow_level_supported(esw) ||
- (chain == 0 && prio == 1 && level == 0)) {
- ft_attr.level = level;
- ft_attr.prio = prio - 1;
- ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
- } else {
- ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = FDB_TC_OFFLOAD;
- /* Firmware doesn't allow us to create another level 0 table,
- * so we create all unmanaged tables as level 1.
- *
- * To connect them, we use explicit miss rules with
- * ignore_flow_level. Caller is responsible to create
- * these rules (if needed).
- */
- ft_attr.level = 1;
- ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
- }
-
- ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev,
- "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
- (int)PTR_ERR(fdb), chain, prio, level, sz);
- mlx5_esw_chains_put_sz_to_pool(esw, sz);
- return fdb;
- }
-
- return fdb;
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *fdb)
-{
- mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
- mlx5_destroy_flow_table(fdb);
-}
-
-static int
-create_fdb_chain_restore(struct fdb_chain *fdb_chain)
-{
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
- struct mlx5_eswitch *esw = fdb_chain->esw;
- struct mlx5_modify_hdr *mod_hdr;
- u32 index;
- int err;
-
- if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) ||
- !mlx5_esw_chains_prios_supported(esw))
- return 0;
-
- err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
- if (err)
- return err;
- if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
- /* we got the special default flow tag id, so we won't know
- * if we actually marked the packet with the restore rule
- * we create.
- *
- * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
- */
- err = mapping_add(esw_chains_mapping(esw),
- &fdb_chain->chain, &index);
- mapping_remove(esw_chains_mapping(esw),
- MLX5_FS_DEFAULT_FLOW_TAG);
- if (err)
- return err;
- }
-
- fdb_chain->id = index;
-
- MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, modact, field,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
- MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
- MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
- MLX5_SET(set_action_in, modact, data, fdb_chain->id);
- mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
- 1, modact);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- goto err_mod_hdr;
- }
- fdb_chain->miss_modify_hdr = mod_hdr;
-
- fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
- if (IS_ERR(fdb_chain->restore_rule)) {
- err = PTR_ERR(fdb_chain->restore_rule);
- goto err_rule;
- }
-
- return 0;
-
-err_rule:
- mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
-err_mod_hdr:
- /* Datapath can't find this mapping, so we can safely remove it */
- mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
- return err;
-}
-
-static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
-
- if (!fdb_chain->miss_modify_hdr)
- return;
-
- mlx5_del_flow_rules(fdb_chain->restore_rule);
- mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
- mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
-}
-
-static struct fdb_chain *
-mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
-{
- struct fdb_chain *fdb_chain = NULL;
- int err;
-
- fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
- if (!fdb_chain)
- return ERR_PTR(-ENOMEM);
-
- fdb_chain->esw = esw;
- fdb_chain->chain = chain;
- INIT_LIST_HEAD(&fdb_chain->prios_list);
-
- err = create_fdb_chain_restore(fdb_chain);
- if (err)
- goto err_restore;
-
- err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
- chain_params);
- if (err)
- goto err_insert;
-
- return fdb_chain;
-
-err_insert:
- destroy_fdb_chain_restore(fdb_chain);
-err_restore:
- kvfree(fdb_chain);
- return ERR_PTR(err);
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
-
- rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
- chain_params);
-
- destroy_fdb_chain_restore(fdb_chain);
- kvfree(fdb_chain);
-}
-
-static struct fdb_chain *
-mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
-{
- struct fdb_chain *fdb_chain;
-
- fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
- chain_params);
- if (!fdb_chain) {
- fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
- if (IS_ERR(fdb_chain))
- return fdb_chain;
- }
-
- fdb_chain->ref++;
-
- return fdb_chain;
-}
-
-static struct mlx5_flow_handle *
-mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
- struct mlx5_flow_table *fdb,
- struct mlx5_flow_table *next_fdb)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act act = {};
-
- act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
- act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = next_fdb;
-
- if (next_fdb == tc_end_fdb(esw) &&
- mlx5_esw_chains_prios_supported(esw)) {
- act.modify_hdr = fdb_chain->miss_modify_hdr;
- act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- }
-
- return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1);
-}
-
-static int
-mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
- struct mlx5_flow_table *next_fdb)
-{
- struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
- struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
- struct fdb_prio *pos;
- int n = 0, err;
-
- if (fdb_prio->key.level)
- return 0;
-
- /* Iterate in reverse order until reaching the level 0 rule of
- * the previous priority, adding all the miss rules first, so we can
- * revert them if any of them fails.
- */
- pos = fdb_prio;
- list_for_each_entry_continue_reverse(pos,
- &fdb_chain->prios_list,
- list) {
- miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
- pos->fdb,
- next_fdb);
- if (IS_ERR(miss_rules[n])) {
- err = PTR_ERR(miss_rules[n]);
- goto err_prev_rule;
- }
-
- n++;
- if (!pos->key.level)
- break;
- }
-
- /* Success, delete old miss rules, and update the pointers. */
- n = 0;
- pos = fdb_prio;
- list_for_each_entry_continue_reverse(pos,
- &fdb_chain->prios_list,
- list) {
- mlx5_del_flow_rules(pos->miss_rule);
-
- pos->miss_rule = miss_rules[n];
- pos->next_fdb = next_fdb;
-
- n++;
- if (!pos->key.level)
- break;
- }
-
- return 0;
-
-err_prev_rule:
- while (--n >= 0)
- mlx5_del_flow_rules(miss_rules[n]);
-
- return err;
-}
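
mlx5_esw_chains_update_prio_prevs() above re-points earlier priority levels at a new next_fdb with make-before-break semantics: all replacement miss rules are installed first, so a mid-way failure can be rolled back by deleting only the new rules, and only then are the old rules removed and the pointers updated. Traffic therefore always sees either the complete old or the complete new chain. In outline:

/* phase 1 (reverse walk): add a new miss rule per affected prio;
 *                          on error, delete the ones already added.
 * phase 2 (same walk):     delete the old miss rule, store the new
 *                          rule and next_fdb.
 */
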
-
-static void
-mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
-{
- if (--fdb_chain->ref == 0)
- mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
-}
-
-static struct fdb_prio *
-mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
- u32 chain, u32 prio, u32 level)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
- struct mlx5_flow_group *miss_group;
- struct fdb_prio *fdb_prio = NULL;
- struct mlx5_flow_table *next_fdb;
- struct fdb_chain *fdb_chain;
- struct mlx5_flow_table *fdb;
- struct list_head *pos;
- u32 *flow_group_in;
- int err;
-
- fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
- if (IS_ERR(fdb_chain))
- return ERR_CAST(fdb_chain);
-
- fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!fdb_prio || !flow_group_in) {
- err = -ENOMEM;
- goto err_alloc;
- }
-
- /* Chain's prio list is sorted by prio and level.
- * And all levels of some prio point to the next prio's level 0.
- * Example list (prio, level):
- * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
-	 * In hardware, we will have the following pointers:
- * (3,0) -> (5,0) -> (7,0) -> Slow path
- * (3,1) -> (5,0)
- * (5,1) -> (7,0)
- * (6,1) -> (7,0)
- */
-
- /* Default miss for each chain: */
- next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
- tc_slow_fdb(esw) :
- tc_end_fdb(esw);
- list_for_each(pos, &fdb_chain->prios_list) {
- struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
-
- /* exit on first pos that is larger */
- if (prio < p->key.prio || (prio == p->key.prio &&
- level < p->key.level)) {
- /* Get next level 0 table */
- next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
- break;
- }
- }
-
- fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- goto err_create;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
- fdb->max_fte - 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- fdb->max_fte - 1);
- miss_group = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(miss_group)) {
- err = PTR_ERR(miss_group);
- goto err_group;
- }
-
- /* Add miss rule to next_fdb */
- miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
- if (IS_ERR(miss_rule)) {
- err = PTR_ERR(miss_rule);
- goto err_miss_rule;
- }
-
- fdb_prio->miss_group = miss_group;
- fdb_prio->miss_rule = miss_rule;
- fdb_prio->next_fdb = next_fdb;
- fdb_prio->fdb_chain = fdb_chain;
- fdb_prio->key.chain = chain;
- fdb_prio->key.prio = prio;
- fdb_prio->key.level = level;
- fdb_prio->fdb = fdb;
-
- err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
- if (err)
- goto err_insert;
-
- list_add(&fdb_prio->list, pos->prev);
-
- /* Table is ready, connect it */
- err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
- if (err)
- goto err_update;
-
- kvfree(flow_group_in);
- return fdb_prio;
-
-err_update:
- list_del(&fdb_prio->list);
- rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
-err_insert:
- mlx5_del_flow_rules(miss_rule);
-err_miss_rule:
- mlx5_destroy_flow_group(miss_group);
-err_group:
- mlx5_esw_chains_destroy_fdb_table(esw, fdb);
-err_create:
-err_alloc:
- kvfree(fdb_prio);
- kvfree(flow_group_in);
- mlx5_esw_chains_put_fdb_chain(fdb_chain);
- return ERR_PTR(err);
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
- struct fdb_prio *fdb_prio)
-{
- struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
-
- WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
- fdb_prio->next_fdb));
-
- list_del(&fdb_prio->list);
- rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
- mlx5_del_flow_rules(fdb_prio->miss_rule);
- mlx5_destroy_flow_group(fdb_prio->miss_group);
- mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
- mlx5_esw_chains_put_fdb_chain(fdb_chain);
- kvfree(fdb_prio);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level)
-{
- struct mlx5_flow_table *prev_fts;
- struct fdb_prio *fdb_prio;
- struct fdb_prio_key key;
- int l = 0;
-
- if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
- chain != mlx5_esw_chains_get_ft_chain(esw)) ||
- prio > mlx5_esw_chains_get_prio_range(esw) ||
- level > mlx5_esw_chains_get_level_range(esw))
- return ERR_PTR(-EOPNOTSUPP);
-
- /* create earlier levels for correct fs_core lookup when
- * connecting tables.
- */
- for (l = 0; l < level; l++) {
- prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
- if (IS_ERR(prev_fts)) {
- fdb_prio = ERR_CAST(prev_fts);
- goto err_get_prevs;
- }
- }
-
- key.chain = chain;
- key.prio = prio;
- key.level = level;
-
- mutex_lock(&esw_chains_lock(esw));
- fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
- prio_params);
- if (!fdb_prio) {
- fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
- prio, level);
- if (IS_ERR(fdb_prio))
- goto err_create_prio;
- }
-
- ++fdb_prio->ref;
- mutex_unlock(&esw_chains_lock(esw));
-
- return fdb_prio->fdb;
-
-err_create_prio:
- mutex_unlock(&esw_chains_lock(esw));
-err_get_prevs:
- while (--l >= 0)
- mlx5_esw_chains_put_table(esw, chain, prio, l);
- return ERR_CAST(fdb_prio);
-}
-
-void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level)
-{
- struct fdb_prio *fdb_prio;
- struct fdb_prio_key key;
-
- key.chain = chain;
- key.prio = prio;
- key.level = level;
-
- mutex_lock(&esw_chains_lock(esw));
- fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
- prio_params);
- if (!fdb_prio)
- goto err_get_prio;
-
- if (--fdb_prio->ref == 0)
- mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
- mutex_unlock(&esw_chains_lock(esw));
-
- while (level-- > 0)
- mlx5_esw_chains_put_table(esw, chain, prio, level);
-
- return;
-
-err_get_prio:
- mutex_unlock(&esw_chains_lock(esw));
- WARN_ONCE(1,
- "Couldn't find table: (chain: %d prio: %d level: %d)",
- chain, prio, level);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
-{
- return tc_end_fdb(esw);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
-{
- u32 chain, prio, level;
- int err;
-
- if (!fdb_ignore_flow_level_supported(esw)) {
- err = -EOPNOTSUPP;
-
- esw_warn(esw->dev,
- "Couldn't create global flow table, ignore_flow_level not supported.");
- goto err_ignore;
- }
-
- chain = mlx5_esw_chains_get_chain_range(esw),
- prio = mlx5_esw_chains_get_prio_range(esw);
- level = mlx5_esw_chains_get_level_range(esw);
-
- return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
-
-err_ignore:
- return ERR_PTR(err);
-}
-
-void
-mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ft)
-{
- mlx5_esw_chains_destroy_fdb_table(esw, ft);
-}
-
-static int
-mlx5_esw_chains_init(struct mlx5_eswitch *esw)
-{
- struct mlx5_esw_chains_priv *chains_priv;
- struct mlx5_core_dev *dev = esw->dev;
- u32 max_flow_counter, fdb_max;
- struct mapping_ctx *mapping;
- int err;
-
- chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
- if (!chains_priv)
- return -ENOMEM;
- esw_chains_priv(esw) = chains_priv;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
- esw_debug(dev,
- "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, esw->params.large_group_num, fdb_max);
-
- mlx5_esw_chains_init_sz_pool(esw);
-
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
- } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
- } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
-		/* Disabled when the ttl workaround is needed, e.g.
-		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
-		 */
- esw_warn(dev,
- "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- } else {
- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
- mlx5_esw_chains_get_chain_range(esw),
- mlx5_esw_chains_get_prio_range(esw));
- }
-
- err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
- if (err)
- goto init_chains_ht_err;
-
- err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
- if (err)
- goto init_prios_ht_err;
-
- mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
- true);
- if (IS_ERR(mapping)) {
- err = PTR_ERR(mapping);
- goto mapping_err;
- }
- esw_chains_mapping(esw) = mapping;
-
- mutex_init(&esw_chains_lock(esw));
-
- return 0;
-
-mapping_err:
- rhashtable_destroy(&esw_prios_ht(esw));
-init_prios_ht_err:
- rhashtable_destroy(&esw_chains_ht(esw));
-init_chains_ht_err:
- kfree(chains_priv);
- return err;
-}
-
-static void
-mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
-{
- mutex_destroy(&esw_chains_lock(esw));
- mapping_destroy(esw_chains_mapping(esw));
- rhashtable_destroy(&esw_prios_ht(esw));
- rhashtable_destroy(&esw_chains_ht(esw));
-
- kfree(esw_chains_priv(esw));
-}
-
-static int
-mlx5_esw_chains_open(struct mlx5_eswitch *esw)
-{
- struct mlx5_flow_table *ft;
- int err;
-
-	/* Create tc_end_fdb(esw), the ft chain that is always created */
- ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
- 1, 0);
- if (IS_ERR(ft))
- return PTR_ERR(ft);
-
- tc_end_fdb(esw) = ft;
-
- /* Always open the root for fast path */
- ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto level_0_err;
- }
-
-	/* Open level 1 for split rules now if prios aren't supported */
- if (!mlx5_esw_chains_prios_supported(esw)) {
- err = mlx5_esw_vport_tbl_get(esw);
- if (err)
- goto level_1_err;
- }
-
- return 0;
-
-level_1_err:
- mlx5_esw_chains_put_table(esw, 0, 1, 0);
-level_0_err:
- mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
- return err;
-}
-
-static void
-mlx5_esw_chains_close(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- mlx5_esw_vport_tbl_put(esw);
- mlx5_esw_chains_put_table(esw, 0, 1, 0);
- mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
-}
-
-int
-mlx5_esw_chains_create(struct mlx5_eswitch *esw)
-{
- int err;
-
- err = mlx5_esw_chains_init(esw);
- if (err)
- return err;
-
- err = mlx5_esw_chains_open(esw);
- if (err)
- goto err_open;
-
- return 0;
-
-err_open:
- mlx5_esw_chains_cleanup(esw);
- return err;
-}
-
-void
-mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
-{
- mlx5_esw_chains_close(esw);
- mlx5_esw_chains_cleanup(esw);
-}
-
-int
-mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
- u32 *chain_mapping)
-{
- return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping);
-}
-
-int
-mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping)
-{
- return mapping_remove(esw_chains_mapping(esw), chain_mapping);
-}
-
-int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
- u32 *chain)
-{
- int err;
-
- err = mapping_find(esw_chains_mapping(esw), tag, chain);
- if (err) {
- esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);
- return -ENOENT;
- }
-
- return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
deleted file mode 100644
index 7679ac359e31..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020 Mellanox Technologies. */
-
-#ifndef __MLX5_ESW_CHAINS_H__
-#define __MLX5_ESW_CHAINS_H__
-
-#include "eswitch.h"
-
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-
-bool
-mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
-bool
-mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level);
-void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
-void
-mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ft);
-
-int
-mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
- u32 *chain_mapping);
-int
-mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
- u32 chain_mapping);
-
-int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
-void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
-
-int
-mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
-
-#else /* CONFIG_MLX5_CLS_ACT */
-
-static inline struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level) {}
-
-static inline struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
-
-static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
-static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
-
-#endif /* CONFIG_MLX5_CLS_ACT */
-
-#endif /* __MLX5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 7455fbd21a0a..66393fbdcd94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -62,6 +63,9 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+#define esw_chains(esw) \
+ ((esw)->fdb_table.offloads.esw_chains_priv)
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_handle *allow_rule;
@@ -154,12 +158,6 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
};
-enum offloads_fdb_flags {
- ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
-};
-
-struct mlx5_esw_chains_priv;
-
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -183,7 +181,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
- struct mlx5_esw_chains_priv *esw_chains_priv;
+ struct mlx5_fs_chains *esw_chains_priv;
struct {
DECLARE_HASHTABLE(table, 8);
/* Protects vports.table */
@@ -330,7 +328,7 @@ struct mlx5_termtbl_handle;
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec);
@@ -350,19 +348,19 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
@@ -402,7 +400,6 @@ struct mlx5_esw_flow_attr {
int split_count;
int out_count;
- int action;
__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
@@ -414,19 +411,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5_modify_hdr *modify_hdr;
- u8 inner_match_level;
- u8 outer_match_level;
- struct mlx5_fc *counter;
- u32 chain;
- u16 prio;
- u32 dest_chain;
- u32 flags;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_table *dest_ft;
- struct mlx5_ct_attr ct_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
};
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
@@ -452,9 +437,9 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b381cbca5852..ffd5d540a19e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -39,12 +39,13 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
-#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
+#include "lib/fs_chains.h"
+#include "en_tc.h"
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
@@ -66,6 +67,12 @@ struct mlx5_vport_key {
u16 vhca_id;
} __packed;
+struct mlx5_vport_tbl_attr {
+ u16 chain;
+ u16 prio;
+ u16 vport;
+};
+
struct mlx5_vport_table {
struct hlist_node hlist;
struct mlx5_flow_table *fdb;
@@ -94,10 +101,10 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
}
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_vport_tbl_attr *attr,
struct mlx5_vport_key *key)
{
- key->vport = attr->in_rep->vport;
+ key->vport = attr->vport;
key->chain = attr->chain;
key->prio = attr->prio;
key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
@@ -118,7 +125,7 @@ esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32
}
static void
-esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_vport_table *e;
struct mlx5_vport_key key;
@@ -138,7 +145,7 @@ out:
}
static struct mlx5_flow_table *
-esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *ns;
@@ -189,16 +196,15 @@ err_alloc:
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_flow_attr attr = {};
- struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport_tbl_attr attr;
struct mlx5_flow_table *fdb;
struct mlx5_vport *vport;
int i;
+ attr.chain = 0;
attr.prio = 1;
- attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
- attr.in_rep->vport = vport->vport;
+ attr.vport = vport->vport;
fdb = esw_vport_tbl_get(esw, &attr);
if (IS_ERR(fdb))
goto out;
@@ -212,15 +218,14 @@ out:
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_flow_attr attr = {};
- struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport_tbl_attr attr;
struct mlx5_vport *vport;
int i;
+ attr.chain = 0;
attr.prio = 1;
- attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
- attr.in_rep->vport = vport->vport;
+ attr.vport = vport->vport;
esw_vport_tbl_put(esw, &attr);
}
}
@@ -290,11 +295,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
- bool split = !!(attr->split_count);
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ bool split = !!(esw_attr->split_count);
+ struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
@@ -308,13 +316,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
- flow_act.vlan[0].vid = attr->vlan_vid[0];
- flow_act.vlan[0].prio = attr->vlan_prio[0];
+ flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
+ flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
+ flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
- flow_act.vlan[1].vid = attr->vlan_vid[1];
- flow_act.vlan[1].prio = attr->vlan_prio[1];
+ flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
+ flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
+ flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
}
}
@@ -329,12 +337,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
i++;
} else if (attr->dest_chain) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
- 1, 0);
+ ft = mlx5_chains_get_table(chains, attr->dest_chain,
+ 1, 0);
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_create_goto_table;
@@ -344,28 +352,29 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
dest[i].ft = ft;
i++;
} else {
- for (j = attr->split_count; j < attr->out_count; j++) {
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->dests[j].rep->vport;
+ dest[i].vport.num = esw_attr->dests[j].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
+ MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |=
MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
+ flow_act.pkt_reformat =
+ esw_attr->dests[j].pkt_reformat;
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
dest[i].vport.pkt_reformat =
- attr->dests[j].pkt_reformat;
+ esw_attr->dests[j].pkt_reformat;
}
i++;
}
}
}
- if (attr->decap_pkt_reformat)
- flow_act.pkt_reformat = attr->decap_pkt_reformat;
+ if (esw_attr->decap_pkt_reformat)
+ flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -382,26 +391,30 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.modify_hdr = attr->modify_hdr;
if (split) {
- fdb = esw_vport_tbl_get(esw, attr);
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+
+ fdb = esw_vport_tbl_get(esw, &fwd_attr);
} else {
if (attr->chain || attr->prio)
- fdb = mlx5_esw_chains_get_table(esw, attr->chain,
- attr->prio, 0);
+ fdb = mlx5_chains_get_table(chains, attr->chain,
+ attr->prio, 0);
else
- fdb = attr->fdb;
+ fdb = attr->ft;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
- rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
+ rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
else
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
@@ -414,12 +427,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
err_add_rule:
if (split)
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
- mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
+ mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
err_create_goto_table:
return rule;
}
@@ -427,46 +440,51 @@ err_create_goto_table:
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i;
- fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
+ fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
- fwd_fdb = esw_vport_tbl_get(esw, attr);
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- for (i = 0; i < attr->split_count; i++) {
+ for (i = 0; i < esw_attr->split_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->dests[i].rep->vport;
+ dest[i].vport.num = esw_attr->dests[i].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
+ MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
+ dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb,
i++;
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -481,9 +499,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
return rule;
}
@@ -491,10 +509,13 @@ err_get_fast:
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
bool fwd_rule)
{
- bool split = (attr->split_count > 0);
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ bool split = (esw_attr->split_count > 0);
+ struct mlx5_vport_tbl_attr fwd_attr;
int i;
mlx5_del_flow_rules(rule);
@@ -502,31 +523,36 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (attr->dests[i].termtbl)
- mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ if (esw_attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
}
}
atomic64_dec(&esw->offloads.num_flows);
+ if (fwd_rule || split) {
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+ }
+
if (fwd_rule) {
- esw_vport_tbl_put(esw, attr);
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
} else {
if (split)
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
- 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
if (attr->dest_chain)
- mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
+ mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
}
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, false);
}
@@ -534,7 +560,7 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
@@ -611,9 +637,10 @@ out_notsupp:
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_eswitch_rep *vport = NULL;
bool push, pop, fwd;
int err = 0;
@@ -629,17 +656,17 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
- err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
if (err)
goto unlock;
attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
- vport = esw_vlan_action_get_vport(attr, push, pop);
+ vport = esw_vlan_action_get_vport(esw_attr, push, pop);
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
+ if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}
@@ -662,11 +689,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
- SET_VLAN_INSERT | SET_VLAN_STRIP);
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
+ 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan_vid[0];
+ vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@@ -679,9 +706,10 @@ unlock:
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_eswitch_rep *vport = NULL;
bool push, pop, fwd;
int err = 0;
@@ -699,11 +727,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
- vport = esw_vlan_action_get_vport(attr, push, pop);
+ vport = esw_vlan_action_get_vport(esw_attr, push, pop);
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
+ if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
goto out;
@@ -1137,6 +1165,126 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
}
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#define fdb_modify_header_fwd_to_table_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
+static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
+ *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
+ } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
+		/* Disabled when the ttl workaround is needed, e.g.
+		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
+		 */
+ esw_warn(dev,
+ "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ } else {
+ *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_info(dev, "Supported tc chains and prios offload\n");
+ }
+
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
+}
+
+static int
+esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_table *nf_ft, *ft;
+ struct mlx5_chains_attr attr = {};
+ struct mlx5_fs_chains *chains;
+ u32 fdb_max;
+ int err;
+
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_init_chains_offload_flags(esw, &attr.flags);
+ attr.ns = MLX5_FLOW_NAMESPACE_FDB;
+ attr.max_ft_sz = fdb_max;
+ attr.max_grp_num = esw->params.large_group_num;
+ attr.default_ft = miss_fdb;
+ attr.max_restore_tag = esw_get_max_restore_tag(esw);
+
+ chains = mlx5_chains_create(dev, &attr);
+ if (IS_ERR(chains)) {
+ err = PTR_ERR(chains);
+ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ return err;
+ }
+
+ esw->fdb_table.offloads.esw_chains_priv = chains;
+
+	/* Create tc_end_ft, the ft chain that is always created */
+ nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
+ 1, 0);
+ if (IS_ERR(nf_ft)) {
+ err = PTR_ERR(nf_ft);
+ goto nf_ft_err;
+ }
+
+ /* Always open the root for fast path */
+ ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_0_err;
+ }
+
+	/* Open level 1 for split fdb rules now if prios aren't supported */
+ if (!mlx5_chains_prios_supported(chains)) {
+ err = mlx5_esw_vport_tbl_get(esw);
+ if (err)
+ goto level_1_err;
+ }
+
+ mlx5_chains_set_end_ft(chains, nf_ft);
+
+ return 0;
+
+level_1_err:
+ mlx5_chains_put_table(chains, 0, 1, 0);
+level_0_err:
+ mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
+nf_ft_err:
+ mlx5_chains_destroy(chains);
+ esw->fdb_table.offloads.esw_chains_priv = NULL;
+
+ return err;
+}
+
+static void
+esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ mlx5_esw_vport_tbl_put(esw);
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
+ mlx5_chains_destroy(chains);
+}
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static int
+esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
+{ return 0; }
+
+static void
+esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
+{}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1192,9 +1340,9 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- err = mlx5_esw_chains_create(esw);
+ err = esw_chains_create(esw, fdb);
if (err) {
- esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
}
@@ -1219,35 +1367,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
- /* create peer esw miss group */
- memset(flow_group_in, 0, inlen);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+ /* create peer esw miss group */
+ memset(flow_group_in, 0, inlen);
- esw_set_flow_group_source_port(esw, flow_group_in);
+ esw_set_flow_group_source_port(esw, flow_group_in);
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- flow_group_in,
- match_criteria);
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ flow_group_in,
+ match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_eswitch_owner_vhca_id);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
- MLX5_SET(create_flow_group_in, flow_group_in,
- source_eswitch_owner_vhca_id_valid, 1);
- }
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ }
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- ix + esw->total_vports - 1);
- ix += esw->total_vports;
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ix + esw->total_vports - 1);
+ ix += esw->total_vports;
- g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(g)) {
- err = PTR_ERR(g);
- esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
- goto peer_miss_err;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto peer_miss_err;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
}
- esw->fdb_table.offloads.peer_miss_grp = g;
/* create miss group */
memset(flow_group_in, 0, inlen);
@@ -1281,11 +1431,12 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
miss_rule_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- mlx5_esw_chains_destroy(esw);
+ esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
@@ -1305,10 +1456,12 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
- mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
- mlx5_esw_chains_destroy(esw);
+ esw_chains_destroy(esw, esw_chains(esw));
+
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
@@ -1864,18 +2017,6 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static bool
-esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
-{
- return mlx5_core_mp_enabled(esw->dev);
-}
-
-static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
-{
- return esw_check_vport_match_metadata_mandatory(esw) &&
- esw_check_vport_match_metadata_supported(esw);
-}
-
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
@@ -1908,9 +2049,6 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK)
- return 0;
-
vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
@@ -1919,26 +2057,56 @@ static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+ if (!vport->default_metadata)
return;
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
+static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_offloads_vport_metadata_cleanup(esw, vport);
+}
+
+static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
+
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ err = esw_offloads_vport_metadata_setup(esw, vport);
+ if (err)
+ goto metadata_err;
+ }
+
+ return 0;
+
+metadata_err:
+ esw_offloads_metadata_uninit(esw);
+ return err;
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
- err = esw_offloads_vport_metadata_setup(esw, vport);
- if (err)
- goto metadata_err;
-
err = esw_acl_ingress_ofld_setup(esw, vport);
if (err)
- goto ingress_err;
+ return err;
if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_acl_egress_ofld_setup(esw, vport);
@@ -1950,9 +2118,6 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
egress_err:
esw_acl_ingress_ofld_cleanup(esw, vport);
-ingress_err:
- esw_offloads_vport_metadata_cleanup(esw, vport);
-metadata_err:
return err;
}
@@ -1962,22 +2127,14 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
{
esw_acl_egress_ofld_cleanup(vport);
esw_acl_ingress_ofld_cleanup(esw, vport);
- esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int err;
-
- if (esw_use_vport_metadata(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- err = esw_vport_create_offloads_acl_tables(esw, vport);
- if (err)
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
- return err;
+ return esw_vport_create_offloads_acl_tables(esw, vport);
}
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -1986,7 +2143,6 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
@@ -2144,7 +2300,14 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
err = mlx5_esw_host_number_init(esw);
if (err)
- goto err_vport_metadata;
+ goto err_metadata;
+
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
+ err = esw_offloads_metadata_init(esw);
+ if (err)
+ goto err_metadata;
err = esw_set_passing_vport_metadata(esw, true);
if (err)
@@ -2178,6 +2341,9 @@ err_uplink:
err_steering_init:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
+ esw_offloads_metadata_uninit(esw);
+err_metadata:
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
@@ -2211,6 +2377,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ esw_offloads_metadata_uninit(esw);
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 17a0d2bc102b..ec679560a95d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/fs.h>
#include "eswitch.h"
+#include "en_tc.h"
#include "fs_core.h"
struct mlx5_termtbl_handle {
@@ -228,10 +229,11 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
@@ -244,8 +246,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
return true;
/* hairpin */
- for (i = attr->split_count; i < attr->out_count; i++)
- if (attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 831d2c39e153..9f6d97eae0ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -388,9 +388,9 @@ static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
mlx5_fpga_conn_arm_cq(conn);
}
-static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
+static void mlx5_fpga_conn_cq_tasklet(struct tasklet_struct *t)
{
- struct mlx5_fpga_conn *conn = (void *)data;
+ struct mlx5_fpga_conn *conn = from_tasklet(conn, t, cq.tasklet);
if (unlikely(!conn->qp.active))
return;
@@ -478,8 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
- tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
- (unsigned long)conn);
+ tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9ccec5f8b92a..6141e9ec8190 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -654,7 +654,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
fte->action = *flow_act;
fte->flow_context = spec->flow_context;
- tree_init_node(&fte->node, NULL, del_sw_fte);
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
return fte;
}
@@ -1595,11 +1595,12 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
if (ignore_level) {
- if (ft->type != FS_FT_FDB)
+ if (ft->type != FS_FT_FDB &&
+ ft->type != FS_FT_NIC_RX)
return false;
if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
- dest->ft->type != FS_FT_FDB)
+ ft->type != dest->ft->type)
return false;
}
@@ -1792,7 +1793,6 @@ skip_search:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
- tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1891,7 +1891,6 @@ search_again_locked:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
- tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;
@@ -2001,7 +2000,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
up_write_ref_node(&fte->node, false);
} else {
del_hw_fte(&fte->node);
- up_write(&fte->node.lock);
+ /* Avoid double call to del_hw_fte */
+ fte->node.del_hw_func = NULL;
+ up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
}
kfree(handle);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 874c70e8cc54..33081b24f10a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -102,7 +102,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
if (ldev->pf[i].netdev == ndev)
return i;
- return -1;
+ return -ENOENT;
}
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
@@ -271,7 +271,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
bool do_bond, roce_lag;
int err;
- if (!dev0 || !dev1)
+ if (!mlx5_lag_is_ready(ldev))
return;
spin_lock(&lag_lock);
@@ -355,7 +355,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
- bool is_bonded;
+ bool is_bonded, is_in_lag, mode_supported;
int bond_status = 0;
int num_slaves = 0;
int idx;
@@ -374,7 +374,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx > -1)
+ if (idx >= 0)
bond_status |= (1 << idx);
num_slaves++;
@@ -391,13 +391,24 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and that master has no other slaves.
- * Lag mode must be activebackup or hash.
*/
- is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
- (bond_status == 0x3) &&
- ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
- (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
+ if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, PF is configured with more than 64 VFs");
+ return 0;
+ }
+
+ /* Lag mode must be activebackup or hash. */
+ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
+
+ if (is_in_lag && !mode_supported)
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, TX type isn't supported");
+
+ is_bonded = is_in_lag && mode_supported;
if (tracker->is_bonded != is_bonded) {
tracker->is_bonded = is_bonded;
return 1;
@@ -418,7 +429,7 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 0;
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
- if (idx == -1)
+ if (idx < 0)
return 0;
/* This information is used to determine virtual to physical
@@ -445,6 +456,10 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
+
+ if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
+ return NOTIFY_DONE;
+
tracker = ldev->tracker;
switch (event) {
@@ -493,14 +508,14 @@ static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
kfree(ldev);
}
-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = PCI_FUNC(dev->pdev->devfn);
if (fn >= MLX5_MAX_PORTS)
- return;
+ return -EPERM;
spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
@@ -511,6 +526,8 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
spin_unlock(&lag_lock);
+
+ return fn;
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -537,11 +554,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- int err;
+ int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -556,7 +571,18 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
}
- mlx5_lag_dev_add_pf(ldev, dev, netdev);
+ if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+ return;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ tmp_dev = ldev->pf[i].dev;
+ if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+ MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ break;
+ }
+
+ if (i >= MLX5_MAX_PORTS)
+ ldev->flags |= MLX5_LAG_FLAG_READY;
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -587,6 +613,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
mlx5_lag_dev_remove_pf(ldev, dev);
+ ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (ldev->pf[i].dev)
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..8d8cf2d0bc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -16,6 +16,7 @@ enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+ MLX5_LAG_FLAG_READY = 1 << 3,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
@@ -59,6 +60,12 @@ __mlx5_lag_is_active(struct mlx5_lag *ldev)
return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
+static inline bool
+mlx5_lag_is_ready(struct mlx5_lag *ldev)
+{
+ return ldev->flags & MLX5_LAG_FLAG_READY;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 9e68f5926ab6..88e58ac902de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,7 +11,7 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
+ if (!mlx5_lag_is_ready(ldev))
return false;
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
@@ -131,7 +131,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
- mlx5_lag_set_port_affinity(ldev, ++i);
+ if (i < 0)
+ i = MLX5_LAG_NORMAL_AFFINITY;
+ else
+ ++i;
+
+ mlx5_lag_set_port_affinity(ldev, i);
}
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 2d55b7c22c03..c70c1f0ca0c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -150,28 +150,30 @@ static void mlx5_pps_out(struct work_struct *work)
static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
- overflow_work);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_clock *clock;
unsigned long flags;
+ clock = container_of(dwork, struct mlx5_clock, overflow_work);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
u64 ns = timespec64_to_ns(ts);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -180,13 +182,12 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_internal_timer(mdev, sts);
ns = timecounter_cyc2time(&clock->tc, cycles);
@@ -199,13 +200,14 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -213,12 +215,13 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
- u64 adj;
- u32 diff;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ u32 diff;
+ u64 adj;
+
if (delta < 0) {
neg_adj = 1;
@@ -229,11 +232,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -431,13 +435,11 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
default:
return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
- .name = "mlx5_p2p",
+ .name = "mlx5_ptp",
.max_adj = 100000000,
.n_alarm = 0,
.n_ext_ts = 0,
@@ -465,7 +467,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
- struct mlx5_core_dev *mdev = clock->mdev;
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -538,20 +541,23 @@ static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
- struct mlx5_core_dev *mdev = clock->mdev;
struct ptp_clock_event ptp_event;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
+ struct mlx5_core_dev *mdev;
struct timespec64 ts;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
- be64_to_cpu(eqe->data.pps.time_stamp));
+ ptp_event.timestamp =
+ mlx5_timecounter_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real =
@@ -574,8 +580,8 @@ static int mlx5_pps_event(struct notifier_block *nb,
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
- schedule_work(&clock->pps_info.out_work);
write_sequnlock_irqrestore(&clock->lock, flags);
+ schedule_work(&clock->pps_info.out_work);
break;
default:
mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
@@ -605,7 +611,6 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
- clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 4aaca7400fb2..078a7cc29bf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -77,7 +77,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
-void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
new file mode 100644
index 000000000000..947f346bdc2d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "lib/fs_chains.h"
+#include "en/mapping.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+
+#define chains_lock(chains) ((chains)->lock)
+#define chains_ht(chains) ((chains)->chains_ht)
+#define chains_mapping(chains) ((chains)->chains_mapping)
+#define prios_ht(chains) ((chains)->prios_ht)
+#define ft_pool_left(chains) ((chains)->ft_left)
+#define tc_default_ft(chains) ((chains)->tc_default_ft)
+#define tc_end_ft(chains) ((chains)->tc_end_ft)
+#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
+ FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
+
+/* Firmware currently supports 4 pools of 4 sizes (FT_POOLS), each backed
+ * by a 16M virtual memory region (FT_SIZE) that is duplicated for every
+ * flow table pool. We can allocate up to 16M from each pool, and we track
+ * how much of it we have used via mlx5_chains_get_avail_sz_from_pool.
+ * Firmware doesn't report any of this for now.
+ * FT_POOLS is expected to be sorted from large to small and to match the
+ * firmware pools.
+ */
+#define FT_SIZE (16 * 1024 * 1024)
+static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 128 };
+#define FT_TBL_SZ (64 * 1024)
+
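
With FT_SIZE of 16M duplicated per pool, the per-pool table budgets that mlx5_chains_init_sz_pool() computes further down come out to 4, 16, 256 and 131072 tables for the four sizes above (for pool sizes not exceeding the attr->max_ft_sz cap). A standalone check of that arithmetic, mirroring FT_SIZE and FT_POOLS; not driver code:

	#include <stdio.h>

	#define FT_SIZE (16 * 1024 * 1024)
	static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
						 1 * 1024 * 1024,
						 64 * 1024,
						 128 };

	int main(void)
	{
		/* Prints 4, 16, 256, 131072: tables of each size per 16M pool. */
		for (unsigned int i = 0; i < sizeof(FT_POOLS) / sizeof(FT_POOLS[0]); i++)
			printf("%u\n", FT_SIZE / FT_POOLS[i]);
		return 0;
	}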
+struct mlx5_fs_chains {
+ struct mlx5_core_dev *dev;
+
+ struct rhashtable chains_ht;
+ struct rhashtable prios_ht;
+ /* Protects above chains_ht and prios_ht */
+ struct mutex lock;
+
+ struct mlx5_flow_table *tc_default_ft;
+ struct mlx5_flow_table *tc_end_ft;
+ struct mapping_ctx *chains_mapping;
+
+ enum mlx5_flow_namespace_type ns;
+ u32 group_num;
+ u32 flags;
+
+ int ft_left[ARRAY_SIZE(FT_POOLS)];
+};
+
+struct fs_chain {
+ struct rhash_head node;
+
+ u32 chain;
+
+ int ref;
+ int id;
+
+ struct mlx5_fs_chains *chains;
+ struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
+};
+
+struct prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct prio {
+ struct rhash_head node;
+ struct list_head list;
+
+ struct prio_key key;
+
+ int ref;
+
+ struct fs_chain *chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+ .head_offset = offsetof(struct fs_chain, node),
+ .key_offset = offsetof(struct fs_chain, chain),
+ .key_len = sizeof_field(struct fs_chain, chain),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+ .head_offset = offsetof(struct prio, node),
+ .key_offset = offsetof(struct prio, key),
+ .key_len = sizeof_field(struct prio, key),
+ .automatic_shrinking = true,
+};
+
+bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+}
+
+bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_prios_supported(chains) &&
+ mlx5_chains_ignore_flow_level_supported(chains);
+}
+
+u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX - 1;
+
+	/* We should get here only in the eswitch case */
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_get_chain_range(chains) + 1;
+}
+
+u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+	/* We should get here only in the eswitch case */
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
+{
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ /* Same value for FDB and NIC RX tables */
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ tc_end_ft(chains) = ft;
+}
+
+#define POOL_NEXT_SIZE 0
+static int
+mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
+ int desired_size)
+{
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --ft_pool_left(chains)[found_i];
+ return FT_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+static void
+mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (sz == FT_POOLS[i]) {
+ ++ft_pool_left(chains)[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
+}
+
+static void
+mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
+ ft_pool_left(chains)[i] =
+ FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
+}
+
+static struct mlx5_flow_table *
+mlx5_chains_create_table(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int sz;
+
+ if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
+ mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+ ft_attr.max_fte = sz;
+
+	/* We use tc_default_ft(chains) as the table's next_ft until
+	 * ignore_flow_level is allowed on FT creation, not just on FTEs.
+	 * The caller should instead add an explicit miss rule if needed.
+	 */
+ ft_attr.next_ft = tc_default_ft(chains);
+
+	/* The root table (chain 0, prio 1, level 0) is required to be
+ * connected to the previous fs_core managed prio.
+ * We always create it, as a managed table, in order to align with
+ * fs_core logic.
+ */
+ if (!mlx5_chains_ignore_flow_level_supported(chains) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = level;
+ ft_attr.prio = prio - 1;
+ ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
+ mlx5_get_fdb_sub_ns(chains->dev, chain) :
+ mlx5_get_flow_namespace(chains->dev, chains->ns);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1.
+ *
+ * To connect them, we use explicit miss rules with
+	 * ignore_flow_level. The caller is responsible for creating
+	 * these rules (if needed).
+ */
+ ft_attr.level = 1;
+ ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = chains->group_num;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(ft), chain, prio, level, sz);
+ mlx5_chains_put_sz_to_pool(chains, sz);
+ return ft;
+ }
+
+ return ft;
+}
+
+static void
+mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
+ mlx5_destroy_flow_table(ft);
+}
+
+static int
+create_chain_restore(struct fs_chain *chain)
+{
+ struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
+ char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ struct mlx5_fs_chains *chains = chain->chains;
+ enum mlx5e_tc_attr_to_reg chain_to_reg;
+ struct mlx5_modify_hdr *mod_hdr;
+ u32 index;
+ int err;
+
+ if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
+ !mlx5_chains_prios_supported(chains))
+ return 0;
+
+ err = mapping_add(chains_mapping(chains), &chain->chain, &index);
+ if (err)
+ return err;
+ if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
+ /* we got the special default flow tag id, so we won't know
+ * if we actually marked the packet with the restore rule
+ * we create.
+ *
+ * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
+ */
+ err = mapping_add(chains_mapping(chains),
+ &chain->chain, &index);
+ mapping_remove(chains_mapping(chains),
+ MLX5_FS_DEFAULT_FLOW_TAG);
+ if (err)
+ return err;
+ }
+
+ chain->id = index;
+
+ if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
+ chain_to_reg = CHAIN_TO_REG;
+ chain->restore_rule = esw_add_restore_rule(esw, chain->id);
+ if (IS_ERR(chain->restore_rule)) {
+ err = PTR_ERR(chain->restore_rule);
+ goto err_rule;
+ }
+ } else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
+ /* For NIC RX we don't need a restore rule
+ * since we write the metadata to reg_b
+ * that is passed to SW directly.
+ */
+ chain_to_reg = NIC_CHAIN_TO_REG;
+ } else {
+ err = -EINVAL;
+ goto err_rule;
+ }
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
+ MLX5_SET(set_action_in, modact, offset,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
+ MLX5_SET(set_action_in, modact, length,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
+ MLX5_SET(set_action_in, modact, data, chain->id);
+ mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
+ 1, modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+ chain->miss_modify_hdr = mod_hdr;
+
+ return 0;
+
+err_mod_hdr:
+ if (!IS_ERR_OR_NULL(chain->restore_rule))
+ mlx5_del_flow_rules(chain->restore_rule);
+err_rule:
+ /* Datapath can't find this mapping, so we can safely remove it */
+ mapping_remove(chains_mapping(chains), chain->id);
+ return err;
+}
+
+static void destroy_chain_restore(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ if (!chain->miss_modify_hdr)
+ return;
+
+ if (chain->restore_rule)
+ mlx5_del_flow_rules(chain->restore_rule);
+
+ mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
+ mapping_remove(chains_mapping(chains), chain->id);
+}
+
+static struct fs_chain *
+mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s = NULL;
+ int err;
+
+ chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
+ if (!chain_s)
+ return ERR_PTR(-ENOMEM);
+
+ chain_s->chains = chains;
+ chain_s->chain = chain;
+ INIT_LIST_HEAD(&chain_s->prios_list);
+
+ err = create_chain_restore(chain_s);
+ if (err)
+ goto err_restore;
+
+ err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
+ chain_params);
+ if (err)
+ goto err_insert;
+
+ return chain_s;
+
+err_insert:
+ destroy_chain_restore(chain_s);
+err_restore:
+ kvfree(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_chain(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ rhashtable_remove_fast(&chains_ht(chains), &chain->node,
+ chain_params);
+
+ destroy_chain_restore(chain);
+ kvfree(chain);
+}
+
+static struct fs_chain *
+mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s;
+
+ chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
+ chain_params);
+ if (!chain_s) {
+ chain_s = mlx5_chains_create_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return chain_s;
+ }
+
+ chain_s->ref++;
+
+ return chain_s;
+}
+
+static struct mlx5_flow_handle *
+mlx5_chains_add_miss_rule(struct fs_chain *chain,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_NO_APPEND;
+ if (mlx5_chains_ignore_flow_level_supported(chain->chains))
+ act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ if (next_ft == tc_end_ft(chains) &&
+ chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
+ mlx5_chains_prios_supported(chains)) {
+ act.modify_hdr = chain->miss_modify_hdr;
+ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+mlx5_chains_update_prio_prevs(struct prio *prio,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fs_chain *chain = prio->chain;
+ struct prio *pos;
+ int n = 0, err;
+
+ if (prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_chains_add_miss_rule(chain,
+ pos->ft,
+ next_ft);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_ft = next_ft;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(miss_rules[n]);
+
+ return err;
+}
+
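mlx5_chains_update_prio_prevs() above is deliberately two-phase: every new miss rule is installed first, and the old rules are deleted only after all insertions succeed, so a mid-way failure can be rolled back without ever leaving a predecessor table disconnected. A generic sketch of the pattern, with hypothetical add()/del() helpers standing in for rule installation and removal:

	/* Two-phase replace with rollback; add() and del() are hypothetical. */
	static int replace_all(void **olds, void **news, int count,
			       void *(*add)(int i), void (*del)(void *item))
	{
		int n;

		for (n = 0; n < count; n++) {	/* phase 1: install new items */
			news[n] = add(n);
			if (!news[n])
				goto rollback;
		}
		for (n = 0; n < count; n++) {	/* phase 2: retire old items */
			del(olds[n]);
			olds[n] = news[n];
		}
		return 0;

	rollback:
		while (--n >= 0)		/* undo phase 1 only */
			del(news[n]);
		return -1;
	}
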
+static void
+mlx5_chains_put_chain(struct fs_chain *chain)
+{
+ if (--chain->ref == 0)
+ mlx5_chains_destroy_chain(chain);
+}
+
+static struct prio *
+mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *ft;
+ struct prio *prio_s = NULL;
+ struct fs_chain *chain_s;
+ struct list_head *pos;
+ u32 *flow_group_in;
+ int err;
+
+ chain_s = mlx5_chains_get_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return ERR_CAST(chain_s);
+
+ prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!prio_s || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Chain's prio list is sorted by prio and level.
+ * And all levels of some prio point to the next prio's level 0.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+	 * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ tc_default_ft(chains) :
+ tc_end_ft(chains);
+ list_for_each(pos, &chain_s->prios_list) {
+ struct prio *p = list_entry(pos, struct prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_ft = p->key.level == 0 ? p->ft : p->next_ft;
+ break;
+ }
+ }
+
+ ft = mlx5_chains_create_table(chains, chain, prio, level);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_create;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_ft */
+ miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ prio_s->miss_group = miss_group;
+ prio_s->miss_rule = miss_rule;
+ prio_s->next_ft = next_ft;
+ prio_s->chain = chain_s;
+ prio_s->key.chain = chain;
+ prio_s->key.prio = prio;
+ prio_s->key.level = level;
+ prio_s->ft = ft;
+
+ err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
+ prio_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&prio_s->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_chains_update_prio_prevs(prio_s, ft);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return prio_s;
+
+err_update:
+ list_del(&prio_s->list);
+ rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
+ prio_params);
+err_insert:
+ mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_chains_destroy_table(chains, ft);
+err_create:
+err_alloc:
+ kvfree(prio_s);
+ kvfree(flow_group_in);
+ mlx5_chains_put_chain(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
+ struct prio *prio)
+{
+ struct fs_chain *chain = prio->chain;
+
+ WARN_ON(mlx5_chains_update_prio_prevs(prio,
+ prio->next_ft));
+
+ list_del(&prio->list);
+ rhashtable_remove_fast(&prios_ht(chains), &prio->node,
+ prio_params);
+ mlx5_del_flow_rules(prio->miss_rule);
+ mlx5_destroy_flow_group(prio->miss_group);
+ mlx5_chains_destroy_table(chains, prio->ft);
+ mlx5_chains_put_chain(chain);
+ kvfree(prio);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct prio *prio_s;
+ struct prio_key key;
+ int l = 0;
+
+ if ((chain > mlx5_chains_get_chain_range(chains) &&
+ chain != mlx5_chains_get_nf_ft_chain(chains)) ||
+ prio > mlx5_chains_get_prio_range(chains) ||
+ level > mlx5_chains_get_level_range(chains))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ prio_s = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&chains_lock(chains));
+ prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
+ prio_params);
+ if (!prio_s) {
+ prio_s = mlx5_chains_create_prio(chains, chain,
+ prio, level);
+ if (IS_ERR(prio_s))
+ goto err_create_prio;
+ }
+
+ ++prio_s->ref;
+ mutex_unlock(&chains_lock(chains));
+
+ return prio_s->ft;
+
+err_create_prio:
+ mutex_unlock(&chains_lock(chains));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_chains_put_table(chains, chain, prio, l);
+ return ERR_CAST(prio_s);
+}
+
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct prio *prio_s;
+ struct prio_key key;
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&chains_lock(chains));
+ prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
+ prio_params);
+ if (!prio_s)
+ goto err_get_prio;
+
+ if (--prio_s->ref == 0)
+ mlx5_chains_destroy_prio(chains, prio_s);
+ mutex_unlock(&chains_lock(chains));
+
+ while (level-- > 0)
+ mlx5_chains_put_table(chains, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&chains_lock(chains));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
+{
+ return tc_end_ft(chains);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
+{
+ u32 chain, prio, level;
+ int err;
+
+ if (!mlx5_chains_ignore_flow_level_supported(chains)) {
+ err = -EOPNOTSUPP;
+
+ mlx5_core_warn(chains->dev,
+ "Couldn't create global flow table, ignore_flow_level not supported.");
+ goto err_ignore;
+ }
+
+	chain = mlx5_chains_get_chain_range(chains);
+ prio = mlx5_chains_get_prio_range(chains);
+ level = mlx5_chains_get_level_range(chains);
+
+ return mlx5_chains_create_table(chains, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_chains_destroy_table(chains, ft);
+}
+
+static struct mlx5_fs_chains *
+mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains_priv;
+ struct mapping_ctx *mapping;
+ u32 max_flow_counter;
+ int err;
+
+ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+ if (!chains_priv)
+ return ERR_PTR(-ENOMEM);
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ mlx5_core_dbg(dev,
+ "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+ max_flow_counter, attr->max_grp_num, attr->max_ft_sz);
+
+ chains_priv->dev = dev;
+ chains_priv->flags = attr->flags;
+ chains_priv->ns = attr->ns;
+ chains_priv->group_num = attr->max_grp_num;
+ tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
+
+ mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+ mlx5_chains_get_chain_range(chains_priv),
+ mlx5_chains_get_prio_range(chains_priv));
+
+ mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
+
+ err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
+ if (err)
+ goto init_chains_ht_err;
+
+ err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
+ if (err)
+ goto init_prios_ht_err;
+
+ mapping = mapping_create(sizeof(u32), attr->max_restore_tag,
+ true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto mapping_err;
+ }
+ chains_mapping(chains_priv) = mapping;
+
+ mutex_init(&chains_lock(chains_priv));
+
+ return chains_priv;
+
+mapping_err:
+ rhashtable_destroy(&prios_ht(chains_priv));
+init_prios_ht_err:
+ rhashtable_destroy(&chains_ht(chains_priv));
+init_chains_ht_err:
+ kfree(chains_priv);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
+{
+ mutex_destroy(&chains_lock(chains));
+ mapping_destroy(chains_mapping(chains));
+ rhashtable_destroy(&prios_ht(chains));
+ rhashtable_destroy(&chains_ht(chains));
+
+ kfree(chains);
+}
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains;
+
+ chains = mlx5_chains_init(dev, attr);
+
+ return chains;
+}
+
+void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains)
+{
+ mlx5_chains_cleanup(chains);
+}
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping)
+{
+ return mapping_add(chains_mapping(chains), &chain, chain_mapping);
+}
+
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
+{
+ return mapping_remove(chains_mapping(chains), chain_mapping);
+}
+
+int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag,
+ u32 *chain)
+{
+ int err;
+
+ err = mapping_find(chains_mapping(chains), tag, chain);
+ if (err) {
+ mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag);
+ return -ENOENT;
+ }
+
+ return 0;
+}
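
The mapping calls used throughout this file come in pairs: mapping_add() hands back a compact id (the restore tag written into packet metadata), and mapping_find() translates a tag seen on a missed packet back to its chain. A toy model of that contract (the driver's mapping_ctx is shared and reference-counted, which this ignores):

	#include <stdio.h>

	static unsigned int vals[16];
	static int used;

	static int map_add(unsigned int val)	/* returns a tag id; 0 stays reserved */
	{
		vals[++used] = val;
		return used;
	}

	static unsigned int map_find(int tag)
	{
		return vals[tag];
	}

	int main(void)
	{
		int tag = map_add(7);	/* chain 7 -> hardware restore tag */

		printf("chain for tag %d: %u\n", tag, map_find(tag));
		return 0;
	}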
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
new file mode 100644
index 000000000000..6d5be31b05dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_FS_CHAINS_H__
+#define __MLX5_FS_CHAINS_H__
+
+#include <linux/mlx5/fs.h>
+
+struct mlx5_fs_chains;
+
+enum mlx5_chains_flags {
+ MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED = BIT(1),
+ MLX5_CHAINS_FT_TUNNEL_SUPPORTED = BIT(2),
+};
+
+struct mlx5_chains_attr {
+ enum mlx5_flow_namespace_type ns;
+ u32 flags;
+ u32 max_ft_sz;
+ u32 max_grp_num;
+ struct mlx5_flow_table *default_ft;
+ u32 max_restore_tag;
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+bool
+mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool
+mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains,
+ u32 chain_mapping);
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
+void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
+
+int
+mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+		      u32 level) {}
+
+static inline struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains) { return ERR_PTR(-EOPNOTSUPP); }
+
+static inline struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{ return NULL; }
+static inline void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __MLX5_FS_CHAINS_H__ */
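
Read together, the exported API above implies the following caller lifecycle. This is an illustrative sketch only, not code from the patch; the FDB namespace choice and the sizing values are assumptions:

	/* Illustrative caller flow for the fs_chains API (values are made up). */
	static int example_chains_user(struct mlx5_core_dev *dev)
	{
		struct mlx5_chains_attr attr = {
			.ns          = MLX5_FLOW_NAMESPACE_FDB,
			.flags       = MLX5_CHAINS_AND_PRIOS_SUPPORTED,
			.max_ft_sz   = 1 << 16,		/* assumption */
			.max_grp_num = 4,		/* assumption */
		};
		struct mlx5_fs_chains *chains;
		struct mlx5_flow_table *ft;

		chains = mlx5_chains_create(dev, &attr);
		if (IS_ERR_OR_NULL(chains))
			return chains ? PTR_ERR(chains) : -EOPNOTSUPP;

		/* chain 0, prio 1, level 0: the root table */
		ft = mlx5_chains_get_table(chains, 0, 1, 0);
		if (!IS_ERR(ft)) {
			/* ... add flow rules to ft ... */
			mlx5_chains_put_table(chains, 0, 1, 0);
		}

		mlx5_chains_destroy(chains);
		return 0;
	}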
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index ec45a03140d7..1bb21fe295b9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -20,6 +20,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>
@@ -32,6 +33,7 @@
#include "emad.h"
#include "reg.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -82,6 +84,9 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool fw_flash_in_progress;
+ struct {
+ struct devlink_health_reporter *fw_fatal;
+ } health;
unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -864,6 +869,297 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+struct mlxsw_core_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+};
+
+static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
+ .component_query = mlxsw_core_fw_component_query,
+ .fsm_lock = mlxsw_core_fw_fsm_lock,
+ .fsm_component_update = mlxsw_core_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_core_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_core_fw_fsm_activate,
+ .fsm_query_state = mlxsw_core_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_core_fw_fsm_cancel,
+ .fsm_release = mlxsw_core_fw_fsm_release,
+};
+
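These ops are callbacks driven by mlxfw_firmware_flash(); their names imply roughly the firmware-update state machine below. A comment-only summary (the exact sequencing lives in mlxfw, not in this patch):

	/*
	 * fsm_lock(&handle)                        take the update handle
	 *   per component in the .mfa2 file:
	 *     component_query()                    max size / alignment / block size
	 *     fsm_component_update(handle, idx, sz)
	 *     fsm_block_download(handle, ...)      repeated, one call per block
	 *     fsm_component_verify(handle, idx)
	 *   fsm_activate(handle)                   commit the new image
	 * fsm_release(handle)                      or fsm_cancel() on any error
	 *
	 * fsm_query_state() is polled between steps until the FSM goes idle
	 * or reports an error.
	 */
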
+static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core_fw_info mlxsw_core_fw_info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_core_fw_mlxsw_dev_ops,
+ .psid = mlxsw_core->bus_info->psid,
+ .psid_size = strlen(mlxsw_core->bus_info->psid),
+ .devlink = priv_to_devlink(mlxsw_core),
+ },
+ .mlxsw_core = mlxsw_core
+ };
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
+static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_fw_rev *req_rev,
+ const char *filename)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
+ union devlink_param_value value;
+ const struct firmware *firmware;
+ int err;
+
+	/* Don't check if the driver does not require it */
+ if (!req_rev || !filename)
+ return 0;
+
+ /* Don't check if devlink 'fw_load_policy' param is 'flash' */
+ err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
+ if (err)
+ return err;
+ if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
+ return 0;
+
+ /* Validate driver & FW are compatible */
+ if (rev->major != req_rev->major) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, req_rev->major);
+ return -EINVAL;
+ }
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ return 0;
+
+ dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
+ dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
+
+ err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
+ return err;
+ }
+
+ err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ release_firmware(firmware);
+ if (err)
+ dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
+}
+
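A concrete reading of this logic with the Spectrum-1 numbers from spectrum.c further down (required 13.2008.1310, can_reset_minor 1702): a device running 13.2007.1168 matches on major, fails the minor check, gets flashed, and since 2007 >= 1702 the function returns -EAGAIN to request a firmware reset. A hypothetical distillation of just that decision (it ignores the subminor, which the real validate helper also compares):

	#include <stdio.h>

	#define EAGAIN 11

	static int rev_action(int run_minor, int req_minor, int can_reset_minor)
	{
		if (run_minor >= req_minor)
			return 0;		/* compatible, nothing to do */
		/* ... firmware flash happens here ... */
		return run_minor >= can_reset_minor ? -EAGAIN : 0;
	}

	int main(void)
	{
		/* Spectrum-1: running 2007, required 2008, resettable from 1702. */
		printf("%d\n", rev_action(2007, 2008, 1702));	/* prints -11 */
		return 0;
	}
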
+static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
+ const char *file_name, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ const struct firmware *firmware;
+ int err;
+
+ if (component)
+ return -EOPNOTSUPP;
+
+ err = request_firmware_direct(&firmware, file_name, mlxsw_core->bus_info->dev);
+ if (err)
+ return err;
+ err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack);
+ release_firmware(firmware);
+
+ return err;
+}
+
+static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
+ val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
+ NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlxsw_core_devlink_param_fw_load_policy_validate),
+};
+
+static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ if (err)
+ return err;
+
+ value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
+ devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
+ return 0;
+}
+
+static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+}
+
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count,
@@ -1143,12 +1439,8 @@ static int mlxsw_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
- if (!mlxsw_driver->flash_update)
- return -EOPNOTSUPP;
- return mlxsw_driver->flash_update(mlxsw_core, file_name,
- component, extack);
+ return mlxsw_core_fw_flash_update(mlxsw_core, file_name, component, extack);
}
static int mlxsw_devlink_trap_init(struct devlink *devlink,
@@ -1296,6 +1588,263 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};
+static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ err = mlxsw_core_fw_params_register(mlxsw_core);
+ if (err)
+ return err;
+
+ if (mlxsw_core->driver->params_register) {
+ err = mlxsw_core->driver->params_register(mlxsw_core);
+ if (err)
+ goto err_params_register;
+ }
+ return 0;
+
+err_params_register:
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ return err;
+}
+
+static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ if (mlxsw_core->driver->params_register)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
+}
+
+struct mlxsw_core_health_event {
+ struct mlxsw_core *mlxsw_core;
+ char mfde_pl[MLXSW_REG_MFDE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_core_health_event_work(struct work_struct *work)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_core_health_event, work);
+ mlxsw_core = event->mlxsw_core;
+ devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
+ event->mfde_pl);
+ kfree(event);
+}
+
+static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
+ char *mfde_pl, void *priv)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
+ INIT_WORK(&event->work, mlxsw_core_health_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_core_health_listener =
+ MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+
+static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ char *mfde_pl = priv_ctx;
+ char *val_str;
+ u8 event_id;
+ u32 val;
+ int err;
+
+ if (!priv_ctx)
+ /* User-triggered dumps are not possible */
+ return -EOPNOTSUPP;
+
+ val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
+ if (err)
+ return err;
+
+ event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id);
+ if (err)
+ return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ val_str = "CR space timeout";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ val_str = "KVD insertion machine stopped";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_method_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_METHOD_QUERY:
+ val_str = "query";
+ break;
+ case MLXSW_REG_MFDE_METHOD_WRITE:
+ val_str = "write";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_long_process_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_command_type_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
+ val_str = "mad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
+ val_str = "emad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
+ val_str = "cmdif";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
+ if (err)
+ return err;
+
+ if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
+ val = mlxsw_reg_mfde_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
+ val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static const struct devlink_health_reporter_ops
+mlxsw_core_health_fw_fatal_ops = {
+ .name = "fw_fatal",
+ .dump = mlxsw_core_health_fw_fatal_dump,
+ .test = mlxsw_core_health_fw_fatal_test,
+};
+
+static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
+ bool enable)
+{
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_health_reporter *fw_fatal;
+ int err;
+
+ if (!mlxsw_core->driver->fw_fatal_enabled)
+ return 0;
+
+ fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
+ 0, mlxsw_core);
+ if (IS_ERR(fw_fatal)) {
+		dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter\n");
+ return PTR_ERR(fw_fatal);
+ }
+ mlxsw_core->health.fw_fatal = fw_fatal;
+
+ err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ if (err)
+ goto err_trap_register;
+
+ err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
+ if (err)
+ goto err_fw_fatal_config;
+
+ return 0;
+
+err_fw_fatal_config:
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+err_trap_register:
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ return err;
+}
+
+static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
+{
+ if (!mlxsw_core->driver->fw_fatal_enabled)
+ return;
+
+ mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ /* Make sure there is no more event work scheduled */
+ mlxsw_core_flush_owq();
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+}
+
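The fatal-event path wired up here, end to end:

	/*
	 * MFDE trap (enabled via MFGD.fatal_event_mode)
	 *   -> mlxsw_core_health_listener_func()      atomic ctx, GFP_ATOMIC copy
	 *     -> mlxsw_core_health_event_work()       process ctx
	 *       -> devlink_health_report(fw_fatal, mfde_pl)
	 *         -> mlxsw_core_health_fw_fatal_dump() with priv_ctx = mfde_pl
	 */
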
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
@@ -1368,12 +1917,21 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_register;
}
- if (mlxsw_driver->params_register && !reload) {
- err = mlxsw_driver->params_register(mlxsw_core);
+ if (!reload) {
+ err = mlxsw_core_params_register(mlxsw_core);
if (err)
goto err_register_params;
}
+ err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
+ mlxsw_driver->fw_filename);
+ if (err)
+ goto err_fw_rev_validate;
+
+ err = mlxsw_core_health_init(mlxsw_core);
+ if (err)
+ goto err_health_init;
+
if (mlxsw_driver->init) {
err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
if (err)
@@ -1389,8 +1947,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- if (mlxsw_driver->params_register)
- devlink_params_publish(devlink);
+ devlink_params_publish(devlink);
if (!reload)
devlink_reload_enable(devlink);
@@ -1403,8 +1960,11 @@ err_hwmon_init:
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
- if (mlxsw_driver->params_unregister && !reload)
- mlxsw_driver->params_unregister(mlxsw_core);
+ mlxsw_core_health_fini(mlxsw_core);
+err_health_init:
+err_fw_rev_validate:
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
if (!reload)
devlink_unregister(devlink);
@@ -1469,14 +2029,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- if (mlxsw_core->driver->params_unregister)
- devlink_params_unpublish(devlink);
+ devlink_params_unpublish(devlink);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
- if (mlxsw_core->driver->params_unregister && !reload)
- mlxsw_core->driver->params_unregister(mlxsw_core);
+ mlxsw_core_health_fini(mlxsw_core);
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
@@ -1489,8 +2049,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail_deinit:
- if (mlxsw_core->driver->params_unregister)
- mlxsw_core->driver->params_unregister(mlxsw_core);
+ mlxsw_core_params_unregister(mlxsw_core);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
@@ -2410,18 +2969,6 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
-void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->fw_flash_in_progress = true;
-}
-EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
-
-void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->fw_flash_in_progress = false;
-}
-EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
-
int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
struct mlxsw_res *res)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 11af3308f8cc..2ca085a44774 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -280,6 +280,8 @@ struct mlxsw_driver {
struct list_head list;
const char *kind;
size_t priv_size;
+ const struct mlxsw_fw_rev *fw_req_rev;
+ const char *fw_filename;
int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
@@ -324,9 +326,6 @@ struct mlxsw_driver {
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
- int (*flash_update)(struct mlxsw_core *mlxsw_core,
- const char *file_name, const char *component,
- struct netlink_ext_ack *extack);
int (*trap_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx);
void (*trap_fini)(struct mlxsw_core *mlxsw_core,
@@ -371,6 +370,7 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
+ bool fw_fatal_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -378,9 +378,6 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
-void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
-
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1c64b03ff48e..641cdd81882b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -620,9 +620,9 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_cq_tasklet(unsigned long data)
+static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
struct mlxsw_pci *mlxsw_pci = q->pci;
char *cqe;
int items = 0;
@@ -733,9 +733,9 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_eq_tasklet(unsigned long data)
+static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
struct mlxsw_pci *mlxsw_pci = q->pci;
u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
@@ -792,7 +792,7 @@ struct mlxsw_pci_queue_ops {
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
- void (*tasklet)(unsigned long data);
+ void (*tasklet)(struct tasklet_struct *t);
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
@@ -855,7 +855,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->pci = mlxsw_pci;
if (q_ops->tasklet)
- tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+ tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
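
The pci.c changes above are the tree-wide tasklet API conversion: tasklet_setup() replaces tasklet_init() plus its unsigned-long cookie, and the callback recovers its container with from_tasklet(), which is container_of() under the hood. Shape of the conversion:

	/* Before: the callback cast an opaque cookie back to the queue. */
	static void cb_old(unsigned long data)
	{
		struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *)data;
		/* ... */
	}
	/* tasklet_init(&q->tasklet, cb_old, (unsigned long)q); */

	/* After: the tasklet itself is passed; from_tasklet() finds the queue. */
	static void cb_new(struct tasklet_struct *t)
	{
		struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
		/* ... */
	}
	/* tasklet_setup(&q->tasklet, cb_new); */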
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 485e3e02eb70..6e3d55006089 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5579,6 +5579,7 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
@@ -9821,6 +9822,26 @@ static inline void mlxsw_reg_mtptptp_pack(char *payload,
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MFGD - Monitoring FW General Debug Register
+ * -------------------------------------------
+ */
+#define MLXSW_REG_MFGD_ID 0x90F0
+#define MLXSW_REG_MFGD_LEN 0x0C
+
+MLXSW_REG_DEFINE(mfgd, MLXSW_REG_MFGD_ID, MLXSW_REG_MFGD_LEN);
+
+/* reg_mfgd_fatal_event_mode
+ * 0 - don't check FW fatal (default)
+ * 1 - check FW fatal - enable MFDE trap
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfgd, fatal_event_mode, 0x00, 9, 2);
+
+/* reg_mfgd_trigger_test
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mfgd, trigger_test, 0x00, 11, 1);
+
/* MGPIR - Management General Peripheral Information Register
* ----------------------------------------------------------
* MGPIR register allows software to query the hardware and
@@ -9880,6 +9901,84 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
}
+/* MFDE - Monitoring FW Debug Register
+ * -----------------------------------
+ */
+#define MLXSW_REG_MFDE_ID 0x9200
+#define MLXSW_REG_MFDE_LEN 0x18
+
+MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
+
+/* reg_mfde_irisc_id
+ * Which irisc triggered the event
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4);
+
+enum mlxsw_reg_mfde_event_id {
+ MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
+ /* KVD insertion machine stopped */
+ MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+};
+
+/* reg_mfde_event_id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8);
+
+enum mlxsw_reg_mfde_method {
+ MLXSW_REG_MFDE_METHOD_QUERY,
+ MLXSW_REG_MFDE_METHOD_WRITE,
+};
+
+/* reg_mfde_method
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, method, 0x04, 29, 1);
+
+/* reg_mfde_long_process
+ * Indicates if the command is in long_process mode.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, long_process, 0x04, 28, 1);
+
+enum mlxsw_reg_mfde_command_type {
+ MLXSW_REG_MFDE_COMMAND_TYPE_MAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_EMAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF,
+};
+
+/* reg_mfde_command_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
+
+/* reg_mfde_reg_attr_id
+ * EMAD - register id, MAD - attribute id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
+
+/* reg_mfde_log_address
+ * crspace address accessed, which resulted in timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
+
+/* reg_mfde_log_id
+ * Which irisc triggered the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+
+/* reg_mfde_pipes_mask
+ * Bit per kvh pipe.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16);
+
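Each MLXSW_ITEM32() above generates typed getters/setters for a bit-field at a fixed payload offset; irisc_id, for instance, is 4 bits starting at bit 8 of the 32-bit word at byte 0x00. A standalone illustration of the extraction (the real macros also byte-swap big-endian register payloads, which this sketch skips):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Extract len bits starting at bit shift from the 32-bit word
	 * at byte off of a register payload (host byte order).
	 */
	static uint32_t item32_get(const uint8_t *pl, int off, int shift, int len)
	{
		uint32_t word;

		memcpy(&word, pl + off, sizeof(word));
		return (word >> shift) & ((1u << len) - 1);
	}

	int main(void)
	{
		uint8_t mfde_pl[0x18] = { 0 };
		uint32_t w = 5u << 8;			/* irisc_id = 5 */

		memcpy(mfde_pl, &w, sizeof(w));
		printf("%u\n", item32_get(mfde_pl, 0x00, 8, 4));	/* prints 5 */
		return 0;
	}
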
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
* The TNGCR register is used for setting up the NVE Tunneling configuration.
@@ -10993,7 +11092,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
MLXSW_REG(tnqcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4186e29119c2..9d5a6f8d7438 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -42,11 +42,10 @@
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
-#include "../mlxfw/mlxfw.h"
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2007
-#define MLXSW_SP1_FWREV_SUBMINOR 1168
+#define MLXSW_SP1_FWREV_MINOR 2008
+#define MLXSW_SP1_FWREV_SUBMINOR 1310
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,8 +61,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2007
-#define MLXSW_SP2_FWREV_SUBMINOR 1168
+#define MLXSW_SP2_FWREV_MINOR 2008
+#define MLXSW_SP2_FWREV_SUBMINOR 1310
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,8 +76,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2007
-#define MLXSW_SP3_FWREV_SUBMINOR 1168
+#define MLXSW_SP3_FWREV_MINOR 2008
+#define MLXSW_SP3_FWREV_SUBMINOR 1310
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -170,274 +169,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-struct mlxsw_sp_mlxfw_dev {
- struct mlxfw_dev mlxfw_dev;
- struct mlxsw_sp *mlxsw_sp;
-};
-
-static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
- u16 component_index, u32 *p_max_size,
- u8 *p_align_bits, u16 *p_max_write_size)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcqi_pl[MLXSW_REG_MCQI_LEN];
- int err;
-
- mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
- if (err)
- return err;
- mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
- p_max_write_size);
-
- *p_align_bits = max_t(u8, *p_align_bits, 2);
- *p_max_write_size = min_t(u16, *p_max_write_size,
- MLXSW_REG_MCDA_MAX_DATA_LEN);
- return 0;
-}
-
-static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
- u8 control_state;
- int err;
-
- mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
- if (control_state != MLXFW_FSM_STATE_IDLE)
- return -EBUSY;
-
- mlxsw_reg_mcc_pack(mcc_pl,
- MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
- 0, *fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u16 component_index,
- u32 component_size)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
- component_index, fwhandle, component_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u8 *data, u16 size,
- u32 offset)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcda_pl[MLXSW_REG_MCDA_LEN];
-
- mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
-}
-
-static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u16 component_index)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
- component_index, fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
- fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- enum mlxfw_fsm_state *fsm_state,
- enum mlxfw_fsm_state_err *fsm_state_err)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
- u8 control_state;
- u8 error_code;
- int err;
-
- mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
- *fsm_state = control_state;
- *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
- MLXFW_FSM_STATE_ERR_MAX);
- return 0;
-}
-
-static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
- fwhandle, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl,
- MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
- fwhandle, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
- .component_query = mlxsw_sp_component_query,
- .fsm_lock = mlxsw_sp_fsm_lock,
- .fsm_component_update = mlxsw_sp_fsm_component_update,
- .fsm_block_download = mlxsw_sp_fsm_block_download,
- .fsm_component_verify = mlxsw_sp_fsm_component_verify,
- .fsm_activate = mlxsw_sp_fsm_activate,
- .fsm_query_state = mlxsw_sp_fsm_query_state,
- .fsm_cancel = mlxsw_sp_fsm_cancel,
- .fsm_release = mlxsw_sp_fsm_release,
-};
-
-static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
- const struct firmware *firmware,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
- .mlxfw_dev = {
- .ops = &mlxsw_sp_mlxfw_dev_ops,
- .psid = mlxsw_sp->bus_info->psid,
- .psid_size = strlen(mlxsw_sp->bus_info->psid),
- .devlink = priv_to_devlink(mlxsw_sp->core),
- },
- .mlxsw_sp = mlxsw_sp
- };
- int err;
-
- mlxsw_core_fw_flash_start(mlxsw_sp->core);
- err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
- firmware, extack);
- mlxsw_core_fw_flash_end(mlxsw_sp->core);
-
- return err;
-}
-
-static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
-{
- const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
- const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
- const char *fw_filename = mlxsw_sp->fw_filename;
- union devlink_param_value value;
- const struct firmware *firmware;
- int err;
-
- /* Don't check if driver does not require it */
- if (!req_rev || !fw_filename)
- return 0;
-
- /* Don't check if devlink 'fw_load_policy' param is 'flash' */
- err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- &value);
- if (err)
- return err;
- if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
- return 0;
-
- /* Validate driver & FW are compatible */
- if (rev->major != req_rev->major) {
- WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, req_rev->major);
- return -EINVAL;
- }
- if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
- return 0;
-
- dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
- rev->major, rev->minor, rev->subminor, req_rev->major,
- req_rev->minor, req_rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- fw_filename);
-
- err = request_firmware_direct(&firmware, fw_filename,
- mlxsw_sp->bus_info->dev);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- fw_filename);
- return err;
- }
-
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
- release_firmware(firmware);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
-
- /* On FW flash success, tell the caller FW reset is needed
- * if current FW supports it.
- */
- if (rev->minor >= req_rev->can_reset_minor)
- return err ? err : -EAGAIN;
- else
- return 0;
-}
-
-static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
- const char *file_name, const char *component,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- const struct firmware *firmware;
- int err;
-
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&firmware, file_name,
- mlxsw_sp->bus_info->dev);
- if (err)
- return err;
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
- release_firmware(firmware);
-
- return err;
-}
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -590,21 +321,28 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
-static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int max_mtu;
int err;
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
if (err)
return err;
- max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- if (mtu > max_mtu)
+ *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+ return 0;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ if (mtu > mlxsw_sp_port->max_mtu)
return -EINVAL;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
@@ -872,133 +610,25 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
-{
- return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
-}
-
-#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
-
-static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
- u16 delay)
-{
- delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
- BITS_PER_BYTE));
- return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
- mtu);
-}
-
-/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 58752
-
-static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
- u16 delay, bool pfc, bool pause)
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
- if (pfc)
- return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
- else if (pause)
- return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
- else
- return 0;
-}
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int err;
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
- bool lossy)
-{
- if (lossy)
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
- else
- mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
- thres);
-}
+ orig_hdroom = *mlxsw_sp_port->hdroom;
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
- u8 *prio_tc, bool pause_en,
- struct ieee_pfc *my_pfc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
- u16 delay = !!my_pfc ? my_pfc->delay : 0;
- char pbmc_pl[MLXSW_REG_PBMC_LEN];
- u32 taken_headroom_cells = 0;
- u32 max_headroom_cells;
- int i, j, err;
+ hdroom = orig_hdroom;
+ hdroom.mtu = mtu;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
- max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
-
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
- if (err)
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
return err;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- bool configure = false;
- bool pfc = false;
- u16 thres_cells;
- u16 delay_cells;
- u16 total_cells;
- bool lossy;
-
- for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
- if (prio_tc[j] == i) {
- pfc = pfc_en & BIT(j);
- configure = true;
- break;
- }
- }
-
- if (!configure)
- continue;
-
- lossy = !(pfc || pause_en);
- thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
- delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
- pfc, pause_en);
- delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
- total_cells = thres_cells + delay_cells;
-
- taken_headroom_cells += total_cells;
- if (taken_headroom_cells > max_headroom_cells)
- return -ENOBUFS;
-
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
- thres_cells, lossy);
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
- int mtu, bool pause_en)
-{
- u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
- bool dcb_en = !!mlxsw_sp_port->dcb.ets;
- struct ieee_pfc *my_pfc;
- u8 *prio_tc;
-
- prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
- my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
-
- return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
- pause_en, my_pfc);
-}
-
-static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- int err;
-
- err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
- if (err)
- return err;
- err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
- if (err)
- goto err_span_port_mtu_update;
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
@@ -1006,9 +636,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
return 0;
err_port_mtu_set:
- mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
-err_span_port_mtu_update:
- mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
@@ -1842,6 +1470,21 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_speed_by_width_set;
}
+ err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
+ &mlxsw_sp_port->max_speed);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
+ mlxsw_sp_port->local_port);
+ goto err_max_speed_get;
+ }
+
+ err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_max_mtu_get;
+ }
+
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1930,8 +1573,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
- INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
- mlxsw_sp_span_speed_update_work);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
@@ -1963,9 +1604,12 @@ err_port_dcb_init:
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
+err_port_max_mtu_get:
+err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
@@ -1986,7 +1630,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
- cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
@@ -1998,6 +1641,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2390,7 +2034,6 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
- mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
@@ -2783,11 +2426,20 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_REG_HTGT_INVALID_POLICER,
MLXSW_REG_HTGT_DEFAULT_PRIORITY,
MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
@@ -2836,10 +2488,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
- if (err)
- return err;
-
mlxsw_core_emad_string_tlv_enable(mlxsw_core);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -3039,8 +2687,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
@@ -3051,6 +2697,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
@@ -3069,8 +2716,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -3081,6 +2726,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
@@ -3097,8 +2743,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -3109,6 +2753,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
@@ -3451,52 +3096,6 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
static int
-mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
- (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
- NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct devlink_param mlxsw_sp_devlink_params[] = {
- DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
- BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL,
- mlxsw_sp_devlink_param_fw_load_policy_validate),
-};
-
-static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
-{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- union devlink_param_value value;
- int err;
-
- err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
- ARRAY_SIZE(mlxsw_sp_devlink_params));
- if (err)
- return err;
-
- value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- value);
- return 0;
-}
-
-static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
-{
- devlink_params_unregister(priv_to_devlink(mlxsw_core),
- mlxsw_sp_devlink_params,
- ARRAY_SIZE(mlxsw_sp_devlink_params));
-}
-
-static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -3533,24 +3132,16 @@ static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
union devlink_param_value value;
int err;
- err = mlxsw_sp_params_register(mlxsw_core);
- if (err)
- return err;
-
err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
ARRAY_SIZE(mlxsw_sp2_devlink_params));
if (err)
- goto err_devlink_params_register;
+ return err;
value.vu32 = 0;
devlink_param_driverinit_value_set(devlink,
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
value);
return 0;
-
-err_devlink_params_register:
- mlxsw_sp_params_unregister(mlxsw_core);
- return err;
}
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
@@ -3558,7 +3149,6 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
devlink_params_unregister(priv_to_devlink(mlxsw_core),
mlxsw_sp2_devlink_params,
ARRAY_SIZE(mlxsw_sp2_devlink_params));
- mlxsw_sp_params_unregister(mlxsw_core);
}
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
@@ -3573,6 +3163,8 @@ static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp1_fw_rev,
+ .fw_filename = MLXSW_SP1_FW_FILENAME,
.init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3588,7 +3180,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3601,17 +3192,18 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
- .params_register = mlxsw_sp_params_register,
- .params_unregister = mlxsw_sp_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
.kind = mlxsw_sp2_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp2_fw_rev,
+ .fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3627,7 +3219,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3645,11 +3236,14 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp3_fw_rev,
+ .fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3665,7 +3259,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3683,6 +3276,7 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 5240bf11b6c4..f0a4347acd25 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -125,6 +125,7 @@ struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_rulei_ops;
struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_nve_ops;
+struct mlxsw_sp_sb_ops;
struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
@@ -162,8 +163,6 @@ struct mlxsw_sp {
struct mlxsw_sp_counter_pool *counter_pool;
struct mlxsw_sp_span *span;
struct mlxsw_sp_trap *trap;
- const struct mlxsw_fw_rev *req_rev;
- const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
const struct mlxsw_afk_ops *afk_ops;
@@ -173,6 +172,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_nve_ops **nve_ops_arr;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
+ const struct mlxsw_sp_sb_ops *sb_ops;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
const struct mlxsw_sp_ptp_ops *ptp_ops;
const struct mlxsw_sp_span_ops *span_ops;
@@ -316,9 +316,9 @@ struct mlxsw_sp_port {
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
u8 split_base_local_port;
- struct {
- struct delayed_work speed_update_dw;
- } span;
+ int max_mtu;
+ u32 max_speed;
+ struct mlxsw_sp_hdroom *hdroom;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -331,6 +331,7 @@ struct mlxsw_sp_port_type_speed_ops {
void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
bool carrier_ok, u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
+ int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
@@ -414,34 +415,73 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}
-static inline u32
-mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
- u32 size_cells)
-{
- /* Ports with eight lanes use two headroom buffers between which the
- * configured headroom size is split. Therefore, multiply the calculated
- * headroom size by two.
- */
- return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
-}
-
enum mlxsw_sp_flood_type {
MLXSW_SP_FLOOD_TYPE_UC,
MLXSW_SP_FLOOD_TYPE_BC,
MLXSW_SP_FLOOD_TYPE_MC,
};
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
- int mtu, bool pause_en);
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
int prio, char *ppcnt_pl);
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up);
/* spectrum_buffers.c */
+struct mlxsw_sp_hdroom_prio {
+	/* Index of the port buffer associated with this priority. This is
+	 * the value actually configured in the hardware.
+	 */
+ u8 buf_idx;
+ /* Value of buf_idx deduced from the DCB ETS configuration. */
+ u8 ets_buf_idx;
+ /* Value of buf_idx taken from the dcbnl_setbuffer configuration. */
+ u8 set_buf_idx;
+ bool lossy;
+};
+
+struct mlxsw_sp_hdroom_buf {
+ u32 thres_cells;
+ u32 size_cells;
+	/* Size requirement from dcbnl_setbuffer. */
+ u32 set_size_cells;
+ bool lossy;
+};
+
+enum mlxsw_sp_hdroom_mode {
+ MLXSW_SP_HDROOM_MODE_DCB,
+ MLXSW_SP_HDROOM_MODE_TC,
+};
+
+#define MLXSW_SP_PB_COUNT 10
+
+struct mlxsw_sp_hdroom {
+ enum mlxsw_sp_hdroom_mode mode;
+
+ struct {
+ struct mlxsw_sp_hdroom_prio prio[IEEE_8021Q_MAX_PRIORITIES];
+ } prios;
+ struct {
+ struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
+ } bufs;
+ struct {
+		/* Size actually configured for the internal buffer. Equal to
+		 * reserve_cells when the internal buffer is enabled.
+		 */
+ u32 size_cells;
+ /* Space reserved in the headroom for the internal buffer. Port
+ * buffers are not allowed to grow into this space.
+ */
+ u32 reserve_cells;
+ bool enable;
+ } int_buf;
+ int delay_bytes;
+ int mtu;
+};
+
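+/* Intended usage pattern for the headroom model (a sketch derived from the
+ * call sites in this patch, e.g. mlxsw_sp_port_change_mtu()): snapshot the
+ * port's current configuration, tweak the relevant knobs, re-derive the
+ * dependent fields and program the delta:
+ *
+ *	struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
+ *
+ *	hdroom.mtu = new_mtu;
+ *	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ *	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ */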
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -477,11 +517,20 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
u32 *p_cur, u32 *p_max);
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom);
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops;
+
/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -519,9 +568,6 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
u8 switch_prio, u8 tclass);
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
- u8 *prio_tc, bool pause_en,
- struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate, u8 burst_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 6f84557a5a6f..37ff29a1686e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -121,6 +121,10 @@ struct mlxsw_sp_sb_vals {
unsigned int cms_cpu_count;
};
+struct mlxsw_sp_sb_ops {
+ u32 (*int_buf_size_get)(int mtu, u32 speed);
+};
+
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
return mlxsw_sp->sb->cell_size * cells;
@@ -131,9 +135,14 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
+static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 size_cells)
{
- return mlxsw_sp->sb->max_headroom_cells;
+ /* Ports with eight lanes use two headroom buffers between which the
+ * configured headroom size is split. Therefore, multiply the calculated
+ * headroom size by two.
+ */
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
@@ -291,55 +300,308 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
(unsigned long) pm);
}
-/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
-#define MLXSW_SP_PB_HEADROOM 25632
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ switch (hdroom->mode) {
+ case MLXSW_SP_HDROOM_MODE_DCB:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
+ break;
+ case MLXSW_SP_HDROOM_MODE_TC:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
+ break;
+ }
+ }
+}
+
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+ int i;
+
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom->bufs.buf[i].lossy = true;
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
+ if (!hdroom->prios.prio[prio].lossy)
+ hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
+ }
+}
+
+static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
+{
+ return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
+
+static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
+{
+ if (lossy)
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
+ else
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+ thres);
+}
+
+static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ u16 delay_cells;
+
+ delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);
+
+	/* In the worst-case scenario the delay will be made up of packets that
+ * are all of size CELL_SIZE + 1, which means each packet will require
+ * almost twice its true size when buffered in the switch. We therefore
+ * multiply this value by the "cell factor", which is close to 2.
+ *
+ * Another MTU is added in case the transmitting host already started
+ * transmitting a maximum length frame when the PFC packet was received.
+ */
+ return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
+}
+
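+/* Worked example (illustrative numbers only): with a 144-byte cell, a PFC
+ * delay allowance of 14336 bytes and a 1500-byte MTU, delay_cells is
+ * DIV_ROUND_UP(14336, 144) = 100, so the returned quota is
+ * 2 * 100 + DIV_ROUND_UP(1500, 144) = 211 cells.
+ */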
+static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
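+	/* Note: the caller passes (max_speed, max_mtu) into this function's
+	 * (mtu, speed) parameters, and the call below swaps the two again, so
+	 * the sb_ops callback does receive (mtu, speed) as declared. The
+	 * naming is confusing but the values line up.
+	 */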
+ u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
+static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (hdroom->prios.prio[prio].buf_idx == buf)
+ return true;
+ }
+ return false;
+}
+
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 reserve_cells;
+ int i;
+
+ /* Internal buffer. */
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
+ mlxsw_sp_port->max_mtu);
+ reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
+ hdroom->int_buf.reserve_cells = reserve_cells;
+
+ if (hdroom->int_buf.enable)
+ hdroom->int_buf.size_cells = reserve_cells;
+ else
+ hdroom->int_buf.size_cells = 0;
+
+ /* PG buffers. */
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
+ struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
+ u16 thres_cells;
+ u16 delay_cells;
+
+ if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
+ thres_cells = 0;
+ delay_cells = 0;
+ } else if (buf->lossy) {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = 0;
+ } else {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
+ }
+
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
+
+ buf->thres_cells = thres_cells;
+ if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
+ buf->size_cells = thres_cells + delay_cells;
+ } else {
+ /* Do not allow going below the minimum size, even if
+ * the user requested it.
+ */
+ buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
+ }
+ }
+}
+
#define MLXSW_SP_PB_UNUSED 8
-static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
{
- const u32 pbs[] = {
- [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
- [9] = MLXSW_PORT_MAX_MTU,
- };
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ bool dirty;
+ int err;
int i;
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
- 0xffff, 0xffff / 2);
- for (i = 0; i < ARRAY_SIZE(pbs); i++) {
- u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
+ dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
if (i == MLXSW_SP_PB_UNUSED)
continue;
- size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
+
+ mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
}
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
- MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
+ return 0;
}
-static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
{
char pptb_pl[MLXSW_REG_PPTB_LEN];
- int i;
+ bool dirty;
+ int prio;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
+ if (!dirty && !force)
+ return 0;
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
- pptb_pl);
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
+
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->prios = hdroom->prios;
+ return 0;
}
-static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ bool dirty;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
+ return 0;
+}
+
+static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
{
+ u32 taken_headroom_cells = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
+ taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
+
+ taken_headroom_cells += hdroom->int_buf.reserve_cells;
+ return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
+}
+
+static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom tmp_hdroom;
int err;
+ int i;
+
+ /* Port buffers need to be configured in three steps. First, all buffers
+ * with non-zero size are configured. Then, prio-to-buffer map is
+ * updated, allowing traffic to flow to the now non-zero buffers.
+ * Finally, zero-sized buffers are configured, because now no traffic
+ * should be directed to them anymore. This way, in a non-congested
+ * system, no packet drops are introduced by the reconfiguration.
+ */
- err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ tmp_hdroom = orig_hdroom;
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ if (hdroom->bufs.buf[i].size_cells)
+ tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
+ }
+
+ if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
+ !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
+ return -ENOBUFS;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
if (err)
return err;
- return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+
+ err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
+ if (err)
+ goto err_configure_priomap;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_buffers;
+
+ err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_int_buf;
+
+ *mlxsw_sp_port->hdroom = *hdroom;
+ return 0;
+
+err_configure_int_buf:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_buffers:
+ mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_priomap:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
+ return err;
+}
+
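+/* The unwind path above mirrors the make-before-break scheme in reverse: on
+ * failure the buffers are first re-grown to the tmp_hdroom (non-zero
+ * superset) sizes, then the priority map is restored, and only then are the
+ * buffers shrunk back to their original sizes, again avoiding a window where
+ * traffic is mapped to a zero-sized buffer.
+ */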
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom = {};
+ u32 size9;
+ int prio;
+
+ hdroom.mtu = mlxsw_sp_port->dev->mtu;
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = true;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ /* Buffer 9 is used for control traffic. */
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
+
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}
static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
@@ -916,6 +1178,46 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
+static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
+{
+ return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
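+/* Worked example for the formulas above (illustrative values; assumes the
+ * speed is in Mb/s, as reported by the PTYS link modes): for a 100000 Mb/s
+ * port with a 9216-byte MTU, Spectrum-1 reserves 9216 * 5 / 2 = 23040 bytes,
+ * Spectrum-2 reserves 3 * 9216 + 38 * 100000 / 1000 = 31448 bytes and
+ * Spectrum-3 reserves 3 * 9216 + 50 * 100000 / 1000 = 32648 bytes, each then
+ * converted to cells by the caller.
+ */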
+const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
+ .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
+ .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
+ .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
+};
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
u32 max_headroom_size;
@@ -995,17 +1297,34 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
+ mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
+ if (!mlxsw_sp_port->hdroom)
+ return -ENOMEM;
+ mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;
+
err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
if (err)
- return err;
+ goto err_headroom_init;
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
if (err)
- return err;
+ goto err_port_sb_cms_init;
err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_sb_pms_init;
+ return 0;
+err_port_sb_pms_init:
+err_port_sb_cms_init:
+err_headroom_init:
+ kfree(mlxsw_sp_port->hdroom);
return err;
}
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->hdroom);
+}
+
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 0d3fb2e51ea5..5f92b1691360 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -64,87 +64,28 @@ static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 *prio_tc)
-{
- char pptb_pl[MLXSW_REG_PPTB_LEN];
- int i;
-
- mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
-
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
- pptb_pl);
-}
-
-static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
-{
- int i;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (prio_tc[i] == pg)
- return true;
- return false;
-}
-
-static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 *old_prio_tc, u8 *new_prio_tc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pbmc_pl[MLXSW_REG_PBMC_LEN];
- int err, i;
-
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
- if (err)
- return err;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- u8 pg = old_prio_tc[i];
-
- if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct ieee_ets *ets)
{
- bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
- /* Create the required PGs, but don't destroy existing ones, as
- * traffic is still directed to them.
- */
- err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- ets->prio_tc, pause_en,
- mlxsw_sp_port->dcb.pfc);
+ hdroom = *mlxsw_sp_port->hdroom;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom\n");
return err;
}
- err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
- if (err) {
- netdev_err(dev, "Failed to set PG-priority mapping\n");
- goto err_port_prio_pg_map;
- }
-
- err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
- ets->prio_tc);
- if (err)
- netdev_warn(dev, "Failed to remove unused PGs\n");
-
return 0;
-
-err_port_prio_pg_map:
- mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
- return err;
}
static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -605,6 +546,9 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
if (pause_en && pfc->pfc_en) {
@@ -612,9 +556,21 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
return -EINVAL;
}
- err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc,
- pause_en, pfc);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pfc->pfc_en)
+ hdroom.delay_bytes = DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE);
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !(pfc->pfc_en & BIT(prio));
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
@@ -632,12 +588,66 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
return 0;
err_port_pfc_set:
- __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
- mlxsw_sp_port->dcb.pfc);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
+static int mlxsw_sp_dcbnl_getbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom *hdroom = mlxsw_sp_port->hdroom;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int prio;
+ int i;
+
+ buf->total_size = 0;
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->bufs.buf[i].size_cells);
+
+ if (i < DCBX_MAX_BUFFERS)
+ buf->buffer_size[i] = bytes;
+ buf->total_size += bytes;
+ }
+
+ buf->total_size += mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->int_buf.size_cells);
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ buf->prio2buffer[prio] = hdroom->prios.prio[prio].buf_idx;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_setbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int i;
+
+ hdroom = *mlxsw_sp_port->hdroom;
+
+ if (hdroom.mode != MLXSW_SP_HDROOM_MODE_TC) {
+ netdev_err(dev, "The use of dcbnl_setbuffer is only allowed if egress is configured using TC\n");
+ return -EINVAL;
+ }
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ hdroom.prios.prio[prio].set_buf_idx = buf->prio2buffer[prio];
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom.bufs.buf[i].set_size_cells = mlxsw_sp_bytes_cells(mlxsw_sp,
+ buf->buffer_size[i]);
+
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+}
+
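+/* From user space these hooks are exercised with the iproute2 "dcb" tool
+ * (sketch; exact syntax per dcb-buffer(8), and the set path is only honored
+ * when the headroom is in TC mode, per the -EINVAL check above):
+ *
+ *	# dcb buffer show dev swp1
+ *	# dcb buffer set dev swp1 prio-buffer all:0 6:1 buffer-size 1:131072
+ */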
static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
.ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
@@ -650,6 +660,9 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
+
+ .dcbnl_getbuffer = mlxsw_sp_dcbnl_getbuffer,
+ .dcbnl_setbuffer = mlxsw_sp_dcbnl_setbuffer,
};
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index f08cad5b5657..6045d3df00ef 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -192,11 +192,19 @@ static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
pfcc_pl);
}
+/* Maximum delay buffer needed in case of PAUSE frames. Similar to the PFC
+ * delay, but measured in bytes. Assumes a 100m cable and does not take the
+ * MTU into account.
+ */
+#define MLXSW_SP_PAUSE_DELAY_BYTES 19476
+
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = pause->tx_pause || pause->rx_pause;
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
@@ -209,7 +217,21 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
return -EINVAL;
}
- err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pause_en)
+ hdroom.delay_bytes = MLXSW_SP_PAUSE_DELAY_BYTES;
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !pause_en;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom\n");
return err;
@@ -227,8 +249,7 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
return 0;
err_port_pause_configure:
- pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
@@ -842,6 +863,29 @@ mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
}
}
+static int mlxsw_sp_port_ptys_query(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper, u8 *p_connector_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, p_eth_proto_cap, p_eth_proto_admin,
+ p_eth_proto_oper);
+ if (p_connector_type)
+ *p_connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
+ return 0;
+}
+
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -849,21 +893,17 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
const struct mlxsw_sp_port_type_speed_ops *ops;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
u8 connector_type;
bool autoneg;
int err;
- ops = mlxsw_sp->port_type_speed_ops;
-
- autoneg = mlxsw_sp_port->link.autoneg;
- ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- 0, false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper, &connector_type);
if (err)
return err;
- ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
+
+ ops = mlxsw_sp->port_type_speed_ops;
+ autoneg = mlxsw_sp_port->link.autoneg;
mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
mlxsw_sp_port->mapping.width, cmd);
@@ -872,7 +912,6 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
mlxsw_sp_port->mapping.width, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -1144,6 +1183,27 @@ mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
cmd->base.duplex = DUPLEX_FULL;
}
+static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp1_port_link_mode[i].mask) &&
+ mlxsw_sp1_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp1_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
@@ -1193,6 +1253,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_link = mlxsw_sp1_from_ptys_link,
.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
+ .ptys_max_speed = mlxsw_sp1_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
@@ -1530,6 +1591,27 @@ mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
cmd->base.duplex = DUPLEX_FULL;
}
+static int mlxsw_sp2_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp2_port_link_mode[i].mask) &&
+ mlxsw_sp2_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp2_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
const unsigned long *mode)
@@ -1599,6 +1681,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_link = mlxsw_sp2_from_ptys_link,
.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
+ .ptys_max_speed = mlxsw_sp2_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
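/* The pause and headroom hunks above share one idiom: snapshot the
 * port's current headroom model, mutate the copy, recompute the derived
 * buffer state, and only then commit to hardware, restoring the
 * snapshot if a later step fails.  A minimal sketch of that idiom
 * (illustrative function name; PFC/prio handling trimmed):
 */
static int example_hdroom_toggle_pause(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool pause_en)
{
	struct mlxsw_sp_hdroom orig = *mlxsw_sp_port->hdroom;	/* snapshot */
	struct mlxsw_sp_hdroom hdroom = orig;
	int err;

	hdroom.delay_bytes = pause_en ? MLXSW_SP_PAUSE_DELAY_BYTES : 0;
	mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err)
		mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig);	/* roll back */
	return err;
}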
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 964fd444bb10..fd672c6c9133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -140,18 +140,31 @@ static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ int err_hdroom = 0;
int err = 0;
if (!mlxsw_sp_qdisc)
return 0;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ }
+
if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
mlxsw_sp_qdisc);
mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
mlxsw_sp_qdisc->ops = NULL;
- return err;
+
+ return err_hdroom ?: err;
}
static int
@@ -159,6 +172,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct mlxsw_sp_qdisc_ops *ops, void *params)
{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ struct mlxsw_sp_hdroom orig_hdroom;
int err;
if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
@@ -168,6 +183,21 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
* new one.
*/
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = orig_hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err)
+ goto err_hdroom_configure;
+ }
+
err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
if (err)
goto err_bad_param;
@@ -191,6 +221,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
err_bad_param:
err_config:
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
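/* The "return err_hdroom ?: err;" in mlxsw_sp_qdisc_destroy() above uses
 * the GNU C conditional with an omitted middle operand: "a ?: b" yields
 * a when a is non-zero and b otherwise, evaluating a only once -- so a
 * failed headroom rollback takes precedence over the destroy result.
 * Plain-C equivalent, for reference:
 */
static int example_combine_errs(int err_hdroom, int err)
{
	if (err_hdroom)
		return err_hdroom;
	return err;	/* i.e. err_hdroom ?: err */
}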
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 24f1fd1f8d56..9188fc32b850 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8033,7 +8033,6 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
- int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
@@ -8042,10 +8041,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- if (err)
- return err;
- return 0;
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 1d18e41ab255..c6c5826aba41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -968,42 +968,26 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
- u32 speed)
+static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
- u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
+ struct mlxsw_sp_hdroom hdroom;
- return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+ hdroom = *mlxsw_sp_port->hdroom;
+ hdroom.int_buf.enable = enable;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
static int
-mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- u32 buffsize;
- u32 speed;
- int err;
-
- err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
- if (err)
- return err;
- if (speed == SPEED_UNKNOWN)
- speed = 0;
-
- buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
- buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
- mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}
-static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
- u8 local_port)
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- char sbib_pl[MLXSW_REG_SBIB_LEN];
-
- mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}
static struct mlxsw_sp_span_analyzed_port *
@@ -1021,48 +1005,6 @@ mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
return NULL;
}
-int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- int err = 0;
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the mtu value
- */
- mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
-
- if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
- false))
- err = mlxsw_sp_span_port_buffer_update(port, mtu);
-
- mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
-
- return err;
-}
-
-void mlxsw_sp_span_speed_update_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
-
- mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
- span.speed_update_dw);
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the speed value.
- */
- mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
-
- if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
- mlxsw_sp_port->local_port, false))
- mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
- mlxsw_sp_port->dev->mtu);
-
- mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
-}
-
static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev)
@@ -1180,9 +1122,7 @@ mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
* does the mirroring.
*/
if (!ingress) {
- u16 mtu = mlxsw_sp_port->dev->mtu;
-
- err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
+ err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
if (err)
goto err_buffer_update;
}
@@ -1196,18 +1136,15 @@ err_buffer_update:
}
static void
-mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_span_analyzed_port *
analyzed_port)
{
- struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
-
/* Remove egress mirror buffer now that port is no longer analyzed
* at egress.
*/
if (!analyzed_port->ingress)
- mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
- analyzed_port->local_port);
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
list_del(&analyzed_port->list);
kfree(analyzed_port);
@@ -1258,7 +1195,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
if (!refcount_dec_and_test(&analyzed_port->ref_count))
goto out_unlock;
- mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
out_unlock:
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1712,11 +1649,6 @@ static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
-{
- return mtu * 5 / 2;
-}
-
static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base)
{
@@ -1725,7 +1657,6 @@ static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
.init = mlxsw_sp1_span_init,
- .buffsize_get = mlxsw_sp1_span_buffsize_get,
.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};
@@ -1750,18 +1681,6 @@ static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
-static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
-{
- return 3 * mtu + buffer_factor * speed / 1000;
-}
-
-static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
-{
- int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
- return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base)
{
@@ -1778,19 +1697,10 @@ static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.init = mlxsw_sp2_span_init,
- .buffsize_get = mlxsw_sp2_span_buffsize_get,
.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
-static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
-{
- int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
- return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
.init = mlxsw_sp2_span_init,
- .buffsize_get = mlxsw_sp3_span_buffsize_get,
.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
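/* The removed ->buffsize_get() callbacks sized the egress-mirror buffer
 * with a standalone formula -- for Spectrum-2, with speed in Mbps:
 *
 *	buffsize = 3 * mtu + 38 * speed / 1000
 *
 * so a 25G port at MTU 1518 needed 3 * 1518 + 38 * 25000 / 1000 = 5504
 * bytes.  That computation now lives in the shared headroom model
 * (hdroom.int_buf), sized by mlxsw_sp_hdroom_bufs_reset_sizes() and
 * committed by mlxsw_sp_hdroom_configure(), as in the hunks above.
 */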
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 1c746dd3b1bd..d907718bc8c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -47,7 +47,6 @@ struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
- u32 (*buffsize_get)(int mtu, u32 speed);
int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base);
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 33909887d0ac..fe0b8af287a7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -120,6 +120,8 @@ enum {
};
enum mlxsw_event_trap_id {
+ /* Fatal Event generated by FW */
+ MLXSW_TRAP_ID_MFDE = 0x3,
/* Port Up/Down event generated by hardware */
MLXSW_TRAP_ID_PUDE = 0x8,
/* PTP Ingress FIFO has a new entry */
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f3f6dfe3eddc..caa251d0e381 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -587,10 +587,10 @@ out:
return err;
}
-static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+static void ks8842_rx_frame_dma_tasklet(struct tasklet_struct *t)
{
- struct net_device *netdev = (struct net_device *)arg;
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter = from_tasklet(adapter, t, dma_rx.tasklet);
+ struct net_device *netdev = adapter->netdev;
struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
struct sk_buff *skb = ctl->skb;
dma_addr_t addr = sg_dma_address(&ctl->sg);
@@ -720,10 +720,10 @@ static void ks8842_handle_rx_overrun(struct net_device *netdev,
netdev->stats.rx_fifo_errors++;
}
-static void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(struct tasklet_struct *t)
{
- struct net_device *netdev = (struct net_device *)arg;
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter = from_tasklet(adapter, t, tasklet);
+ struct net_device *netdev = adapter->netdev;
u16 isr;
unsigned long flags;
u16 entry_bank;
@@ -953,8 +953,7 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
goto err;
}
- tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
- (unsigned long)netdev);
+ tasklet_setup(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet);
return 0;
err:
@@ -1173,7 +1172,7 @@ static int ks8842_probe(struct platform_device *pdev)
adapter->dma_tx.channel = -1;
}
- tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ tasklet_setup(&adapter->tasklet, ks8842_tasklet);
spin_lock_init(&adapter->lock);
netdev->netdev_ops = &ks8842_netdev_ops;
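/* ks8842 (and several drivers below) move from the legacy
 * tasklet_init(..., (unsigned long)data) API to tasklet_setup(): the
 * callback now receives the tasklet itself and recovers its container
 * with from_tasklet(), a container_of() wrapper.  Generic shape of the
 * conversion, with hypothetical names:
 */
#include <linux/interrupt.h>

struct foo_adapter {
	struct tasklet_struct tasklet;
	/* ... driver state ... */
};

static void foo_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, field) == container_of(t, typeof(*var), field) */
	struct foo_adapter *adapter = from_tasklet(adapter, t, tasklet);

	/* ... process events for adapter ... */
}

static void foo_init(struct foo_adapter *adapter)
{
	tasklet_setup(&adapter->tasklet, foo_tasklet);
}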
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index bb646b65cc95..ac4cc509ae07 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4978,7 +4978,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
struct dev_info *hw_priv = priv->adapter;
struct ksz_dma_buf *dma_buf;
struct sk_buff *skb;
- int rx_status;
/* Received length includes 4-byte CRC. */
packet_len = status.rx.frame_len - 4;
@@ -5014,7 +5013,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dev->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- rx_status = netif_rx(skb);
+ netif_rx(skb);
return 0;
}
@@ -5159,9 +5158,9 @@ release_packet:
return received;
}
-static void rx_proc_task(unsigned long data)
+static void rx_proc_task(struct tasklet_struct *t)
{
- struct dev_info *hw_priv = (struct dev_info *) data;
+ struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
struct ksz_hw *hw = &hw_priv->hw;
if (!hw->enabled)
@@ -5181,9 +5180,9 @@ static void rx_proc_task(unsigned long data)
}
}
-static void tx_proc_task(unsigned long data)
+static void tx_proc_task(struct tasklet_struct *t)
{
- struct dev_info *hw_priv = (struct dev_info *) data;
+ struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
struct ksz_hw *hw = &hw_priv->hw;
hw_ack_intr(hw, KS884X_INT_TX_MASK);
@@ -5436,10 +5435,8 @@ static int prepare_hardware(struct net_device *dev)
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
- (unsigned long) hw_priv);
- tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
- (unsigned long) hw_priv);
+ tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
+ tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
hw->promiscuous = 0;
hw->all_multi = 0;
@@ -6509,7 +6506,6 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
int i;
int n;
int p;
- int rc;
u64 counter[TOTAL_PORT_COUNTER_NUM];
mutex_lock(&hw_priv->lock);
@@ -6530,19 +6526,19 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
p = n;
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 1);
} else
for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
if (0 == i) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 2);
} else if (hw->port_mib[p].cnt_ptr) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 1);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index de93cc6ebc1a..7e236c9ee4b1 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3038,7 +3038,6 @@ static int lan743x_pm_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
- int ret;
lan743x_pcidev_shutdown(pdev);
@@ -3051,9 +3050,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_pm_set_wol(adapter);
/* Host sets PME_En, put D3hot */
- ret = pci_prepare_to_sleep(pdev);
-
- return 0;
+ return pci_prepare_to_sleep(pdev);
}
static int lan743x_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 5abb7d2b0a9e..0445c5ee5551 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -413,21 +413,20 @@ void ocelot_port_disable(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_port_disable);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb)
+void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ocelot *ocelot = ocelot_port->ocelot;
-
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
- ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- shinfo->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in cb[0] of sk_buff */
- skb->cb[0] = ocelot_port->ts_id % 4;
- skb_queue_tail(&ocelot_port->tx_skbs, skb);
- return 0;
- }
- return -ENODATA;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ spin_lock(&ocelot_port->ts_id_lock);
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in cb[0] of sk_buff */
+ clone->cb[0] = ocelot_port->ts_id;
+ ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
+
+ spin_unlock(&ocelot_port->ts_id_lock);
}
EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
@@ -506,9 +505,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb_match, &shhwtstamps);
-
- dev_kfree_skb_any(skb_match);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
/* Next ts */
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
@@ -1300,6 +1297,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
skb_queue_head_init(&ocelot_port->tx_skbs);
+ spin_lock_init(&ocelot_port->ts_id_lock);
/* Basic L2 initialization */
@@ -1544,18 +1542,18 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
- struct ocelot_port *port;
- int i;
-
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
-
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- port = ocelot->ports[i];
- skb_queue_purge(&port->tx_skbs);
- }
}
EXPORT_SYMBOL(ocelot_deinit);
+void ocelot_deinit_port(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ skb_queue_purge(&ocelot_port->tx_skbs);
+}
+EXPORT_SYMBOL(ocelot_deinit_port);
+
MODULE_LICENSE("Dual MIT/GPL");
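/* The two-step PTP rework above allocates a 2-bit timestamp ID per port
 * under the new ts_id_lock; the ID is stashed in clone->cb[0] and also
 * placed into the injection header's REW_OP, which is what later lets
 * ocelot_get_txtstamp() match a hardware timestamp FIFO entry back to
 * the queued clone.  The allocation step in isolation (illustrative
 * helper name):
 */
static u8 example_ocelot_ts_id_get(struct ocelot_port *ocelot_port,
				   struct sk_buff *clone)
{
	u8 id;

	spin_lock(&ocelot_port->ts_id_lock);

	id = ocelot_port->ts_id;
	clone->cb[0] = id;
	ocelot_port->ts_id = (id + 1) % 4;	/* 2-bit ID space */
	skb_queue_tail(&ocelot_port->tx_skbs, clone);

	spin_unlock(&ocelot_port->ts_id_lock);

	return id;
}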
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 0668d23cdbfa..028a0150f97d 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -344,10 +344,28 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
+ if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
+ info.rew_op = ocelot_port->ptp_cmd;
+
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct sk_buff *clone;
+
+ clone = skb_clone_sk(skb);
+ if (!clone) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
+
+ info.rew_op |= clone->cb[0] << 3;
+ }
+ }
+
if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
info.rew_op = ocelot_port->ptp_cmd;
if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= (ocelot_port->ts_id % 4) << 3;
+ info.rew_op |= skb->cb[0] << 3;
}
ocelot_gen_ifh(ifh, &info);
@@ -380,12 +398,8 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) {
- ocelot_port->ts_id++;
- return NETDEV_TX_OK;
- }
+ kfree_skb(skb);
- dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1e08fe4daaef..a33ab315cc6b 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -300,7 +300,8 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
}
EXPORT_SYMBOL(ocelot_ptp_enable);
-int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info)
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info)
{
struct ptp_clock *ptp_clock;
int i;
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 65408bc994c4..dfb1535f26f2 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -806,17 +806,17 @@ static const struct vcap_field vsc7514_vcap_is2_keys[] = {
[VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1},
/* IP4_TCP_UDP (TYPE=100) */
[VCAP_IS2_HK_TCP] = {124, 1},
- [VCAP_IS2_HK_L4_SPORT] = {125, 16},
- [VCAP_IS2_HK_L4_DPORT] = {141, 16},
+ [VCAP_IS2_HK_L4_DPORT] = {125, 16},
+ [VCAP_IS2_HK_L4_SPORT] = {141, 16},
[VCAP_IS2_HK_L4_RNG] = {157, 8},
[VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1},
[VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1},
- [VCAP_IS2_HK_L4_URG] = {167, 1},
- [VCAP_IS2_HK_L4_ACK] = {168, 1},
- [VCAP_IS2_HK_L4_PSH] = {169, 1},
- [VCAP_IS2_HK_L4_RST] = {170, 1},
- [VCAP_IS2_HK_L4_SYN] = {171, 1},
- [VCAP_IS2_HK_L4_FIN] = {172, 1},
+ [VCAP_IS2_HK_L4_FIN] = {167, 1},
+ [VCAP_IS2_HK_L4_SYN] = {168, 1},
+ [VCAP_IS2_HK_L4_RST] = {169, 1},
+ [VCAP_IS2_HK_L4_PSH] = {170, 1},
+ [VCAP_IS2_HK_L4_ACK] = {171, 1},
+ [VCAP_IS2_HK_L4_URG] = {172, 1},
[VCAP_IS2_HK_L4_1588_DOM] = {173, 8},
[VCAP_IS2_HK_L4_1588_VER] = {181, 4},
/* IP4_OTHER (TYPE=101) */
@@ -896,11 +896,137 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
+static void mscc_ocelot_release_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+
+ ocelot_port = ocelot->ports[port];
+ if (!ocelot_port)
+ continue;
+
+ ocelot_deinit_port(ocelot, port);
+
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ unregister_netdev(priv->dev);
+ free_netdev(priv->dev);
+ }
+}
+
+static int mscc_ocelot_init_ports(struct platform_device *pdev,
+ struct device_node *ports)
+{
+ struct ocelot *ocelot = platform_get_drvdata(pdev);
+ struct device_node *portnp;
+ int err;
+
+ ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(struct ocelot_port *), GFP_KERNEL);
+ if (!ocelot->ports)
+ return -ENOMEM;
+
+ /* No NPI port */
+ ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_NONE);
+
+ for_each_available_child_of_node(ports, portnp) {
+ struct ocelot_port_private *priv;
+ struct ocelot_port *ocelot_port;
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+ struct phy_device *phy;
+ struct regmap *target;
+ struct resource *res;
+ struct phy *serdes;
+ char res_name[8];
+ u32 port;
+
+ if (of_property_read_u32(portnp, "reg", &port))
+ continue;
+
+ snprintf(res_name, sizeof(res_name), "port%d", port);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name);
+ target = ocelot_regmap_init(ocelot, res);
+ if (IS_ERR(target))
+ continue;
+
+ phy_node = of_parse_phandle(portnp, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
+ phy = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!phy)
+ continue;
+
+ err = ocelot_probe_port(ocelot, port, target, phy);
+ if (err) {
+ of_node_put(portnp);
+ return err;
+ }
+
+ ocelot_port = ocelot->ports[port];
+ priv = container_of(ocelot_port, struct ocelot_port_private,
+ port);
+
+ of_get_phy_mode(portnp, &phy_mode);
+
+ ocelot_port->phy_mode = phy_mode;
+
+ switch (ocelot_port->phy_mode) {
+ case PHY_INTERFACE_MODE_NA:
+ continue;
+ case PHY_INTERFACE_MODE_SGMII:
+ break;
+ case PHY_INTERFACE_MODE_QSGMII:
+ /* Ensure clock signals and speed is set on all
+ * QSGMII links
+ */
+ ocelot_port_writel(ocelot_port,
+ DEV_CLOCK_CFG_LINK_SPEED
+ (OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
+ break;
+ default:
+ dev_err(ocelot->dev,
+ "invalid phy mode for port%d, (Q)SGMII only\n",
+ port);
+ of_node_put(portnp);
+ return -EINVAL;
+ }
+
+ serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+ if (IS_ERR(serdes)) {
+ err = PTR_ERR(serdes);
+ if (err == -EPROBE_DEFER)
+ dev_dbg(ocelot->dev, "deferring probe\n");
+ else
+ dev_err(ocelot->dev,
+ "missing SerDes phys for port%d\n",
+ port);
+
+ of_node_put(portnp);
+ return err;
+ }
+
+ priv->serdes = serdes;
+ }
+
+ return 0;
+}
+
static int mscc_ocelot_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *ports, *portnp;
int err, irq_xtr, irq_ptp_rdy;
+ struct device_node *ports;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
@@ -985,20 +1111,24 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
- dev_err(&pdev->dev, "no ethernet-ports child node found\n");
+ dev_err(ocelot->dev, "no ethernet-ports child node found\n");
return -ENODEV;
}
ocelot->num_phys_ports = of_get_child_count(ports);
- ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
- sizeof(struct ocelot_port *), GFP_KERNEL);
-
ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
ocelot->vcap = vsc7514_vcap_props;
- ocelot_init(ocelot);
+ err = ocelot_init(ocelot);
+ if (err)
+ goto out_put_ports;
+
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_put_ports;
+
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
@@ -1008,96 +1138,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
}
- /* No NPI port */
- ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_NONE);
-
- for_each_available_child_of_node(ports, portnp) {
- struct ocelot_port_private *priv;
- struct ocelot_port *ocelot_port;
- struct device_node *phy_node;
- phy_interface_t phy_mode;
- struct phy_device *phy;
- struct regmap *target;
- struct resource *res;
- struct phy *serdes;
- char res_name[8];
- u32 port;
-
- if (of_property_read_u32(portnp, "reg", &port))
- continue;
-
- snprintf(res_name, sizeof(res_name), "port%d", port);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- res_name);
- target = ocelot_regmap_init(ocelot, res);
- if (IS_ERR(target))
- continue;
-
- phy_node = of_parse_phandle(portnp, "phy-handle", 0);
- if (!phy_node)
- continue;
-
- phy = of_phy_find_device(phy_node);
- of_node_put(phy_node);
- if (!phy)
- continue;
-
- err = ocelot_probe_port(ocelot, port, target, phy);
- if (err) {
- of_node_put(portnp);
- goto out_put_ports;
- }
-
- ocelot_port = ocelot->ports[port];
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
-
- of_get_phy_mode(portnp, &phy_mode);
-
- ocelot_port->phy_mode = phy_mode;
-
- switch (ocelot_port->phy_mode) {
- case PHY_INTERFACE_MODE_NA:
- continue;
- case PHY_INTERFACE_MODE_SGMII:
- break;
- case PHY_INTERFACE_MODE_QSGMII:
- /* Ensure clock signals and speed is set on all
- * QSGMII links
- */
- ocelot_port_writel(ocelot_port,
- DEV_CLOCK_CFG_LINK_SPEED
- (OCELOT_SPEED_1000),
- DEV_CLOCK_CFG);
- break;
- default:
- dev_err(ocelot->dev,
- "invalid phy mode for port%d, (Q)SGMII only\n",
- port);
- of_node_put(portnp);
- err = -EINVAL;
- goto out_put_ports;
- }
-
- serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
- if (IS_ERR(serdes)) {
- err = PTR_ERR(serdes);
- if (err == -EPROBE_DEFER)
- dev_dbg(ocelot->dev, "deferring probe\n");
- else
- dev_err(ocelot->dev,
- "missing SerDes phys for port%d\n",
- port);
-
- of_node_put(portnp);
- goto out_put_ports;
- }
-
- priv->serdes = serdes;
- }
-
register_netdevice_notifier(&ocelot_netdevice_nb);
register_switchdev_notifier(&ocelot_switchdev_nb);
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
@@ -1114,6 +1154,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
+ mscc_ocelot_release_ports(ocelot);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3de8430ee8c5..b81e1487945c 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1916,9 +1916,9 @@ static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
static int alloc_ring(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- np->rx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
- &np->ring_dma);
+ np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ &np->ring_dma, GFP_KERNEL);
if (!np->rx_ring)
return -ENOMEM;
np->tx_ring = &np->rx_ring[RX_RING_SIZE];
@@ -1939,10 +1939,10 @@ static void refill_rx(struct net_device *dev)
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- np->rx_dma[entry] = pci_map_single(np->pci_dev,
- skb->data, buflen, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_dma[entry])) {
+ np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
+ skb->data, buflen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
dev_kfree_skb_any(skb);
np->rx_skbuff[entry] = NULL;
break; /* Better luck next round. */
@@ -2013,9 +2013,8 @@ static void drain_tx(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->tx_dma[i], np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
+ np->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
dev->stats.tx_dropped++;
}
@@ -2034,9 +2033,9 @@ static void drain_rx(struct net_device *dev)
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
- buflen + NATSEMI_PADDING,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
@@ -2052,9 +2051,9 @@ static void drain_ring(struct net_device *dev)
static void free_ring(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- pci_free_consistent(np->pci_dev,
- sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
- np->rx_ring, np->ring_dma);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ np->rx_ring, np->ring_dma);
}
static void reinit_rx(struct net_device *dev)
@@ -2101,9 +2100,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
- np->tx_dma[entry] = pci_map_single(np->pci_dev,
- skb->data,skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
+ np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
np->tx_skbuff[entry] = NULL;
dev_kfree_skb_irq(skb);
dev->stats.tx_dropped++;
@@ -2169,9 +2168,8 @@ static void netdev_tx_done(struct net_device *dev)
dev->stats.tx_window_errors++;
dev->stats.tx_errors++;
}
- pci_unmap_single(np->pci_dev,np->tx_dma[entry],
- np->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
+ np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
/* Free the original skb. */
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
@@ -2359,21 +2357,22 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
(skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
/* 16 byte align the IP header */
skb_reserve(skb, RX_OFFSET);
- pci_dma_sync_single_for_cpu(np->pci_dev,
- np->rx_dma[entry],
- buflen,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_dma[entry],
+ buflen,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- np->rx_dma[entry],
- buflen,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_dma[entry],
+ buflen,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_dma[entry],
buflen + NATSEMI_PADDING,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
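/* natsemi and ns83820 below are converted from the deprecated pci_* DMA
 * wrappers to the generic DMA API: pci_map_single(pdev, ...) becomes
 * dma_map_single(&pdev->dev, ...), PCI_DMA_{TO,FROM}DEVICE become
 * DMA_{TO,FROM}_DEVICE, and pci_alloc_consistent() becomes
 * dma_alloc_coherent(..., GFP_KERNEL).  Canonical shape of a converted
 * TX mapping (hypothetical helper):
 */
static int example_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;	/* caller drops the skb, as in start_tx() */
	return 0;
}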
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 8e24c7acf79b..4c1f53339019 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -526,8 +526,8 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
- buf = pci_map_single(dev->pci_dev, skb->data,
- REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ buf = dma_map_single(&dev->pci_dev->dev, skb->data, REAL_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
/* update link of previous rx */
if (likely(next_empty != dev->rx_info.next_rx))
@@ -858,8 +858,8 @@ static void rx_irq(struct net_device *ndev)
mb();
clear_rx_desc(dev, next_rx);
- pci_unmap_single(dev->pci_dev, bufptr,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, bufptr, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
len = cmdsts & CMDSTS_LEN_MASK;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* NH: As was mentioned below, this chip is kinda
@@ -923,10 +923,10 @@ out:
spin_unlock_irqrestore(&info->lock, flags);
}
-static void rx_action(unsigned long _dev)
+static void rx_action(struct tasklet_struct *t)
{
- struct net_device *ndev = (void *)_dev;
- struct ns83820 *dev = PRIV(ndev);
+ struct ns83820 *dev = from_tasklet(dev, t, rx_tasklet);
+ struct net_device *ndev = dev->ndev;
rx_irq(ndev);
writel(ihr, dev->base + IHR);
@@ -985,17 +985,13 @@ static void do_tx_done(struct net_device *ndev)
len = cmdsts & CMDSTS_LEN_MASK;
addr = desc_addr_get(desc + DESC_BUFPTR);
if (skb) {
- pci_unmap_single(dev->pci_dev,
- addr,
- len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, addr, len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
} else
- pci_unmap_page(dev->pci_dev,
- addr,
- len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&dev->pci_dev->dev, addr, len,
+ DMA_TO_DEVICE);
tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
dev->tx_done_idx = tx_done_idx;
@@ -1023,10 +1019,10 @@ static void ns83820_cleanup_tx(struct ns83820 *dev)
dev->tx_skbs[i] = NULL;
if (skb) {
__le32 *desc = dev->tx_descs + (i * DESC_SIZE);
- pci_unmap_single(dev->pci_dev,
- desc_addr_get(desc + DESC_BUFPTR),
- le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev,
+ desc_addr_get(desc + DESC_BUFPTR),
+ le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
}
@@ -1121,7 +1117,8 @@ again:
len = skb->len;
if (nr_frags)
len -= skb->data_len;
- buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
@@ -1207,7 +1204,7 @@ static int ns83820_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
struct ns83820 *dev = PRIV(ndev);
- u32 cfg, tanar, tbicr;
+ u32 cfg, tbicr;
int fullduplex = 0;
u32 supported;
@@ -1226,7 +1223,7 @@ static int ns83820_get_link_ksettings(struct net_device *ndev,
/* read current configuration */
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
- tanar = readl(dev->base + TANAR);
+ readl(dev->base + TANAR);
tbicr = readl(dev->base + TBICR);
fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
@@ -1902,12 +1899,12 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* See if we can set the dma mask early on; failure is fatal. */
if (sizeof(dma_addr_t) == 8 &&
- !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
using_dac = 1;
- } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
using_dac = 0;
} else {
- dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
+ dev_warn(&pci_dev->dev, "dma_set_mask failed!\n");
return -ENODEV;
}
@@ -1927,7 +1924,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
SET_NETDEV_DEV(ndev, &pci_dev->dev);
INIT_WORK(&dev->tq_refill, queue_refill);
- tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
+ tasklet_setup(&dev->rx_tasklet, rx_action);
err = pci_enable_device(pci_dev);
if (err) {
@@ -1938,10 +1935,12 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
dev->base = ioremap(addr, PAGE_SIZE);
- dev->tx_descs = pci_alloc_consistent(pci_dev,
- 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
- dev->rx_info.descs = pci_alloc_consistent(pci_dev,
- 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
+ dev->tx_descs = dma_alloc_coherent(&pci_dev->dev,
+ 4 * DESC_SIZE * NR_TX_DESC,
+ &dev->tx_phy_descs, GFP_KERNEL);
+ dev->rx_info.descs = dma_alloc_coherent(&pci_dev->dev,
+ 4 * DESC_SIZE * NR_RX_DESC,
+ &dev->rx_info.phy_descs, GFP_KERNEL);
err = -ENOMEM;
if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
goto out_disable;
@@ -2183,8 +2182,10 @@ out_free_irq:
out_disable:
if (dev->base)
iounmap(dev->base);
- pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
- pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs);
+ dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
@@ -2205,10 +2206,10 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
unregister_netdev(ndev);
free_irq(dev->pci_dev->irq, ndev);
iounmap(dev->base);
- pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
- dev->tx_descs, dev->tx_phy_descs);
- pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
- dev->rx_info.descs, dev->rx_info.phy_descs);
+ dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index ac02369174a9..53851853562c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -111,7 +111,9 @@ static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct bpf_prog *prog)
{
- int i, cnt, err;
+ int i, cnt, err = 0;
+
+ mutex_lock(&prog->aux->used_maps_mutex);
/* Quickly count the maps we will have to remember */
cnt = 0;
@@ -119,13 +121,15 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
cnt++;
if (!cnt)
- return 0;
+ goto out;
nfp_prog->map_records = kmalloc_array(cnt,
sizeof(nfp_prog->map_records[0]),
GFP_KERNEL);
- if (!nfp_prog->map_records)
- return -ENOMEM;
+ if (!nfp_prog->map_records) {
+ err = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < prog->aux->used_map_cnt; i++)
if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
@@ -133,12 +137,14 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
prog->aux->used_maps[i]);
if (err) {
nfp_map_ptrs_forget(bpf, nfp_prog);
- return err;
+ goto out;
}
}
WARN_ON(cnt != nfp_prog->map_records_cnt);
- return 0;
+out:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+ return err;
}
static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 21ea22694e47..b150da43adb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2287,9 +2287,9 @@ static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
return budget;
}
-static void nfp_ctrl_poll(unsigned long arg)
+static void nfp_ctrl_poll(struct tasklet_struct *t)
{
- struct nfp_net_r_vector *r_vec = (void *)arg;
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
spin_lock(&r_vec->lock);
nfp_net_tx_complete(r_vec->tx_ring, 0);
@@ -2337,8 +2337,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
- tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
- (unsigned long)r_vec);
+ tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 6eb9fb9a1814..9c9ae33d84ce 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -829,8 +829,8 @@ nfp_port_get_fecparam(struct net_device *netdev,
struct nfp_eth_table_port *eth_port;
struct nfp_port *port;
- param->active_fec = ETHTOOL_FEC_NONE_BIT;
- param->fec = ETHTOOL_FEC_NONE_BIT;
+ param->active_fec = ETHTOOL_FEC_NONE;
+ param->fec = ETHTOOL_FEC_NONE;
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
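/* ETHTOOL_FEC_NONE_BIT is a bit index and ETHTOOL_FEC_NONE the
 * corresponding mask (1 << ETHTOOL_FEC_NONE_BIT, per the ethtool uapi);
 * struct ethtool_fecparam carries masks.  Since the NONE bit is index
 * 0, the old code reported 0 -- "no FEC modes" -- instead of the
 * "none" mode:
 */
static void example_fec_report(struct ethtool_fecparam *param)
{
	param->fec = ETHTOOL_FEC_NONE;		/* mask, == 0x1 */
	param->active_fec = ETHTOOL_FEC_NONE;	/* not ETHTOOL_FEC_NONE_BIT (0) */
}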
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 4075f5e59955..a6861df9904f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -787,9 +787,9 @@ out:
return IRQ_HANDLED;
}
-static void nixge_dma_err_handler(unsigned long data)
+static void nixge_dma_err_handler(struct tasklet_struct *t)
{
- struct nixge_priv *lp = (struct nixge_priv *)data;
+ struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
u32 cr, i;
@@ -879,8 +879,7 @@ static int nixge_open(struct net_device *ndev)
phy_start(phy);
/* Enable tasklets for Axi DMA error handling */
- tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
- (unsigned long)priv);
+ tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 76f8cc502bf9..5f8b0bb3af6e 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -21,6 +21,7 @@ config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
select NET_DEVLINK
+ select DIMLIB
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 29f304d75261..8d3c2d3cb10d 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 683bbbf75115..39f59849720d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -189,6 +189,8 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
&intr->index);
debugfs_create_u32("vector", 0400, intr_dentry,
&intr->vector);
+ debugfs_create_u32("dim_coal_hw", 0400, intr_dentry,
+ &intr->dim_coal_hw);
intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 4a35174e3ff1..8842dc4a716f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -237,6 +237,7 @@ struct ionic_intr_info {
u64 rearm_count;
unsigned int cpu;
cpumask_t affinity_mask;
+ u32 dim_coal_hw;
};
struct ionic_cq {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 8d9fb2e19cca..5348f05ebc32 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -9,6 +9,19 @@
#include "ionic_lif.h"
#include "ionic_devlink.h"
+static int ionic_dl_flash_update(struct devlink *dl,
+ const char *fwname,
+ const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic *ionic = devlink_priv(dl);
+
+ if (component)
+ return -EOPNOTSUPP;
+
+ return ionic_firmware_update(ionic->lif, fwname, extack);
+}
+
static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -48,6 +61,7 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
static const struct devlink_ops ionic_dl_ops = {
.info_get = ionic_dl_info_get,
+ .flash_update = ionic_dl_flash_update,
};
struct ionic *ionic_devlink_alloc(struct device *dev)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
index 0690172fc57a..5c01a9e306d8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
@@ -6,6 +6,9 @@
#include <net/devlink.h>
+int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+ struct netlink_ext_ack *extack);
+
struct ionic *ionic_devlink_alloc(struct device *dev);
void ionic_devlink_free(struct ionic *ionic);
int ionic_devlink_register(struct ionic *ionic);
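/* With the flash_update op wired up, a firmware update can be driven
 * from userspace via devlink, e.g. (illustrative bus address and file
 * name):
 *
 *	devlink dev flash pci/0000:b5:00.0 file ionic_fw.bin
 *
 * Requests naming a specific component get -EOPNOTSUPP, since the
 * driver only handles whole-image updates.
 */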
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 0d14659fbdfd..ed9808fc743b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -406,6 +406,13 @@ static int ionic_get_coalesce(struct net_device *netdev,
coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs;
coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ coalesce->use_adaptive_tx_coalesce = test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ else
+ coalesce->use_adaptive_tx_coalesce = 0;
+
+ coalesce->use_adaptive_rx_coalesce = test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+
return 0;
}
@@ -414,10 +421,9 @@ static int ionic_set_coalesce(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_identity *ident;
- struct ionic_qcq *qcq;
+ u32 rx_coal, rx_dim;
+ u32 tx_coal, tx_dim;
unsigned int i;
- u32 rx_coal;
- u32 tx_coal;
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
@@ -426,10 +432,11 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EIO;
}
- /* Tx normally shares Rx interrupt, so only change Rx */
+ /* Tx normally shares Rx interrupt, so only change Rx if not split */
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) &&
- coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
- netdev_warn(netdev, "only the rx-usecs can be changed\n");
+ (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs ||
+ coalesce->use_adaptive_tx_coalesce)) {
+ netdev_warn(netdev, "only rx parameters can be changed\n");
return -EINVAL;
}
@@ -449,32 +456,44 @@ static int ionic_set_coalesce(struct net_device *netdev,
/* Save the new values */
lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (rx_coal != lif->rx_coalesce_hw) {
- lif->rx_coalesce_hw = rx_coal;
-
- if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->rxqcqs[i];
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index,
- lif->rx_coalesce_hw);
- }
- }
- }
+ lif->rx_coalesce_hw = rx_coal;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs;
else
lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (tx_coal != lif->tx_coalesce_hw) {
- lif->tx_coalesce_hw = tx_coal;
+ lif->tx_coalesce_hw = tx_coal;
+
+ if (coalesce->use_adaptive_rx_coalesce) {
+ set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ rx_dim = rx_coal;
+ } else {
+ clear_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ rx_dim = 0;
+ }
+
+ if (coalesce->use_adaptive_tx_coalesce) {
+ set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ tx_dim = tx_coal;
+ } else {
+ clear_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ tx_dim = 0;
+ }
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ for (i = 0; i < lif->nxqs; i++) {
+ if (lif->rxqcqs[i]->flags & IONIC_QCQ_F_INTR) {
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[i]->intr.index,
+ lif->rx_coalesce_hw);
+ lif->rxqcqs[i]->intr.dim_coal_hw = rx_dim;
+ }
- if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->txqcqs[i];
+ if (lif->txqcqs[i]->flags & IONIC_QCQ_F_INTR) {
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index,
+ lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ lif->txqcqs[i]->intr.dim_coal_hw = tx_dim;
}
}
}
@@ -850,7 +869,9 @@ static int ionic_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops ionic_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
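/* The use_adaptive_{rx,tx}_coalesce knobs above gate dynamic interrupt
 * moderation via dimlib (hence the "select DIMLIB" Kconfig hunk
 * earlier), with dim_coal_hw holding the coalesce value currently
 * programmed.  A sketch of the usual dimlib consumer pattern, with
 * hypothetical names -- the driver feeds per-interrupt samples and
 * applies the profile dimlib picks from a work callback (INIT_WORK'd
 * elsewhere):
 */
#include <linux/dim.h>

static void example_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur;

	/* usec/frames pair chosen by the DIM algorithm for this profile */
	cur = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	/* program cur.usec into the device, e.g. into intr.dim_coal_hw */

	dim->state = DIM_START_MEASURE;
}

static void example_feed_sample(struct dim *dim, u16 events,
				u64 packets, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(dim, sample);	/* may queue example_dim_work() */
}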
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
new file mode 100644
index 000000000000..f492ae406a60
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Pensando Systems, Inc */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+
+#include "ionic.h"
+#include "ionic_dev.h"
+#include "ionic_lif.h"
+#include "ionic_devlink.h"
+
+/* The worst case wait for the install activity is about 25 minutes when
+ * installing a new CPLD, which is very seldom. Normal is about 30-35
+ * seconds. Since the driver can't tell if a CPLD update will happen we
+ * set the timeout for the ugly case.
+ */
+#define IONIC_FW_INSTALL_TIMEOUT (25 * 60)
+#define IONIC_FW_SELECT_TIMEOUT 30
+
+/* Number of periodic log updates during fw file download */
+#define IONIC_FW_INTERVAL_FRACTION 32
+
+static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
+ u32 offset, u32 length)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
+ .fw_download.offset = offset,
+ .fw_download.addr = addr,
+ .fw_download.length = length
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static void ionic_dev_cmd_firmware_install(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = IONIC_FW_INSTALL_ASYNC
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static void ionic_dev_cmd_firmware_activate(struct ionic_dev *idev, u8 slot)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = IONIC_FW_ACTIVATE_ASYNC,
+ .fw_control.slot = slot
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static int ionic_fw_status_long_wait(struct ionic *ionic,
+ const char *label,
+ unsigned long timeout,
+ u8 fw_cmd,
+ struct netlink_ext_ack *extack)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = fw_cmd,
+ };
+ unsigned long start_time;
+ unsigned long end_time;
+ int err;
+
+ start_time = jiffies;
+ end_time = start_time + (timeout * HZ);
+ do {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ msleep(20);
+ } while (time_before(jiffies, end_time) && (err == -EAGAIN || err == -ETIMEDOUT));
+
+ if (err == -EAGAIN || err == -ETIMEDOUT) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait timed out");
+ dev_err(ionic->dev, "DEV_CMD firmware wait %s timed out\n", label);
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait failed");
+ }
+
+ return err;
+}
+
+int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct net_device *netdev = lif->netdev;
+ struct ionic *ionic = lif->ionic;
+ union ionic_dev_cmd_comp comp;
+ u32 buf_sz, copy_sz, offset;
+ const struct firmware *fw;
+ struct devlink *dl;
+ int next_interval;
+ int err = 0;
+ u8 fw_slot;
+
+ netdev_info(netdev, "Installing firmware %s\n", fw_name);
+
+ dl = priv_to_devlink(ionic);
+ devlink_flash_update_begin_notify(dl);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+
+ err = request_firmware(&fw, fw_name, ionic->dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to find firmware file");
+ goto err_out;
+ }
+
+ buf_sz = sizeof(idev->dev_cmd_regs->data);
+
+ netdev_dbg(netdev,
+ "downloading firmware - size %d part_sz %d nparts %lu\n",
+ (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));
+
+ offset = 0;
+ next_interval = 0;
+ while (offset < fw->size) {
+ if (offset >= next_interval) {
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ offset, fw->size);
+ next_interval = offset + (fw->size / IONIC_FW_INTERVAL_FRACTION);
+ }
+
+ copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
+ mutex_lock(&ionic->dev_cmd_lock);
+ memcpy_toio(&idev->dev_cmd_regs->data, fw->data + offset, copy_sz);
+ ionic_dev_cmd_firmware_download(idev,
+ offsetof(union ionic_dev_cmd_regs, data),
+ offset, copy_sz);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ netdev_err(netdev,
+ "download failed offset 0x%x addr 0x%lx len 0x%x\n",
+ offset, offsetof(union ionic_dev_cmd_regs, data),
+ copy_sz);
+ NL_SET_ERR_MSG_MOD(extack, "Segment download failed");
+ goto err_out;
+ }
+ offset += copy_sz;
+ }
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ fw->size, fw->size);
+
+ devlink_flash_update_timeout_notify(dl, "Installing", NULL,
+ IONIC_FW_INSTALL_TIMEOUT);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_firmware_install(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
+ fw_slot = comp.fw_control.slot;
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware install");
+ goto err_out;
+ }
+
+ err = ionic_fw_status_long_wait(ionic, "Installing",
+ IONIC_FW_INSTALL_TIMEOUT,
+ IONIC_FW_INSTALL_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ devlink_flash_update_timeout_notify(dl, "Selecting", NULL,
+ IONIC_FW_SELECT_TIMEOUT);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_firmware_activate(idev, fw_slot);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware select");
+ goto err_out;
+ }
+
+ err = ionic_fw_status_long_wait(ionic, "Selecting",
+ IONIC_FW_SELECT_TIMEOUT,
+ IONIC_FW_ACTIVATE_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ netdev_info(netdev, "Firmware update completed\n");
+
+err_out:
+ if (err)
+ devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flash done", NULL, 0, 0);
+ release_firmware(fw);
+ devlink_flash_update_end_notify(dl);
+ return err;
+}
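ionic_fw_status_long_wait() above is an instance of the standard jiffies-deadline polling idiom. A minimal standalone sketch, assuming a hypothetical issue_status_cmd() in place of the locked dev-cmd round trip:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

struct my_dev;
int issue_status_cmd(struct my_dev *dev);	/* hypothetical helper */

static int poll_until_done(struct my_dev *dev, unsigned long timeout_secs)
{
	unsigned long deadline = jiffies + timeout_secs * HZ;
	int err;

	do {
		err = issue_status_cmd(dev);
		if (err != -EAGAIN && err != -ETIMEDOUT)
			break;		/* finished, or hit a real error */
		msleep(20);		/* don't hammer the device */
	} while (time_before(jiffies, deadline));

	return err;	/* still -EAGAIN/-ETIMEDOUT if the deadline passed */
}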
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index acc94b244cf3..5bb56a27a50d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -63,8 +63,10 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_RESET = 245,
/* Firmware commands */
- IONIC_CMD_FW_DOWNLOAD = 254,
- IONIC_CMD_FW_CONTROL = 255,
+ IONIC_CMD_FW_DOWNLOAD = 252,
+ IONIC_CMD_FW_CONTROL = 253,
+ IONIC_CMD_FW_DOWNLOAD_V1 = 254,
+ IONIC_CMD_FW_CONTROL_V1 = 255,
};
/**
@@ -2069,14 +2071,23 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
/**
* enum ionic_fw_control_oper - FW control operations
- * @IONIC_FW_RESET: Reset firmware
- * @IONIC_FW_INSTALL: Install firmware
- * @IONIC_FW_ACTIVATE: Activate firmware
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ * @IONIC_FW_INSTALL_ASYNC: Install firmware asynchronously
+ * @IONIC_FW_INSTALL_STATUS: Firmware installation status
+ * @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
+ * @IONIC_FW_ACTIVATE_STATUS: Firmware activation status
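+ * @IONIC_FW_UPDATE_CLEANUP: Clean up after a firmware update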
*/
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0,
- IONIC_FW_INSTALL = 1,
- IONIC_FW_ACTIVATE = 2,
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
+ IONIC_FW_INSTALL_ASYNC = 3,
+ IONIC_FW_INSTALL_STATUS = 4,
+ IONIC_FW_ACTIVATE_ASYNC = 5,
+ IONIC_FW_ACTIVATE_STATUS = 6,
+ IONIC_FW_UPDATE_CLEANUP = 7,
};
/**
@@ -2689,6 +2700,9 @@ union ionic_dev_cmd {
struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
+
+ struct ionic_fw_download_cmd fw_download;
+ struct ionic_fw_control_cmd fw_control;
};
union ionic_dev_cmd_comp {
@@ -2722,6 +2736,9 @@ union ionic_dev_cmd_comp {
struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
+
+ ionic_fw_download_comp fw_download;
+ struct ionic_fw_control_comp fw_control;
};
/**
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index ee683cb142a8..eb6fe37c0df6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -42,6 +42,20 @@ static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
+static void ionic_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder cur_moder;
+ struct ionic_qcq *qcq;
+ u32 new_coal;
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ qcq = container_of(dim, struct ionic_qcq, dim);
+ new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
+ qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+ dim->state = DIM_START_MEASURE;
+}
+
static void ionic_lif_deferred_work(struct work_struct *work)
{
struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
@@ -270,6 +284,7 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ cancel_work_sync(&qcq->dim.work);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
@@ -542,6 +557,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
}
+ INIT_WORK(&new->dim.work, ionic_dim_work);
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+
*qcq = new;
return 0;
@@ -834,7 +852,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
work_done = max(n_work, a_work);
if (work_done < budget && napi_complete_done(napi, work_done)) {
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(intr);
+ lif->adminqcq->cq.bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -1639,10 +1657,13 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
if (err)
goto err_out;
- if (flags & IONIC_QCQ_F_INTR)
+ if (flags & IONIC_QCQ_F_INTR) {
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
+ lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
+ }
ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
}
@@ -1661,6 +1682,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
+ lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
ionic_link_qcq_interrupts(lif->rxqcqs[i],
@@ -2234,6 +2257,8 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
+ lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
} else {
lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
@@ -2241,6 +2266,21 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
}
}
+ /* now we can rework the debugfs mappings */
+ if (tx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ ionic_debugfs_del_qcq(lif->txqcqs[i]);
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
+ }
+ }
+
+ if (rx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ ionic_debugfs_del_qcq(lif->rxqcqs[i]);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
+ }
+ }
+
swap(lif->nxqs, qparam->nxqs);
err_out_reinit_unlock:
@@ -2346,6 +2386,8 @@ int ionic_lif_alloc(struct ionic *ionic)
lif->rx_coalesce_usecs);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 11ea9e0c6a4a..c65a5e6c26f4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -4,6 +4,7 @@
#ifndef _IONIC_LIF_H_
#define _IONIC_LIF_H_
+#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
@@ -66,6 +67,7 @@ struct ionic_qcq {
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
+ struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
@@ -131,6 +133,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FW_RESET,
IONIC_LIF_F_SPLIT_INTR,
+ IONIC_LIF_F_TX_DIM_INTR,
+ IONIC_LIF_F_RX_DIM_INTR,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -288,7 +292,6 @@ static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
-#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index cfb90bf605fe..e339216949a6 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -170,6 +170,10 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_FW_DOWNLOAD";
case IONIC_CMD_FW_CONTROL:
return "IONIC_CMD_FW_CONTROL";
+ case IONIC_CMD_FW_DOWNLOAD_V1:
+ return "IONIC_CMD_FW_DOWNLOAD_V1";
+ case IONIC_CMD_FW_CONTROL_V1:
+ return "IONIC_CMD_FW_CONTROL_V1";
case IONIC_CMD_VF_GETATTR:
return "IONIC_CMD_VF_GETATTR";
case IONIC_CMD_VF_SETATTR:
@@ -331,17 +335,22 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
start_time = jiffies;
do {
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(5);
- hb = ionic_heartbeat_check(ionic);
+ usleep_range(100, 200);
+
+ /* Don't check the heartbeat on FW_CONTROL commands as they are
+ * notorious for interrupting the firmware's heartbeat update.
+ */
+ if (opcode != IONIC_CMD_FW_CONTROL)
+ hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
- opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
@@ -365,8 +374,9 @@ try_again:
err = ionic_dev_cmd_status(&ionic->idev);
if (err) {
- if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) {
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) retrying...\n",
+ if (err == IONIC_RC_EAGAIN &&
+ time_before(jiffies, (max_wait - HZ))) {
+ dev_dbg(ionic->dev, "DEV_CMD %s (%d), %s (%d) retrying...\n",
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
@@ -376,9 +386,10 @@ try_again:
goto try_again;
}
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(err), err);
+ if (!(opcode == IONIC_CMD_FW_CONTROL && err == IONIC_RC_EAGAIN))
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(err), err);
return ionic_error_to_errno(err);
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 7225251c5563..169ac4f54640 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -432,6 +432,30 @@ void ionic_rx_empty(struct ionic_queue *q)
}
}
+static void ionic_dim_update(struct ionic_qcq *qcq)
+{
+ struct dim_sample dim_sample;
+ struct ionic_lif *lif;
+ unsigned int qi;
+
+ if (!qcq->intr.dim_coal_hw)
+ return;
+
+ lif = qcq->q.lif;
+ qi = qcq->cq.bound_q->index;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[qi]->intr.index,
+ qcq->intr.dim_coal_hw);
+
+ dim_update_sample(qcq->cq.bound_intr->rearm_count,
+ lif->txqstats[qi].pkts,
+ lif->txqstats[qi].bytes,
+ &dim_sample);
+
+ net_dim(&qcq->dim, dim_sample);
+}
+
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -448,8 +472,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ cq->bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -483,8 +508,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ cq->bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -524,8 +550,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill_cb(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+ rxcq->bound_intr->rearm_count++;
}
if (rx_work_done || flags) {
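The dim hookup follows the generic net_dim contract from <linux/dim.h>: the napi path feeds samples, and a work item applies whatever profile DIM settles on. A condensed sketch, assuming a hypothetical my_apply_coalesce() hook where ionic programs dim_coal_hw:

#include <linux/dim.h>
#include <linux/workqueue.h>

void my_apply_coalesce(u16 usec);		/* hypothetical hw hook */

/* Work item: queued by DIM when it decides to change the profile.
 * Registered once at queue setup with INIT_WORK(&dim.work, my_dim_work).
 */
static void my_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder m;

	m = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	my_apply_coalesce(m.usec);		/* program the new interval */
	dim->state = DIM_START_MEASURE;		/* let DIM sample afresh */
}

/* Called from napi poll just before rearming the interrupt. */
static void my_dim_update(struct dim *dim, u16 rearms, u64 pkts, u64 bytes)
{
	struct dim_sample sample;

	dim_update_sample(rearms, pkts, bytes, &sample);
	net_dim(dim, sample);			/* may queue my_dim_work */
}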
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index f34b25a79449..a20cb8a0c377 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -572,7 +572,7 @@ struct qed_hwfn {
struct qed_consq *p_consq;
/* Slow-Path definitions */
- struct tasklet_struct *sp_dpc;
+ struct tasklet_struct sp_dpc;
bool b_sp_dpc_enabled;
struct qed_ptt *p_main_ptt;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index f7f08e6a3acf..d2f5855b2ea7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4254,7 +4254,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_LL2_NON_UNICAST) |
- BIT(QED_MF_INTER_PF_SWITCH);
+ BIT(QED_MF_INTER_PF_SWITCH) |
+ BIT(QED_MF_DISABLE_ARFS);
break;
case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
@@ -4267,6 +4268,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
cdev->mf_bits);
+
+ /* In CMT the PF is unknown when the GFS block processes the
+ * packet. Therefore the searcher cannot be used, as it has a
+ * per-PF database, and thus ARFS must be disabled.
+ */
+ if (QED_IS_CMT(cdev))
+ cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
}
DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f8c5a864812d..578935f643b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1216,9 +1216,9 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
barrier();
}
-void qed_int_sp_dpc(unsigned long hwfn_cookie)
+void qed_int_sp_dpc(struct tasklet_struct *t)
{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
struct qed_pi_info *pi_info = NULL;
struct qed_sb_attn_info *sb_attn;
struct qed_sb_info *sb_info;
@@ -2285,34 +2285,14 @@ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
- tasklet_init(p_hwfn->sp_dpc,
- qed_int_sp_dpc, (unsigned long)p_hwfn);
+ tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
p_hwfn->b_sp_dpc_enabled = true;
}
-static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
-{
- p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
- if (!p_hwfn->sp_dpc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
-{
- kfree(p_hwfn->sp_dpc);
- p_hwfn->sp_dpc = NULL;
-}
-
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
- rc = qed_int_sp_dpc_alloc(p_hwfn);
- if (rc)
- return rc;
-
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
if (rc)
return rc;
@@ -2326,7 +2306,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
{
qed_int_sp_sb_free(p_hwfn);
qed_int_sb_attn_free(p_hwfn);
- qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
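The sp_dpc changes follow the tree-wide tasklet API conversion: the callback now receives the tasklet itself and recovers its container with from_tasklet() (a container_of() wrapper), which is why the struct can be embedded instead of allocated separately. A minimal sketch on a hypothetical struct:

#include <linux/interrupt.h>

struct my_dev {
	int pending;
	struct tasklet_struct dpc;
};

static void my_dpc(struct tasklet_struct *t)
{
	struct my_dev *md = from_tasklet(md, t, dpc);

	md->pending = 0;	/* deferred work would happen here */
}

static void my_dev_init(struct my_dev *md)
{
	/* replaces: tasklet_init(&md->dpc, fn, (unsigned long)md) */
	tasklet_setup(&md->dpc, my_dpc);
}

When the callback needs an object other than the embedding struct, the conversion adds a backpointer, as sc92031 does below with priv->ndev.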
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 86809d7bc2de..c5550e96bbe1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -140,7 +140,7 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
* @param p_hwfn - pointer to hwfn
*
*/
-void qed_int_sp_dpc(unsigned long hwfn_cookie);
+void qed_int_sp_dpc(struct tasklet_struct *t);
/**
* @brief qed_int_get_num_sbs - get the number of status
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 4c6ac8862744..07824bf9d68d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1980,6 +1980,9 @@ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params)
{
+ if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
+ return;
+
if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5b149ceff6b6..5bd58c65e163 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -445,6 +445,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
&cdev->mf_bits);
+ if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
+ dev_info->b_arfs_capable = true;
dev_info->tx_switching = true;
if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
@@ -734,7 +736,7 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
/* Slowpath interrupt */
if (unlikely(status & 0x1)) {
- tasklet_schedule(hwfn->sp_dpc);
+ tasklet_schedule(&hwfn->sp_dpc);
status &= ~0x1;
rc = IRQ_HANDLED;
}
@@ -780,7 +782,7 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
- qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+ qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
} else {
unsigned long flags = 0;
@@ -812,8 +814,8 @@ static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
* enable function makes this sequence a flush-like operation.
*/
if (p_hwfn->b_sp_dpc_enabled) {
- tasklet_disable(p_hwfn->sp_dpc);
- tasklet_enable(p_hwfn->sp_dpc);
+ tasklet_disable(&p_hwfn->sp_dpc);
+ tasklet_enable(&p_hwfn->sp_dpc);
}
}
@@ -842,7 +844,7 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
break;
synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
- cdev->hwfns[i].sp_dpc);
+ &cdev->hwfns[i].sp_dpc);
}
} else {
if (QED_LEADING_HWFN(cdev)->b_int_requested)
@@ -861,11 +863,11 @@ static int qed_nic_stop(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (p_hwfn->b_sp_dpc_enabled) {
- tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_disable(&p_hwfn->sp_dpc);
p_hwfn->b_sp_dpc_enabled = false;
DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
"Disabled sp tasklet [hwfn %d] at %p\n",
- i, p_hwfn->sp_dpc);
+ i, &p_hwfn->sp_dpc);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 4394a4d77224..d3136556a1e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1462,14 +1462,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
switch (qp->qp_type) {
case QED_RDMA_QP_TYPE_XRC_INI:
- qp->has_req = 1;
+ qp->has_req = true;
break;
case QED_RDMA_QP_TYPE_XRC_TGT:
- qp->has_resp = 1;
+ qp->has_resp = true;
break;
default:
- qp->has_req = 1;
- qp->has_resp = 1;
+ qp->has_req = true;
+ qp->has_resp = true;
}
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f1f75b6d0421..b8dc5c4591ef 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -71,6 +71,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
p_ramrod->personality = PERSONALITY_ETH;
break;
case QED_PCI_ETH_ROCE:
+ case QED_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index f961f65d9372..c59b72c90293 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -311,6 +311,9 @@ int qede_alloc_arfs(struct qede_dev *edev)
{
int i;
+ if (!edev->dev_info.common.b_arfs_capable)
+ return -EINVAL;
+
edev->arfs = vzalloc(sizeof(*edev->arfs));
if (!edev->arfs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 20d2296beb79..05e3a3b60269 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -804,7 +804,7 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
+ if (edev->dev_info.common.b_arfs_capable)
hw_features |= NETIF_F_NTUPLE;
if (edev->dev_info.common.vxlan_enable ||
@@ -2295,7 +2295,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
qede_vlan_mark_nonconfigured(edev);
edev->ops->fastpath_stop(edev->cdev);
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
+ if (edev->dev_info.common.b_arfs_capable) {
qede_poll_for_freeing_arfs_filters(edev);
qede_free_arfs(edev);
}
@@ -2362,10 +2362,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
if (rc)
goto err2;
- if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
- rc = qede_alloc_arfs(edev);
- if (rc)
- DP_NOTICE(edev, "aRFS memory allocation failed\n");
+ if (qede_alloc_arfs(edev)) {
+ edev->ndev->features &= ~NETIF_F_NTUPLE;
+ edev->dev_info.common.b_arfs_capable = false;
}
qede_napi_add_enable(edev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 29b9c728a65e..1753736c56f7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -658,11 +658,10 @@ int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
{
void __iomem *addr;
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
- val = readl(addr);
+ readl(addr);
}
void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
@@ -3813,7 +3812,6 @@ static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
qlcnic_cancel_idc_work(adapter);
@@ -3824,11 +3822,7 @@ static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
qlcnic_83xx_disable_mbx_intr(adapter);
cancel_delayed_work_sync(&adapter->idc_aen_work);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- return 0;
+ return pci_save_state(pdev);
}
static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e291e6ac40cb..4e44313b7651 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1239,7 +1239,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- int rc, i;
+ int i;
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
@@ -1260,7 +1260,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
cp_stop_hw(cp);
cp_clean_rings(cp);
- rc = cp_init_rings(cp);
+ cp_init_rings(cp);
cp_start_hw(cp);
__cp_set_rx_mode(dev);
cpw16_f(IntrMask, cp_norx_intr_mask);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 227139d42227..1e5a453dea14 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -978,7 +978,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
pr_info("OQO Model 2 detected. Forcing PIO\n");
- use_io = 1;
+ use_io = true;
}
dev = rtl8139_init_board (pdev);
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 42458a46ffaf..b35b27720175 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -200,9 +200,9 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
buf = alloc + offset;
expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
- dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
+ dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, dma_handle)) {
err = -EIO;
goto free_alloc;
}
@@ -234,8 +234,8 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
goto unmap;
unmap:
- pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
free_alloc:
kfree(alloc);
@@ -441,9 +441,9 @@ static int rocker_dma_ring_create(const struct rocker *rocker,
if (!info->desc_info)
return -ENOMEM;
- info->desc = pci_alloc_consistent(rocker->pdev,
- info->size * sizeof(*info->desc),
- &info->mapaddr);
+ info->desc = dma_alloc_coherent(&rocker->pdev->dev,
+ info->size * sizeof(*info->desc),
+ &info->mapaddr, GFP_KERNEL);
if (!info->desc) {
kfree(info->desc_info);
return -ENOMEM;
@@ -465,9 +465,9 @@ static void rocker_dma_ring_destroy(const struct rocker *rocker,
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
- pci_free_consistent(rocker->pdev,
- info->size * sizeof(struct rocker_desc),
- info->desc, info->mapaddr);
+ dma_free_coherent(&rocker->pdev->dev,
+ info->size * sizeof(struct rocker_desc), info->desc,
+ info->mapaddr);
kfree(info->desc_info);
}
@@ -506,8 +506,9 @@ static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
goto rollback;
}
- dma_handle = pci_map_single(pdev, buf, buf_size, direction);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
+ dma_handle = dma_map_single(&pdev->dev, buf, buf_size,
+ direction);
+ if (dma_mapping_error(&pdev->dev, dma_handle)) {
kfree(buf);
err = -EIO;
goto rollback;
@@ -526,7 +527,8 @@ rollback:
for (i--; i >= 0; i--) {
const struct rocker_desc_info *desc_info = &info->desc_info[i];
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
kfree(desc_info->data);
}
@@ -546,7 +548,8 @@ static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
desc->buf_addr = 0;
desc->buf_size = 0;
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
kfree(desc_info->data);
}
@@ -615,7 +618,7 @@ static int rocker_dma_rings_init(struct rocker *rocker)
spin_lock_init(&rocker->cmd_ring_lock);
err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+ DMA_BIDIRECTIONAL, PAGE_SIZE);
if (err) {
dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
goto err_dma_cmd_ring_bufs_alloc;
@@ -636,7 +639,7 @@ static int rocker_dma_rings_init(struct rocker *rocker)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
- PCI_DMA_FROMDEVICE, PAGE_SIZE);
+ DMA_FROM_DEVICE, PAGE_SIZE);
if (err) {
dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
goto err_dma_event_ring_bufs_alloc;
@@ -650,7 +653,7 @@ err_dma_event_ring_create:
rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
@@ -659,11 +662,11 @@ err_dma_cmd_ring_bufs_alloc:
static void rocker_dma_rings_fini(struct rocker *rocker)
{
rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
rocker_dma_cmd_ring_waits_free(rocker);
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
@@ -675,9 +678,9 @@ static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
- dma_handle = pci_map_single(pdev, skb->data, buf_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, dma_handle))
+ dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_handle))
return -EIO;
if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
goto tlv_put_failure;
@@ -686,7 +689,7 @@ static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
return 0;
tlv_put_failure:
- pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE);
desc_info->tlv_size = 0;
return -EMSGSIZE;
}
@@ -734,7 +737,7 @@ static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
return;
dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
- pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE);
}
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
@@ -796,7 +799,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE,
+ DMA_TO_DEVICE,
ROCKER_DMA_TX_DESC_SIZE);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
@@ -813,7 +816,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
ROCKER_DMA_RX_DESC_SIZE);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
@@ -831,12 +834,12 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
err_dma_rx_ring_skbs_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
err_dma_tx_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
return err;
@@ -848,10 +851,10 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
@@ -1858,7 +1861,7 @@ static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
continue;
dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
- pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE);
}
}
@@ -1871,8 +1874,8 @@ static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
dma_addr_t dma_handle;
struct rocker_tlv *frag;
- dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+ dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) {
if (net_ratelimit())
netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
return -EIO;
@@ -1892,7 +1895,7 @@ static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
nest_cancel:
rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
- pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE);
return -EMSGSIZE;
}
@@ -2905,17 +2908,17 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
goto err_pci_set_dma_mask;
}
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
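The rocker changes are a mechanical move from the deprecated PCI DMA wrappers to the generic DMA API: pci_*() calls become dma_*() on &pdev->dev, PCI_DMA_* direction flags become DMA_*, and pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit GFP_KERNEL. A sketch of the mapping pattern, with my_map_buf() as a hypothetical example:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int my_map_buf(struct pci_dev *pdev, void *buf, size_t len,
		      dma_addr_t *handle)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *handle))
		return -EIO;
	return 0;
}

static void my_unmap_buf(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
}

The 64-then-32-bit mask fallback in rocker_probe() could likewise be collapsed into a single dma_set_mask_and_coherent() call per width; the conversion above keeps the original two-step structure.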
diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c
index c54b7f8243f3..ffdb36715a49 100644
--- a/drivers/net/ethernet/sfc/ef100.c
+++ b/drivers/net/ethernet/sfc/ef100.c
@@ -490,6 +490,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev,
if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) {
netif_err(efx, probe, efx->net_dev,
"Func control window overruns BAR\n");
+ rc = -EIO;
goto fail;
}
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index f94078f8ebe5..1fd08a04bd4e 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -301,6 +301,7 @@ struct sc92031_priv {
/* for dev->get_stats */
long rx_value;
+ struct net_device *ndev;
};
/* I don't know which registers can be safely read; however, I can guess
@@ -829,10 +830,10 @@ static void _sc92031_link_tasklet(struct net_device *dev)
}
}
-static void sc92031_tasklet(unsigned long data)
+static void sc92031_tasklet(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct sc92031_priv *priv = netdev_priv(dev);
+ struct sc92031_priv *priv = from_tasklet(priv, t, tasklet);
+ struct net_device *dev = priv->ndev;
void __iomem *port_base = priv->port_base;
u32 intr_status, intr_mask;
@@ -993,15 +994,15 @@ static int sc92031_open(struct net_device *dev)
struct sc92031_priv *priv = netdev_priv(dev);
struct pci_dev *pdev = priv->pdev;
- priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
- &priv->rx_ring_dma_addr);
+ priv->rx_ring = dma_alloc_coherent(&pdev->dev, RX_BUF_LEN,
+ &priv->rx_ring_dma_addr, GFP_KERNEL);
if (unlikely(!priv->rx_ring)) {
err = -ENOMEM;
goto out_alloc_rx_ring;
}
- priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
- &priv->tx_bufs_dma_addr);
+ priv->tx_bufs = dma_alloc_coherent(&pdev->dev, TX_BUF_TOT_LEN,
+ &priv->tx_bufs_dma_addr, GFP_KERNEL);
if (unlikely(!priv->tx_bufs)) {
err = -ENOMEM;
goto out_alloc_tx_bufs;
@@ -1031,11 +1032,11 @@ static int sc92031_open(struct net_device *dev)
return 0;
out_request_irq:
- pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
- priv->tx_bufs_dma_addr);
+ dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
- pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
- priv->rx_ring_dma_addr);
+ dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
out_alloc_rx_ring:
return err;
}
@@ -1058,10 +1059,10 @@ static int sc92031_stop(struct net_device *dev)
spin_unlock_bh(&priv->lock);
free_irq(pdev->irq, dev);
- pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
- priv->tx_bufs_dma_addr);
- pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
- priv->rx_ring_dma_addr);
+ dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
+ dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
return 0;
}
@@ -1108,7 +1109,7 @@ static void sc92031_poll_controller(struct net_device *dev)
disable_irq(irq);
if (sc92031_interrupt(irq, dev) != IRQ_NONE)
- sc92031_tasklet((unsigned long)dev);
+ sc92031_tasklet(&priv->tasklet);
enable_irq(irq);
}
#endif
@@ -1407,11 +1408,11 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (unlikely(err < 0))
goto out_set_dma_mask;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (unlikely(err < 0))
goto out_set_dma_mask;
@@ -1443,10 +1444,11 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->ethtool_ops = &sc92031_ethtool_ops;
priv = netdev_priv(dev);
+ priv->ndev = dev;
spin_lock_init(&priv->lock);
priv->port_base = port_base;
priv->pdev = pdev;
- tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
+ tasklet_setup(&priv->tasklet, sc92031_tasklet);
/* Fudge tasklet count so the call to sc92031_enable_interrupts at
* sc92031_open will work correctly */
tasklet_disable_nosync(&priv->tasklet);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 4492715d1e42..f6b73afd1879 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -535,10 +535,10 @@ static inline void smc_rcv(struct net_device *dev)
/*
* This is called to actually send a packet to the chip.
*/
-static void smc_hardware_send_pkt(unsigned long data)
+static void smc_hardware_send_pkt(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct smc_local *lp = netdev_priv(dev);
+ struct smc_local *lp = from_tasklet(lp, t, tx_task);
+ struct net_device *dev = lp->dev;
void __iomem *ioaddr = lp->base;
struct sk_buff *skb;
unsigned int packet_no, len;
@@ -688,7 +688,7 @@ smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Allocation succeeded: push packet to the chip's own memory
* immediately.
*/
- smc_hardware_send_pkt((unsigned long)dev);
+ smc_hardware_send_pkt(&lp->tx_task);
}
return NETDEV_TX_OK;
@@ -1964,7 +1964,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
dev->netdev_ops = &smc_netdev_ops;
dev->ethtool_ops = &smc_ethtool_ops;
- tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
+ tasklet_setup(&lp->tx_task, smc_hardware_send_pkt);
INIT_WORK(&lp->phy_configure, smc_phy_configure);
lp->dev = dev;
lp->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 52971f5293aa..d2cdc02d9f94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- DMA_RX_SIZE) *
+ priv->dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -154,7 +154,8 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
* to keep explicit chaining in the descriptor.
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
- ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
+ ((tx_q->dirty_tx + 1) %
+ priv->dma_tx_size))
* sizeof(struct dma_desc)));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index acc5e3fc1c2f..cafa8e3c3573 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -42,9 +42,16 @@
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
-/* These need to be power of two, and >= 4 */
-#define DMA_TX_SIZE 512
-#define DMA_RX_SIZE 512
+/* TX and RX descriptor ring lengths. These need to be a power of two.
+ * A TX ring length of less than 64 may cause a "transmit queue timed out"
+ * error. An RX ring length of less than 64 may cause an inconsistent Rx
+ * chain error.
+ */
+#define DMA_MIN_TX_SIZE 64
+#define DMA_MAX_TX_SIZE 1024
+#define DMA_DEFAULT_TX_SIZE 512
+#define DMA_MIN_RX_SIZE 64
+#define DMA_MAX_RX_SIZE 1024
+#define DMA_DEFAULT_RX_SIZE 512
#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
#undef FRAME_FILTER_DEBUG
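A worked example of why these lengths must stay powers of two: STMMAC_GET_ENTRY() wraps the ring index with a mask rather than a modulo, so a non-power-of-two size silently corrupts the wrap (hence the is_power_of_2() checks in stmmac_set_ringparam() later in this patch):

/*
 *   STMMAC_GET_ENTRY(511, 512) == (511 + 1) & 511 == 0	correct wrap
 *   STMMAC_GET_ENTRY(499, 500) == 500 & 499 == 496	should be 0
 */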
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 14bd5e7b9875..8ad900949dc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 9c02fc754bf1..014a816c9d0b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -171,9 +171,11 @@ struct stmmac_priv {
/* RX Queue */
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
@@ -264,6 +266,8 @@ int stmmac_dvr_probe(struct device *device,
struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ac5e8cc5fb9f..5cfc942dbabf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -440,6 +440,33 @@ static int stmmac_nway_reset(struct net_device *dev)
return phylink_ethtool_nway_reset(priv->phylink);
}
+static void stmmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ ring->rx_max_pending = DMA_MAX_RX_SIZE;
+ ring->tx_max_pending = DMA_MAX_TX_SIZE;
+ ring->rx_pending = priv->dma_rx_size;
+ ring->tx_pending = priv->dma_tx_size;
+}
+
+static int stmmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending < DMA_MIN_RX_SIZE ||
+ ring->rx_pending > DMA_MAX_RX_SIZE ||
+ !is_power_of_2(ring->rx_pending) ||
+ ring->tx_pending < DMA_MIN_TX_SIZE ||
+ ring->tx_pending > DMA_MAX_TX_SIZE ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ return stmmac_reinit_ringparam(netdev, ring->rx_pending,
+ ring->tx_pending);
+}
+
static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -840,6 +867,30 @@ static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
priv->plat->rx_queues_to_use);
}
+static void stmmac_get_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ chan->rx_count = priv->plat->rx_queues_to_use;
+ chan->tx_count = priv->plat->tx_queues_to_use;
+ chan->max_rx = priv->dma_cap.number_rx_queues;
+ chan->max_tx = priv->dma_cap.number_tx_queues;
+}
+
+static int stmmac_set_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (chan->rx_count > priv->dma_cap.number_rx_queues ||
+ chan->tx_count > priv->dma_cap.number_tx_queues ||
+ !chan->rx_count || !chan->tx_count)
+ return -EINVAL;
+
+ return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
+}
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -923,6 +974,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
.nway_reset = stmmac_nway_reset,
+ .get_ringparam = stmmac_get_ringparam,
+ .set_ringparam = stmmac_set_ringparam,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.self_test = stmmac_selftest_run,
@@ -941,6 +994,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
+ .get_channels = stmmac_get_channels,
+ .set_channels = stmmac_set_channels,
.get_tunable = stmmac_get_tunable,
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
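From userspace the new ops map to the standard ethtool commands: `ethtool -G <iface> rx N tx N` for ring lengths and `ethtool -L <iface> rx N tx N` for queue counts, with the validation above returning -EINVAL for out-of-range values (and, for rings, non-power-of-two lengths).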
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3fa24b310d20..df2c74bbfcff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -63,8 +63,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
}
}
-/**
- * stmmac_stop_all_queues - Stop all queues
- * @priv: driver private structure
- */
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
-/**
- * stmmac_start_all_queues - Start all queues
- * @priv: driver private structure
- */
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
if (!test_bit(STMMAC_DOWN, &priv->state) &&
@@ -297,7 +271,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -315,7 +289,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -1146,7 +1120,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
head_rx = (void *)rx_q->dma_rx;
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
}
}
@@ -1169,7 +1143,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
else
head_tx = (void *)tx_q->dma_tx;
- stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
}
}
@@ -1213,16 +1187,16 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the RX descriptors */
- for (i = 0; i < DMA_RX_SIZE; i++)
+ for (i = 0; i < priv->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1),
+ (i == priv->dma_rx_size - 1),
priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1),
+ (i == priv->dma_rx_size - 1),
priv->dma_buf_sz);
}
@@ -1239,8 +1213,8 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
- for (i = 0; i < DMA_TX_SIZE; i++) {
- int last = (i == (DMA_TX_SIZE - 1));
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ int last = (i == (priv->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1394,7 +1368,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
stmmac_clear_rx_descriptors(priv, queue);
- for (i = 0; i < DMA_RX_SIZE; i++) {
+ for (i = 0; i < priv->dma_rx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1409,16 +1383,18 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
}
rx_q->cur_rx = 0;
- rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+ rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
}
}
@@ -1432,7 +1408,7 @@ err_init_rx_buffers:
if (queue == 0)
break;
- i = DMA_RX_SIZE;
+ i = priv->dma_rx_size;
queue--;
}
@@ -1464,13 +1440,15 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
}
- for (i = 0; i < DMA_TX_SIZE; i++) {
+ for (i = 0; i < priv->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
@@ -1534,7 +1512,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_RX_SIZE; i++)
+ for (i = 0; i < priv->dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i);
}
@@ -1547,7 +1525,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_TX_SIZE; i++)
+ for (i = 0; i < priv->dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i);
}
@@ -1569,11 +1547,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, DMA_RX_SIZE *
+ dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1612,7 +1590,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
addr = tx_q->dma_tx;
}
- size *= DMA_TX_SIZE;
+ size *= priv->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1645,7 +1623,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = DMA_RX_SIZE;
+ pp_params.pool_size = priv->dma_rx_size;
num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
@@ -1659,14 +1637,16 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
}
- rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
goto err_dma;
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+ priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_erx)
@@ -1674,7 +1654,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
+ priv->dma_rx_size *
+ sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_rx)
@@ -1713,13 +1694,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
goto err_dma;
- tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -1732,7 +1713,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
else
size = sizeof(struct dma_desc);
- size *= DMA_TX_SIZE;
+ size *= priv->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2042,7 +2023,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2051,7 +2032,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
queue))) &&
- stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
netif_dbg(priv, tx_done, priv->dev,
"%s: restart transmit\n", __func__);
@@ -2324,7 +2305,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
rx_q->dma_rx_phy, chan);
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
+ (priv->dma_rx_size *
+ sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
rx_q->rx_tail_addr, chan);
}
@@ -2408,12 +2390,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (DMA_TX_SIZE - 1), chan);
+ (priv->dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (DMA_RX_SIZE - 1), chan);
+ (priv->dma_rx_size - 1), chan);
}
/**
@@ -2732,6 +2714,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
}
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2789,6 +2775,11 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ if (!priv->dma_tx_size)
+ priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!priv->dma_rx_size)
+ priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
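/* DMA_DEFAULT_TX_SIZE/DMA_DEFAULT_RX_SIZE are assumed to be defined in
 * stmmac.h alongside the old fixed sizes, e.g. (assumption):
 *
 *   #define DMA_DEFAULT_TX_SIZE 512
 *   #define DMA_DEFAULT_RX_SIZE 512
 *
 * so a device whose ring sizes were never configured falls back to sane
 * defaults on open.
 */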
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2860,7 +2851,7 @@ static int stmmac_open(struct net_device *dev)
}
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
return 0;
@@ -2903,8 +2894,6 @@ static int stmmac_release(struct net_device *dev)
phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
- stmmac_stop_all_queues(priv);
-
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@@ -2960,7 +2949,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
return true;
}
@@ -2988,7 +2977,8 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
while (tmp_len > 0) {
dma_addr_t curr_addr;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3095,7 +3085,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -3202,7 +3193,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -3365,7 +3356,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -3433,7 +3424,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -3618,7 +3609,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -3701,7 +3692,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
rx_head = (void *)rx_q->dma_rx;
- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
}
while (count < limit) {
unsigned int buf1_len = 0, buf2_len = 0;
@@ -3743,7 +3734,8 @@ read_again:
if (unlikely(status & dma_own))
break;
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -3918,7 +3910,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
- work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+ work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -4311,11 +4303,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- DMA_RX_SIZE, 1, seq);
+ priv->dma_rx_size, 1, seq);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- DMA_RX_SIZE, 0, seq);
+ priv->dma_rx_size, 0, seq);
}
}
@@ -4327,11 +4319,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- DMA_TX_SIZE, 1, seq);
+ priv->dma_tx_size, 1, seq);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- DMA_TX_SIZE, 0, seq);
+ priv->dma_tx_size, 0, seq);
}
}
@@ -4739,6 +4731,86 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_napi_add(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+ spin_lock_init(&ch->lock);
+
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_tx_napi_add(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
+ }
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ stmmac_napi_del(dev);
+
+ priv->plat->rx_queues_to_use = rx_cnt;
+ priv->plat->tx_queues_to_use = tx_cnt;
+
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ priv->dma_rx_size = rx_size;
+ priv->dma_tx_size = tx_size;
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
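/* Sketch of the ethtool hook these helpers enable; the bounds and the
 * power-of-two constraint are assumptions, only stmmac_reinit_ringparam()
 * comes from this patch:
 *
 *   static int stmmac_set_ringparam(struct net_device *netdev,
 *                                   struct ethtool_ringparam *ring)
 *   {
 *           if (ring->rx_pending < 64 || ring->rx_pending > 1024 ||
 *               ring->tx_pending < 64 || ring->tx_pending > 1024 ||
 *               !is_power_of_2(ring->rx_pending) ||
 *               !is_power_of_2(ring->tx_pending))
 *                   return -EINVAL;
 *
 *           return stmmac_reinit_ringparam(netdev, ring->rx_pending,
 *                                          ring->tx_pending);
 *   }
 */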
/**
* stmmac_dvr_probe
* @device: device pointer
@@ -4755,7 +4827,7 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, rxq, maxq;
+ u32 rxq;
int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
@@ -4819,10 +4891,6 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_ether_addr(priv);
- /* Configure real RX and TX queues */
- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
-
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4920,25 +4988,7 @@ int stmmac_dvr_probe(struct device *device,
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
/* Setup channels NAPI */
- maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
-
- for (queue = 0; queue < maxq; queue++) {
- struct stmmac_channel *ch = &priv->channel[queue];
-
- spin_lock_init(&ch->lock);
- ch->priv_data = priv;
- ch->index = queue;
-
- if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
- }
- if (queue < priv->plat->tx_queues_to_use) {
- netif_tx_napi_add(ndev, &ch->tx_napi,
- stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
- }
- }
+ stmmac_napi_add(ndev);
mutex_init(&priv->lock);
@@ -5003,14 +5053,7 @@ error_phy_setup:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- for (queue = 0; queue < maxq; queue++) {
- struct stmmac_channel *ch = &priv->channel[queue];
-
- if (queue < priv->plat->rx_queues_to_use)
- netif_napi_del(&ch->rx_napi);
- if (queue < priv->plat->tx_queues_to_use)
- netif_napi_del(&ch->tx_napi);
- }
+ stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
@@ -5078,7 +5121,6 @@ int stmmac_suspend(struct device *dev)
mutex_lock(&priv->lock);
netif_device_detach(ndev);
- stmmac_stop_all_queues(priv);
stmmac_disable_all_queues(priv);
@@ -5204,8 +5246,6 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
-
mutex_unlock(&priv->lock);
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index bf195adee393..0462dcc93e53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -796,7 +796,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
u32 tail;
tail = priv->rx_queue[i].dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
+ (priv->dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e28727297563..a142e4c9fc03 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -153,11 +153,11 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
u16 memsz = FIFO_SIZE * (1 << fsz_type);
memset(f, 0, sizeof(struct fifo));
- /* pci_alloc_consistent gives us 4k-aligned memory */
- f->va = pci_alloc_consistent(priv->pdev,
- memsz + FIFO_EXTRA_SPACE, &f->da);
+ /* dma_alloc_coherent gives us 4k-aligned memory */
+ f->va = dma_alloc_coherent(&priv->pdev->dev, memsz + FIFO_EXTRA_SPACE,
+ &f->da, GFP_ATOMIC);
if (!f->va) {
- pr_err("pci_alloc_consistent failed\n");
+ pr_err("dma_alloc_coherent failed\n");
RET(-ENOMEM);
}
f->reg_CFG0 = reg_CFG0;
@@ -183,8 +183,8 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
ENTER;
if (f->va) {
- pci_free_consistent(priv->pdev,
- f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
+ dma_free_coherent(&priv->pdev->dev,
+ f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
f->va = NULL;
}
RET();
@@ -1033,9 +1033,8 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
for (i = 0; i < db->nelem; i++) {
dm = bdx_rxdb_addr_elem(db, i);
if (dm->dma) {
- pci_unmap_single(priv->pdev,
- dm->dma, f->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, dm->dma,
+ f->m.pktsz, DMA_FROM_DEVICE);
dev_kfree_skb(dm->skb);
}
}
@@ -1097,9 +1096,8 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
idx = bdx_rxdb_alloc_elem(db);
dm = bdx_rxdb_addr_elem(db, idx);
- dm->dma = pci_map_single(priv->pdev,
- skb->data, f->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dm->dma = dma_map_single(&priv->pdev->dev, skb->data,
+ f->m.pktsz, DMA_FROM_DEVICE);
dm->skb = skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
@@ -1259,16 +1257,15 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
(skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
skb_reserve(skb2, NET_IP_ALIGN);
/*skb_put(skb2, len); */
- pci_dma_sync_single_for_cpu(priv->pdev,
- dm->dma, rxf_fifo->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev, dm->dma,
+ rxf_fifo->m.pktsz,
+ DMA_FROM_DEVICE);
memcpy(skb2->data, skb->data, len);
bdx_recycle_skb(priv, rxdd);
skb = skb2;
} else {
- pci_unmap_single(priv->pdev,
- dm->dma, rxf_fifo->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, dm->dma,
+ rxf_fifo->m.pktsz, DMA_FROM_DEVICE);
bdx_rxdb_free_elem(db, rxdd->va_lo);
}
@@ -1478,8 +1475,8 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int i;
db->wptr->len = skb_headlen(skb);
- db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
- db->wptr->len, PCI_DMA_TODEVICE);
+ db->wptr->addr.dma = dma_map_single(&priv->pdev->dev, skb->data,
+ db->wptr->len, DMA_TO_DEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
@@ -1716,8 +1713,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
BDX_ASSERT(db->rptr->len == 0);
do {
BDX_ASSERT(db->rptr->addr.dma == 0);
- pci_unmap_page(priv->pdev, db->rptr->addr.dma,
- db->rptr->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
bdx_tx_db_inc_rptr(db);
} while (db->rptr->len > 0);
tx_level -= db->rptr->len; /* '-' koz len is negative */
@@ -1765,8 +1762,8 @@ static void bdx_tx_free_skbs(struct bdx_priv *priv)
ENTER;
while (db->rptr != db->wptr) {
if (likely(db->rptr->len))
- pci_unmap_page(priv->pdev, db->rptr->addr.dma,
- db->rptr->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
else
dev_kfree_skb(db->rptr->addr.skb);
bdx_tx_db_inc_rptr(db);
@@ -1902,12 +1899,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) /* it triggers interrupt, dunno why. */
goto err_pci; /* it's not a problem though */
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) &&
+ !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
+ (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
pr_err("No usable DMA configuration, aborting\n");
goto err_dma;
}
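/* The pci_* DMA wrappers dropped above are thin aliases for the generic
 * DMA API; the mechanical mapping used in this conversion (and in the
 * tlan one below) is:
 *
 *   pci_alloc_consistent(pdev, sz, &da)
 *       -> dma_alloc_coherent(&pdev->dev, sz, &da, GFP_ATOMIC)
 *   pci_free_consistent            -> dma_free_coherent
 *   pci_map_single/pci_unmap_single with PCI_DMA_{TO,FROM}DEVICE
 *       -> dma_map_single/dma_unmap_single with DMA_{TO,FROM}_DEVICE
 *   pci_set_dma_mask/pci_set_consistent_dma_mask
 *       -> dma_set_mask/dma_set_coherent_mask
 */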
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9b425f184f3c..9fd1f77190ad 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1291,7 +1291,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
data->mac_control = prop;
if (of_property_read_bool(node, "dual_emac"))
- data->dual_emac = 1;
+ data->dual_emac = true;
/*
* Populate all the child nodes here...
@@ -1590,7 +1590,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
data = &cpsw->data;
cpsw->slaves = devm_kcalloc(dev,
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index a3528c5c823f..26073cd63e33 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -17,6 +17,7 @@
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
@@ -2069,9 +2070,61 @@ static int cpsw_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused cpsw_suspend(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *ndev = cpsw->slaves[i].ndev;
+
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ cpsw_ndo_stop(ndev);
+ }
+
+ rtnl_unlock();
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused cpsw_resume(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *ndev = cpsw->slaves[i].ndev;
+
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ cpsw_ndo_open(ndev);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
+
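/* SIMPLE_DEV_PM_OPS populates only the system-sleep callbacks; it is
 * roughly equivalent to:
 *
 *   static const struct dev_pm_ops cpsw_pm_ops = {
 *           SET_SYSTEM_SLEEP_PM_OPS(cpsw_suspend, cpsw_resume)
 *   };
 *
 * and __maybe_unused on the two callbacks keeps !CONFIG_PM_SLEEP builds
 * free of defined-but-unused warnings.
 */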
static struct platform_driver cpsw_driver = {
.driver = {
.name = "cpsw-switch",
+ .pm = &cpsw_pm_ops,
.of_match_table = cpsw_of_mtable,
},
.probe = cpsw_probe,
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 76a342ea3797..1203a3c0febb 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -305,9 +305,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (priv->dma_storage) {
- pci_free_consistent(priv->pci_dev,
- priv->dma_size, priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
@@ -482,7 +481,7 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
priv->adapter = &board_info[ent->driver_data];
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
pr_err("No suitable PCI mapping available\n");
goto err_out_free_dev;
@@ -584,8 +583,8 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
return 0;
err_out_uninit:
- pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -609,9 +608,9 @@ static void tlan_eisa_cleanup(void)
dev = tlan_eisa_devices;
priv = netdev_priv(dev);
if (priv->dma_storage) {
- pci_free_consistent(priv->pci_dev, priv->dma_size,
- priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
release_region(dev->base_addr, 0x10);
unregister_netdev(dev);
@@ -826,9 +825,8 @@ static int tlan_init(struct net_device *dev)
dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
* (sizeof(struct tlan_list));
- priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
- dma_size,
- &priv->dma_storage_dma);
+ priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
+ &priv->dma_storage_dma, GFP_KERNEL);
priv->dma_size = dma_size;
if (priv->dma_storage == NULL) {
@@ -1069,9 +1067,9 @@ static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
+ tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data, txlen,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tlan_store_skb(tail_list, skb);
tail_list->frame_size = (u16) txlen;
@@ -1365,10 +1363,10 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
- max(skb->len,
- (unsigned int)TLAN_MIN_FRAME_SIZE),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ head_list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
@@ -1511,8 +1509,8 @@ static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
goto drop_and_reuse;
skb = tlan_get_skb(head_list);
- pci_unmap_single(priv->pci_dev, frame_dma,
- TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, frame_dma,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
skb_put(skb, frame_size);
dev->stats.rx_bytes += frame_size;
@@ -1521,8 +1519,8 @@ static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
netif_rx(skb);
head_list->buffer[0].address =
- pci_map_single(priv->pci_dev, new_skb->data,
- TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dma_map_single(&priv->pci_dev->dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
tlan_store_skb(head_list, new_skb);
drop_and_reuse:
@@ -1923,10 +1921,10 @@ static void tlan_reset_lists(struct net_device *dev)
if (!skb)
break;
- list->buffer[0].address = pci_map_single(priv->pci_dev,
+ list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
@@ -1954,12 +1952,10 @@ static void tlan_free_lists(struct net_device *dev)
list = priv->tx_list + i;
skb = tlan_get_skb(list);
if (skb) {
- pci_unmap_single(
- priv->pci_dev,
- list->buffer[0].address,
- max(skb->len,
- (unsigned int)TLAN_MIN_FRAME_SIZE),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
@@ -1970,10 +1966,9 @@ static void tlan_free_lists(struct net_device *dev)
list = priv->rx_list + i;
skb = tlan_get_skb(list);
if (skb) {
- pci_unmap_single(priv->pci_dev,
+ dma_unmap_single(&priv->pci_dev->dev,
list->buffer[0].address,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c71f994fbc73..974a244f45ba 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -777,7 +777,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct net_device *dev,
struct geneve_sock *gs4,
struct flowi4 *fl4,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 dport, __be16 sport)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -793,6 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
fl4->flowi4_proto = IPPROTO_UDP;
fl4->daddr = info->key.u.ipv4.dst;
fl4->saddr = info->key.u.ipv4.src;
+ fl4->fl4_dport = dport;
+ fl4->fl4_sport = sport;
tos = info->key.tos;
if ((tos == 1) && !geneve->cfg.collect_md) {
@@ -827,7 +830,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
struct net_device *dev,
struct geneve_sock *gs6,
struct flowi6 *fl6,
- const struct ip_tunnel_info *info)
+ const struct ip_tunnel_info *info,
+ __be16 dport, __be16 sport)
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
@@ -843,6 +847,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
fl6->flowi6_proto = IPPROTO_UDP;
fl6->daddr = info->key.u.ipv6.dst;
fl6->saddr = info->key.u.ipv6.src;
+ fl6->fl6_dport = dport;
+ fl6->fl6_sport = sport;
+
prio = info->key.tos;
if ((prio == 1) && !geneve->cfg.collect_md) {
prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
@@ -889,7 +896,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ geneve->cfg.info.key.tp_dst, sport);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -919,7 +928,6 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return -EMSGSIZE;
}
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->cfg.collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
@@ -974,7 +982,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
+ sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ geneve->cfg.info.key.tp_dst, sport);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -1003,7 +1013,6 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return -EMSGSIZE;
}
- sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->cfg.collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
@@ -1085,13 +1094,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
struct geneve_dev *geneve = netdev_priv(dev);
+ __be16 sport;
if (ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
struct flowi4 fl4;
+
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
+ sport = udp_flow_src_port(geneve->net, skb,
+ 1, USHRT_MAX, true);
- rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
+ rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
+ geneve->cfg.info.key.tp_dst, sport);
if (IS_ERR(rt))
return PTR_ERR(rt);
@@ -1101,9 +1115,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
} else if (ip_tunnel_info_af(info) == AF_INET6) {
struct dst_entry *dst;
struct flowi6 fl6;
+
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
+ sport = udp_flow_src_port(geneve->net, skb,
+ 1, USHRT_MAX, true);
- dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info);
+ dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
+ geneve->cfg.info.key.tp_dst, sport);
if (IS_ERR(dst))
return PTR_ERR(dst);
@@ -1114,8 +1132,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
return -EINVAL;
}
- info->key.tp_src = udp_flow_src_port(geneve->net, skb,
- 1, USHRT_MAX, true);
+ info->key.tp_src = sport;
info->key.tp_dst = geneve->cfg.info.key.tp_dst;
return 0;
}
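/* Why the source port is now chosen before the route lookup: with
 * fl4_sport/fl4_dport (and the IPv6 equivalents) filled in, multipath
 * route selection can hash on the full UDP 4-tuple instead of addresses
 * only. Resulting IPv4 transmit order (sketch):
 *
 *   sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 *   rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
 *                         geneve->cfg.info.key.tp_dst, sport);
 *   ...encapsulate and transmit using the same sport...
 */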
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2181d4538ab7..a0f338cf1424 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -847,6 +847,10 @@ struct nvsp_message {
#define NETVSC_XDP_HDRM 256
+#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \
+ (offsetof(struct vmtransfer_page_packet_header, ranges) + \
+ (rng_cnt) * sizeof(struct vmtransfer_page_range))
+
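/* Worked example: a transfer-page packet advertising rng_cnt == 2 must
 * provide at least
 *
 *   offsetof(struct vmtransfer_page_packet_header, ranges)
 *       + 2 * sizeof(struct vmtransfer_page_range)
 *
 * bytes of descriptor header; netvsc_receive() compares this against
 * desc->offset8 << 3 before trusting range_cnt from the host.
 */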
struct multi_send_data {
struct sk_buff *skb; /* skb containing the pkt */
struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
@@ -974,6 +978,9 @@ struct net_device_context {
/* Serial number of the VF to team with */
u32 vf_serial;
+ /* Is the current data path through the VF NIC? */
+ bool data_path_is_vf;
+
/* Used to temporarily save the config info across hibernation */
struct netvsc_device_info *saved_netvsc_dev_info;
};
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 41f5cf0bb997..5a57d1985bae 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -388,6 +388,15 @@ static int netvsc_init_buf(struct hv_device *device,
net_device->recv_section_size = resp->sections[0].sub_alloc_size;
net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;
+ /* Ensure buffer will not overflow */
+ if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
+ (u64)net_device->recv_section_cnt > (u64)buf_size) {
+ netdev_err(ndev, "invalid recv_section_size %u\n",
+ net_device->recv_section_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
/* Setup receive completion ring.
* Add 1 to the recv_section_cnt because at least one entry in a
* ring buffer has to be empty.
@@ -460,6 +469,12 @@ static int netvsc_init_buf(struct hv_device *device,
/* Parse the response */
net_device->send_section_size = init_packet->msg.
v1_msg.send_send_buf_complete.section_size;
+ if (net_device->send_section_size < NETVSC_MTU_MIN) {
+ netdev_err(ndev, "invalid send_section_size %u\n",
+ net_device->send_section_size);
+ ret = -EINVAL;
+ goto cleanup;
+ }
/* Section count is simply the size divided by the section size. */
net_device->send_section_cnt = buf_size / net_device->send_section_size;
@@ -731,12 +746,49 @@ static void netvsc_send_completion(struct net_device *ndev,
int budget)
{
const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
+ return;
+ }
switch (nvsp_packet->hdr.msg_type) {
case NVSP_MSG_TYPE_INIT_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_message_init_complete)) {
+ netdev_err(ndev, "nvsp_msg length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
+ netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
+ netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
+ msglen);
+ return;
+ }
+ fallthrough;
+
case NVSP_MSG5_TYPE_SUBCHANNEL:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_5_subchannel_complete)) {
+ netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
+ msglen);
+ return;
+ }
/* Copy the response back */
memcpy(&net_device->channel_init_pkt, nvsp_packet,
sizeof(struct nvsp_message));
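/* All four completion types funnel into the memcpy above, and each case
 * verifies the payload can hold its own completion struct first. Because
 * of the fallthrough chain, an earlier case also passes through the
 * later cases' checks; the structs live in one union, so the effective
 * bound is the largest of the four sizes.
 */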
@@ -1117,19 +1169,28 @@ static void enq_receive_complete(struct net_device *ndev,
static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct netvsc_channel *nvchan,
- const struct vmpacket_descriptor *desc,
- const struct nvsp_message *nvsp)
+ const struct vmpacket_descriptor *desc)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = nvchan->channel;
const struct vmtransfer_page_packet_header *vmxferpage_packet
= container_of(desc, const struct vmtransfer_page_packet_header, d);
+ const struct nvsp_message *nvsp = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "invalid nvsp header, length too small: %u\n",
+ msglen);
+ return 0;
+ }
+
/* Make sure this is a valid nvsp packet */
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
netif_err(net_device_ctx, rx_err, ndev,
@@ -1138,6 +1199,14 @@ static int netvsc_receive(struct net_device *ndev,
return 0;
}
+ /* Validate xfer page pkt header */
+ if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Invalid xfer page pkt, offset too small: %u\n",
+ desc->offset8 << 3);
+ return 0;
+ }
+
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
netif_err(net_device_ctx, rx_err, ndev,
"Invalid xfer page set id - expecting %x got %x\n",
@@ -1148,6 +1217,14 @@ static int netvsc_receive(struct net_device *ndev,
count = vmxferpage_packet->range_cnt;
+ /* Check count for a valid value */
+ if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Range count is not valid: %d\n",
+ count);
+ return 0;
+ }
+
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < count; i++) {
u32 offset = vmxferpage_packet->ranges[i].byte_offset;
@@ -1155,7 +1232,8 @@ static int netvsc_receive(struct net_device *ndev,
void *data;
int ret;
- if (unlikely(offset + buflen > net_device->recv_buf_size)) {
+ if (unlikely(offset > net_device->recv_buf_size ||
+ buflen > net_device->recv_buf_size - offset)) {
nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
netif_err(net_device_ctx, rx_err, ndev,
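/* The split bounds check avoids u32 wraparound. With the old form
 * "offset + buflen > recv_buf_size" and host-controlled values:
 *
 *   offset = 0xfffffff0, buflen = 0x100
 *   offset + buflen == 0xf0 (mod 2^32)  -> check passes, out-of-bounds read
 *
 * Comparing offset first, then buflen against the remaining space,
 * cannot overflow.
 */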
@@ -1194,6 +1272,13 @@ static void netvsc_send_table(struct net_device *ndev,
u32 count, offset, *tab;
int i;
+ /* Ensure packet is big enough to read send_table fields */
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_5_send_indirect_table)) {
+ netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
+ return;
+ }
+
count = nvmsg->msg.v5_msg.send_table.count;
offset = nvmsg->msg.v5_msg.send_table.offset;
@@ -1225,10 +1310,18 @@ static void netvsc_send_table(struct net_device *ndev,
}
static void netvsc_send_vf(struct net_device *ndev,
- const struct nvsp_message *nvmsg)
+ const struct nvsp_message *nvmsg,
+ u32 msglen)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ /* Ensure packet is big enough to read its fields */
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_4_send_vf_association)) {
+ netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
+ return;
+ }
+
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
netdev_info(ndev, "VF slot %u %s\n",
@@ -1238,16 +1331,24 @@ static void netvsc_send_vf(struct net_device *ndev,
static void netvsc_receive_inband(struct net_device *ndev,
struct netvsc_device *nvscdev,
- const struct nvsp_message *nvmsg,
- u32 msglen)
+ const struct vmpacket_descriptor *desc)
{
+ const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+ u32 msglen = hv_pkt_datalen(desc);
+
+ /* Ensure packet is big enough to read header fields */
+ if (msglen < sizeof(struct nvsp_message_header)) {
+ netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
+ return;
+ }
+
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
- netvsc_send_vf(ndev, nvmsg);
+ netvsc_send_vf(ndev, nvmsg, msglen);
break;
}
}
@@ -1261,23 +1362,20 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
{
struct vmbus_channel *channel = nvchan->channel;
const struct nvsp_message *nvmsg = hv_pkt_data(desc);
- u32 msglen = hv_pkt_datalen(desc);
trace_nvsp_recv(ndev, channel, nvmsg);
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(ndev, net_device, channel,
- desc, budget);
+ netvsc_send_completion(ndev, net_device, channel, desc, budget);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- return netvsc_receive(ndev, net_device, nvchan,
- desc, nvmsg);
+ return netvsc_receive(ndev, net_device, nvchan, desc);
break;
case VM_PKT_DATA_INBAND:
- netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
+ netvsc_receive_inband(ndev, net_device, desc);
break;
default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 64b0a74c1523..9869e390875e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -748,6 +748,13 @@ void netvsc_linkstatus_callback(struct net_device *net,
struct netvsc_reconfig *event;
unsigned long flags;
+ /* Ensure the packet is big enough to access its fields */
+ if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
+ netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
+ resp->msg_len);
+ return;
+ }
+
/* Update the physical link speed when changing to another vSwitch */
if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
u32 speed;
@@ -2366,7 +2373,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
return NOTIFY_OK;
}
-/* VF up/down change detected, schedule to change data path */
+/* Change the data path when VF UP/DOWN/CHANGE are detected.
+ *
+ * Typically a UP or DOWN event is followed by a CHANGE event, so
+ * net_device_ctx->data_path_is_vf is used to cache the current data path
+ * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate
+ * message.
+ *
+ * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
+ * interface, there is only the CHANGE event and no UP or DOWN event.
+ */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
@@ -2383,6 +2399,10 @@ static int netvsc_vf_changed(struct net_device *vf_netdev)
if (!netvsc_dev)
return NOTIFY_DONE;
+ if (net_device_ctx->data_path_is_vf == vf_is_up)
+ return NOTIFY_OK;
+ net_device_ctx->data_path_is_vf = vf_is_up;
+
netvsc_switch_datapath(ndev, vf_is_up);
netdev_info(ndev, "Data path switched %s VF: %s\n",
vf_is_up ? "to" : "from", vf_netdev->name);
@@ -2587,8 +2607,8 @@ static int netvsc_remove(struct hv_device *dev)
static int netvsc_suspend(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
- struct net_device *vf_netdev, *net;
struct netvsc_device *nvdev;
+ struct net_device *net;
int ret;
net = hv_get_drvdata(dev);
@@ -2604,10 +2624,6 @@ static int netvsc_suspend(struct hv_device *dev)
goto out;
}
- vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
- if (vf_netdev)
- netvsc_unregister_vf(vf_netdev);
-
/* Save the current config info */
ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
@@ -2628,6 +2644,12 @@ static int netvsc_resume(struct hv_device *dev)
rtnl_lock();
net_device_ctx = netdev_priv(net);
+
+ /* Reset the data path to the netvsc NIC before re-opening the vmbus
+ * channel. Later netvsc_netdev_event() will switch the data path to
+ * the VF upon the UP or CHANGE event.
+ */
+ net_device_ctx->data_path_is_vf = false;
device_info = net_device_ctx->saved_netvsc_dev_info;
ret = netvsc_attach(net, device_info);
@@ -2695,6 +2717,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return netvsc_unregister_vf(event_dev);
case NETDEV_UP:
case NETDEV_DOWN:
+ case NETDEV_CHANGE:
return netvsc_vf_changed(event_dev);
default:
return NOTIFY_DONE;
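/* Resulting event handling, assuming a VF driver (e.g. mlx5) that keeps
 * its netdev across hibernation (trace, not verbatim code):
 *
 *   resume:          data_path_is_vf = false      -> synthetic data path
 *   NETDEV_CHANGE:   vf_is_up != data_path_is_vf  -> switch to VF once
 *   NETDEV_CHANGE:   vf_is_up == data_path_is_vf  -> NOTIFY_OK, no-op
 *
 * so duplicate datapath switches and log messages are suppressed.
 */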
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b81ceba38218..12ad471ac5e1 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -275,6 +275,16 @@ static void rndis_filter_receive_response(struct net_device *ndev,
return;
}
+ /* Ensure the packet is big enough to read req_id. Req_id is the 1st
+ * field in any request/response message, so the payload should have at
+ * least sizeof(u32) bytes
+ */
+ if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
+ netdev_err(ndev, "rndis msg_len too small: %u\n",
+ resp->msg_len);
+ return;
+ }
+
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
/*
@@ -331,8 +341,9 @@ static void rndis_filter_receive_response(struct net_device *ndev,
* Get the Per-Packet-Info with the specified type
* return NULL if not found.
*/
-static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
- u32 type, u8 internal)
+static inline void *rndis_get_ppi(struct net_device *ndev,
+ struct rndis_packet *rpkt,
+ u32 rpkt_len, u32 type, u8 internal)
{
struct rndis_per_packet_info *ppi;
int len;
@@ -340,11 +351,36 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt,
if (rpkt->per_pkt_info_offset == 0)
return NULL;
+ /* Validate info_offset and info_len */
+ if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
+ rpkt->per_pkt_info_offset > rpkt_len) {
+ netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
+ rpkt->per_pkt_info_offset);
+ return NULL;
+ }
+
+ if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
+ netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
+ rpkt->per_pkt_info_len);
+ return NULL;
+ }
+
ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
rpkt->per_pkt_info_offset);
len = rpkt->per_pkt_info_len;
while (len > 0) {
+ /* Validate ppi_offset and ppi_size */
+ if (ppi->size > len) {
+ netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
+ break;
+ }
+
+ if (ppi->ppi_offset >= ppi->size) {
+ netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
+ break;
+ }
+
if (ppi->type == type && ppi->internal == internal)
return (void *)((ulong)ppi + ppi->ppi_offset);
len -= ppi->size;
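/* Layout being validated (sketch inferred from the checks above):
 *
 *   rndis_packet
 *     .per_pkt_info_offset --> | ppi[0] | ppi[1] | ... |  (per_pkt_info_len bytes)
 *
 *   each ppi = { size, type, internal, ppi_offset, data... }
 *   walk invariant: ppi_offset < size <= remaining len
 */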
@@ -388,14 +424,29 @@ static int rndis_filter_receive_data(struct net_device *ndev,
const struct ndis_pkt_8021q_info *vlan;
const struct rndis_pktinfo_id *pktinfo_id;
const u32 *hash_info;
- u32 data_offset;
+ u32 data_offset, rpkt_len;
void *data;
bool rsc_more = false;
int ret;
+ /* Ensure data_buflen is big enough to read header fields */
+ if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
+ netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
+ data_buflen);
+ return NVSP_STAT_FAIL;
+ }
+
+ /* Validate rndis_pkt offset */
+ if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
+ netdev_err(ndev, "invalid rndis packet offset: %u\n",
+ rndis_pkt->data_offset);
+ return NVSP_STAT_FAIL;
+ }
+
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
+ rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
data_buflen -= data_offset;
/*
@@ -410,13 +461,13 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return NVSP_STAT_FAIL;
}
- vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0);
+ vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0);
- csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0);
+ csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0);
- hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0);
+ hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0);
- pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1);
+ pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1);
data = (void *)msg + data_offset;
@@ -474,6 +525,14 @@ int rndis_filter_receive(struct net_device *ndev,
if (netif_msg_rx_status(net_device_ctx))
dump_rndis_message(ndev, rndis_msg);
+ /* Validate incoming rndis_message packet */
+ if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
+ buflen < rndis_msg->msg_len) {
+ netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
+ buflen, rndis_msg->msg_len);
+ return NVSP_STAT_FAIL;
+ }
+
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
return rndis_filter_receive_data(ndev, net_dev, nvchan,
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index c11f32f644db..7db9cbd0f5de 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -882,7 +882,9 @@ static int adf7242_rx(struct adf7242_local *lp)
int ret;
u8 lqi, len_u8, *data;
- adf7242_read_reg(lp, 0, &len_u8);
+ ret = adf7242_read_reg(lp, 0, &len_u8);
+ if (ret)
+ return ret;
len = len_u8;
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index e04c3b60cae7..4eb64709d44c 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2925,6 +2925,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
);
if (!priv->irq_workqueue) {
dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n");
+ destroy_workqueue(priv->mlme_workqueue);
return -ENOMEM;
}
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 0e63d35320aa..cb75f7d54057 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1987,31 +1987,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
}
gsi->irq = irq;
- ret = enable_irq_wake(gsi->irq);
- if (ret)
- dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
- gsi->irq_wake_enabled = !ret;
-
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
@@ -2025,9 +2020,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
err_iounmap:
iounmap(gsi->virt);
-err_disable_irq_wake:
- if (gsi->irq_wake_enabled)
- (void)disable_irq_wake(gsi->irq);
+err_free_irq:
free_irq(gsi->irq, gsi);
return ret;
@@ -2038,8 +2031,6 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- if (gsi->irq_wake_enabled)
- (void)disable_irq_wake(gsi->irq);
free_irq(gsi->irq, gsi);
iounmap(gsi->virt);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 061312773df0..3f9f29d531c4 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -150,7 +150,6 @@ struct gsi {
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
- bool irq_wake_enabled;
u32 channel_count;
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 407fee841a9a..6c2371084c55 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -28,14 +28,24 @@ struct ipa_smp2p;
struct ipa_interrupt;
/**
+ * enum ipa_flag - IPA state flags
+ * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
+ * @IPA_FLAG_COUNT: Number of defined IPA flags
+ */
+enum ipa_flag {
+ IPA_FLAG_RESUMED,
+ IPA_FLAG_COUNT, /* Last; not a flag */
+};
+
+/**
* struct ipa - IPA information
* @gsi: Embedded GSI structure
+ * @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
* @modem_rproc: Remoteproc handle for modem subsystem
* @smp2p: SMP2P information
* @clock: IPA clocking information
- * @suspend_ref: Whether clock reference preventing suspend taken
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
* @interrupt: IPA Interrupt information
@@ -70,6 +80,7 @@ struct ipa_interrupt;
*/
struct ipa {
struct gsi gsi;
+ DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
struct rproc *modem_rproc;
@@ -77,7 +88,6 @@ struct ipa {
void *notifier;
struct ipa_smp2p *smp2p;
struct ipa_clock *clock;
- atomic_t suspend_ref;
dma_addr_t table_addr;
__le64 *table_virt;
@@ -104,8 +114,6 @@ struct ipa {
void *zero_virt;
size_t zero_size;
- struct wakeup_source *wakeup_source;
-
/* Bit masks indicating endpoint state */
u32 available; /* supported by hardware */
u32 filter_map;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 398f2e47043d..a2c0fde05819 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018-2020 Linaro Ltd.
*/
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -51,7 +51,7 @@
* @config_path: Configuration space interconnect
*/
struct ipa_clock {
- atomic_t count;
+ refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
struct icc_path *memory_path;
@@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa)
*/
bool ipa_clock_get_additional(struct ipa *ipa)
{
- return !!atomic_inc_not_zero(&ipa->clock->count);
+ return refcount_inc_not_zero(&ipa->clock->count);
}
/* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate. Otherwise it is checked again
- * under protection of the mutex, and if appropriate the clock (and
- * interconnects) are enabled and suspended endpoints (if any) are resumed
- * before returning.
+ * under protection of the mutex, and if appropriate the IPA clock
+ * is enabled.
*
* Incrementing the reference count is intentionally deferred until
* after the clock is running and endpoints are resumed.
@@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa)
goto out_mutex_unlock;
}
- ipa_endpoint_resume(ipa);
-
- atomic_inc(&clock->count);
+ refcount_set(&clock->count, 1);
out_mutex_unlock:
mutex_unlock(&clock->mutex);
}
-/* Attempt to remove an IPA clock reference. If this represents the last
- * reference, suspend endpoints and disable the clock (and interconnects)
- * under protection of a mutex.
+/* Attempt to remove an IPA clock reference. If this represents the
+ * last reference, disable the IPA clock under protection of the mutex.
*/
void ipa_clock_put(struct ipa *ipa)
{
struct ipa_clock *clock = ipa->clock;
/* If this is not the last reference there's nothing more to do */
- if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+ if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
return;
- ipa_endpoint_suspend(ipa);
-
ipa_clock_disable(ipa);
mutex_unlock(&clock->mutex);
@@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
goto err_kfree;
mutex_init(&clock->mutex);
- atomic_set(&clock->count, 0);
+ refcount_set(&clock->count, 0);
return clock;
@@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
{
struct clk *clk = clock->core;
- WARN_ON(atomic_read(&clock->count) != 0);
+ WARN_ON(refcount_read(&clock->count) != 0);
mutex_destroy(&clock->mutex);
ipa_interconnect_exit(clock);
kfree(clock);
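/* The refcount_t conversion keeps the get/put shape while adding
 * saturation and underflow warnings. Core idiom used above (sketch):
 *
 *   if (!refcount_inc_not_zero(&clock->count)) {    fast path: 0 == off
 *           mutex_lock(&clock->mutex);
 *           ipa_clock_enable(ipa);
 *           refcount_set(&clock->count, 1);         publish first ref
 *           mutex_unlock(&clock->mutex);
 *   }
 *
 *   if (refcount_dec_and_mutex_lock(&clock->count, &clock->mutex)) {
 *           ipa_clock_disable(ipa);                 last ref dropped
 *           mutex_unlock(&clock->mutex);
 *   }
 */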
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 90353987c45f..cc1ea28f7bc2 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
goto err_kfree;
}
+ ret = enable_irq_wake(irq);
+ if (ret) {
+ dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+ goto err_free_irq;
+ }
+
return interrupt;
+err_free_irq:
+ free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
@@ -248,6 +256,12 @@ err_kfree:
/* Tear down the IPA interrupt framework */
void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
{
+ struct device *dev = &interrupt->ipa->pdev->dev;
+ int ret;
+
+ ret = disable_irq_wake(interrupt->irq);
+ if (ret)
+ dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
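/* Wake-capable IRQ ownership moves from gsi.c into this interrupt layer;
 * the pairing preserved by the error and teardown paths is (sketch):
 *
 *   request_irq(irq, ...);
 *   enable_irq_wake(irq);         arm as a wakeup source
 *   ...
 *   disable_irq_wake(irq);        must precede free_irq()
 *   free_irq(irq, ...);
 */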
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 1fdfec41e442..1044758b501d 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -75,17 +75,19 @@
* @ipa: IPA pointer
* @irq_id: IPA interrupt type (unused)
*
- * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
- * IPA interrupt.
+ * If an RX endpoint is in suspend state, and the IPA has a packet
+ * destined for that endpoint, the IPA generates a SUSPEND interrupt
+ * to inform the AP that it should resume the endpoint. If we get
+ * one of these interrupts we just resume everything.
*/
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
- /* Take a single clock reference to prevent suspend. All
- * endpoints will be resumed as a result. This reference will
- * be dropped when we get a power management suspend request.
+ /* Just report the event, and let system resume handle the rest.
+ * More than one endpoint could signal this; if so, ignore
+ * all but the first.
*/
- if (!atomic_xchg(&ipa->suspend_ref, 1))
- ipa_clock_get(ipa);
+ if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
+ pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt);
@@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
+ struct device *dev = &ipa->pdev->dev;
int ret;
/* Setup for IPA v3.5.1 has some slight differences */
@@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa)
ipa_uc_setup(ipa);
+ ret = device_init_wakeup(dev, true);
+ if (ret)
+ goto err_uc_teardown;
+
ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other
@@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa)
ipa->setup_complete = true;
- dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+ dev_info(dev, "IPA driver setup completed successfully\n");
return 0;
@@ -173,6 +180,8 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
+ (void)device_init_wakeup(dev, false);
+err_uc_teardown:
ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt);
@@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
+ (void)device_init_wakeup(&ipa->pdev->dev, false);
ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt);
@@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
* is held after initialization completes, and won't get dropped
* unless/until a system suspend request arrives.
*/
- atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa);
ipa_hardware_config(ipa);
@@ -544,7 +553,6 @@ err_endpoint_deconfig:
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
return ret;
}
@@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_endpoint_deconfig(ipa);
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
}
static int ipa_firmware_load(struct device *dev)
@@ -709,7 +716,6 @@ static void ipa_validate_build(void)
*/
static int ipa_probe(struct platform_device *pdev)
{
- struct wakeup_source *wakeup_source;
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
@@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev)
goto err_clock_exit;
}
- /* Create a wakeup source. */
- wakeup_source = wakeup_source_register(dev, "ipa");
- if (!wakeup_source) {
- /* The most likely reason for failure is memory exhaustion */
- ret = -ENOMEM;
- goto err_clock_exit;
- }
-
/* Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) {
ret = -ENOMEM;
- goto err_wakeup_source_unregister;
+ goto err_clock_exit;
}
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
ipa->modem_rproc = rproc;
ipa->clock = clock;
- atomic_set(&ipa->suspend_ref, 0);
- ipa->wakeup_source = wakeup_source;
ipa->version = data->version;
ret = ipa_reg_init(ipa);
@@ -857,8 +853,6 @@ err_reg_exit:
ipa_reg_exit(ipa);
err_kfree_ipa:
kfree(ipa);
-err_wakeup_source_unregister:
- wakeup_source_unregister(wakeup_source);
err_clock_exit:
ipa_clock_exit(clock);
err_rproc_put:
@@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
- struct wakeup_source *wakeup_source;
int ret;
- wakeup_source = ipa->wakeup_source;
-
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
if (ret)
@@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_mem_exit(ipa);
ipa_reg_exit(ipa);
kfree(ipa);
- wakeup_source_unregister(wakeup_source);
ipa_clock_exit(clock);
rproc_put(rproc);
@@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev)
* Return: Always returns zero
*
* Called by the PM framework when a system suspend operation is invoked.
+ * Suspends endpoints and releases the clock reference held to keep
+ * the IPA clock running until this point.
*/
static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ /* When a suspended RX endpoint has a packet ready to receive, we
+ * get an IPA SUSPEND interrupt. We trigger a system resume in
+ * that case, but only on the first such interrupt since suspend.
+ */
+ __clear_bit(IPA_FLAG_RESUMED, ipa->flags);
+
+ ipa_endpoint_suspend(ipa);
+
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
return 0;
}
@@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev)
* Return: Always returns 0
*
* Called by the PM framework when a system resume operation is invoked.
+ * Takes an IPA clock reference to keep the clock running until suspend,
+ * and resumes endpoints.
*/
static int ipa_resume(struct device *dev)
{
@@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev)
/* This clock reference will keep the IPA out of suspend
* until we get a power management suspend request.
*/
- atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa);
+ ipa_endpoint_resume(ipa);
+
return 0;
}
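Context for the wakeup changes above: the driver stops managing its own wakeup_source and instead lets the PM core attach one to the device via device_init_wakeup(). A minimal sketch of that pairing, with illustrative function names (not the IPA driver's actual code):

	#include <linux/device.h>
	#include <linux/pm_wakeup.h>

	static int example_setup(struct device *dev)
	{
		int ret;

		/* Registers and enables a wakeup source attached to dev */
		ret = device_init_wakeup(dev, true);
		if (ret)
			return ret;

		return 0;
	}

	static void example_teardown(struct device *dev)
	{
		/* Pairs with the enable above; frees the attached wakeup source */
		(void)device_init_wakeup(dev, false);
	}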
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 2098ca2f2c90..b3790aa952a1 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -521,7 +521,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
- u32_replace_bits(val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -573,7 +573,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
- u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+ u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
iowrite32(val, ipa->reg_virt + offset);
}
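The two one-line hunks above are a behavioral fix, not a spelling change: u32_replace_bits() returns the updated word and leaves its argument untouched, so the old calls computed the masked value and then discarded it, writing the unmodified register contents back. u32p_replace_bits() updates the word through a pointer. A sketch of the difference, using an illustrative FOO_MASK:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define FOO_MASK	GENMASK(7, 4)

	static u32 example(u32 val)
	{
		u32_replace_bits(val, 0, FOO_MASK);		/* buggy: result discarded */

		val = u32_replace_bits(val, 0, FOO_MASK);	/* correct */
		u32p_replace_bits(&val, 0, FOO_MASK);		/* also correct */

		return val;
	}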
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index 1299880dfe74..840727cc9499 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -138,6 +138,7 @@ config MDIO_OCTEON
depends on (64BIT && OF_MDIO) || COMPILE_TEST
depends on HAS_IOMEM
select MDIO_CAVIUM
+ select MDIO_DEVRES
help
This module provides a driver for the Octeon and ThunderX MDIO
buses. It is required by the Octeon and ThunderX ethernet device
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 1ce81ff2f41d..25c25ea6da66 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -12,6 +12,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
+#define MDIO_MODE_REG 0x40
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -20,9 +21,15 @@
#define MDIO_CMD_ACCESS_START BIT(8)
#define MDIO_CMD_ACCESS_CODE_READ 0
#define MDIO_CMD_ACCESS_CODE_WRITE 1
+#define MDIO_CMD_ACCESS_CODE_C45_ADDR 0
+#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
+#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-#define ipq4019_MDIO_TIMEOUT 10000
-#define ipq4019_MDIO_SLEEP 10
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+
+#define IPQ4019_MDIO_TIMEOUT 10000
+#define IPQ4019_MDIO_SLEEP 10
struct ipq4019_mdio_data {
void __iomem *membase;
@@ -35,25 +42,50 @@ static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
return readl_poll_timeout(priv->membase + MDIO_CMD_REG, busy,
(busy & MDIO_CMD_ACCESS_BUSY) == 0,
- ipq4019_MDIO_SLEEP, ipq4019_MDIO_TIMEOUT);
+ IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT);
}
static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
unsigned int cmd;
- /* Reject clause 45 */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ /* Clause 45 support */
+ if (regnum & MII_ADDR_C45) {
+ unsigned int mmd = (regnum >> 16) & 0x1F;
+ unsigned int reg = regnum & 0xFFFF;
+
+ /* Enter Clause 45 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data |= MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+ } else {
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+ }
/* issue read command */
writel(cmd, priv->membase + MDIO_CMD_REG);
@@ -62,6 +94,15 @@ static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
+ if (regnum & MII_ADDR_C45) {
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
+
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+ }
+
/* Read and return data */
return readl(priv->membase + MDIO_DATA_READ_REG);
}
@@ -70,23 +111,57 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
unsigned int cmd;
- /* Reject clause 45 */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ /* Clause 45 support */
+ if (regnum & MII_ADDR_C45) {
+ unsigned int mmd = (regnum >> 16) & 0x1F;
+ unsigned int reg = regnum & 0xFFFF;
+
+ /* Enter Clause 45 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data |= MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+ } else {
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ }
/* issue write data */
writel(value, priv->membase + MDIO_DATA_WRITE_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
/* issue write command */
+ if (regnum & MII_ADDR_C45)
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
+ else
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+
writel(cmd, priv->membase + MDIO_CMD_REG);
/* Wait write complete */
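Both access paths above unpack the same Clause 45 encoding: when MII_ADDR_C45 is set, regnum carries the MMD (device address) in bits 20:16 and the register within that MMD in bits 15:0. A small helper showing the decode (the helper name is illustrative):

	#include <linux/mdio.h>

	static void c45_decode(u32 regnum, unsigned int *mmd, unsigned int *reg)
	{
		*mmd = (regnum >> 16) & 0x1f;	/* MMD / device address */
		*reg = regnum & 0xffff;		/* register within the MMD */
	}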
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 4dfb389dbfd8..ade086eed955 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o fib.o bus.o health.o udp_tunnels.o
+ netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 32f339fedb21..e665efd760f8 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -40,7 +40,9 @@ static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
static int
-nsim_dev_take_snapshot(struct devlink *devlink, struct netlink_ext_ack *extack,
+nsim_dev_take_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data)
{
void *dummy_data;
@@ -68,7 +70,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
devlink = priv_to_devlink(nsim_dev);
- err = nsim_dev_take_snapshot(devlink, NULL, &dummy_data);
+ err = nsim_dev_take_snapshot(devlink, NULL, NULL, &dummy_data);
if (err)
return err;
@@ -768,6 +770,8 @@ static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
component,
NSIM_DEV_FLASH_SIZE,
NSIM_DEV_FLASH_SIZE);
+ devlink_flash_update_timeout_notify(devlink, "Flash select",
+ component, 81);
devlink_flash_update_status_notify(devlink, "Flashing done",
component, 0, 0);
devlink_flash_update_end_notify(devlink);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
new file mode 100644
index 000000000000..7a4c779b4c89
--- /dev/null
+++ b/drivers/net/netdevsim/ethtool.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+#include <linux/random.h>
+
+#include "netdevsim.h"
+
+static void
+nsim_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->ethtool.report_stats_rx)
+ pause_stats->rx_pause_frames = 1;
+ if (ns->ethtool.report_stats_tx)
+ pause_stats->tx_pause_frames = 2;
+}
+
+static void
+nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ pause->autoneg = 0; /* We don't support ksettings, so can't pretend */
+ pause->rx_pause = ns->ethtool.rx;
+ pause->tx_pause = ns->ethtool.tx;
+}
+
+static int
+nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ ns->ethtool.rx = pause->rx_pause;
+ ns->ethtool.tx = pause->tx_pause;
+ return 0;
+}
+
+static const struct ethtool_ops nsim_ethtool_ops = {
+ .get_pause_stats = nsim_get_pause_stats,
+ .get_pauseparam = nsim_get_pauseparam,
+ .set_pauseparam = nsim_set_pauseparam,
+};
+
+void nsim_ethtool_init(struct netdevsim *ns)
+{
+ struct dentry *ethtool, *dir;
+
+ ns->netdev->ethtool_ops = &nsim_ethtool_ops;
+
+ ethtool = debugfs_create_dir("ethtool", ns->nsim_dev->ddir);
+
+ dir = debugfs_create_dir("pause", ethtool);
+ debugfs_create_bool("report_stats_rx", 0600, dir,
+ &ns->ethtool.report_stats_rx);
+ debugfs_create_bool("report_stats_tx", 0600, dir,
+ &ns->ethtool.report_stats_tx);
+}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 97cfb015a50b..7178468302c8 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -301,6 +301,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
dev->netdev_ops = &nsim_netdev_ops;
+ nsim_ethtool_init(ns);
err = nsim_udp_tunnels_info_create(nsim_dev, dev);
if (err)
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 284f7092241d..0c86561e6d8d 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -50,6 +50,13 @@ struct nsim_ipsec {
u32 ok;
};
+struct nsim_ethtool {
+ bool rx;
+ bool tx;
+ bool report_stats_rx;
+ bool report_stats_tx;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
@@ -80,12 +87,16 @@ struct netdevsim {
u32 ports[2][NSIM_UDP_TUNNEL_N_PORTS];
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
+
+ struct nsim_ethtool ethtool;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+void nsim_ethtool_init(struct netdevsim *ns);
+
void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev);
int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
struct net_device *dev);
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index c43d97682083..62bb9272dcb2 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -93,6 +93,9 @@ static void lynx_pcs_get_state(struct phylink_pcs *pcs,
case PHY_INTERFACE_MODE_USXGMII:
lynx_pcs_get_state_usxgmii(lynx->mdio, state);
break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ phylink_mii_c45_pcs_get_state(lynx->mdio, state);
+ break;
default:
break;
}
@@ -172,6 +175,9 @@ static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
break;
case PHY_INTERFACE_MODE_USXGMII:
return lynx_pcs_config_usxgmii(lynx->mdio, mode, advertising);
+ case PHY_INTERFACE_MODE_10GBASER:
+ /* Nothing to do here for 10GBASER */
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 692048d86ab1..15812001b3ff 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -11,6 +11,7 @@
#include "bcm-phy-lib.h"
#include <linux/bitops.h>
#include <linux/brcmphy.h>
+#include <linux/clk.h>
#include <linux/mdio.h>
/* Broadcom BCM7xxx internal PHY registers */
@@ -39,6 +40,7 @@
struct bcm7xxx_phy_priv {
u64 *stats;
+ struct clk *clk;
};
static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
@@ -521,6 +523,7 @@ static void bcm7xxx_28nm_get_phy_stats(struct phy_device *phydev,
static int bcm7xxx_28nm_probe(struct phy_device *phydev)
{
struct bcm7xxx_phy_priv *priv;
+ int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -534,7 +537,30 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
if (!priv->stats)
return -ENOMEM;
- return 0;
+ priv->clk = devm_clk_get_optional(&phydev->mdio.dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Dummy read of a register to work around an issue upon reset where the
+ * internal inverter may not allow the first MDIO transaction to pass
+ * the MDIO management controller, making such reads return 0xffff.
+ * This ensures that any subsequent reads of the PHY will succeed.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ return ret;
+}
+
+static void bcm7xxx_28nm_remove(struct phy_device *phydev)
+{
+ struct bcm7xxx_phy_priv *priv = phydev->priv;
+
+ clk_disable_unprepare(priv->clk);
}
#define BCM7XXX_28NM_GPHY(_oui, _name) \
@@ -552,6 +578,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
}
#define BCM7XXX_28NM_EPHY(_oui, _name) \
@@ -567,6 +594,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
}
#define BCM7XXX_40NM_EPHY(_oui, _name) \
@@ -583,6 +611,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
}
static struct phy_driver bcm7xxx_driver[] = {
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -603,6 +632,7 @@ static struct phy_driver bcm7xxx_driver[] = {
};
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
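The probe/remove additions above follow the standard optional-clock idiom: devm_clk_get_optional() returns NULL rather than an error when the device tree describes no clock, and the clk enable/disable calls are NULL-safe, so the driver needs no special casing. A minimal sketch, with illustrative names:

	#include <linux/clk.h>
	#include <linux/device.h>

	static int example_probe(struct device *dev, struct clk **out)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get_optional(dev, NULL);	/* NULL if no clock given */
		if (IS_ERR(clk))
			return PTR_ERR(clk);		/* e.g. -EPROBE_DEFER */

		ret = clk_prepare_enable(clk);		/* no-op for a NULL clk */
		if (ret)
			return ret;

		*out = clk;
		return 0;
	}

	static void example_remove(struct clk *clk)
	{
		clk_disable_unprepare(clk);		/* also NULL-safe */
	}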
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 732c8bec7452..c162c9551bd1 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -110,9 +110,8 @@
#define DP83822_RX_ER_SHIFT 8
#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
- ADVERTISED_FIBRE | ADVERTISED_BNC | \
- ADVERTISED_Pause | ADVERTISED_Asym_Pause | \
- ADVERTISED_100baseT_Full)
+ ADVERTISED_FIBRE | \
+ ADVERTISED_Pause | ADVERTISED_Asym_Pause)
struct dp83822_private {
bool fx_signal_det_low;
@@ -406,6 +405,14 @@ static int dp83822_config_init(struct phy_device *phydev)
phydev->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->advertising);
/* Auto neg is not supported in fiber mode */
bmcr = phy_read(phydev, MII_BMCR);
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 6b98d74b5102..81899bc99add 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -52,6 +52,10 @@
BMCR_FULLDPLX | \
BMCR_SPEED1000)
+#define MII_DP83869_FIBER_ADVERTISE (ADVERTISED_FIBRE | \
+ ADVERTISED_Pause | \
+ ADVERTISED_Asym_Pause)
+
/* This is the same bit mask as the BMCR so re-use the BMCR default */
#define DP83869_FX_CTRL_DEFAULT MII_DP83869_BMCR_DEFAULT
@@ -118,6 +122,28 @@ struct dp83869_private {
int mode;
};
+static int dp83869_read_status(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ if (phydev->link) {
+ if (dp83869->mode == DP83869_RGMII_100_BASE)
+ phydev->speed = SPEED_100;
+ } else {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
+ }
+
+ return 0;
+}
+
static int dp83869_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83869_ISR);
@@ -295,6 +321,51 @@ static int dp83869_configure_rgmii(struct phy_device *phydev,
return ret;
}
+static int dp83869_configure_fiber(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int bmcr;
+ int ret;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+
+ if (dp83869->mode == DP83869_RGMII_1000_BASE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->supported);
+
+ /* Auto neg is not supported in 100base FX mode */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising);
+
+ if (bmcr & BMCR_ANENABLE) {
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Update advertising from supported */
+ linkmode_or(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ return 0;
+}
+
static int dp83869_configure_mode(struct phy_device *phydev,
struct dp83869_private *dp83869)
{
@@ -384,6 +455,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
break;
case DP83869_RGMII_1000_BASE:
case DP83869_RGMII_100_BASE:
+ ret = dp83869_configure_fiber(phydev, dp83869);
break;
default:
return -EINVAL;
@@ -494,6 +566,7 @@ static struct phy_driver dp83869_driver[] = {
/* IRQ related */
.ack_interrupt = dp83869_ack_interrupt,
.config_intr = dp83869_config_intr,
+ .read_status = dp83869_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
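A note on the linkmode helpers used throughout this file: they operate on DECLARE_BITMAP() arrays indexed by ETHTOOL_LINK_MODE_*_BIT values. The legacy ADVERTISED_* constants are mask values for the old u32 API and must never be passed as bit indices (ADVERTISED_FIBRE is 1 << 10, so using it as an index would address bit 1024). A minimal sketch:

	#include <linux/ethtool.h>
	#include <linux/linkmode.h>

	static void example_linkmodes(void)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(modes);

		linkmode_zero(modes);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, modes);	/* bit index */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, modes);
	}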
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3fe552675dd2..a7f74b3b97af 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1315,6 +1315,19 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_LAN8814,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip INDY Gigabit Quad PHY",
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
@@ -1387,6 +1400,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index ff8e14b01eeb..8d333d3084ed 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -6,9 +6,14 @@
#include <linux/phy.h>
#include <linux/of.h>
+/**
+ * phy_speed_to_str - Return a string representing the PHY link speed
+ *
+ * @speed: Speed of the link
+ */
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 90,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -52,6 +57,11 @@ const char *phy_speed_to_str(int speed)
}
EXPORT_SYMBOL_GPL(phy_speed_to_str);
+/**
+ * phy_duplex_to_str - Return string describing the duplex
+ *
+ * @duplex: Duplex setting to describe
+ */
const char *phy_duplex_to_str(unsigned int duplex)
{
if (duplex == DUPLEX_HALF)
@@ -160,6 +170,8 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 100, FULL, 100baseT_Full ),
PHY_SETTING( 100, FULL, 100baseT1_Full ),
PHY_SETTING( 100, HALF, 100baseT_Half ),
+ PHY_SETTING( 100, HALF, 100baseFX_Half ),
+ PHY_SETTING( 100, FULL, 100baseFX_Full ),
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
@@ -250,6 +262,16 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
return __set_linkmode_max_speed(max_speed, phydev->supported);
}
+/**
+ * phy_set_max_speed - Set the maximum speed the PHY should support
+ *
+ * @phydev: The phy_device struct
+ * @max_speed: Maximum speed
+ *
+ * The PHY might be more capable than the MAC. For example, a Fast Ethernet
+ * MAC might be connected to a 1G PHY. This function allows the MAC to
+ * indicate its maximum speed, and so limit what the PHY will advertise.
+ */
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
int err;
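As the kernel-doc above describes, a MAC driver that cannot keep up with the PHY's top speed caps it before starting the link. A hedged usage sketch (the surrounding driver context is illustrative):

	#include <linux/phy.h>

	static int example_mac_limit(struct phy_device *phydev)
	{
		/* e.g. a Fast Ethernet MAC attached to a gigabit-capable PHY */
		return phy_set_max_speed(phydev, SPEED_100);
	}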
@@ -306,6 +328,16 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+/**
+ * phy_resolve_aneg_pause - Determine pause autoneg results
+ *
+ * @phydev: The phy_device struct
+ *
+ * Once autoneg has completed, the local pause settings can be
+ * resolved. Determine if pause and asymmetric pause should be used
+ * by the MAC.
+ */
void phy_resolve_aneg_pause(struct phy_device *phydev)
{
if (phydev->duplex == DUPLEX_FULL) {
@@ -319,7 +351,7 @@ void phy_resolve_aneg_pause(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
/**
- * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * phy_resolve_aneg_linkmode - resolve the advertisements into PHY settings
* @phydev: The phy_device struct
*
* Resolve our and the link partner advertisements into their corresponding
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 735a806045ac..35525a671400 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -456,7 +456,16 @@ int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_do_ioctl);
-/* same as phy_do_ioctl, but ensures that net_device is running */
+/**
+ * phy_do_ioctl_running - generic ndo_do_ioctl implementation but test first
+ *
+ * @dev: the net_device struct
+ * @ifr: &struct ifreq for socket ioctl's
+ * @cmd: ioctl cmd to execute
+ *
+ * Same as phy_do_ioctl, but ensures that net_device is running before
+ * handling the ioctl.
+ */
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
if (!netif_running(dev))
@@ -466,6 +475,12 @@ int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_do_ioctl_running);
+/**
+ * phy_queue_state_machine - Trigger the state machine to run soon
+ *
+ * @phydev: the phy_device struct
+ * @jiffies: Run the state machine after these jiffies
+ */
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
@@ -473,6 +488,11 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
}
EXPORT_SYMBOL(phy_queue_state_machine);
+/**
+ * phy_trigger_machine - Trigger the state machine to run now
+ *
+ * @phydev: the phy_device struct
+ */
static void phy_trigger_machine(struct phy_device *phydev)
{
phy_queue_state_machine(phydev, 0);
@@ -489,6 +509,12 @@ static void phy_abort_cable_test(struct phy_device *phydev)
phydev_err(phydev, "Error while aborting cable test");
}
+/**
+ * phy_ethtool_get_strings - Get the statistic counter names
+ *
+ * @phydev: the phy_device struct
+ * @data: Where to put the strings
+ */
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
{
if (!phydev->drv)
@@ -502,6 +528,11 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
}
EXPORT_SYMBOL(phy_ethtool_get_strings);
+/**
+ * phy_ethtool_get_sset_count - Get the number of statistic counters
+ *
+ * @phydev: the phy_device struct
+ */
int phy_ethtool_get_sset_count(struct phy_device *phydev)
{
int ret;
@@ -523,6 +554,13 @@ int phy_ethtool_get_sset_count(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_ethtool_get_sset_count);
+/**
+ * phy_ethtool_get_stats - Get the statistic counters
+ *
+ * @phydev: the phy_device struct
+ * @stats: What counters to get
+ * @data: Where to store the counters
+ */
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
@@ -537,6 +575,12 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_ethtool_get_stats);
+/**
+ * phy_start_cable_test - Start a cable test
+ *
+ * @phydev: the phy_device struct
+ * @extack: extack for reporting useful error messages
+ */
int phy_start_cable_test(struct phy_device *phydev,
struct netlink_ext_ack *extack)
{
@@ -600,6 +644,13 @@ out:
}
EXPORT_SYMBOL(phy_start_cable_test);
+/**
+ * phy_start_cable_test_tdr - Start a raw TDR cable test
+ *
+ * @phydev: the phy_device struct
+ * @extack: extack for reporting useful error messages
+ * @config: Configuration of the test to run
+ */
int phy_start_cable_test_tdr(struct phy_device *phydev,
struct netlink_ext_ack *extack,
const struct phy_tdr_config *config)
@@ -996,7 +1047,7 @@ void phy_stop(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
- if (!phy_is_started(phydev)) {
+ if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
WARN(1, "called from state %s\n",
phy_state_to_str(phydev->state));
return;
@@ -1363,6 +1414,12 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
+/**
+ * phy_ethtool_set_wol - Configure Wake On LAN
+ *
+ * @phydev: target phy_device struct
+ * @wol: Configuration requested
+ */
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
if (phydev->drv && phydev->drv->set_wol)
@@ -1372,6 +1429,12 @@ int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
}
EXPORT_SYMBOL(phy_ethtool_set_wol);
+/**
+ * phy_ethtool_get_wol - Get the current Wake On LAN configuration
+ *
+ * @phydev: target phy_device struct
+ * @wol: Store the current configuration here
+ */
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
if (phydev->drv && phydev->drv->get_wol)
@@ -1405,6 +1468,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
+/**
+ * phy_ethtool_nway_reset - Restart auto negotiation
+ * @ndev: Network device to restart autoneg for
+ */
int phy_ethtool_nway_reset(struct net_device *ndev)
{
struct phy_device *phydev = ndev->phydev;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8adfbad0a1e8..5dab6be6fc38 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1143,10 +1143,6 @@ int phy_init_hw(struct phy_device *phydev)
if (ret < 0)
return ret;
- ret = phy_disable_interrupts(phydev);
- if (ret)
- return ret;
-
if (phydev->drv->config_init)
ret = phydev->drv->config_init(phydev);
@@ -1423,6 +1419,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
if (err)
goto error;
+ err = phy_disable_interrupts(phydev);
+ if (err)
+ return err;
+
phy_resume(phydev);
phy_led_triggers_register(phydev);
@@ -1682,7 +1682,8 @@ void phy_detach(struct phy_device *phydev)
phy_led_triggers_unregister(phydev);
- module_put(phydev->mdio.dev.driver->owner);
+ if (phydev->mdio.dev.driver)
+ module_put(phydev->mdio.dev.driver->owner);
/* If the device had no specific driver before (i.e. - it
* was using the generic driver), we unbind the device
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 95dbe5e8e1d8..4bf54cded48a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -27,11 +27,16 @@
#define RTL821x_EXT_PAGE_SELECT 0x1e
#define RTL821x_PAGE_SELECT 0x1f
+#define RTL8211F_PHYCR1 0x18
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
#define RTL8211F_RX_DELAY BIT(3)
+#define RTL8211F_ALDPS_PLL_OFF BIT(1)
+#define RTL8211F_ALDPS_ENABLE BIT(2)
+#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+
#define RTL8211E_TX_DELAY BIT(1)
#define RTL8211E_RX_DELAY BIT(2)
#define RTL8211E_MODE_MII_GMII BIT(3)
@@ -178,8 +183,12 @@ static int rtl8211f_config_init(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
+ u16 val;
int ret;
+ val = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_XTAL_OFF;
+ phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, val, val);
+
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
val_txdly = 0;
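phy_modify_paged_changed() above does a read-modify-write of a paged PHY register and only issues the write when the value actually changes; passing the same value for mask and set, as the RTL8211F hunk does, simply sets those bits. A sketch of both directions, reusing the macros from this patch:

	#include <linux/phy.h>

	static void example_aldps(struct phy_device *phydev)
	{
		u16 bits = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF;

		/* Set the bits: mask == set */
		phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, bits, bits);

		/* Clear the same bits: set == 0 */
		phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, bits, 0);
	}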
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index bd9c07888ebb..6fa7a009a24a 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -201,7 +201,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
dev_dbg(&info->control->dev,
"rndis response error, code %d\n", retval);
}
- msleep(20);
+ msleep(40);
}
dev_dbg(&info->control->dev, "rndis response timeout\n");
return -ETIMEDOUT;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 444130655d8e..cb5898f7d68c 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -118,6 +118,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb_put(skb, sizeof(struct cisco_packet));
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
+ skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 9acad651ea1f..d6cfd51613ed 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -433,6 +433,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
if (pvc->state.fecn) /* TX Congestion counter */
dev->stats.tx_compressed++;
skb->dev = pvc->frad;
+ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
}
@@ -555,6 +557,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
skb_put(skb, i);
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
+ skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
@@ -1041,7 +1044,7 @@ static void pvc_setup(struct net_device *dev)
{
dev->type = ARPHRD_DLCI;
dev->flags = IFF_POINTOPOINT;
- dev->hard_header_len = 10;
+ dev->hard_header_len = 0;
dev->addr_len = 2;
netif_keep_dst(dev);
}
@@ -1093,6 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->mtu = HDLC_MAX_MTU;
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
+ dev->needed_headroom = 10;
dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 48ced3912576..64f855651336 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -251,6 +251,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
+ skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
skb_queue_tail(&tx_queue, skb);
}
@@ -383,11 +384,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
}
for (opt = data; len; len -= opt[1], opt += opt[1]) {
- if (len < 2 || len < opt[1]) {
- dev->stats.rx_errors++;
- kfree(out);
- return; /* bad packet, drop silently */
- }
+ if (len < 2 || opt[1] < 2 || len < opt[1])
+ goto err_out;
if (pid == PID_LCP)
switch (opt[0]) {
@@ -395,6 +393,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
continue; /* MRU always OK and > 1500 bytes? */
case LCP_OPTION_ACCM: /* async control character map */
+ if (opt[1] < sizeof(valid_accm))
+ goto err_out;
if (!memcmp(opt, valid_accm,
sizeof(valid_accm)))
continue;
@@ -406,6 +406,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
}
break;
case LCP_OPTION_MAGIC:
+ if (len < 6)
+ goto err_out;
if (opt[1] != 6 || (!opt[2] && !opt[3] &&
!opt[4] && !opt[5]))
break; /* reject invalid magic number */
@@ -424,6 +426,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
kfree(out);
+ return;
+
+err_out:
+ dev->stats.rx_errors++;
+ kfree(out);
}
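The loop guard added above matters because opt[1] is the option's total length including its two header bytes: a crafted length of 0 or 1 would keep opt += opt[1] from ever advancing past the header, looping forever or re-reading it. A standalone sketch of the same validation, with illustrative names:

	#include <linux/types.h>

	/* Returns true if every option in buf[0..len) is well-formed */
	static bool options_ok(const u8 *buf, unsigned int len)
	{
		const u8 *opt;

		for (opt = buf; len; len -= opt[1], opt += opt[1]) {
			if (len < 2 || opt[1] < 2 || len < opt[1])
				return false;	/* truncated or bad length byte */
			/* opt[0] = type, opt[1] = total length, payload follows */
		}
		return true;
	}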
static int ppp_rx(struct sk_buff *skb)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 732a6c1851f5..b6be2454b8bd 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -198,8 +198,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
struct net_device *dev;
int size = skb->len;
- skb->protocol = htons(ETH_P_X25);
-
ptr = skb_push(skb, 2);
*ptr++ = size % 256;
@@ -210,6 +208,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
skb->dev = dev = lapbeth->ethdev;
+ skb->protocol = htons(ETH_P_DEC);
+
skb_reset_network_header(skb);
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5a7cf8bf9d0d..ab56a5e6447a 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -202,8 +202,7 @@ static void x25_asy_bump(struct x25_asy *sl)
return;
}
skb_put_data(skb, sl->rbuff, count);
- skb->protocol = x25_type_trans(skb, sl->dev);
- err = lapb_data_received(skb->dev, skb);
+ err = lapb_data_received(sl->dev, skb);
if (err != LAPB_OK) {
kfree_skb(skb);
printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index 3dd3b76790d0..c0cfd9b36c0b 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_handshake *handshake)
void wg_noise_handshake_clear(struct noise_handshake *handshake)
{
+ down_write(&handshake->lock);
wg_index_hashtable_remove(
handshake->entry.peer->device->index_hashtable,
&handshake->entry);
- down_write(&handshake->lock);
handshake_zero(handshake);
up_write(&handshake->lock);
- wg_index_hashtable_remove(
- handshake->entry.peer->device->index_hashtable,
- &handshake->entry);
}
static struct noise_keypair *keypair_create(struct wg_peer *peer)
diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c
index e4deb331476b..f2783aa7a88f 100644
--- a/drivers/net/wireguard/peerlookup.c
+++ b/drivers/net/wireguard/peerlookup.c
@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
struct index_hashtable_entry *old,
struct index_hashtable_entry *new)
{
- if (unlikely(hlist_unhashed(&old->index_hash)))
- return false;
+ bool ret;
+
spin_lock_bh(&table->lock);
+ ret = !hlist_unhashed(&old->index_hash);
+ if (unlikely(!ret))
+ goto out;
+
new->index = old->index;
hlist_replace_rcu(&old->index_hash, &new->index_hash);
@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct index_hashtable *table,
* simply gets dropped, which isn't terrible.
*/
INIT_HLIST_NODE(&old->index_hash);
+out:
spin_unlock_bh(&table->lock);
- return true;
+ return ret;
}
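The reshuffle above closes a check-then-act race: testing hlist_unhashed() before taking table->lock left a window in which a concurrent wg_index_hashtable_remove() could unhash the entry between the check and the replace. The safe shape, reduced to its essentials:

	spin_lock_bh(&table->lock);
	ret = !hlist_unhashed(&old->index_hash);	/* re-check under the lock */
	if (ret)
		hlist_replace_rcu(&old->index_hash, &new->index_hash);
	spin_unlock_bh(&table->lock);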
void wg_index_hashtable_remove(struct index_hashtable *table,
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 8625465500de..98209cccd639 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -19,7 +19,7 @@ ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
return HAL_TCL_ENCAP_TYPE_RAW;
- if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
@@ -99,7 +99,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 86a1b984859f..e0cac9b61af8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3937,7 +3937,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
@@ -4064,7 +4064,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
bool is_prb_rsp;
int ret;
- if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
@@ -4350,6 +4350,37 @@ static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
return ret;
}
+static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -4359,7 +4390,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
- int hw_encap = 0;
u16 nss;
int i;
int ret;
@@ -4453,32 +4483,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
- if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_AP:
- hw_encap = 1;
- break;
- default:
- break;
- }
-
- if (ieee80211_set_hw_80211_encap(vif, hw_encap))
- param_value = ATH11K_HW_TXRX_ETHERNET;
- else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
- param_value = ATH11K_HW_TXRX_RAW;
- else
- param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
-
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, param_value);
- if (ret) {
- ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
- arvif->vdev_id, ret);
- goto err_vdev_del;
- }
+ ath11k_mac_op_update_vif_offload(hw, vif);
nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -5841,6 +5846,7 @@ static const struct ieee80211_ops ath11k_ops = {
.reconfig_complete = ath11k_mac_op_reconfig_complete,
.add_interface = ath11k_mac_op_add_interface,
.remove_interface = ath11k_mac_op_remove_interface,
+ .update_vif_offload = ath11k_mac_op_update_vif_offload,
.config = ath11k_mac_op_config,
.bss_info_changed = ath11k_mac_op_bss_info_changed,
.configure_filter = ath11k_mac_op_configure_filter,
@@ -6149,6 +6155,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
if (ht_cap & WMI_HT_CAP_ENABLED) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 991ca9e32be3..d4989e0cd7be 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -664,9 +664,15 @@ static void pkt_align(struct sk_buff *p, int len, int align)
/* To check if there's window offered */
static bool data_ok(struct brcmf_sdio *bus)
{
- /* Reserve TXCTL_CREDITS credits for txctl */
- return (bus->tx_max - bus->tx_seq) > TXCTL_CREDITS &&
- ((bus->tx_max - bus->tx_seq) & 0x80) == 0;
+ u8 tx_rsv = 0;
+
+ /* Reserve TXCTL_CREDITS credits for txctl when it is ready to send */
+ if (bus->ctrl_frame_stat)
+ tx_rsv = TXCTL_CREDITS;
+
+ return (bus->tx_max - bus->tx_seq - tx_rsv) != 0 &&
+ ((bus->tx_max - bus->tx_seq - tx_rsv) & 0x80) == 0;
}
/* To check if there's window offered */
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index 1f02c5058aed..470d669c7f14 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -954,7 +954,7 @@ struct mwifiex_tkip_param {
struct mwifiex_aes_param {
u8 pn[WPA_PN_SIZE];
__le16 key_len;
- u8 key[WLAN_KEY_LEN_CCMP];
+ u8 key[WLAN_KEY_LEN_CCMP_256];
} __packed;
struct mwifiex_wapi_param {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 962d8bfe6f10..119ccacd1fcc 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -619,7 +619,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
key_v2 = &resp->params.key_material_v2;
len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
- if (len > WLAN_KEY_LEN_CCMP)
+ if (len > sizeof(key_v2->key_param_set.key_params.aes.key))
return -EINVAL;
if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
@@ -635,7 +635,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
return 0;
memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
- WLAN_KEY_LEN_CCMP);
+ sizeof(key_v2->key_param_set.key_params.aes.key));
priv->aes_key_v2.key_param_set.key_params.aes.key_len =
cpu_to_le16(len);
memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 8de9bba384f4..31b40fb83f6c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -2150,7 +2150,8 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name)
sizeof(dev->mt76.hw->wiphy->fw_version),
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
- if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
+ if (!is_mt7615(&dev->mt76) &&
+ !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) {
dev->fw_ver = MT7615_FIRMWARE_V2;
dev->mcu_ops = &sta_update_ops;
} else {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 317ed0e93191..0232b66acb4f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -709,8 +709,12 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
mt7915_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb)
- dev_kfree_skb_any(txwi->skb);
+ if (txwi->skb) {
+ struct ieee80211_hw *hw;
+
+ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
+ ieee80211_free_txskb(hw, txwi->skb);
+ }
mt76_put_txwi(&dev->mt76, txwi);
}
spin_unlock_bh(&dev->token_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index c382e5ff2ca4..6f159d99a596 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -846,7 +846,7 @@ mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK))
mt7915_tx_status(sta, hw, info, NULL);
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
}
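Both mt7915 hunks swap dev_kfree_skb() for ieee80211_free_txskb() because skbs handed to the driver by mac80211's tx path still carry pending status expectations; freeing them directly orphans that state. The rule of thumb, sketched:

	#include <net/mac80211.h>

	static void example_drop_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
	{
		/* Routes the skb back through mac80211 so tx status is reported */
		ieee80211_free_txskb(hw, skb);

		/* dev_kfree_skb(skb) here would leak the pending tx status */
	}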
void mt7915_txp_skb_unmap(struct mt76_dev *dev,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 9acd8a41ea61..f2609d5b6bf7 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -458,7 +458,6 @@ enum wl1271_cmd_key_type {
KEY_TKIP = 2,
KEY_AES = 3,
KEY_GEM = 4,
- KEY_IGTK = 5,
};
struct wl1271_cmd_set_keys {
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b55b8e0dab10..6863fd552d5e 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3558,9 +3558,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- key_type = KEY_IGTK;
- break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -6230,7 +6227,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WL1271_CIPHER_SUITE_GEM,
- WLAN_CIPHER_SUITE_AES_CMAC,
};
/* The tx descriptor buffer */
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 3ed9786b88d8..a44d49d63968 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -73,6 +73,7 @@ config NVME_TCP
depends on INET
depends on BLK_DEV_NVME
select NVME_FABRICS
+ select CRYPTO
select CRYPTO_CRC32C
help
This provides support for the NVMe over Fabrics protocol using
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index d543bc1747fd..2f249052c95d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3261,10 +3261,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
return -EWOULDBLOCK;
}
+ nvme_get_ctrl(ctrl);
+ if (!try_module_get(ctrl->ops->module)) {
+ nvme_put_ctrl(ctrl);
+ return -EINVAL;
+ }
+
file->private_data = ctrl;
return 0;
}
+static int nvme_dev_release(struct inode *inode, struct file *file)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(inode->i_cdev, struct nvme_ctrl, cdev);
+
+ module_put(ctrl->ops->module);
+ nvme_put_ctrl(ctrl);
+ return 0;
+}
+
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
{
struct nvme_ns *ns;
@@ -3327,6 +3341,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations nvme_dev_fops = {
.owner = THIS_MODULE,
.open = nvme_dev_open,
+ .release = nvme_dev_release,
.unlocked_ioctl = nvme_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
@@ -3525,10 +3540,6 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- /* Can't delete non-created controllers */
- if (!ctrl->created)
- return -EBUSY;
-
if (device_remove_file_self(dev, attr))
nvme_delete_ctrl_sync(ctrl);
return count;
@@ -4403,7 +4414,6 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
- ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
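The open/release pair added above pins both the controller object and its transport module for as long as the char device stays open, so neither can go away under an ioctl. The general pattern, sketched with hypothetical names:

	#include <linux/kref.h>
	#include <linux/module.h>

	struct my_ops {
		struct module *module;
	};

	struct my_ctrl {
		struct kref ref;
		const struct my_ops *ops;
	};

	static void my_ctrl_free(struct kref *ref)
	{
		/* container_of(ref, struct my_ctrl, ref) and free it */
	}

	static int example_open(struct my_ctrl *ctrl)
	{
		kref_get(&ctrl->ref);			/* object reference first */
		if (!try_module_get(ctrl->ops->module)) {
			kref_put(&ctrl->ref, my_ctrl_free);	/* undo on failure */
			return -ENODEV;
		}
		return 0;
	}

	static void example_release(struct my_ctrl *ctrl)
	{
		module_put(ctrl->ops->module);		/* reverse order of open */
		kref_put(&ctrl->ref, my_ctrl_free);
	}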
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 32f61fc5f4c5..8575724734e0 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
struct nvme_request *req = nvme_req(rq);
/*
- * If we are in some state of setup or teardown only allow
- * internally generated commands.
+ * Currently we have a problem sending passthru commands on the
+ * admin_q if the controller is not LIVE, because we can't make sure
+ * that they go out only after the admin connect, controller enable
+ * and/or other commands in the initialization sequence. Until the
+ * controller is LIVE, fail with BLK_STS_RESOURCE so that they get
+ * rescheduled.
*/
- if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
+ if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
return false;
/*
@@ -577,7 +581,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
*/
switch (ctrl->state) {
case NVME_CTRL_CONNECTING:
- if (nvme_is_fabrics(req->cmd) &&
+ if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
return true;
break;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a7f474ddfff7..e8ef42b9d50c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2160,6 +2160,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
int i;
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
__nvme_fc_exit_request(ctrl, aen_op);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2910f6caab7d..9fd45ff656da 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -307,7 +307,6 @@ struct nvme_ctrl {
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
- bool created;
#ifdef CONFIG_NVME_MULTIPATH
/* asymmetric namespace access: */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 899d2f4d7ab6..08c5a9b438f3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3153,7 +3153,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_MEDIUM_PRIO_SQ |
- NVME_QUIRK_NO_TEMP_THRESH_CHANGE },
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
+ NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 8e5ffe2f117d..9e378d0a0c01 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -835,6 +835,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ctrl->async_event_sqe.data = NULL;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 16851ae3bddf..8f4f29f18b8c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1596,6 +1596,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+ cancel_work_sync(&ctrl->async_event_work);
nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
}
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 8bd7f656e240..dacfa7435d0b 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -517,6 +517,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
subsys->ver = NVME_VS(1, 2, 1);
}
+ __module_get(subsys->passthru_ctrl->ops->module);
mutex_unlock(&subsys->lock);
return 0;
@@ -531,6 +532,7 @@ static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
if (subsys->passthru_ctrl) {
xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+ module_put(subsys->passthru_ctrl->ops->module);
nvme_put_ctrl(subsys->passthru_ctrl);
}
subsys->passthru_ctrl = NULL;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index cb32d7ef4938..4daf94bb56a5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -338,6 +338,29 @@ unregister:
EXPORT_SYMBOL(of_mdiobus_register);
/**
+ * of_mdio_find_device - Given a device tree node, find the mdio_device
+ * @np: pointer to the mdio_device's device tree node
+ *
+ * If successful, returns a pointer to the mdio_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ * The caller should call put_device() on the mdio_device when done with it.
+ */
+struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ struct device *d;
+
+ if (!np)
+ return NULL;
+
+ d = bus_find_device_by_of_node(&mdio_bus_type, np);
+ if (!d)
+ return NULL;
+
+ return to_mdio_device(d);
+}
+EXPORT_SYMBOL(of_mdio_find_device);
+
+/**
* of_phy_find_device - Give a PHY node, find the phy_device
* @phy_np: Pointer to the phy's device tree node
*
@@ -346,19 +369,16 @@ EXPORT_SYMBOL(of_mdiobus_register);
*/
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
- struct device *d;
struct mdio_device *mdiodev;
- if (!phy_np)
+ mdiodev = of_mdio_find_device(phy_np);
+ if (!mdiodev)
return NULL;
- d = bus_find_device_by_of_node(&mdio_bus_type, phy_np);
- if (d) {
- mdiodev = to_mdio_device(d);
- if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
- return to_phy_device(d);
- put_device(d);
- }
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(&mdiodev->dev);
+
+ put_device(&mdiodev->dev);
return NULL;
}
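Per the kernel-doc above, a successful of_mdio_find_device() hands the caller a reference on the embedded struct device. A hedged usage sketch (the calling function is illustrative):

	#include <linux/of_mdio.h>

	static int example_lookup(struct device_node *np)
	{
		struct mdio_device *mdiodev;

		mdiodev = of_mdio_find_device(np);
		if (!mdiodev)
			return -ENODEV;

		/* ... use mdiodev ... */

		put_device(&mdiodev->dev);	/* drop the lookup's reference */
		return 0;
	}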
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 71f257b4a7f5..9061ece7ff6a 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -505,9 +505,9 @@ static int qcom_ipq806x_usb_phy_probe(struct platform_device *pdev)
size = resource_size(res);
phy_dwc3->base = devm_ioremap(phy_dwc3->dev, res->start, size);
- if (IS_ERR(phy_dwc3->base)) {
+ if (!phy_dwc3->base) {
dev_err(phy_dwc3->dev, "failed to map reg\n");
- return PTR_ERR(phy_dwc3->base);
+ return -ENOMEM;
}
phy_dwc3->ref_clk = devm_clk_get(phy_dwc3->dev, "ref");
@@ -557,7 +557,6 @@ static struct platform_driver qcom_ipq806x_usb_phy_driver = {
.probe = qcom_ipq806x_usb_phy_probe,
.driver = {
.name = "qcom-ipq806x-usb-phy",
- .owner = THIS_MODULE,
.of_match_table = qcom_ipq806x_usb_phy_table,
},
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 562053ce9455..6e6f992a9524 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -604,8 +604,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
@@ -631,7 +631,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
@@ -640,7 +639,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
@@ -648,6 +646,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
@@ -658,7 +658,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
@@ -2046,6 +2045,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
};
+static const char * const ipq8074_pciephy_clk_l[] = {
+ "aux", "cfg_ahb",
+};
/* list of resets */
static const char * const ipq8074_pciephy_reset_l[] = {
"phy", "common",
@@ -2063,8 +2065,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
.pcs_tbl = ipq8074_pcie_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
- .clk_list = NULL,
- .num_clks = 0,
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 4277f592684b..904b80ab9009 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -77,6 +77,8 @@
#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
/* Only for QMP V2 PHY - TX registers */
+#define QSERDES_TX_EMP_POST1_LVL 0x018
+#define QSERDES_TX_SLEW_CNTL 0x040
#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
#define QSERDES_TX_DEBUG_BUS_SEL 0x064
#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index cb2dd3230fa7..507f79d14adb 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -22,10 +22,15 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of_platform.h>
+#include <linux/sys_soc.h>
#define USB2PHY_ANA_CONFIG1 0x4c
#define USB2PHY_DISCON_BYP_LATCH BIT(31)
+#define USB2PHY_CHRG_DET 0x14
+#define USB2PHY_CHRG_DET_USE_CHG_DET_REG BIT(29)
+#define USB2PHY_CHRG_DET_DIS_CHG_DET BIT(28)
+
/* SoC Specific USB2_OTG register definitions */
#define AM654_USB2_OTG_PD BIT(8)
#define AM654_USB2_VBUS_DET_EN BIT(5)
@@ -43,6 +48,7 @@
#define OMAP_USB2_HAS_START_SRP BIT(0)
#define OMAP_USB2_HAS_SET_VBUS BIT(1)
#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT BIT(2)
+#define OMAP_USB2_DISABLE_CHRG_DET BIT(3)
struct omap_usb {
struct usb_phy phy;
@@ -236,6 +242,13 @@ static int omap_usb_init(struct phy *x)
omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
}
+ if (phy->flags & OMAP_USB2_DISABLE_CHRG_DET) {
+ val = omap_usb_readl(phy->phy_base, USB2PHY_CHRG_DET);
+ val |= USB2PHY_CHRG_DET_USE_CHG_DET_REG |
+ USB2PHY_CHRG_DET_DIS_CHG_DET;
+ omap_usb_writel(phy->phy_base, USB2PHY_CHRG_DET, val);
+ }
+
return 0;
}
@@ -329,6 +342,26 @@ static const struct of_device_id omap_usb2_id_table[] = {
};
MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
+static void omap_usb2_init_errata(struct omap_usb *phy)
+{
+ static const struct soc_device_attribute am65x_sr10_soc_devices[] = {
+ { .family = "AM65X", .revision = "SR1.0" },
+ { /* sentinel */ }
+ };
+
+ /*
+ * Errata i2075: USB2PHY: USB2PHY Charger Detect is Enabled by
+ * Default Without VBUS Presence.
+ *
+ * AM654x SR1.0 has a silicon bug due to which D+ is pulled high after
+ * POR, which could cause enumeration failure with some USB hubs.
+ * Disabling the USB2_PHY Charger Detect function will put D+
+ * into the normal state.
+ */
+ if (soc_device_match(am65x_sr10_soc_devices))
+ phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
+}
+
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
@@ -366,14 +399,14 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->mask = phy_data->mask;
phy->power_on = phy_data->power_on;
phy->power_off = phy_data->power_off;
+ phy->flags = phy_data->flags;
- if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(phy->phy_base))
- return PTR_ERR(phy->phy_base);
- phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
- }
+ omap_usb2_init_errata(phy);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->phy_base))
+ return PTR_ERR(phy->phy_base);
phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
"syscon-phy-power");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 6f55aaef8afc..983d75bd5bd1 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1035,7 +1035,11 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index c09c16be0edf..beb5f74944cd 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -72,6 +72,10 @@ static void set_fipers(struct ptp_qoriq *ptp_qoriq)
set_alarm(ptp_qoriq);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
}
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
@@ -366,6 +370,7 @@ static u32 ptp_qoriq_nominal_freq(u32 clk_src)
* "fsl,tmr-add"
* "fsl,tmr-fiper1"
* "fsl,tmr-fiper2"
+ * "fsl,tmr-fiper3" (required only for DPAA2 and ENETC hardware)
* "fsl,max-adj"
*
 * Returns 0 on success
@@ -412,6 +417,7 @@ static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
ptp_qoriq->tmr_add = freq_comp;
ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period;
ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period;
+ ptp_qoriq->tmr_fiper3 = DEFAULT_FIPER3_PERIOD - ptp_qoriq->tclk_period;
/* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
* freq_ratio = reference_clock_freq / nominal_freq
@@ -446,6 +452,10 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
else
ptp_qoriq->extts_fifo_support = false;
+ if (of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
+ of_device_is_compatible(node, "fsl,enetc-ptp"))
+ ptp_qoriq->fiper3_support = true;
+
if (of_property_read_u32(node,
"fsl,tclk-period", &ptp_qoriq->tclk_period) ||
of_property_read_u32(node,
@@ -457,7 +467,10 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
of_property_read_u32(node,
"fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) ||
of_property_read_u32(node,
- "fsl,max-adj", &ptp_qoriq->caps.max_adj)) {
+ "fsl,max-adj", &ptp_qoriq->caps.max_adj) ||
+ (ptp_qoriq->fiper3_support &&
+ of_property_read_u32(node, "fsl,tmr-fiper3",
+ &ptp_qoriq->tmr_fiper3))) {
pr_warn("device tree node missing required elements, try automatic configuration\n");
if (ptp_qoriq_auto_config(ptp_qoriq, node))
@@ -502,6 +515,11 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->write(&regs->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
+
set_alarm(ptp_qoriq);
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl,
tmr_ctrl|FIPERST|RTPE|TE|FRD);
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index e4c422d806be..b9f8514909bf 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -37,7 +37,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
config RAPIDIO_DMA_ENGINE
bool "DMA Engine support for RapidIO"
depends on RAPIDIO
- select DMADEVICES
+ depends on DMADEVICES
select DMA_ENGINE
help
Say Y here if you want to use DMA Engine framework for RapidIO data
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 3fd359914690..7ff507ec875a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -236,8 +236,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev)
static void regulator_unlock_recursive(struct regulator_dev *rdev,
unsigned int n_coupled)
{
- struct regulator_dev *c_rdev;
- int i;
+ struct regulator_dev *c_rdev, *supply_rdev;
+ int i, supply_n_coupled;
for (i = n_coupled; i > 0; i--) {
c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1];
@@ -245,10 +245,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev,
if (!c_rdev)
continue;
- if (c_rdev->supply && !regulator_supply_is_couple(c_rdev))
- regulator_unlock_recursive(
- c_rdev->supply->rdev,
- c_rdev->coupling_desc.n_coupled);
+ if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) {
+ supply_rdev = c_rdev->supply->rdev;
+ supply_n_coupled = supply_rdev->coupling_desc.n_coupled;
+
+ regulator_unlock_recursive(supply_rdev,
+ supply_n_coupled);
+ }
regulator_unlock(c_rdev);
}
@@ -1461,7 +1464,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
const char *consumer_dev_name,
const char *supply)
{
- struct regulator_map *node;
+ struct regulator_map *node, *new_node;
int has_dev;
if (supply == NULL)
@@ -1472,6 +1475,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
else
has_dev = 0;
+ new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
+ if (new_node == NULL)
+ return -ENOMEM;
+
+ new_node->regulator = rdev;
+ new_node->supply = supply;
+
+ if (has_dev) {
+ new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
+ if (new_node->dev_name == NULL) {
+ kfree(new_node);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&regulator_list_mutex);
list_for_each_entry(node, &regulator_map_list, list) {
if (node->dev_name && consumer_dev_name) {
if (strcmp(node->dev_name, consumer_dev_name) != 0)
@@ -1489,26 +1508,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
node->regulator->desc->name,
supply,
dev_name(&rdev->dev), rdev_get_name(rdev));
- return -EBUSY;
+ goto fail;
}
- node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
- if (node == NULL)
- return -ENOMEM;
-
- node->regulator = rdev;
- node->supply = supply;
-
- if (has_dev) {
- node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
- if (node->dev_name == NULL) {
- kfree(node);
- return -ENOMEM;
- }
- }
+ list_add(&new_node->list, &regulator_map_list);
+ mutex_unlock(&regulator_list_mutex);
- list_add(&node->list, &regulator_map_list);
return 0;
+
+fail:
+ mutex_unlock(&regulator_list_mutex);
+ kfree(new_node->dev_name);
+ kfree(new_node);
+ return -EBUSY;
}
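The reworked set_consumer_device_supply() above follows a common locking pattern: allocate everything before taking regulator_list_mutex, hold the lock only for the duplicate check and the list insertion, and free the pre-allocated node on the failure path. In schematic form, with placeholder names (list_lock, some_list, already_present):

    new = kzalloc(sizeof(*new), GFP_KERNEL);
    if (!new)
        return -ENOMEM;

    mutex_lock(&list_lock);
    if (already_present()) {
        mutex_unlock(&list_lock);
        kfree(new);
        return -EBUSY;
    }
    list_add(&new->list, &some_list);
    mutex_unlock(&list_lock);
    return 0;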
static void unset_regulator_supplies(struct regulator_dev *rdev)
@@ -1580,44 +1592,53 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name)
{
struct regulator *regulator;
- char buf[REG_STR_SIZE];
- int err, size;
+ int err;
+
+ if (dev) {
+ char buf[REG_STR_SIZE];
+ int size;
+
+ size = snprintf(buf, REG_STR_SIZE, "%s-%s",
+ dev->kobj.name, supply_name);
+ if (size >= REG_STR_SIZE)
+ return NULL;
+
+ supply_name = kstrdup(buf, GFP_KERNEL);
+ if (supply_name == NULL)
+ return NULL;
+ } else {
+ supply_name = kstrdup_const(supply_name, GFP_KERNEL);
+ if (supply_name == NULL)
+ return NULL;
+ }
regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
- if (regulator == NULL)
+ if (regulator == NULL) {
+ kfree(supply_name);
return NULL;
+ }
- regulator_lock(rdev);
regulator->rdev = rdev;
+ regulator->supply_name = supply_name;
+
+ regulator_lock(rdev);
list_add(&regulator->list, &rdev->consumer_list);
+ regulator_unlock(rdev);
if (dev) {
regulator->dev = dev;
/* Add a link to the device sysfs entry */
- size = snprintf(buf, REG_STR_SIZE, "%s-%s",
- dev->kobj.name, supply_name);
- if (size >= REG_STR_SIZE)
- goto overflow_err;
-
- regulator->supply_name = kstrdup(buf, GFP_KERNEL);
- if (regulator->supply_name == NULL)
- goto overflow_err;
-
err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- buf);
+ supply_name);
if (err) {
rdev_dbg(rdev, "could not add device link %s err %d\n",
dev->kobj.name, err);
/* non-fatal */
}
- } else {
- regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL);
- if (regulator->supply_name == NULL)
- goto overflow_err;
}
- regulator->debugfs = debugfs_create_dir(regulator->supply_name,
+ regulator->debugfs = debugfs_create_dir(supply_name,
rdev->debugfs);
if (!regulator->debugfs) {
rdev_dbg(rdev, "Failed to create debugfs directory\n");
@@ -1642,13 +1663,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
_regulator_is_enabled(rdev))
regulator->always_on = true;
- regulator_unlock(rdev);
return regulator;
-overflow_err:
- list_del(&regulator->list);
- kfree(regulator);
- regulator_unlock(rdev);
- return NULL;
}
static int _regulator_get_enable_time(struct regulator_dev *rdev)
@@ -2230,10 +2245,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);
static int regulator_ena_gpio_request(struct regulator_dev *rdev,
const struct regulator_config *config)
{
- struct regulator_enable_gpio *pin;
+ struct regulator_enable_gpio *pin, *new_pin;
struct gpio_desc *gpiod;
gpiod = config->ena_gpiod;
+ new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL);
+
+ mutex_lock(&regulator_list_mutex);
list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
if (pin->gpiod == gpiod) {
@@ -2242,9 +2260,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
}
}
- pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
- if (pin == NULL)
+ if (new_pin == NULL) {
+ mutex_unlock(&regulator_list_mutex);
return -ENOMEM;
+ }
+
+ pin = new_pin;
+ new_pin = NULL;
pin->gpiod = gpiod;
list_add(&pin->list, &regulator_ena_gpio_list);
@@ -2252,6 +2274,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
update_ena_gpio_to_rdev:
pin->request_count++;
rdev->ena_pin = pin;
+
+ mutex_unlock(&regulator_list_mutex);
+ kfree(new_pin);
+
return 0;
}
@@ -2264,19 +2290,19 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
/* Free the GPIO only in case of no use */
list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
- if (pin->gpiod == rdev->ena_pin->gpiod) {
- if (pin->request_count <= 1) {
- pin->request_count = 0;
- gpiod_put(pin->gpiod);
- list_del(&pin->list);
- kfree(pin);
- rdev->ena_pin = NULL;
- return;
- } else {
- pin->request_count--;
- }
- }
+ if (pin != rdev->ena_pin)
+ continue;
+
+ if (--pin->request_count)
+ break;
+
+ gpiod_put(pin->gpiod);
+ list_del(&pin->list);
+ kfree(pin);
+ break;
}
+
+ rdev->ena_pin = NULL;
}
/**
@@ -4949,13 +4975,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev)
return;
}
- regulator_lock(c_rdev);
-
c_desc->coupled_rdevs[i] = c_rdev;
c_desc->n_resolved++;
- regulator_unlock(c_rdev);
-
regulator_resolve_coupling(c_rdev);
}
}
@@ -5040,7 +5062,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev)
if (!of_check_coupling_data(rdev))
return -EPERM;
+ mutex_lock(&regulator_list_mutex);
rdev->coupling_desc.coupler = regulator_find_coupler(rdev);
+ mutex_unlock(&regulator_list_mutex);
+
if (IS_ERR(rdev->coupling_desc.coupler)) {
err = PTR_ERR(rdev->coupling_desc.coupler);
rdev_err(rdev, "failed to get coupler: %d\n", err);
@@ -5141,6 +5166,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
ret = -ENOMEM;
goto rinse;
}
+ device_initialize(&rdev->dev);
/*
* Duplicate the config so the driver could override it after
@@ -5148,9 +5174,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
*/
config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
if (config == NULL) {
- kfree(rdev);
ret = -ENOMEM;
- goto rinse;
+ goto clean;
}
init_data = regulator_of_get_init_data(dev, regulator_desc, config,
@@ -5162,10 +5187,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
* from a gpio extender or something else.
*/
if (PTR_ERR(init_data) == -EPROBE_DEFER) {
- kfree(config);
- kfree(rdev);
ret = -EPROBE_DEFER;
- goto rinse;
+ goto clean;
}
/*
@@ -5206,9 +5229,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
if (config->ena_gpiod) {
- mutex_lock(&regulator_list_mutex);
ret = regulator_ena_gpio_request(rdev, config);
- mutex_unlock(&regulator_list_mutex);
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO: %d\n",
ret);
@@ -5220,7 +5241,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
/* register with sysfs */
- device_initialize(&rdev->dev);
rdev->dev.class = &regulator_class;
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%lu",
@@ -5248,27 +5268,22 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto wash;
- mutex_lock(&regulator_list_mutex);
ret = regulator_init_coupling(rdev);
- mutex_unlock(&regulator_list_mutex);
if (ret < 0)
goto wash;
/* add consumers devices */
if (init_data) {
- mutex_lock(&regulator_list_mutex);
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
- mutex_unlock(&regulator_list_mutex);
dev_err(dev, "Failed to set supply %s\n",
init_data->consumer_supplies[i].supply);
goto unset_supplies;
}
}
- mutex_unlock(&regulator_list_mutex);
}
if (!rdev->desc->ops->get_voltage &&
@@ -5303,13 +5318,11 @@ wash:
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
- put_device(&rdev->dev);
- rdev = NULL;
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
- kfree(rdev);
kfree(config);
+ put_device(&rdev->dev);
rinse:
if (dangling_cfg_gpiod)
gpiod_put(cfg->ena_gpiod);
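The regulator_register() error-path changes above rest on a driver-core rule: once device_initialize() has run on an embedded struct device, the object must be torn down with put_device(), which drops the last reference and invokes the release callback, never with a bare kfree(). A condensed sketch of the rule, using a hypothetical struct:

    struct foo {
        struct device dev;
        /* ... */
    };

    static void foo_release(struct device *dev)
    {
        kfree(container_of(dev, struct foo, dev));
    }

    foo = kzalloc(sizeof(*foo), GFP_KERNEL);
    if (!foo)
        return -ENOMEM;

    device_initialize(&foo->dev);
    foo->dev.release = foo_release;

    /* from here on, unwind with put_device(), never kfree() */
    if (setup_failed) {
        put_device(&foo->dev); /* last ref dropped -> foo_release() */
        return -EIO;
    }

Moving device_initialize() to the top of regulator_register() is what lets the clean: label unwind every later failure with a single put_device() call.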
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index 3117bbd2826b..eb3fc1db4edc 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -170,6 +170,9 @@ static int cros_ec_regulator_init_info(struct device *dev,
data->voltages_mV =
devm_kmemdup(dev, resp.voltages_mv,
sizeof(u16) * data->num_voltages, GFP_KERNEL);
+ if (!data->voltages_mV)
+ return -ENOMEM;
+
data->desc.n_voltages = data->num_voltages;
/* Make sure the returned name is always a valid string */
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index d54830e48b8d..142a70a89153 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -182,7 +182,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->enable_clock = devm_clk_get(dev, NULL);
if (IS_ERR(drvdata->enable_clock)) {
- dev_err(dev, "Cant get enable-clock from devicetree\n");
+ dev_err(dev, "Can't get enable-clock from devicetree\n");
return -ENOENT;
}
} else {
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 3234b118b53e..990bd50771d8 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -279,7 +279,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
return ret;
}
- drvdata->state = -EINVAL;
+ drvdata->state = -ENOTRECOVERABLE;
drvdata->duty_cycle_table = duty_cycle_table;
drvdata->desc.ops = &pwm_regulator_voltage_table_ops;
drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index cbb770824226..1a44e321b54e 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -40,6 +40,7 @@
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_fba_discipline;
+static void *dasd_fba_zero_page;
struct dasd_fba_private {
struct dasd_fba_characteristics rdc_data;
@@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
- ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0));
+ ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
}
/*
@@ -830,6 +831,11 @@ dasd_fba_init(void)
int ret;
ASCEBC(dasd_fba_discipline.ebcname, 4);
+
+ dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!dasd_fba_zero_page)
+ return -ENOMEM;
+
ret = ccw_driver_register(&dasd_fba_driver);
if (!ret)
wait_for_device_probe();
@@ -841,6 +847,7 @@ static void __exit
dasd_fba_cleanup(void)
{
ccw_driver_unregister(&dasd_fba_driver);
+ free_page((unsigned long)dasd_fba_zero_page);
}
module_init(dasd_fba_init);
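Background for the dasd_fba change above: CCW channel programs address data through 31-bit CDAs, and the generic ZERO_PAGE(0) is not guaranteed to lie below 2 GiB, so the driver now owns a zero page allocated with GFP_DMA and ties its lifetime to module init/exit. The shape of the pattern (illustrative):

    static void *zero_page;

    static int __init example_init(void)
    {
        /* GFP_DMA on s390: below 2 GiB, reachable via a 31-bit CCW CDA */
        zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!zero_page)
            return -ENOMEM;
        return 0;
    }

    static void __exit example_exit(void)
    {
        free_page((unsigned long)zero_page);
    }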
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c314e9495c1b..38017c4a31e9 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -65,6 +65,8 @@ int chsc_error_from_response(int response)
case 0x0100:
case 0x0102:
return -ENOMEM;
+ case 0x0108: /* "HW limit exceeded" for the op 0x003d */
+ return -EUSERS;
default:
return -EIO;
}
@@ -1114,7 +1116,7 @@ int chsc_enable_facility(int operation_code)
return ret;
}
-int __init chsc_get_cssid(int idx)
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
{
struct {
struct chsc_header request;
@@ -1125,7 +1127,8 @@ int __init chsc_get_cssid(int idx)
u32 reserved2[3];
struct {
u8 cssid;
- u32 : 24;
+ u8 iid;
+ u32 : 16;
} list[0];
} *sdcal_area;
int ret;
@@ -1151,8 +1154,10 @@ int __init chsc_get_cssid(int idx)
}
if ((addr_t) &sdcal_area->list[idx] <
- (addr_t) &sdcal_area->response + sdcal_area->response.length)
- ret = sdcal_area->list[idx].cssid;
+ (addr_t) &sdcal_area->response + sdcal_area->response.length) {
+ *cssid = sdcal_area->list[idx].cssid;
+ *iid = sdcal_area->list[idx].iid;
+ }
else
ret = -ENODEV;
exit:
@@ -1340,6 +1345,7 @@ EXPORT_SYMBOL_GPL(chsc_scm_info);
* chsc_pnso() - Perform Network-Subchannel Operation
* @schid: id of the subchannel on which PNSO is performed
* @pnso_area: request and response block for the operation
+ * @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
@@ -1347,10 +1353,8 @@ EXPORT_SYMBOL_GPL(chsc_scm_info);
*
* Returns 0 on success.
*/
-int chsc_pnso(struct subchannel_id schid,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc)
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc)
{
memset(pnso_area, 0, sizeof(*pnso_area));
pnso_area->request.length = 0x0030;
@@ -1359,7 +1363,7 @@ int chsc_pnso(struct subchannel_id schid,
pnso_area->ssid = schid.ssid;
pnso_area->sch = schid.sch_no;
pnso_area->cssid = schid.cssid;
- pnso_area->oc = 0; /* Store-network-bridging-information list */
+ pnso_area->oc = oc;
pnso_area->resume_token = resume_token;
pnso_area->n = (cnc != 0);
if (chsc(pnso_area))
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 7ecf7e4c402e..c2b83b68bc57 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -205,12 +205,10 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
-int chsc_pnso(struct subchannel_id schid,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc);
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc);
-int __init chsc_get_cssid(int idx);
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aca022239b33..1981eb62d329 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -854,7 +854,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid =
- (css->cssid < 0) ? 0 : css->cssid;
+ css->id_valid ? css->cssid : 0;
} else {
css->global_pgid.pgid_high.cpu_addr = stap();
}
@@ -877,7 +877,7 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
{
struct channel_subsystem *css = to_css(dev);
- if (css->cssid < 0)
+ if (!css->id_valid)
return -EINVAL;
return sprintf(buf, "%x\n", css->cssid);
@@ -975,7 +975,12 @@ static int __init setup_css(int nr)
css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
- css->cssid = chsc_get_cssid(nr);
+ ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
+ if (!ret) {
+ css->id_valid = true;
+ pr_info("Partition identifier %01x.%01x\n", css->cssid,
+ css->iid);
+ }
css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
ret = device_register(&css->device);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 8d832900a63d..3f322ea0f498 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -115,7 +115,9 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
- int cssid;
+ u8 cssid;
+ u8 iid;
+ bool id_valid; /* cssid,iid */
struct channel_path *chps[__MAX_CHPID + 1];
struct device device;
struct pgid global_pgid;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 963fcc9054c6..0fe7b2f2e7f5 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -714,6 +714,7 @@ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
* ccw_device_pnso() - Perform Network-Subchannel Operation
* @cdev: device on which PNSO is performed
* @pnso_area: request and response block for the operation
+ * @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
@@ -722,17 +723,101 @@ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
* Returns 0 on success.
*/
int ccw_device_pnso(struct ccw_device *cdev,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc)
+ struct chsc_pnso_area *pnso_area, u8 oc,
+ struct chsc_pnso_resume_token resume_token, int cnc)
{
struct subchannel_id schid;
ccw_device_get_schid(cdev, &schid);
- return chsc_pnso(schid, pnso_area, resume_token, cnc);
+ return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
}
EXPORT_SYMBOL_GPL(ccw_device_pnso);
+/**
+ * ccw_device_get_cssid() - obtain Channel Subsystem ID
+ * @cdev: device to obtain the CSSID for
+ * @cssid: The resulting Channel Subsystem ID
+ */
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *cssid = css->cssid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
+
+/**
+ * ccw_device_get_iid() - obtain MIF-image ID
+ * @cdev: device to obtain the MIF-image ID for
+ * @iid: The resulting MIF-image ID
+ */
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *iid = css->iid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_iid);
+
+/**
+ * ccw_device_get_chpid() - obtain Channel Path ID
+ * @cdev: device to obtain the Channel Path ID for
+ * @chp_idx: Index of the channel path
+ * @chpid: The resulting Channel Path ID
+ */
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int mask;
+
+ if ((chp_idx < 0) || (chp_idx > 7))
+ return -EINVAL;
+ mask = 0x80 >> chp_idx;
+ if (!(sch->schib.pmcw.pim & mask))
+ return -ENODEV;
+
+ *chpid = sch->schib.pmcw.chpid[chp_idx];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
+
+/**
+ * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
+ * @cdev: device to obtain the Channel ID for
+ * @chp_idx: Index of the channel path
+ * @chid: The resulting Channel ID
+ */
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
+{
+ struct chp_id cssid_chpid;
+ struct channel_path *chp;
+ int rc;
+
+ chp_id_init(&cssid_chpid);
+ rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
+ if (rc)
+ return rc;
+ chp = chpid_to_chp(cssid_chpid);
+ if (!chp)
+ return -ENODEV;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ *chid = chp->desc_fmt1.chid;
+ else
+ rc = -ENODEV;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chid);
+
/*
* Allocate zeroed dma coherent 31 bit addressable memory using
* the subchannels dma pool. Maximal size of allocation supported
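Taken together, the new ccw_device_get_*() helpers above let a driver assemble the identifiers that make up a network-interface token, treating any individual failure as "ids not valid"; qeth combines them the same way in qeth_read_ccw_conf_data() later in this patch. In miniature:

    u8 cssid, iid, chpid;
    u16 chid;
    bool ids_valid;

    ids_valid = !ccw_device_get_cssid(cdev, &cssid) &&
                !ccw_device_get_iid(cdev, &iid) &&
                !ccw_device_get_chpid(cdev, 0, &chpid) &&
                !ccw_device_get_chid(cdev, 0, &chid);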
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 3f5b61351cde..c793dcabd551 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1692,9 +1692,9 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
*nr_apqns = 0;
/* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
+ device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask_ext(device_status);
@@ -1762,7 +1762,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
verify = 0;
}
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
EXPORT_SYMBOL(cca_findcard2);
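The switch above from kmalloc_array() to kvmalloc_array() matters because a MAX_ZDEV_ENTRIES_EXT-sized status table can exceed what the physically contiguous kmalloc path will reliably deliver; kvmalloc falls back to vmalloc for large requests, and the buffer must then be released with kvfree(), which handles both backing types. Schematically (nentries is a placeholder):

    tbl = kvmalloc_array(nentries, sizeof(*tbl), GFP_KERNEL);
    if (!tbl)
        return -ENOMEM;

    /* ... fill and scan the table ... */

    kvfree(tbl); /* correct for both kmalloc- and vmalloc-backed memory */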
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index da46af682af8..f321eabefbe4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -195,8 +195,8 @@ struct qeth_vnicc_info {
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
-#define QETH_IN_BUF_COUNT_MIN 8
-#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_IN_BUF_COUNT_MIN 8U
+#define QETH_IN_BUF_COUNT_MAX 128U
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
((card)->qdio.in_buf_pool.buf_count / 2)
@@ -677,6 +677,7 @@ struct qeth_card_blkt {
enum qeth_pnso_mode {
QETH_PNSO_NONE,
QETH_PNSO_BRIDGEPORT,
+ QETH_PNSO_ADDR_INFO,
};
#define QETH_BROADCAST_WITH_ECHO 0x01
@@ -684,9 +685,16 @@ enum qeth_pnso_mode {
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
- u8 chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ /* doubleword below corresponds to net_if_token */
+ u16 ddev_devno;
+ u8 cssid;
+ u8 iid;
+ u8 ssid;
+ u8 chpid;
+ u16 chid;
+ u8 ids_valid:1; /* cssid,iid,chid */
u8 dev_addr_is_registered:1;
u8 open_when_online:1;
u8 promisc_mode:1;
@@ -745,7 +753,7 @@ struct qeth_discipline {
const struct device_type *devtype;
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
- int (*set_online)(struct qeth_card *card);
+ int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
@@ -780,6 +788,8 @@ struct qeth_switch_info {
struct qeth_priv {
unsigned int rx_copybreak;
+ u32 brport_hw_features;
+ u32 brport_features;
};
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
@@ -804,12 +814,16 @@ struct qeth_card {
struct workqueue_struct *event_wq;
struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
+
+ struct mutex ip_lock;
+ /* protected by ip_lock: */
DECLARE_HASHTABLE(ip_htable, 4);
+ struct qeth_ipato ipato;
+
DECLARE_HASHTABLE(local_addrs4, 4);
DECLARE_HASHTABLE(local_addrs6, 4);
spinlock_t local_addrs4_lock;
spinlock_t local_addrs6_lock;
- struct mutex ip_lock;
DECLARE_HASHTABLE(rx_mode_addrs, 4);
struct work_struct rx_mode_work;
struct work_struct kernel_thread_starter;
@@ -817,7 +831,6 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
@@ -1024,11 +1037,8 @@ struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
-int qeth_stop_channel(struct qeth_channel *channel);
int qeth_set_offline(struct qeth_card *card, bool resetting);
-void qeth_print_status_message(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1052,12 +1062,7 @@ void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
int qeth_schedule_recovery(struct qeth_card *card);
-void qeth_flush_local_addrs(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
-void qeth_clear_ipacmd_list(struct qeth_card *);
-int qeth_qdio_clear_card(struct qeth_card *, int);
-void qeth_clear_working_pool_list(struct qeth_card *);
-void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
@@ -1081,7 +1086,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
int qeth_setup_netdev(struct qeth_card *card);
int qeth_set_features(struct net_device *, netdev_features_t);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e19640bc6daa..fc2c3db9259f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -201,7 +201,7 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads)
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
-void qeth_clear_working_pool_list(struct qeth_card *card)
+static void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -216,7 +216,6 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
-EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
@@ -658,12 +657,11 @@ static void qeth_flush_local_addrs6(struct qeth_card *card)
spin_unlock_irq(&card->local_addrs6_lock);
}
-void qeth_flush_local_addrs(struct qeth_card *card)
+static void qeth_flush_local_addrs(struct qeth_card *card)
{
qeth_flush_local_addrs4(card);
qeth_flush_local_addrs6(card);
}
-EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
static void qeth_add_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
@@ -965,7 +963,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
}
}
-void qeth_clear_ipacmd_list(struct qeth_card *card)
+static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
unsigned long flags;
@@ -977,7 +975,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_notify_cmd(iob, -ECANCELED);
spin_unlock_irqrestore(&card->lock, flags);
}
-EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
@@ -1502,7 +1499,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
}
}
-void qeth_drain_output_queues(struct qeth_card *card)
+static void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
@@ -1513,7 +1510,6 @@ void qeth_drain_output_queues(struct qeth_card *card)
qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
-EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
@@ -1754,7 +1750,7 @@ static int qeth_halt_channel(struct qeth_card *card,
return 0;
}
-int qeth_stop_channel(struct qeth_channel *channel)
+static int qeth_stop_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
int rc;
@@ -1772,7 +1768,6 @@ int qeth_stop_channel(struct qeth_channel *channel)
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_stop_channel);
static int qeth_start_channel(struct qeth_channel *channel)
{
@@ -1842,7 +1837,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt)
return qeth_clear_channels(card);
}
-int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
@@ -1870,7 +1865,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
@@ -2311,12 +2305,10 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
u8 port = ((u8)card->dev->dev_port) | 0x80;
struct ccw1 *ccw = __ccw_from_cmd(iob);
- struct ccw_dev_id dev_id;
qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
iob->data);
qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
port |= QETH_IDX_ACT_INVAL_FRAME;
@@ -2325,7 +2317,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
&card->info.func_level, 2);
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
@@ -2599,7 +2591,6 @@ static int qeth_ulp_setup(struct qeth_card *card)
{
__u16 temp;
struct qeth_cmd_buffer *iob;
- struct ccw_dev_id dev_id;
QETH_CARD_TEXT(card, 2, "ulpsetup");
@@ -2614,8 +2605,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
- memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
@@ -2871,7 +2861,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
}
-void qeth_print_status_message(struct qeth_card *card)
+static void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
@@ -2912,7 +2902,6 @@ void qeth_print_status_message(struct qeth_card *card)
(card->info.mcl_level[0]) ? ")" : "",
qeth_get_cardname_short(card));
}
-EXPORT_SYMBOL_GPL(qeth_print_status_message);
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
@@ -4920,7 +4909,6 @@ int qeth_vm_request_mac(struct qeth_card *card)
{
struct diag26c_mac_resp *response;
struct diag26c_mac_req *request;
- struct ccw_dev_id id;
int rc;
QETH_CARD_TEXT(card, 2, "vmreqmac");
@@ -4932,11 +4920,10 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_DDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
- request->devno = id.devno;
+ request->devno = card->info.ddev_devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
@@ -5017,6 +5004,33 @@ out:
return;
}
+static void qeth_read_ccw_conf_data(struct qeth_card *card)
+{
+ struct qeth_card_info *info = &card->info;
+ struct ccw_device *cdev = CARD_DDEV(card);
+ struct ccw_dev_id dev_id;
+
+ QETH_CARD_TEXT(card, 2, "ccwconfd");
+ ccw_device_get_id(cdev, &dev_id);
+
+ info->ddev_devno = dev_id.devno;
+ info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
+ !ccw_device_get_iid(cdev, &info->iid) &&
+ !ccw_device_get_chid(cdev, 0, &info->chid);
+ info->ssid = dev_id.ssid;
+
+ dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
+ info->chid, info->chpid);
+
+ QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
+ QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
+ QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
+ QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
+ QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
+ QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
+ QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
+}
+
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
@@ -5103,7 +5117,7 @@ static void qeth_core_free_card(struct qeth_card *card)
kfree(card);
}
-void qeth_trace_features(struct qeth_card *card)
+static void qeth_trace_features(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "features");
QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
@@ -5112,7 +5126,6 @@ void qeth_trace_features(struct qeth_card *card)
QETH_CARD_HEX(card, 2, &card->info.diagass_support,
sizeof(card->info.diagass_support));
}
-EXPORT_SYMBOL_GPL(qeth_trace_features);
static struct ccw_device_id qeth_ids[] = {
{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
@@ -5143,7 +5156,7 @@ static struct ccw_driver qeth_ccw_driver = {
.remove = ccwgroup_remove_ccwdev,
};
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
+static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
@@ -5185,6 +5198,7 @@ retriable:
}
qeth_determine_capabilities(card);
+ qeth_read_ccw_conf_data(card);
qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card);
@@ -5256,6 +5270,8 @@ retriable:
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
}
+ qeth_trace_features(card);
+
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
(card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
card->info.hwtrap = 0;
@@ -5281,21 +5297,49 @@ out:
CARD_DEVID(card), rc);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
static int qeth_set_online(struct qeth_card *card)
{
+ bool carrier_ok;
int rc;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
- rc = card->discipline->set_online(card);
+ rc = qeth_hardsetup_card(card, &carrier_ok);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
+ rc = -ENODEV;
+ goto err_hardsetup;
+ }
+
+ qeth_print_status_message(card);
+
+ rc = card->discipline->set_online(card, carrier_ok);
+ if (rc)
+ goto err_online;
+
+ /* let user_space know that device is online */
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
+ return 0;
+
+err_online:
+err_hardsetup:
+ qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
+ qeth_flush_local_addrs(card);
+ qeth_stop_channel(&card->data);
+ qeth_stop_channel(&card->write);
+ qeth_stop_channel(&card->read);
+ qdio_free(CARD_DDEV(card));
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
@@ -5312,6 +5356,9 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
card->info.hwtrap = 1;
}
+ /* cancel any stalled cmd that might block the rtnl: */
+ qeth_clear_ipacmd_list(card);
+
rtnl_lock();
card->info.open_when_online = card->dev->flags & IFF_UP;
dev_close(card->dev);
@@ -5319,8 +5366,16 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
netif_carrier_off(card->dev);
rtnl_unlock();
+ cancel_work_sync(&card->rx_mode_work);
+
card->discipline->set_offline(card);
+ qeth_qdio_clear_card(card, 0);
+ qeth_drain_output_queues(card);
+ qeth_clear_working_pool_list(card);
+ qeth_flush_local_addrs(card);
+ card->info.promisc_mode = 0;
+
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
rc3 = qeth_stop_channel(&card->read);
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 8def82336f53..74c70364edc1 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -103,21 +103,21 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
unsigned int portno, limit;
int rc = 0;
+ rc = kstrtouint(buf, 16, &portno);
+ if (rc)
+ return rc;
+ if (portno > QETH_MAX_PORTNO)
+ return -EINVAL;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
- portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO) {
- rc = -EINVAL;
- goto out;
- }
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
if (portno > limit) {
rc = -EINVAL;
@@ -248,19 +248,19 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
unsigned int cnt;
- char *tmp;
int rc = 0;
+ rc = kstrtouint(buf, 10, &cnt);
+ if (rc)
+ return rc;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
- cnt = simple_strtoul(buf, &tmp, 10);
- cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
- ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-
+ cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);
rc = qeth_resize_buffer_pool(card, cnt);
out:
@@ -341,18 +341,15 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct net_device *ndev;
- char *tmp;
- int i, rc = 0;
enum qeth_discipline_id newdis;
+ unsigned int input;
+ int rc;
- mutex_lock(&card->discipline_mutex);
- if (card->state != CARD_STATE_DOWN) {
- rc = -EPERM;
- goto out;
- }
+ rc = kstrtouint(buf, 16, &input);
+ if (rc)
+ return rc;
- i = simple_strtoul(buf, &tmp, 16);
- switch (i) {
+ switch (input) {
case 0:
newdis = QETH_DISCIPLINE_LAYER3;
break;
@@ -360,7 +357,12 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
- rc = -EINVAL;
+ return -EINVAL;
+ }
+
+ mutex_lock(&card->discipline_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
goto out;
}
@@ -551,20 +553,21 @@ static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
- char *tmp;
- int i, rc = 0;
+ unsigned int input;
+ int rc;
+
+ rc = kstrtouint(buf, 10, &input);
+ if (rc)
+ return rc;
+
+ if (input > max_value)
+ return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->state != CARD_STATE_DOWN) {
+ if (card->state != CARD_STATE_DOWN)
rc = -EPERM;
- goto out;
- }
- i = simple_strtoul(buf, &tmp, 10);
- if (i <= max_value)
- *value = i;
else
- rc = -EINVAL;
-out:
+ *value = input;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
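The sysfs store conversions above share one shape: parse and range-check the input with kstrtouint() before taking conf_mutex, then hold the lock only for the state check and the update. The bufcnt hunk additionally replaces an open-coded min/max ladder with clamp(), which type-checks its three arguments; that is why QETH_IN_BUF_COUNT_MIN/MAX became unsigned (8U/128U) in qeth_core.h earlier in this patch. Reduced to its essentials:

    unsigned int cnt;
    int rc;

    rc = kstrtouint(buf, 10, &cnt);
    if (rc)
        return rc;
    cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);

    mutex_lock(&card->conf_mutex);
    /* ... reject if card state forbids the change, else apply cnt ... */
    mutex_unlock(&card->conf_mutex);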
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index adf25c9fd2b3..296d73d84326 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -23,7 +23,7 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout);
int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
+bool qeth_bridgeport_allowed(struct qeth_card *card);
struct qeth_mac {
u8 mac_addr[ETH_ALEN];
@@ -31,4 +31,11 @@ struct qeth_mac {
struct hlist_node hnode;
};
+static inline bool qeth_bridgeport_is_in_use(struct qeth_card *card)
+{
+ return card->options.sbp.role ||
+ card->options.sbp.reflect_promisc ||
+ card->options.sbp.hostnotification;
+}
+
#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 491578009f12..1852d0a3c10a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,24 +17,17 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
+#include <net/switchdev.h>
#include <asm/chsc.h>
+#include <asm/css_chars.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
-static void qeth_bridgeport_query_support(struct qeth_card *card);
-static void qeth_bridge_state_change(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
-static void qeth_addr_change_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
-static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
-static void qeth_l2_vnicc_init(struct qeth_card *card);
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
- u32 *timeout);
-
static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
{
int rc;
@@ -284,26 +277,20 @@ static void qeth_l2_set_pnso_mode(struct qeth_card *card,
drain_workqueue(card->event_wq);
}
-static void qeth_l2_stop_card(struct qeth_card *card)
+static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
{
- QETH_CARD_TEXT(card, 2, "stopcard");
+ struct switchdev_notifier_fdb_info info;
- qeth_set_allowed_threads(card, 0, 1);
-
- cancel_work_sync(&card->rx_mode_work);
- qeth_l2_drain_rx_mode_cache(card);
+ QETH_CARD_TEXT(card, 2, "fdbflush");
- if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_clear_ipacmd_list(card);
- qeth_drain_output_queues(card);
- card->state = CARD_STATE_DOWN;
- }
+ info.addr = NULL;
+ /* flush all VLANs: */
+ info.vid = 0;
+ info.added_by_user = false;
+ info.offloaded = true;
- qeth_qdio_clear_card(card, 0);
- qeth_clear_working_pool_list(card);
- qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
- qeth_flush_local_addrs(card);
- card->info.promisc_mode = 0;
+ call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
+ card->dev, &info.info, NULL);
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -589,49 +576,6 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
qeth_get_priority_queue(card, skb);
}
-static const struct device_type qeth_l2_devtype = {
- .name = "qeth_layer2",
- .groups = qeth_l2_attr_groups,
-};
-
-static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- if (IS_OSN(card))
- dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
-
- qeth_l2_vnicc_set_defaults(card);
- mutex_init(&card->sbp_lock);
-
- if (gdev->dev.type == &qeth_generic_devtype) {
- rc = qeth_l2_create_device_attributes(&gdev->dev);
- if (rc)
- return rc;
- }
-
- INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
- return 0;
-}
-
-static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
-{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
-
- if (cgdev->dev.type == &qeth_generic_devtype)
- qeth_l2_remove_device_attributes(&cgdev->dev);
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
-
- if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
-
- cancel_work_sync(&card->close_dev_work);
- if (card->dev->reg_state == NETREG_REGISTERED)
- unregister_netdev(card->dev);
-}
-
static void qeth_l2_set_rx_mode(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -642,6 +586,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
/**
* qeth_l2_pnso() - perform network subchannel operation
* @card: qeth_card structure pointer
+ * @oc: Operation Code
* @cnc: Boolean Change-Notification Control
* @cb: Callback function will be executed for each element
* of the address list
@@ -652,7 +597,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
* control" is set, further changes in the address list will be reported
* via the IPA command.
*/
-static int qeth_l2_pnso(struct qeth_card *card, int cnc,
+static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
void *priv)
{
@@ -663,13 +608,14 @@ static int qeth_l2_pnso(struct qeth_card *card, int cnc,
int i, size, elems;
int rc;
- QETH_CARD_TEXT(card, 2, "PNSO");
rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
if (rr == NULL)
return -ENOMEM;
do {
+ QETH_CARD_TEXT(card, 2, "PNSO");
/* on the first iteration, naihdr.resume_token will be zero */
- rc = ccw_device_pnso(ddev, rr, rr->naihdr.resume_token, cnc);
+ rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
+ cnc);
if (rc)
continue;
if (cb == NULL)
@@ -705,6 +651,218 @@ static int qeth_l2_pnso(struct qeth_card *card, int cnc,
return rc;
}
+static bool qeth_is_my_net_if_token(struct qeth_card *card,
+ struct net_if_token *token)
+{
+ return ((card->info.ddev_devno == token->devnum) &&
+ (card->info.cssid == token->cssid) &&
+ (card->info.iid == token->iid) &&
+ (card->info.ssid == token->ssid) &&
+ (card->info.chpid == token->chpid) &&
+ (card->info.chid == token->chid));
+}
+
+/**
+ * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
+ * @card: qeth_card structure pointer
+ * @code: event bitmask: high order bit 0x80 set to
+ * 1 - removal of an object
+ * 0 - addition of an object
+ * Object type(s):
+ * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
+ * @token: "network token" structure identifying 'physical' location
+ * of the target
+ * @addr_lnid: structure with MAC address and VLAN ID of the target
+ */
+static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
+ struct net_if_token *token,
+ struct mac_addr_lnid *addr_lnid)
+{
+ struct switchdev_notifier_fdb_info info;
+ u8 ntfy_mac[ETH_ALEN];
+
+ ether_addr_copy(ntfy_mac, addr_lnid->mac);
+ /* Ignore VLAN only changes */
+ if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
+ return;
+ /* Ignore mcast entries */
+ if (is_multicast_ether_addr(ntfy_mac))
+ return;
+ /* Ignore my own addresses */
+ if (qeth_is_my_net_if_token(card, token))
+ return;
+
+ info.addr = ntfy_mac;
+ /* don't report VLAN IDs */
+ info.vid = 0;
+ info.added_by_user = false;
+ info.offloaded = true;
+
+ if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ card->dev, &info.info, NULL);
+ QETH_CARD_TEXT(card, 4, "andelmac");
+ QETH_CARD_TEXT_(card, 4,
+ "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ } else {
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ card->dev, &info.info, NULL);
+ QETH_CARD_TEXT(card, 4, "anaddmac");
+ QETH_CARD_TEXT_(card, 4,
+ "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ }
+}
+
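+/* PNSO callback: forward each MAC/VLAN entry reported on enable to the
+ * bridge fdb as an address notification.
+ */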
+static void qeth_l2_dev2br_an_set_cb(void *priv,
+ struct chsc_pnso_naid_l2 *entry)
+{
+ u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
+ struct qeth_card *card = priv;
+
+ if (entry->addr_lnid.lnid < VLAN_N_VID)
+ code |= IPA_ADDR_CHANGE_CODE_VLANID;
+ qeth_l2_dev2br_fdb_notify(card, code,
+ (struct net_if_token *)&entry->nit,
+ (struct mac_addr_lnid *)&entry->addr_lnid);
+}
+
+/**
+ * qeth_l2_dev2br_an_set() -
+ * Enable or disable 'dev to bridge network address notification'
+ * @card: qeth_card structure pointer
+ * @enable: Enable or disable 'dev to bridge network address notification'
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * On enable, emits a series of address notifications for all
+ * currently registered hosts.
+ *
+ * Must be called under rtnl_lock
+ */
+static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
+{
+ int rc;
+
+ if (enable) {
+ QETH_CARD_TEXT(card, 2, "anseton");
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
+ qeth_l2_dev2br_an_set_cb, card);
+ if (rc == -EAGAIN)
+ /* address notification enabled, but inconsistent
+ * addresses reported -> disable address notification
+ */
+ qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
+ NULL, NULL);
+ } else {
+ QETH_CARD_TEXT(card, 2, "ansetoff");
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+ }
+
+ return rc;
+}
+
+static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
+ u16 mode = BRIDGE_MODE_UNDEF;
+
+ /* Do not even show qeth devs that cannot do bridge_setlink */
+ if (!priv->brport_hw_features || !netif_device_present(dev) ||
+ qeth_bridgeport_is_in_use(card))
+ return -EOPNOTSUPP;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ mode, priv->brport_features,
+ priv->brport_hw_features,
+ nlflags, filter_mask, NULL);
+}
+
+static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
+ [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
+};
+
+/**
+ * qeth_l2_bridge_setlink() - set bridgeport attributes
+ * @dev: netdevice
+ * @nlh: netlink message header
+ * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
+ * @extack: extended ACK report struct
+ *
+ * Called under rtnl_lock
+ */
+static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+ struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
+ struct qeth_card *card = dev->ml_priv;
+ struct nlattr *attr, *nested_attr;
+ bool enable, has_protinfo = false;
+ int rem1, rem2;
+ int rc;
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ if (!(priv->brport_hw_features))
+ return -EOPNOTSUPP;
+
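+ /* Accept IFLA_PROTINFO (validated against qeth_brport_policy) and
+ * IFLA_AF_SPEC carrying only IFLA_BRIDGE_FLAGS; reject any other
+ * attribute.
+ */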
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
+ if (nla_type(attr) == IFLA_PROTINFO) {
+ rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
+ qeth_brport_policy, extack);
+ if (rc)
+ return rc;
+ has_protinfo = true;
+ } else if (nla_type(attr) == IFLA_AF_SPEC) {
+ nla_for_each_nested(nested_attr, attr, rem2) {
+ if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
+ continue;
+ NL_SET_ERR_MSG_ATTR(extack, nested_attr,
+ "Unsupported attribute");
+ return -EINVAL;
+ }
+ } else {
+ NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
+ return -EINVAL;
+ }
+ }
+ if (!has_protinfo)
+ return 0;
+ if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
+ return -EINVAL;
+ enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
+
+ if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
+ return 0;
+
+ mutex_lock(&card->sbp_lock);
+ /* do not change anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card)) {
+ NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
+ rc = -EBUSY;
+ } else if (enable) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc)
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ else
+ priv->brport_features |= BR_LEARNING_SYNC;
+ } else {
+ rc = qeth_l2_dev2br_an_set(card, false);
+ if (!rc) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ qeth_l2_dev2br_fdb_flush(card);
+ }
+ }
+ mutex_unlock(&card->sbp_lock);
+
+ return rc;
+}
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
@@ -720,7 +878,9 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
.ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_fix_features,
- .ndo_set_features = qeth_set_features
+ .ndo_set_features = qeth_set_features,
+ .ndo_bridge_getlink = qeth_l2_bridge_getlink,
+ .ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
static const struct net_device_ops qeth_osn_netdev_ops = {
@@ -824,130 +984,78 @@ static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
}
}
-static int qeth_l2_set_online(struct qeth_card *card)
+/**
+ * qeth_l2_detect_dev2br_support() -
+ * Detect whether this card supports 'dev to bridge fdb network address
+ * change notification' and thus can support the learning_sync bridgeport
+ * attribute
+ * @card: qeth_card structure pointer
+ *
+ * This is a destructive test and must be called before dev2br or
+ * bridgeport address notification is enabled!
+ */
+static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
{
- struct ccwgroup_device *gdev = card->gdev;
- struct net_device *dev = card->dev;
- int rc = 0;
- bool carrier_ok;
-
- rc = qeth_core_hardsetup_card(card, &carrier_ok);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
- rc = -ENODEV;
- goto out_remove;
- }
-
- mutex_lock(&card->sbp_lock);
- qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs) {
- qeth_l2_setup_bridgeport_attrs(card);
- dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
- }
- mutex_unlock(&card->sbp_lock);
-
- qeth_l2_register_dev_addr(card);
-
- /* for the rx_bcast characteristic, init VNICC after setmac */
- qeth_l2_vnicc_init(card);
-
- qeth_trace_features(card);
- qeth_l2_trace_features(card);
-
- qeth_print_status_message(card);
-
- /* softsetup */
- QETH_CARD_TEXT(card, 2, "softsetp");
-
- card->state = CARD_STATE_SOFTSETUP;
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
+ struct qeth_priv *priv = netdev_priv(card->dev);
+ bool dev2br_supported;
+ int rc;
- if (dev->reg_state != NETREG_REGISTERED) {
- rc = qeth_l2_setup_netdev(card);
- if (rc)
- goto out_remove;
+ QETH_CARD_TEXT(card, 2, "d2brsup");
+ if (!IS_IQD(card))
+ return;
- if (carrier_ok)
- netif_carrier_on(dev);
+ /* dev2br requires valid cssid, iid, chid */
+ if (!card->info.ids_valid) {
+ dev2br_supported = false;
+ } else if (css_general_characteristics.enarf) {
+ dev2br_supported = true;
} else {
- rtnl_lock();
- if (carrier_ok)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
-
- netif_device_attach(dev);
- qeth_enable_hw_features(dev);
-
- if (card->info.open_when_online) {
- card->info.open_when_online = 0;
- dev_open(dev, NULL);
- }
- rtnl_unlock();
+ /* Old machines don't have the feature bit:
+ * Probe by testing whether a disable succeeds
+ */
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+ dev2br_supported = !rc;
}
- /* let user_space know that device is online */
- kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- return 0;
+ QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
-out_remove:
- qeth_l2_stop_card(card);
- qeth_stop_channel(&card->data);
- qeth_stop_channel(&card->write);
- qeth_stop_channel(&card->read);
- qdio_free(CARD_DDEV(card));
- return rc;
-}
-
-static void qeth_l2_set_offline(struct qeth_card *card)
-{
- qeth_l2_stop_card(card);
-}
-
-static int __init qeth_l2_init(void)
-{
- pr_info("register layer 2 discipline\n");
- return 0;
+ if (dev2br_supported)
+ priv->brport_hw_features |= BR_LEARNING_SYNC;
+ else
+ priv->brport_hw_features &= ~BR_LEARNING_SYNC;
}
-static void __exit qeth_l2_exit(void)
+static void qeth_l2_enable_brport_features(struct qeth_card *card)
{
- pr_info("unregister layer 2 discipline\n");
-}
+ struct qeth_priv *priv = netdev_priv(card->dev);
+ int rc;
-/* Returns zero if the command is successfully "consumed" */
-static int qeth_l2_control_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd)
-{
- switch (cmd->hdr.command) {
- case IPA_CMD_SETBRIDGEPORT_OSA:
- case IPA_CMD_SETBRIDGEPORT_IQD:
- if (cmd->data.sbp.hdr.command_code ==
- IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
- qeth_bridge_state_change(card, cmd);
- return 0;
- } else
- return 1;
- case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- qeth_addr_change_event(card, cmd);
- return 0;
- default:
- return 1;
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ if (priv->brport_hw_features & BR_LEARNING_SYNC) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc == -EAGAIN) {
+ /* Recoverable error, retry once */
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ qeth_l2_dev2br_fdb_flush(card);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ }
+ if (rc) {
+ netdev_err(card->dev,
+ "failed to enable bridge learning_sync: %d\n",
+ rc);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ qeth_l2_dev2br_fdb_flush(card);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ }
+ } else {
+ dev_warn(&card->gdev->dev,
+ "bridge learning_sync not supported\n");
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ }
}
}
-struct qeth_discipline qeth_l2_discipline = {
- .devtype = &qeth_l2_devtype,
- .setup = qeth_l2_probe_device,
- .remove = qeth_l2_remove_device,
- .set_online = qeth_l2_set_online,
- .set_offline = qeth_l2_set_offline,
- .do_ioctl = NULL,
- .control_event_handler = qeth_l2_control_event,
-};
-EXPORT_SYMBOL_GPL(qeth_l2_discipline);
-
#ifdef CONFIG_QETH_OSN
static void qeth_osn_assist_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
@@ -1169,6 +1277,81 @@ struct qeth_addr_change_data {
struct qeth_ipacmd_addr_change ac_event;
};
+static void qeth_l2_dev2br_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qeth_addr_change_data *data;
+ struct qeth_card *card;
+ struct qeth_priv *priv;
+ unsigned int i;
+ int rc;
+
+ data = container_of(dwork, struct qeth_addr_change_data, dwork);
+ card = data->card;
+ priv = netdev_priv(card->dev);
+
+ QETH_CARD_TEXT(card, 4, "dev2brew");
+
+ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+ goto free;
+
+ /* Potential re-config in progress, try again later: */
+ if (!rtnl_trylock()) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+ if (!netif_device_present(card->dev))
+ goto out_unlock;
+
+ if (data->ac_event.lost_event_mask) {
+ QETH_DBF_MESSAGE(3,
+ "Address change notification overflow on device %x\n",
+ CARD_DEVID(card));
+ /* Card fdb and bridge fdb are out of sync, card has stopped
+ * notifications (no need to drain_workqueue). Purge all
+ * 'extern_learn' entries from the parent bridge and restart
+ * the notifications.
+ */
+ qeth_l2_dev2br_fdb_flush(card);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc) {
+ /* TODO: if we want to retry after -EAGAIN, be
+ * aware that there could now be stale entries
+ * in the workqueue that need to be drained.
+ * For now we give up:
+ */
+ netdev_err(card->dev,
+ "bridge learning_sync failed to recover: %d\n",
+ rc);
+ WRITE_ONCE(card->info.pnso_mode,
+ QETH_PNSO_NONE);
+ /* To remove fdb entries reported by an_set: */
+ qeth_l2_dev2br_fdb_flush(card);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ } else {
+ QETH_DBF_MESSAGE(3,
+ "Address Notification resynced on device %x\n",
+ CARD_DEVID(card));
+ }
+ } else {
+ for (i = 0; i < data->ac_event.num_entries; i++) {
+ struct qeth_ipacmd_addr_change_entry *entry =
+ &data->ac_event.entry[i];
+ qeth_l2_dev2br_fdb_notify(card,
+ entry->change_code,
+ &entry->token,
+ &entry->addr_lnid);
+ }
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+free:
+ kfree(data);
+}
+
static void qeth_addr_change_event_worker(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
@@ -1251,7 +1434,10 @@ static void qeth_addr_change_event(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "ACNalloc");
return;
}
- INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
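+ /* Dispatch to the worker that matches the active PNSO mode: */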
+ if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
+ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
+ else
+ INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
data->card = card;
memcpy(&data->ac_event, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
@@ -1578,22 +1764,17 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
if (enable) {
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
- rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
+ qeth_bridgeport_an_set_cb, card);
if (rc)
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
} else {
- rc = qeth_l2_pnso(card, 0, NULL, NULL);
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
}
return rc;
}
-static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
-{
- return (card->options.sbp.role || card->options.sbp.reflect_promisc ||
- card->options.sbp.hostnotification);
-}
-
/* VNIC Characteristics support */
/* handle VNICC IPA command return codes; convert to error codes */
@@ -1739,6 +1920,19 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
+/* recover user timeout setting */
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+ u32 *timeout)
+{
+ if (card->options.vnicc.sup_chars & vnicc &&
+ card->options.vnicc.getset_timeout_sup & vnicc &&
+ !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
+ timeout))
+ return false;
+ *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+ return true;
+}
+
/* set current VNICC flag state; called from sysfs store function */
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
{
@@ -1879,7 +2073,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
}
/* check if VNICC is currently enabled */
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
if (!card->options.vnicc.sup_chars)
return false;
@@ -1894,17 +2088,19 @@ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
return true;
}
-/* recover user timeout setting */
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
- u32 *timeout)
+/**
+ * qeth_bridgeport_allowed - are any qeth_bridgeport functions allowed?
+ * @card: qeth_card structure pointer
+ *
+ * qeth_bridgeport functionality is mutually exclusive with usage of the
+ * VNIC Characteristics and dev2br address notifications
+ */
+bool qeth_bridgeport_allowed(struct qeth_card *card)
{
- if (card->options.vnicc.sup_chars & vnicc &&
- card->options.vnicc.getset_timeout_sup & vnicc &&
- !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
- timeout))
- return false;
- *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
- return true;
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ return (!_qeth_l2_vnicc_is_in_use(card) &&
+ !(priv->brport_features & BR_LEARNING_SYNC));
}
/* recover user characteristic setting */
@@ -1995,6 +2191,174 @@ static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
}
+static const struct device_type qeth_l2_devtype = {
+ .name = "qeth_layer2",
+ .groups = qeth_l2_attr_groups,
+};
+
+static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ if (IS_OSN(card))
+ dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
+
+ qeth_l2_vnicc_set_defaults(card);
+ mutex_init(&card->sbp_lock);
+
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l2_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
+
+ INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
+ return 0;
+}
+
+static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+ if (gdev->dev.type == &qeth_generic_devtype)
+ qeth_l2_remove_device_attributes(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+ if (gdev->state == CCWGROUP_ONLINE)
+ qeth_set_offline(card, false);
+
+ cancel_work_sync(&card->close_dev_work);
+ if (card->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(card->dev);
+}
+
+static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
+{
+ struct net_device *dev = card->dev;
+ int rc = 0;
+
+ /* query before bridgeport_notification may be enabled */
+ qeth_l2_detect_dev2br_support(card);
+
+ mutex_lock(&card->sbp_lock);
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs) {
+ qeth_l2_setup_bridgeport_attrs(card);
+ dev_info(&card->gdev->dev,
+ "The device represents a Bridge Capable Port\n");
+ }
+ mutex_unlock(&card->sbp_lock);
+
+ qeth_l2_register_dev_addr(card);
+
+ /* for the rx_bcast characteristic, init VNICC after setmac */
+ qeth_l2_vnicc_init(card);
+
+ qeth_l2_trace_features(card);
+
+ /* softsetup */
+ QETH_CARD_TEXT(card, 2, "softsetp");
+
+ card->state = CARD_STATE_SOFTSETUP;
+
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l2_setup_netdev(card);
+ if (rc)
+ goto err_setup;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ } else {
+ rtnl_lock();
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+ qeth_l2_enable_brport_features(card);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
+ }
+ rtnl_unlock();
+ }
+ return 0;
+
+err_setup:
+ qeth_set_allowed_threads(card, 0, 1);
+ card->state = CARD_STATE_DOWN;
+ return rc;
+}
+
+static void qeth_l2_set_offline(struct qeth_card *card)
+{
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ qeth_set_allowed_threads(card, 0, 1);
+ qeth_l2_drain_rx_mode_cache(card);
+
+ if (card->state == CARD_STATE_SOFTSETUP)
+ card->state = CARD_STATE_DOWN;
+
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
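+ /* Drop the fdb entries that were learned via address notifications */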
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ rtnl_lock();
+ qeth_l2_dev2br_fdb_flush(card);
+ rtnl_unlock();
+ }
+}
+
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ switch (cmd->hdr.command) {
+ case IPA_CMD_SETBRIDGEPORT_OSA:
+ case IPA_CMD_SETBRIDGEPORT_IQD:
+ if (cmd->data.sbp.hdr.command_code ==
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+ qeth_bridge_state_change(card, cmd);
+ return 0;
+ }
+
+ return 1;
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ qeth_addr_change_event(card, cmd);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+struct qeth_discipline qeth_l2_discipline = {
+ .devtype = &qeth_l2_devtype,
+ .setup = qeth_l2_probe_device,
+ .remove = qeth_l2_remove_device,
+ .set_online = qeth_l2_set_online,
+ .set_offline = qeth_l2_set_offline,
+ .do_ioctl = NULL,
+ .control_event_handler = qeth_l2_control_event,
+};
+EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+
+static int __init qeth_l2_init(void)
+{
+ pr_info("register layer 2 discipline\n");
+ return 0;
+}
+
+static void __exit qeth_l2_exit(void)
+{
+ pr_info("unregister layer 2 discipline\n");
+}
+
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 4695d25e54f2..4ba3bc57263f 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,7 +18,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
int rc = 0;
char *word;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
mutex_lock(&card->sbp_lock);
@@ -65,7 +65,7 @@ static ssize_t qeth_bridge_port_role_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
@@ -90,7 +90,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.reflect_promisc)
/* Forbid direct manipulation */
@@ -116,7 +116,7 @@ static ssize_t qeth_bridge_port_state_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
@@ -131,7 +131,7 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
enabled = card->options.sbp.hostnotification;
@@ -153,7 +153,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_an_set(card, enable);
@@ -179,7 +179,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
if (card->options.sbp.reflect_promisc) {
@@ -215,7 +215,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
rc = -EPERM;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 6ccfe2121095..acd130cfbab3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -96,7 +96,7 @@ struct qeth_ipato_entry {
struct list_head entry;
enum qeth_prot_versions proto;
char addr[16];
- int mask_bits;
+ unsigned int mask_bits;
};
extern const struct attribute_group *qeth_l3_attr_groups[];
@@ -110,7 +110,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
- int mask_bits);
+ unsigned int mask_bits);
void qeth_l3_update_ipato(struct qeth_card *card);
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add);
int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 767c5bb7c24c..a6f8878b55c6 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -105,11 +105,9 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
(ipatoe->proto == QETH_PROT_IPV4) ?
4 : 16);
if (addr->proto == QETH_PROT_IPV4)
- rc = !memcmp(addr_bits, ipatoe_bits,
- min(32, ipatoe->mask_bits));
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
else
- rc = !memcmp(addr_bits, ipatoe_bits,
- min(128, ipatoe->mask_bits));
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
if (rc)
break;
}
@@ -536,7 +534,6 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
- mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
@@ -556,21 +553,19 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
- mutex_unlock(&card->conf_mutex);
return rc;
}
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
- int mask_bits)
+ unsigned int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
int rc = -ENOENT;
QETH_CARD_TEXT(card, 2, "delipato");
- mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
@@ -587,7 +582,6 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
- mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -597,7 +591,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
- int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -605,11 +598,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_modify_ip(card, &addr, add);
- mutex_unlock(&card->conf_mutex);
-
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
@@ -1153,33 +1142,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void qeth_l3_stop_card(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 2, "stopcard");
-
- qeth_set_allowed_threads(card, 0, 1);
-
- cancel_work_sync(&card->rx_mode_work);
- qeth_l3_drain_rx_mode_cache(card);
-
- if (card->options.sniffer &&
- (card->info.promisc_mode == SET_PROMISC_MODE_ON))
- qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
-
- if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l3_clear_ip_htable(card, 1);
- qeth_clear_ipacmd_list(card);
- qeth_drain_output_queues(card);
- card->state = CARD_STATE_DOWN;
- }
-
- qeth_qdio_clear_card(card, 0);
- qeth_clear_working_pool_list(card);
- flush_workqueue(card->event_wq);
- qeth_flush_local_addrs(card);
- card->info.promisc_mode = 0;
-}
-
static void qeth_l3_set_promisc_mode(struct qeth_card *card)
{
bool enable = card->dev->flags & IFF_PROMISC;
@@ -1235,7 +1197,6 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
kfree(addr);
break;
}
- addr->ref_counter = 1;
fallthrough;
default:
/* for next call to set_rx_mode(): */
@@ -2025,21 +1986,10 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_clear_ipato_list(card);
}
-static int qeth_l3_set_online(struct qeth_card *card)
+static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
{
- struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
- bool carrier_ok;
-
- rc = qeth_core_hardsetup_card(card, &carrier_ok);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
- rc = -ENODEV;
- goto out_remove;
- }
-
- qeth_print_status_message(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
@@ -2066,7 +2016,7 @@ static int qeth_l3_set_online(struct qeth_card *card)
if (dev->reg_state != NETREG_REGISTERED) {
rc = qeth_l3_setup_netdev(card);
if (rc)
- goto out_remove;
+ goto err_setup;
if (carrier_ok)
netif_carrier_on(dev);
@@ -2086,22 +2036,28 @@ static int qeth_l3_set_online(struct qeth_card *card)
}
rtnl_unlock();
}
- qeth_trace_features(card);
- /* let user_space know that device is online */
- kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
return 0;
-out_remove:
- qeth_l3_stop_card(card);
- qeth_stop_channel(&card->data);
- qeth_stop_channel(&card->write);
- qeth_stop_channel(&card->read);
- qdio_free(CARD_DDEV(card));
+
+err_setup:
+ qeth_set_allowed_threads(card, 0, 1);
+ card->state = CARD_STATE_DOWN;
+ qeth_l3_clear_ip_htable(card, 1);
return rc;
}
static void qeth_l3_set_offline(struct qeth_card *card)
{
- qeth_l3_stop_card(card);
+ qeth_set_allowed_threads(card, 0, 1);
+ qeth_l3_drain_rx_mode_cache(card);
+
+ if (card->options.sniffer &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+
+ if (card->state == CARD_STATE_SOFTSETUP) {
+ card->state = CARD_STATE_DOWN;
+ qeth_l3_clear_ip_htable(card, 1);
+ }
}
/* Returns zero if the command is successfully "consumed" */
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index dd0b39082534..351763ae9b9c 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -301,19 +301,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
goto out;
}
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
enable = !card->ipato.enabled;
} else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
- goto out;
+ goto unlock_ip;
}
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
+unlock_ip:
+ mutex_unlock(&card->ip_lock);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -339,7 +341,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
bool invert;
int rc = 0;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
} else if (kstrtobool(buf, &invert)) {
@@ -349,12 +351,11 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
@@ -406,29 +407,29 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
}
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
- u8 *addr, int *mask_bits)
+ u8 *addr, unsigned int *mask_bits)
{
- const char *start, *end;
- char *tmp;
- char buffer[40] = {0, };
+ char *sep;
+ int rc;
- start = buf;
- /* get address string */
- end = strchr(start, '/');
- if (!end || (end - start >= 40)) {
+ /* Expected input pattern: %addr/%mask */
+ sep = strnchr(buf, 40, '/');
+ if (!sep)
return -EINVAL;
- }
- strncpy(buffer, start, end - start);
- if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
- return -EINVAL;
- }
- start = end + 1;
- *mask_bits = simple_strtoul(start, &tmp, 10);
- if (!strlen(start) ||
- (tmp == start) ||
- (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
+
+ /* Terminate the %addr sub-string, and parse it: */
+ *sep = '\0';
+ rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
+ if (rc)
+ return rc;
+
+ rc = kstrtouint(sep + 1, 10, mask_bits);
+ if (rc)
+ return rc;
+
+ if (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))
return -EINVAL;
- }
+
return 0;
}
@@ -436,8 +437,8 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
+ unsigned int mask_bits;
u8 addr[16];
- int mask_bits;
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
@@ -474,8 +475,8 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
+ unsigned int mask_bits;
u8 addr[16];
- int mask_bits;
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
@@ -510,7 +511,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
bool invert;
int rc = 0;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
} else if (kstrtobool(buf, &invert)) {
@@ -520,12 +521,11 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index d8cbc9c0e766..e67abb184a8a 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -634,8 +634,6 @@ free_fp:
fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
- if (!IS_ERR(fp))
- fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 6a521ba7a616..a4887985aad6 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -209,7 +209,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->num_scatter = si;
}
- task->data_dir = qc->dma_dir;
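+ /* Commands without a data payload must not inherit qc->dma_dir */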
+ if (qc->tf.protocol == ATA_PROT_NODATA)
+ task->data_dir = DMA_NONE;
+ else
+ task->data_dir = qc->dma_dir;
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index cd7c7d269f6f..d0f9e90e3279 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -182,10 +182,11 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
+ return res;
}
set_bit(SAS_DEV_FOUND, &dev->state);
kref_get(&dev->kref);
- return res;
+ return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6aae61d6ee16..b60945182db8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3517,6 +3517,9 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
+ prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
+ prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
+ prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RDF: did:x%x",
@@ -4656,7 +4659,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+ if (mbox)
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
spin_unlock_irq(shost->host_lock);
/* If the node is not being used by another discovery thread,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c4ba8273a63f..12e4e76233e6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4800,7 +4800,7 @@ struct send_frame_wqe {
uint32_t fc_hdr_wd5; /* word 15 */
};
-#define ELS_RDF_REG_TAG_CNT 1
+#define ELS_RDF_REG_TAG_CNT 4
struct lpfc_els_rdf_reg_desc {
struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
__be32 desc_tags[ELS_RDF_REG_TAG_CNT];
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c69725999315..ca25e54bb782 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11376,7 +11376,6 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
cpumask_clear(&eqhdl->aff_mask);
irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 20adec4387f0..c657abf22b75 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.3"
+#define LPFC_DRIVER_VERSION "12.8.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 883cccb59c2d..b0c01cf0428f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3689,7 +3689,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
instance = irq_ctx->instance;
if (irq_ctx->irq_line_enable) {
- disable_irq(irq_ctx->os_irq);
+ disable_irq_nosync(irq_ctx->os_irq);
irq_ctx->irq_line_enable = false;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5730f32496b6..8062bd99add8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1733,7 +1733,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
reply_q = container_of(irqpoll, struct adapter_reply_queue,
irqpoll);
if (reply_q->irq_line_enable) {
- disable_irq(reply_q->os_irq);
+ disable_irq_nosync(reply_q->os_irq);
reply_q->irq_line_enable = false;
}
num_entries = _base_process_reply_queue(reply_q);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 337e79d6837f..9889bab7d31c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -818,7 +818,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res)
- return res;
+ goto ex_err;
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_dev;
ccb->ccb_tag = ccb_tag;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1bc090d8a71b..a165120d2976 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1626,7 +1626,7 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
+ __le16 frame_payload_size;
__le16 max_iocb_allocation;
__le16 execution_throttle;
uint8_t retry_count;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 507919d4ab36..0bd04a62af83 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4603,18 +4603,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->special_options[1] = BIT_7;
} else if (IS_QLA2200(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = 1024;
+ nv->frame_payload_size = cpu_to_le16(1024);
} else if (IS_QLA2100(ha)) {
nv->firmware_options[0] = BIT_3 | BIT_1;
nv->firmware_options[1] = BIT_5;
- nv->frame_payload_size = 1024;
+ nv->frame_payload_size = cpu_to_le16(1024);
}
nv->max_iocb_allocation = cpu_to_le16(256);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index e6e0fb9a81b4..da0201693c24 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1372,7 +1372,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
return ret;
}
- /* Read Instat 1, Instat 2 and Instat 3 registers */
+ /* Read Intstat 1, Intstat 2 and Intstat 3 registers */
ret = sdw_read(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(slave->bus->dev,
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 37290a799023..6e36deb505b1 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -717,6 +717,7 @@ error:
kfree(wbuf);
error_1:
kfree(wr_msg);
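+ /* Clear the stale pointer so the bank-switch error path cannot
+ * free it again.
+ */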
+ bus->defer_msg.msg = NULL;
return ret;
}
@@ -840,9 +841,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
error:
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
bus = m_rt->bus;
-
- kfree(bus->defer_msg.msg->buf);
- kfree(bus->defer_msg.msg);
+ if (bus->defer_msg.msg) {
+ kfree(bus->defer_msg.msg->buf);
+ kfree(bus->defer_msg.msg);
+ }
}
msg_unlock:
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 1c1a9d17eec0..c6795c684b16 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -907,14 +907,16 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dma_dst;
+ struct device *ddev;
if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
memcpy_fromio(buf, cqspi->ahb_base + from, len);
return 0;
}
- dma_dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dma_dst)) {
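+ /* Map the buffer for the DMA channel's device rather than the SPI
+ * controller's, so that the proper DMA ops are used.
+ */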
+ ddev = cqspi->rx_chan->device->dev;
+ dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ddev, dma_dst)) {
dev_err(dev, "dma mapping failed\n");
return -ENOMEM;
}
@@ -948,7 +950,7 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
}
err_unmap:
- dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
+ dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);
return ret;
}
@@ -1128,8 +1130,17 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
return 0;
}
+static const char *cqspi_get_name(struct spi_mem *mem)
+{
+ struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct device *dev = &cqspi->pdev->dev;
+
+ return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
+}
+
static const struct spi_controller_mem_ops cqspi_mem_ops = {
.exec_op = cqspi_exec_mem_op,
+ .get_name = cqspi_get_name,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 9522d1b5786d..df981e55c24c 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -90,7 +90,7 @@ static struct spi_test spi_tests[] = {
{
.description = "tx/rx-transfer - crossing PAGE_SIZE",
.fill_option = FILL_COUNT_8,
- .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_len = { ITERATE_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_rx_align = ITERATE_ALIGN,
.transfer_count = 1,
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index d4b33b358a31..3056428b09f3 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -936,7 +936,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
}
if (sr & STM32H7_SPI_SR_SUSP) {
- dev_warn(spi->dev, "Communication suspended\n");
+ static DEFINE_RATELIMIT_STATE(rs,
+ DEFAULT_RATELIMIT_INTERVAL * 10,
+ 1);
+ if (__ratelimit(&rs))
+ dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, false);
/*
@@ -2060,7 +2064,7 @@ static int stm32_spi_resume(struct device *dev)
}
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "Unable to power device:%d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index dc12af018350..0cab239d8e7f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1327,8 +1327,6 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_res_release(ctlr, msg);
-
spi_finalize_current_message(ctlr);
return ret;
@@ -1725,6 +1723,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spi_unmap_msg(ctlr, mesg);
+ /* In the prepare_messages callback the SPI bus has the opportunity
+ * to split a transfer into smaller chunks.
+ * Release the split transfers here, since spi_map_msg is done on
+ * the split transfers.
+ */
+ spi_res_release(ctlr, mesg);
+
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 8b100a71f02e..237531ba60f3 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -173,8 +173,7 @@ static int gbaudio_remove_controls(struct snd_card *card, struct device *dev,
id.index = control->index;
kctl = snd_ctl_find_id(card, &id);
if (!kctl) {
- dev_err(dev, "%d: Failed to find %s\n", err,
- control->name);
+ dev_err(dev, "Failed to find %s\n", control->name);
continue;
}
err = snd_ctl_remove(card, kctl);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 2f9fdbdcd547..83b38ae8908c 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -456,6 +456,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
val = ucontrol->value.integer.value[0] & mask;
connect = !!val;
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+ if (ret)
+ goto exit;
+
/* update ucontrol */
if (gbvalue.value.integer_value[0] != val) {
for (wi = 0; wi < wlist->num_widgets; wi++) {
@@ -466,25 +475,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
gbvalue.value.integer_value[0] =
cpu_to_le32(ucontrol->value.integer.value[0]);
- ret = gb_pm_runtime_get_sync(bundle);
- if (ret)
- return ret;
-
ret = gb_audio_gb_set_control(module->mgmt_connection,
data->ctl_id,
GB_AUDIO_INVALID_INDEX, &gbvalue);
-
- gb_pm_runtime_put_autosuspend(bundle);
-
- if (ret) {
- dev_err_ratelimited(codec_dev,
- "%d:Error in %s for %s\n", ret,
- __func__, kcontrol->id.name);
- return ret;
- }
}
- return 0;
+exit:
+ gb_pm_runtime_put_autosuspend(bundle);
+ if (ret)
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
}
#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index fa1bf8b069fd..2720f7319a3d 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -524,13 +524,8 @@ static void hfa384x_usb_defer(struct work_struct *data)
*/
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
- memset(hw, 0, sizeof(*hw));
hw->usb = usb;
- /* set up the endpoints */
- hw->endp_in = usb_rcvbulkpipe(usb, 1);
- hw->endp_out = usb_sndbulkpipe(usb, 2);
-
/* Set up the waitq */
init_waitqueue_head(&hw->cmdq);
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 456603fd26c0..4b08dc1da4f9 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *dev;
- const struct usb_endpoint_descriptor *epd;
- const struct usb_host_interface *iface_desc = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
int result = 0;
- if (iface_desc->desc.bNumEndpoints != 2) {
- result = -ENODEV;
- goto failed;
- }
-
- result = -EINVAL;
- epd = &iface_desc->endpoint[1].desc;
- if (!usb_endpoint_is_bulk_in(epd))
- goto failed;
- epd = &iface_desc->endpoint[2].desc;
- if (!usb_endpoint_is_bulk_out(epd))
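+ /* Discover the bulk-in/bulk-out endpoint pair instead of assuming
+ * fixed endpoint numbers.
+ */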
+ result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL);
+ if (result)
goto failed;
dev = interface_to_usbdev(interface);
@@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
}
/* Initialize the hw data */
+ hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress);
+ hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress);
hfa384x_create(hw, dev);
hw->wlandev = wlandev;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cd045dc75a58..7b56fe9f1062 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1389,14 +1389,27 @@ static u32 iscsit_do_crypto_hash_sg(
sg = cmd->first_data_sg;
page_off = cmd->first_data_sg_off;
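+ /* When the data begins at an offset into the first sg entry, hash
+ * that partial entry separately; the remaining entries are then
+ * consumed full-length.
+ */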
+ if (data_length && page_off) {
+ struct scatterlist first_sg;
+ u32 len = min_t(u32, data_length, sg->length - page_off);
+
+ sg_init_table(&first_sg, 1);
+ sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
+
+ ahash_request_set_crypt(hash, &first_sg, NULL, len);
+ crypto_ahash_update(hash);
+
+ data_length -= len;
+ sg = sg_next(sg);
+ }
+
while (data_length) {
- u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
+ u32 cur_len = min_t(u32, data_length, sg->length);
ahash_request_set_crypt(hash, sg, NULL, cur_len);
crypto_ahash_update(hash);
data_length -= cur_len;
- page_off = 0;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
}
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 85748e338858..893d1b406c29 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1149,7 +1149,7 @@ void iscsit_free_conn(struct iscsi_conn *conn)
}
void iscsi_target_login_sess_out(struct iscsi_conn *conn,
- struct iscsi_np *np, bool zero_tsih, bool new_sess)
+ bool zero_tsih, bool new_sess)
{
if (!new_sess)
goto old_sess_out;
@@ -1167,7 +1167,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
conn->sess = NULL;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
/*
* If login negotiation fails check if the Time2Retain timer
* needs to be restarted.
@@ -1407,8 +1406,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
new_sess_out:
new_sess = true;
old_sess_out:
+ iscsi_stop_login_thread_timer(np);
tpg_np = conn->tpg_np;
- iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
+ iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
new_sess = false;
if (tpg) {
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 3b8e3639ff5d..fc95e6150253 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_conn *);
extern int iscsit_start_kthreads(struct iscsi_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
-extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
- bool, bool);
+extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool);
extern int iscsi_target_login_thread(void *);
extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index f88a52fec889..8b40f10976ff 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in
static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
{
- struct iscsi_np *np = login->np;
bool zero_tsih = login->zero_tsih;
iscsi_remove_failed_auth_entry(conn);
iscsi_target_nego_release(conn);
- iscsi_target_login_sess_out(conn, np, zero_tsih, true);
+ iscsi_target_login_sess_out(conn, zero_tsih, true);
}
struct conn_timeout {
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 3ebca44ab3fa..0c8471be3e32 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -7,6 +7,7 @@
*/
#include <linux/crc32.h>
+#include <linux/delay.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"
@@ -389,8 +390,8 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
if (pos + 1 == drom_size || pos + entry->len > drom_size
|| !entry->len) {
- tb_sw_warn(sw, "drom buffer overrun, aborting\n");
- return -EIO;
+ tb_sw_warn(sw, "DROM buffer overrun\n");
+ return -EILSEQ;
}
switch (entry->type) {
@@ -526,7 +527,8 @@ int tb_drom_read(struct tb_switch *sw)
u16 size;
u32 crc;
struct tb_drom_header *header;
- int res;
+ int res, retries = 1;
+
if (sw->drom)
return 0;
@@ -612,7 +614,17 @@ parse:
tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
header->device_rom_revision);
- return tb_drom_parse_entries(sw);
+ res = tb_drom_parse_entries(sw);
+ /* If the DROM parsing fails, wait a moment and retry once */
+ if (res == -EILSEQ && retries--) {
+ tb_sw_warn(sw, "parsing DROM failed, retrying\n");
+ msleep(100);
+ res = tb_drom_read_n(sw, 0, sw->drom, size);
+ if (!res)
+ goto parse;
+ }
+
+ return res;
err:
kfree(sw->drom);
sw->drom = NULL;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3845db569e4c..a921de9ce7cb 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -684,6 +684,7 @@ static int tb_init_port(struct tb_port *port)
if (res == -ENODEV) {
tb_dbg(port->sw->tb, " Port %d: not implemented\n",
port->port);
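+ /* Mark ports that are not implemented as disabled */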
+ port->disabled = true;
return 0;
}
return res;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index a413d55b5f8b..3c620a9203c5 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -186,7 +186,7 @@ struct tb_switch {
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @cap_usb4: Offset to the USB4 port capability (%0 if not present)
* @port: Port number on switch
- * @disabled: Disabled by eeprom
+ * @disabled: Disabled by eeprom or enabled but not implemented
* @bonded: true if the port is bonded (two lanes combined as one)
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 1a7e849840b2..829b6ccdd5d4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -951,10 +951,18 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
int ret, max_rate, allocate_up, allocate_down;
ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
- if (ret <= 0) {
- tb_tunnel_warn(tunnel, "tunnel is not up\n");
+ if (ret < 0) {
+ tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
return;
+ } else if (!ret) {
+ /* Use the maximum link rate if the link-valid bit is not set */
+ ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
+ if (ret < 0) {
+ tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
+ return;
+ }
}
+
/*
* 90% of the max rate can be allocated for isochronous
* transfers.
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3eb2d485eaeb..55bb7b897d97 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5566,6 +5566,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
+ /*
+ * Realtek RealManage
+ */
+ { PCI_VENDOR_ID_REALTEK, 0x816a,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_REALTEK, 0x816b,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_b0_1_115200 },
+
/* Fintek PCI serial cards */
{ PCI_DEVICE(0x1c29, 0x1104), .driver_data = pbn_fintek_4 },
{ PCI_DEVICE(0x1c29, 0x1108), .driver_data = pbn_fintek_8 },
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f797c971cd82..124524ecfe26 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1916,24 +1916,12 @@ static inline bool uart_console_enabled(struct uart_port *port)
return uart_console(port) && (port->cons->flags & CON_ENABLED);
}
-static void __uart_port_spin_lock_init(struct uart_port *port)
+static void uart_port_spin_lock_init(struct uart_port *port)
{
spin_lock_init(&port->lock);
lockdep_set_class(&port->lock, &port_lock_key);
}
-/*
- * Ensure that the serial console lock is initialised early.
- * If this port is a console, then the spinlock is already initialised.
- */
-static inline void uart_port_spin_lock_init(struct uart_port *port)
-{
- if (uart_console(port))
- return;
-
- __uart_port_spin_lock_init(port);
-}
-
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/**
* uart_console_write - write a console message to a serial port
@@ -2086,7 +2074,15 @@ uart_set_options(struct uart_port *port, struct console *co,
struct ktermios termios;
static struct ktermios dummy;
- uart_port_spin_lock_init(port);
+ /*
+ * Ensure that the serial-console lock is initialised early.
+ *
+ * Note that the console-enabled check is needed because of kgdboc,
+ * which can end up calling uart_set_options() for an already enabled
+ * console via tty_find_polling_driver() and uart_poll_init().
+ */
+ if (!uart_console_enabled(port) && !port->console_reinit)
+ uart_port_spin_lock_init(port);
memset(&termios, 0, sizeof(struct ktermios));
@@ -2379,13 +2375,6 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_change_pm(state, UART_PM_STATE_ON);
/*
- * If this driver supports console, and it hasn't been
- * successfully registered yet, initialise spin lock for it.
- */
- if (port->cons && !(port->cons->flags & CON_ENABLED))
- __uart_port_spin_lock_init(port);
-
- /*
* Ensure that the modem control lines are de-activated.
* keep the DTR setting that is set in uart_set_options()
* We probably don't need a spinlock around this, but
@@ -2801,10 +2790,12 @@ static ssize_t console_store(struct device *dev,
if (oldconsole && !newconsole) {
ret = unregister_console(uport->cons);
} else if (!oldconsole && newconsole) {
- if (uart_console(uport))
+ if (uart_console(uport)) {
+ uport->console_reinit = 1;
register_console(uport->cons);
- else
+ } else {
ret = -ENOENT;
+ }
}
} else {
ret = -ENXIO;
@@ -2900,7 +2891,12 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
goto out;
}
- uart_port_spin_lock_init(uport);
+ /*
+ * If this port is in use as a console then the spinlock is already
+ * initialised.
+ */
+ if (!uart_console_enabled(uport))
+ uart_port_spin_lock_init(uport);
if (uport->cons && uport->dev)
of_console_check(uport->dev->of_node, uport->cons->name, uport->line);
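
The uart_set_options() and uart_add_one_port() hunks above gate the spin-lock
initialisation on the console state, so an already live console lock is never
re-initialised. A stand-alone sketch of that gating, with booleans standing in
for the real console flags and for spin_lock_init()/lockdep_set_class() (all
names below are stand-ins, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    struct port {
        bool is_console;
        bool con_enabled;
        bool console_reinit;
        bool lock_initialised;
    };

    static bool uart_console_enabled(const struct port *p)
    {
        return p->is_console && p->con_enabled;
    }

    static void port_lock_init(struct port *p)
    {
        /* stand-in for spin_lock_init() + lockdep_set_class() */
        p->lock_initialised = true;
    }

    int main(void)
    {
        struct port p = { .is_console = true, .con_enabled = true };

        /* As in uart_set_options(): never re-initialise the lock of an
         * already enabled (or re-registered) console */
        if (!uart_console_enabled(&p) && !p.console_reinit)
            port_lock_init(&p);

        printf("lock initialised: %s\n", p.lock_initialised ? "yes" : "no");
        return 0;
    }
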
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 084c48c5848f..67cbd42421be 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -827,6 +827,11 @@ static ssize_t usblp_read(struct file *file, char __user *buffer, size_t len, lo
if (rv < 0)
return rv;
+ if (!usblp->present) {
+ count = -ENODEV;
+ goto done;
+ }
+
if ((avail = usblp->rstatus) < 0) {
printk(KERN_ERR "usblp%d: error %d reading from printer\n",
usblp->minor, (int)avail);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 6197938dcc2d..ae1de9cc4b09 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1205,6 +1205,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
}
}
+/*
+ * usb_disable_device_endpoints -- Disable all endpoints for a device
+ * @dev: the device whose endpoints are being disabled
+ * @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
+ */
+static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0)
+{
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
+ int i;
+
+ if (hcd->driver->check_bandwidth) {
+ /* First pass: Cancel URBs, leave endpoint pointers intact. */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, false);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+ }
+ /* Remove endpoints from the host controller internal state */
+ mutex_lock(hcd->bandwidth_mutex);
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ mutex_unlock(hcd->bandwidth_mutex);
+ }
+ /* Second pass: remove endpoint pointers */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, true);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
+}
+
/**
* usb_disable_device - Disable all the endpoints for a USB device
* @dev: the device whose endpoints are being disabled
@@ -1218,7 +1246,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
- struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
@@ -1264,22 +1291,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
- if (hcd->driver->check_bandwidth) {
- /* First pass: Cancel URBs, leave endpoint pointers intact. */
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, false);
- usb_disable_endpoint(dev, i + USB_DIR_IN, false);
- }
- /* Remove endpoints from the host controller internal state */
- mutex_lock(hcd->bandwidth_mutex);
- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- mutex_unlock(hcd->bandwidth_mutex);
- /* Second pass: remove endpoint pointers */
- }
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
+
+ usb_disable_device_endpoints(dev, skip_ep0);
}
/**
@@ -1522,6 +1535,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
* The caller must own the device lock.
*
* Return: Zero on success, else a negative error code.
+ *
+ * If this routine fails the device will probably be in an unusable state
+ * with endpoints disabled, and interfaces only partially enabled.
*/
int usb_reset_configuration(struct usb_device *dev)
{
@@ -1537,10 +1553,7 @@ int usb_reset_configuration(struct usb_device *dev)
* calls during probe() are fine
*/
- for (i = 1; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
+ usb_disable_device_endpoints(dev, 1); /* skip ep0 */
config = dev->actconfig;
retval = 0;
@@ -1553,34 +1566,10 @@ int usb_reset_configuration(struct usb_device *dev)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
- /* Make sure we have enough bandwidth for each alternate setting 0 */
- for (i = 0; i < config->desc.bNumInterfaces; i++) {
- struct usb_interface *intf = config->interface[i];
- struct usb_host_interface *alt;
- alt = usb_altnum_to_altsetting(intf, 0);
- if (!alt)
- alt = &intf->altsetting[0];
- if (alt != intf->cur_altsetting)
- retval = usb_hcd_alloc_bandwidth(dev, NULL,
- intf->cur_altsetting, alt);
- if (retval < 0)
- break;
- }
- /* If not, reinstate the old alternate settings */
+ /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */
+ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL);
if (retval < 0) {
-reset_old_alts:
- for (i--; i >= 0; i--) {
- struct usb_interface *intf = config->interface[i];
- struct usb_host_interface *alt;
-
- alt = usb_altnum_to_altsetting(intf, 0);
- if (!alt)
- alt = &intf->altsetting[0];
- if (alt != intf->cur_altsetting)
- usb_hcd_alloc_bandwidth(dev, NULL,
- alt, intf->cur_altsetting);
- }
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
@@ -1589,8 +1578,12 @@ reset_old_alts:
USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (retval < 0)
- goto reset_old_alts;
+ if (retval < 0) {
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ usb_enable_lpm(dev);
+ mutex_unlock(hcd->bandwidth_mutex);
+ return retval;
+ }
mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
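
The new helper consolidates the two-pass disable sequence that
usb_disable_device() previously open-coded. A user-space model of the
iteration pattern — 15 OUT endpoints plus their IN counterparts at
address + USB_DIR_IN, first cancelling URBs, then dropping endpoint state
(disable_ep() is a hypothetical stand-in):

    #include <stdio.h>

    #define USB_DIR_IN 0x80

    static void disable_ep(int addr, int drop_state)
    {
        printf("ep 0x%02x%s\n", addr, drop_state ? " (state dropped)" : "");
    }

    int main(void)
    {
        int skip_ep0 = 1, i;

        /* First pass: cancel URBs, leave endpoint state intact */
        for (i = skip_ep0; i < 16; ++i) {
            disable_ep(i, 0);
            disable_ep(i + USB_DIR_IN, 0);
        }

        /* ...host-controller bandwidth/endpoint state dropped here... */

        /* Second pass: remove the endpoint pointers */
        for (i = skip_ep0; i < 16; ++i) {
            disable_ep(i, 1);
            disable_ep(i + USB_DIR_IN, 1);
        }
        return 0;
    }
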
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f232914de5fd..10574fa3f927 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -397,6 +397,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Generic RTL8153 based ethernet adapters */
{ USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+ /* SONiX USB DEVICE Touchpad */
+ { USB_DEVICE(0x0c45, 0x7056), .driver_info =
+ USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index a2ca38e25e0c..8d134193fa0c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj,
size_t srclen, n;
int cfgno;
void *src;
+ int retval;
+ retval = usb_lock_device_interruptible(udev);
+ if (retval < 0)
+ return -EINTR;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
@@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
+ usb_unlock_device(udev);
return count - nleft;
}
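
A sketch of the interruptible-lock pattern this hunk introduces: take the
device lock, bail out with -EINTR if a signal interrupts the wait, and unlock
on the way out. lock_device_interruptible()/unlock_device() are stand-ins for
usb_lock_device_interruptible()/usb_unlock_device():

    #include <errno.h>
    #include <stdio.h>

    static int lock_device_interruptible(void)
    {
        return 0; /* <0 if a signal arrived while waiting for the lock */
    }

    static void unlock_device(void) { }

    static int read_descriptors(void)
    {
        if (lock_device_interruptible() < 0)
            return -EINTR; /* let the reader retry */

        /* ...copy descriptor bytes; nothing can reconfigure the device... */

        unlock_device();
        return 0;
    }

    int main(void)
    {
        printf("read_descriptors() = %d\n", read_descriptors());
        return 0;
    }
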
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 88b75b5a039c..1f7f4d88ed9d 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -737,13 +737,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
goto err_disable_clks;
}
- ret = reset_control_deassert(priv->reset);
+ ret = reset_control_reset(priv->reset);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
ret = dwc3_meson_g12a_get_phys(priv);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
@@ -752,7 +752,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
}
/* Get dr_mode */
@@ -765,13 +765,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->usb_init(priv);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
}
/* Set PHY Power */
@@ -809,9 +809,6 @@ err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
-err_assert_reset:
- reset_control_assert(priv->reset);
-
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6257be4110ca..3575b7201881 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/otg.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ce0eaf7d7c12..087402aec5cb 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -14,7 +14,6 @@
*/
/*-------------------------------------------------------------------------*/
-#include <linux/usb/otg.h>
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 871cdccf3a5f..9823bb424abd 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e8373528264c..b5ca17a5967a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -160,6 +160,7 @@
#define XSENS_AWINDA_DONGLE_PID 0x0102
#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
+#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */
#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
/* Xsens devices using FTDI VID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89b3192af326..0c6f160a214a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1094,14 +1094,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
- .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
@@ -1819,6 +1823,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
.driver_info = RSVD(7) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
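
The Quectel entries above switch from whole-device matches to
USB_DEVICE_AND_INTERFACE_INFO() so that only the vendor-specific serial
interfaces bind, leaving the device's other functions to their own drivers.
A toy model of interface-triple matching (the class/subclass/protocol values
are illustrative):

    #include <stdio.h>

    struct intf { unsigned char cls, sub, proto; };

    /* Match an interface triple, as USB_DEVICE_AND_INTERFACE_INFO()
     * does for 0xff/0xff/0xff above */
    static int matches(const struct intf *i,
                       unsigned char c, unsigned char s, unsigned char p)
    {
        return i->cls == c && i->sub == s && i->proto == p;
    }

    int main(void)
    {
        struct intf serial = { 0xff, 0xff, 0xff }; /* vendor-specific serial */
        struct intf net    = { 0xff, 0x42, 0x01 }; /* illustrative other function */

        printf("serial binds: %d, net binds: %d\n",
               matches(&serial, 0xff, 0xff, 0xff),
               matches(&net, 0xff, 0xff, 0xff));
        return 0;
    }
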
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 08f9296431e9..8183504e3abb 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -662,8 +662,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
- spin_unlock_irqrestore(&devinfo->lock, flags);
- return 0;
+ goto zombie;
}
/* Find a free uas-tag */
@@ -699,6 +698,16 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
err = uas_submit_urbs(cmnd, devinfo);
+ /*
+ * In case of fatal errors the SCSI layer is peculiar:
+ * a command that has finished is a success for the
+ * purpose of queueing, no matter how fatal the error.
+ */
+ if (err == -ENODEV) {
+ cmnd->result = DID_ERROR << 16;
+ cmnd->scsi_done(cmnd);
+ goto zombie;
+ }
if (err) {
/* If we did nothing, give up now */
if (cmdinfo->state & SUBMIT_STATUS_URB) {
@@ -709,6 +718,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
}
devinfo->cmnd[idx] = cmnd;
+zombie:
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
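
A compact model of the queueing contract noted in the new comment: even when
the command is completed with DID_ERROR, .queuecommand must still report
success, hence the shared zombie: exit path. queuecommand() below is a
stand-in, not the driver function:

    #include <stdio.h>

    static int queuecommand(int fatal_err)
    {
        if (fatal_err) {
            /* complete the command with an error result... */
            puts("completed with DID_ERROR");
            goto zombie;
        }
        puts("submitted");
    zombie:
        return 0; /* success for the purpose of queueing */
    }

    int main(void)
    {
        queuecommand(1);
        queuecommand(0);
        return 0;
    }
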
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index e4021e13af40..676b525c2a66 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -61,14 +61,11 @@ enum {
#define PMC_USB_ALTMODE_ORI_SHIFT 1
#define PMC_USB_ALTMODE_UFP_SHIFT 3
-#define PMC_USB_ALTMODE_ORI_AUX_SHIFT 4
-#define PMC_USB_ALTMODE_ORI_HSL_SHIFT 5
/* DP specific Mode Data bits */
#define PMC_USB_ALTMODE_DP_MODE_SHIFT 8
/* TBT specific Mode Data bits */
-#define PMC_USB_ALTMODE_HPD_HIGH BIT(14)
#define PMC_USB_ALTMODE_TBT_TYPE BIT(17)
#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18)
#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20)
@@ -128,13 +125,19 @@ static int hsl_orientation(struct pmc_usb_port *port)
static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len)
{
u8 response[4];
+ int ret;
/*
* Error bit will always be 0 with the USBC command.
- * Status can be checked from the response message.
+ * Status can be checked from the response message only if
+ * intel_scu_ipc_dev_command() succeeds.
*/
- intel_scu_ipc_dev_command(port->pmc->ipc, PMC_USBC_CMD, 0, msg, len,
- response, sizeof(response));
+ ret = intel_scu_ipc_dev_command(port->pmc->ipc, PMC_USBC_CMD, 0, msg,
+ len, response, sizeof(response));
+
+ if (ret)
+ return ret;
+
if (response[2] & PMC_USB_RESP_STATUS_FAILURE) {
if (response[2] & PMC_USB_RESP_STATUS_FATAL)
return -EIO;
@@ -179,15 +182,9 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
-
req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
PMC_USB_ALTMODE_DP_MODE_SHIFT;
- if (data->status & DP_STATUS_HPD_STATE)
- req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
-
ret = pmc_usb_command(port, (void *)&req, sizeof(req));
if (ret)
return ret;
@@ -212,9 +209,6 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
-
if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3)
req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE;
@@ -497,6 +491,7 @@ err_remove_ports:
for (i = 0; i < pmc->num_ports; i++) {
typec_switch_unregister(pmc->port[i].typec_sw);
typec_mux_unregister(pmc->port[i].typec_mux);
+ usb_role_switch_unregister(pmc->port[i].usb_sw);
}
return ret;
@@ -510,6 +505,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
for (i = 0; i < pmc->num_ports; i++) {
typec_switch_unregister(pmc->port[i].typec_sw);
typec_mux_unregister(pmc->port[i].typec_mux);
+ usb_role_switch_unregister(pmc->port[i].usb_sw);
}
return 0;
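
A sketch of the error-handling order the pmc_usb_command() change enforces:
check the transport return code first, and parse the response bytes only once
the IPC call itself succeeded. ipc_command() and the status bit below are
illustrative stand-ins:

    #include <stdio.h>

    #define RESP_STATUS_FAILURE 0x01 /* illustrative bit */

    static int ipc_command(unsigned char resp[4])
    {
        resp[2] = 0;  /* firmware reports success */
        return 0;     /* <0 would mean the IPC transport failed */
    }

    int main(void)
    {
        unsigned char response[4];
        int ret = ipc_command(response);

        /* Inspect the response only once the command itself succeeded */
        if (ret) {
            fprintf(stderr, "IPC failed: %d\n", ret);
            return 1;
        }
        if (response[2] & RESP_STATUS_FAILURE)
            return 2; /* status failure reported in the response */

        puts("command ok");
        return 0;
    }
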
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index e680fcfdee60..758b988ac518 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -216,14 +216,18 @@ void ucsi_altmode_update_active(struct ucsi_connector *con)
con->partner_altmode[i] == altmode);
}
-static u8 ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid)
+static int ucsi_altmode_next_mode(struct typec_altmode **alt, u16 svid)
{
u8 mode = 1;
int i;
- for (i = 0; alt[i]; i++)
+ for (i = 0; alt[i]; i++) {
+ if (i > MODE_DISCOVERY_MAX)
+ return -ERANGE;
+
if (alt[i]->svid == svid)
mode++;
+ }
return mode;
}
@@ -258,8 +262,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
goto err;
}
- desc->mode = ucsi_altmode_next_mode(con->port_altmode,
- desc->svid);
+ ret = ucsi_altmode_next_mode(con->port_altmode, desc->svid);
+ if (ret < 0)
+ return ret;
+
+ desc->mode = ret;
switch (desc->svid) {
case USB_TYPEC_DP_SID:
@@ -292,8 +299,11 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
goto err;
}
- desc->mode = ucsi_altmode_next_mode(con->partner_altmode,
- desc->svid);
+ ret = ucsi_altmode_next_mode(con->partner_altmode, desc->svid);
+ if (ret < 0)
+ return ret;
+
+ desc->mode = ret;
alt = typec_partner_register_altmode(con->partner, desc);
if (IS_ERR(alt)) {
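
The loop bound added to ucsi_altmode_next_mode() caps traversal of a possibly
unterminated altmode array and reports -ERANGE instead of walking off the end.
A stand-alone version of the same bounded scan (the MODE_DISCOVERY_MAX value
below is illustrative):

    #include <errno.h>
    #include <stdio.h>

    #define MODE_DISCOVERY_MAX 6 /* illustrative cap */

    struct altmode { unsigned short svid; };

    static int next_mode(struct altmode **alt, unsigned short svid)
    {
        int mode = 1;

        for (int i = 0; alt[i]; i++) {
            if (i > MODE_DISCOVERY_MAX)
                return -ERANGE; /* list not sanely terminated */
            if (alt[i]->svid == svid)
                mode++;
        }
        return mode;
    }

    int main(void)
    {
        struct altmode a = { 0xff01 }, b = { 0xff01 };
        struct altmode *alts[] = { &a, &b, NULL };

        printf("next mode for svid 0xff01: %d\n", next_mode(alts, 0xff01));
        return 0;
    }
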
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 9fc4f338e870..fbfe8f5933af 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
if (ret)
goto out_clear_bit;
- if (!wait_for_completion_timeout(&ua->complete, msecs_to_jiffies(5000)))
+ if (!wait_for_completion_timeout(&ua->complete, 60 * HZ))
ret = -ETIMEDOUT;
out_clear_bit:
@@ -112,11 +112,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
static int ucsi_acpi_probe(struct platform_device *pdev)
{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct ucsi_acpi *ua;
struct resource *res;
acpi_status status;
int ret;
+ if (adev->dep_unmet)
+ return -EPROBE_DEFER;
+
ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL);
if (!ua)
return -ENOMEM;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5e850cc9f891..39deb22a4180 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -22,52 +22,6 @@ config VGA_CONSOLE
Say Y.
-config VGACON_SOFT_SCROLLBACK
- bool "Enable Scrollback Buffer in System RAM"
- depends on VGA_CONSOLE
- default n
- help
- The scrollback buffer of the standard VGA console is located in
- the VGA RAM. The size of this RAM is fixed and is quite small.
- If you require a larger scrollback buffer, this can be placed in
- System RAM which is dynamically allocated during initialization.
- Placing the scrollback buffer in System RAM will slightly slow
- down the console.
-
- If you want this feature, say 'Y' here and enter the amount of
- RAM to allocate for this buffer. If unsure, say 'N'.
-
-config VGACON_SOFT_SCROLLBACK_SIZE
- int "Scrollback Buffer Size (in KB)"
- depends on VGACON_SOFT_SCROLLBACK
- range 1 1024
- default "64"
- help
- Enter the amount of System RAM to allocate for scrollback
- buffers of VGA consoles. Each 64KB will give you approximately
- 16 80x25 screenfuls of scrollback buffer.
-
-config VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT
- bool "Persistent Scrollback History for each console by default"
- depends on VGACON_SOFT_SCROLLBACK
- default n
- help
- Say Y here if the scrollback history should persist by default when
- switching between consoles. Otherwise, the scrollback history will be
- flushed each time the console is switched. This feature can also be
- enabled using the boot command line parameter
- 'vgacon.scrollback_persistent=1'.
-
- This feature might break your tool of choice to flush the scrollback
- buffer, e.g. clear(1) will work fine but Debian's clear_console(1)
- will be broken, which might cause security issues.
- You can use the escape sequence \e[3J instead if this feature is
- activated.
-
- Note that a buffer of VGACON_SOFT_SCROLLBACK_SIZE is taken for each
- created tty device.
- So if you use a RAM-constrained system, say N here.
-
config MDA_CONSOLE
depends on !M68K && !PARISC && ISA
tristate "MDA text console (dual-headed)"
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index a52bb3740073..17876f0179b5 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -165,214 +165,6 @@ static inline void vga_set_mem_top(struct vc_data *c)
write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2);
}
-#ifdef CONFIG_VGACON_SOFT_SCROLLBACK
-/* software scrollback */
-struct vgacon_scrollback_info {
- void *data;
- int tail;
- int size;
- int rows;
- int cnt;
- int cur;
- int save;
- int restore;
-};
-
-static struct vgacon_scrollback_info *vgacon_scrollback_cur;
-static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES];
-static bool scrollback_persistent = \
- IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT);
-module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000);
-MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles");
-
-static void vgacon_scrollback_reset(int vc_num, size_t reset_size)
-{
- struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num];
-
- if (scrollback->data && reset_size > 0)
- memset(scrollback->data, 0, reset_size);
-
- scrollback->cnt = 0;
- scrollback->tail = 0;
- scrollback->cur = 0;
-}
-
-static void vgacon_scrollback_init(int vc_num)
-{
- int pitch = vga_video_num_columns * 2;
- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
- int rows = size / pitch;
- void *data;
-
- data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024,
- GFP_NOWAIT);
-
- vgacon_scrollbacks[vc_num].data = data;
- vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
-
- vgacon_scrollback_cur->rows = rows - 1;
- vgacon_scrollback_cur->size = rows * pitch;
-
- vgacon_scrollback_reset(vc_num, size);
-}
-
-static void vgacon_scrollback_switch(int vc_num)
-{
- if (!scrollback_persistent)
- vc_num = 0;
-
- if (!vgacon_scrollbacks[vc_num].data) {
- vgacon_scrollback_init(vc_num);
- } else {
- if (scrollback_persistent) {
- vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];
- } else {
- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-
- vgacon_scrollback_reset(vc_num, size);
- }
- }
-}
-
-static void vgacon_scrollback_startup(void)
-{
- vgacon_scrollback_cur = &vgacon_scrollbacks[0];
- vgacon_scrollback_init(0);
-}
-
-static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
-{
- void *p;
-
- if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size ||
- c->vc_num != fg_console)
- return;
-
- p = (void *) (c->vc_origin + t * c->vc_size_row);
-
- while (count--) {
- if ((vgacon_scrollback_cur->tail + c->vc_size_row) >
- vgacon_scrollback_cur->size)
- vgacon_scrollback_cur->tail = 0;
-
- scr_memcpyw(vgacon_scrollback_cur->data +
- vgacon_scrollback_cur->tail,
- p, c->vc_size_row);
-
- vgacon_scrollback_cur->cnt++;
- p += c->vc_size_row;
- vgacon_scrollback_cur->tail += c->vc_size_row;
-
- if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size)
- vgacon_scrollback_cur->tail = 0;
-
- if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows)
- vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows;
-
- vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
- }
-}
-
-static void vgacon_restore_screen(struct vc_data *c)
-{
- c->vc_origin = c->vc_visible_origin;
- vgacon_scrollback_cur->save = 0;
-
- if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
- scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
- c->vc_screenbuf_size > vga_vram_size ?
- vga_vram_size : c->vc_screenbuf_size);
- vgacon_scrollback_cur->restore = 1;
- vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;
- }
-}
-
-static void vgacon_scrolldelta(struct vc_data *c, int lines)
-{
- int start, end, count, soff;
-
- if (!lines) {
- vgacon_restore_screen(c);
- return;
- }
-
- if (!vgacon_scrollback_cur->data)
- return;
-
- if (!vgacon_scrollback_cur->save) {
- vgacon_cursor(c, CM_ERASE);
- vgacon_save_screen(c);
- c->vc_origin = (unsigned long)c->vc_screenbuf;
- vgacon_scrollback_cur->save = 1;
- }
-
- vgacon_scrollback_cur->restore = 0;
- start = vgacon_scrollback_cur->cur + lines;
- end = start + abs(lines);
-
- if (start < 0)
- start = 0;
-
- if (start > vgacon_scrollback_cur->cnt)
- start = vgacon_scrollback_cur->cnt;
-
- if (end < 0)
- end = 0;
-
- if (end > vgacon_scrollback_cur->cnt)
- end = vgacon_scrollback_cur->cnt;
-
- vgacon_scrollback_cur->cur = start;
- count = end - start;
- soff = vgacon_scrollback_cur->tail -
- ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row);
- soff -= count * c->vc_size_row;
-
- if (soff < 0)
- soff += vgacon_scrollback_cur->size;
-
- count = vgacon_scrollback_cur->cnt - start;
-
- if (count > c->vc_rows)
- count = c->vc_rows;
-
- if (count) {
- int copysize;
-
- int diff = c->vc_rows - count;
- void *d = (void *) c->vc_visible_origin;
- void *s = (void *) c->vc_screenbuf;
-
- count *= c->vc_size_row;
- /* how much memory to end of buffer left? */
- copysize = min(count, vgacon_scrollback_cur->size - soff);
- scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize);
- d += copysize;
- count -= copysize;
-
- if (count) {
- scr_memcpyw(d, vgacon_scrollback_cur->data, count);
- d += count;
- }
-
- if (diff)
- scr_memcpyw(d, s, diff * c->vc_size_row);
- } else
- vgacon_cursor(c, CM_MOVE);
-}
-
-static void vgacon_flush_scrollback(struct vc_data *c)
-{
- size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;
-
- vgacon_scrollback_reset(c->vc_num, size);
-}
-#else
-#define vgacon_scrollback_startup(...) do { } while (0)
-#define vgacon_scrollback_init(...) do { } while (0)
-#define vgacon_scrollback_update(...) do { } while (0)
-#define vgacon_scrollback_switch(...) do { } while (0)
-
static void vgacon_restore_screen(struct vc_data *c)
{
if (c->vc_origin != c->vc_visible_origin)
@@ -386,11 +178,6 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
vga_set_mem_top(c);
}
-static void vgacon_flush_scrollback(struct vc_data *c)
-{
-}
-#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
-
static const char *vgacon_startup(void)
{
const char *display_desc = NULL;
@@ -573,10 +360,7 @@ static const char *vgacon_startup(void)
vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH;
vgacon_yres = vga_scan_lines;
- if (!vga_init_done) {
- vgacon_scrollback_startup();
- vga_init_done = true;
- }
+ vga_init_done = true;
return display_desc;
}
@@ -869,7 +653,6 @@ static int vgacon_switch(struct vc_data *c)
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
- vgacon_scrollback_switch(c->vc_num);
return 0; /* Redrawing not needed */
}
@@ -1386,7 +1169,6 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
oldo = c->vc_origin;
delta = lines * c->vc_size_row;
if (dir == SM_UP) {
- vgacon_scrollback_update(c, t, lines);
if (c->vc_scr_end + delta >= vga_vram_end) {
scr_memcpyw((u16 *) vga_vram_base,
(u16 *) (oldo + delta),
@@ -1450,7 +1232,6 @@ const struct consw vga_con = {
.con_save_screen = vgacon_save_screen,
.con_build_attr = vgacon_build_attr,
.con_invert_region = vgacon_invert_region,
- .con_flush_scrollback = vgacon_flush_scrollback,
};
EXPORT_SYMBOL(vga_con);
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 4e6cbc24346d..9725ecd1255b 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -234,7 +234,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -247,15 +247,6 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 66167830fefd..41f3fa3db6d4 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -122,12 +122,6 @@ static int logo_lines;
/* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
enums. */
static int logo_shown = FBCON_LOGO_CANSHOW;
-/* Software scrollback */
-static int fbcon_softback_size = 32768;
-static unsigned long softback_buf, softback_curr;
-static unsigned long softback_in;
-static unsigned long softback_top, softback_end;
-static int softback_lines;
/* console mappings */
static int first_fb_vc;
static int last_fb_vc = MAX_NR_CONSOLES - 1;
@@ -167,8 +161,6 @@ static int margin_color;
static const struct consw fb_con;
-#define CM_SOFTBACK (8)
-
#define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
static int fbcon_set_origin(struct vc_data *);
@@ -373,18 +365,6 @@ static int get_color(struct vc_data *vc, struct fb_info *info,
return color;
}
-static void fbcon_update_softback(struct vc_data *vc)
-{
- int l = fbcon_softback_size / vc->vc_size_row;
-
- if (l > 5)
- softback_end = softback_buf + l * vc->vc_size_row;
- else
- /* Smaller scrollback makes no sense, and 0 would screw
- the operation totally */
- softback_top = 0;
-}
-
static void fb_flashcursor(struct work_struct *work)
{
struct fb_info *info = container_of(work, struct fb_info, queue);
@@ -414,7 +394,7 @@ static void fb_flashcursor(struct work_struct *work)
c = scr_readw((u16 *) vc->vc_pos);
mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
CM_ERASE : CM_DRAW;
- ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
console_unlock();
}
@@ -471,13 +451,7 @@ static int __init fb_console_setup(char *this_opt)
}
if (!strncmp(options, "scrollback:", 11)) {
- options += 11;
- if (*options) {
- fbcon_softback_size = simple_strtoul(options, &options, 0);
- if (*options == 'k' || *options == 'K') {
- fbcon_softback_size *= 1024;
- }
- }
+ pr_warn("Ignoring scrollback size option\n");
continue;
}
@@ -1022,31 +996,6 @@ static const char *fbcon_startup(void)
set_blitting_type(vc, info);
- if (info->fix.type != FB_TYPE_TEXT) {
- if (fbcon_softback_size) {
- if (!softback_buf) {
- softback_buf =
- (unsigned long)
- kvmalloc(fbcon_softback_size,
- GFP_KERNEL);
- if (!softback_buf) {
- fbcon_softback_size = 0;
- softback_top = 0;
- }
- }
- } else {
- if (softback_buf) {
- kvfree((void *) softback_buf);
- softback_buf = 0;
- softback_top = 0;
- }
- }
- if (softback_buf)
- softback_in = softback_top = softback_curr =
- softback_buf;
- softback_lines = 0;
- }
-
/* Setup default font */
if (!p->fontdata && !vc->vc_font.data) {
if (!fontname[0] || !(font = find_font(fontname)))
@@ -1220,9 +1169,6 @@ static void fbcon_init(struct vc_data *vc, int init)
if (logo)
fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
- if (vc == svc && softback_buf)
- fbcon_update_softback(vc);
-
if (ops->rotate_font && ops->rotate_font(info, vc)) {
ops->rotate = FB_ROTATE_UR;
set_blitting_type(vc, info);
@@ -1385,7 +1331,6 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_ops *ops = info->fbcon_par;
- int y;
int c = scr_readw((u16 *) vc->vc_pos);
ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
@@ -1399,16 +1344,8 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
fbcon_add_cursor_timer(info);
ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
- if (mode & CM_SOFTBACK) {
- mode &= ~CM_SOFTBACK;
- y = softback_lines;
- } else {
- if (softback_lines)
- fbcon_set_origin(vc);
- y = 0;
- }
- ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
}
@@ -1479,8 +1416,6 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
if (con_is_visible(vc)) {
update_screen(vc);
- if (softback_buf)
- fbcon_update_softback(vc);
}
}
@@ -1618,99 +1553,6 @@ static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
scrollback_current = 0;
}
-static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
- long delta)
-{
- int count = vc->vc_rows;
- unsigned short *d, *s;
- unsigned long n;
- int line = 0;
-
- d = (u16 *) softback_curr;
- if (d == (u16 *) softback_in)
- d = (u16 *) vc->vc_origin;
- n = softback_curr + delta * vc->vc_size_row;
- softback_lines -= delta;
- if (delta < 0) {
- if (softback_curr < softback_top && n < softback_buf) {
- n += softback_end - softback_buf;
- if (n < softback_top) {
- softback_lines -=
- (softback_top - n) / vc->vc_size_row;
- n = softback_top;
- }
- } else if (softback_curr >= softback_top
- && n < softback_top) {
- softback_lines -=
- (softback_top - n) / vc->vc_size_row;
- n = softback_top;
- }
- } else {
- if (softback_curr > softback_in && n >= softback_end) {
- n += softback_buf - softback_end;
- if (n > softback_in) {
- n = softback_in;
- softback_lines = 0;
- }
- } else if (softback_curr <= softback_in && n > softback_in) {
- n = softback_in;
- softback_lines = 0;
- }
- }
- if (n == softback_curr)
- return;
- softback_curr = n;
- s = (u16 *) softback_curr;
- if (s == (u16 *) softback_in)
- s = (u16 *) vc->vc_origin;
- while (count--) {
- unsigned short *start;
- unsigned short *le;
- unsigned short c;
- int x = 0;
- unsigned short attr = 1;
-
- start = s;
- le = advance_row(s, 1);
- do {
- c = scr_readw(s);
- if (attr != (c & 0xff00)) {
- attr = c & 0xff00;
- if (s > start) {
- fbcon_putcs(vc, start, s - start,
- line, x);
- x += s - start;
- start = s;
- }
- }
- if (c == scr_readw(d)) {
- if (s > start) {
- fbcon_putcs(vc, start, s - start,
- line, x);
- x += s - start + 1;
- start = s + 1;
- } else {
- x++;
- start++;
- }
- }
- s++;
- d++;
- } while (s < le);
- if (s > start)
- fbcon_putcs(vc, start, s - start, line, x);
- line++;
- if (d == (u16 *) softback_end)
- d = (u16 *) softback_buf;
- if (d == (u16 *) softback_in)
- d = (u16 *) vc->vc_origin;
- if (s == (u16 *) softback_end)
- s = (u16 *) softback_buf;
- if (s == (u16 *) softback_in)
- s = (u16 *) vc->vc_origin;
- }
-}
-
static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int dy)
{
@@ -1850,31 +1692,6 @@ static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
}
}
-static inline void fbcon_softback_note(struct vc_data *vc, int t,
- int count)
-{
- unsigned short *p;
-
- if (vc->vc_num != fg_console)
- return;
- p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
-
- while (count) {
- scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
- count--;
- p = advance_row(p, 1);
- softback_in += vc->vc_size_row;
- if (softback_in == softback_end)
- softback_in = softback_buf;
- if (softback_in == softback_top) {
- softback_top += vc->vc_size_row;
- if (softback_top == softback_end)
- softback_top = softback_buf;
- }
- }
- softback_curr = softback_in;
-}
-
static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
enum con_scroll dir, unsigned int count)
{
@@ -1897,8 +1714,6 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- if (softback_top)
- fbcon_softback_note(vc, t, count);
if (logo_shown >= 0)
goto redraw_up;
switch (p->scrollmode) {
@@ -2203,7 +2018,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
struct fb_var_screeninfo var = info->var;
int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
- if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
+ if (p->userfont && FNTSIZE(vc->vc_font.data)) {
int size;
int pitch = PITCH(vc->vc_font.width);
@@ -2269,14 +2084,6 @@ static int fbcon_switch(struct vc_data *vc)
info = registered_fb[con2fb_map[vc->vc_num]];
ops = info->fbcon_par;
- if (softback_top) {
- if (softback_lines)
- fbcon_set_origin(vc);
- softback_top = softback_curr = softback_in = softback_buf;
- softback_lines = 0;
- fbcon_update_softback(vc);
- }
-
if (logo_shown >= 0) {
struct vc_data *conp2 = vc_cons[logo_shown].d;
@@ -2600,9 +2407,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
int cnt;
char *old_data = NULL;
- if (con_is_visible(vc) && softback_lines)
- fbcon_set_origin(vc);
-
resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
if (p->userfont)
old_data = vc->vc_font.data;
@@ -2628,8 +2432,6 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
cols /= w;
rows /= h;
vc_resize(vc, cols, rows);
- if (con_is_visible(vc) && softback_buf)
- fbcon_update_softback(vc);
} else if (con_is_visible(vc)
&& vc->vc_mode == KD_TEXT) {
fbcon_clear_margins(vc, 0);
@@ -2788,19 +2590,7 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
{
- unsigned long p;
- int line;
-
- if (vc->vc_num != fg_console || !softback_lines)
- return (u16 *) (vc->vc_origin + offset);
- line = offset / vc->vc_size_row;
- if (line >= softback_lines)
- return (u16 *) (vc->vc_origin + offset -
- softback_lines * vc->vc_size_row);
- p = softback_curr + offset;
- if (p >= softback_end)
- p += softback_buf - softback_end;
- return (u16 *) p;
+ return (u16 *) (vc->vc_origin + offset);
}
static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
@@ -2814,22 +2604,7 @@ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
x = offset % vc->vc_cols;
y = offset / vc->vc_cols;
- if (vc->vc_num == fg_console)
- y += softback_lines;
ret = pos + (vc->vc_cols - x) * 2;
- } else if (vc->vc_num == fg_console && softback_lines) {
- unsigned long offset = pos - softback_curr;
-
- if (pos < softback_curr)
- offset += softback_end - softback_buf;
- offset /= 2;
- x = offset % vc->vc_cols;
- y = offset / vc->vc_cols;
- ret = pos + (vc->vc_cols - x) * 2;
- if (ret == softback_end)
- ret = softback_buf;
- if (ret == softback_in)
- ret = vc->vc_origin;
} else {
/* Should not happen */
x = y = 0;
@@ -2857,106 +2632,11 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
(((a) & 0x0700) << 4);
scr_writew(a, p++);
- if (p == (u16 *) softback_end)
- p = (u16 *) softback_buf;
- if (p == (u16 *) softback_in)
- p = (u16 *) vc->vc_origin;
- }
-}
-
-static void fbcon_scrolldelta(struct vc_data *vc, int lines)
-{
- struct fb_info *info = registered_fb[con2fb_map[fg_console]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct fbcon_display *disp = &fb_display[fg_console];
- int offset, limit, scrollback_old;
-
- if (softback_top) {
- if (vc->vc_num != fg_console)
- return;
- if (vc->vc_mode != KD_TEXT || !lines)
- return;
- if (logo_shown >= 0) {
- struct vc_data *conp2 = vc_cons[logo_shown].d;
-
- if (conp2->vc_top == logo_lines
- && conp2->vc_bottom == conp2->vc_rows)
- conp2->vc_top = 0;
- if (logo_shown == vc->vc_num) {
- unsigned long p, q;
- int i;
-
- p = softback_in;
- q = vc->vc_origin +
- logo_lines * vc->vc_size_row;
- for (i = 0; i < logo_lines; i++) {
- if (p == softback_top)
- break;
- if (p == softback_buf)
- p = softback_end;
- p -= vc->vc_size_row;
- q -= vc->vc_size_row;
- scr_memcpyw((u16 *) q, (u16 *) p,
- vc->vc_size_row);
- }
- softback_in = softback_curr = p;
- update_region(vc, vc->vc_origin,
- logo_lines * vc->vc_cols);
- }
- logo_shown = FBCON_LOGO_CANSHOW;
- }
- fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
- fbcon_redraw_softback(vc, disp, lines);
- fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
- return;
}
-
- if (!scrollback_phys_max)
- return;
-
- scrollback_old = scrollback_current;
- scrollback_current -= lines;
- if (scrollback_current < 0)
- scrollback_current = 0;
- else if (scrollback_current > scrollback_max)
- scrollback_current = scrollback_max;
- if (scrollback_current == scrollback_old)
- return;
-
- if (fbcon_is_inactive(vc, info))
- return;
-
- fbcon_cursor(vc, CM_ERASE);
-
- offset = disp->yscroll - scrollback_current;
- limit = disp->vrows;
- switch (disp->scrollmode) {
- case SCROLL_WRAP_MOVE:
- info->var.vmode |= FB_VMODE_YWRAP;
- break;
- case SCROLL_PAN_MOVE:
- case SCROLL_PAN_REDRAW:
- limit -= vc->vc_rows;
- info->var.vmode &= ~FB_VMODE_YWRAP;
- break;
- }
- if (offset < 0)
- offset += limit;
- else if (offset >= limit)
- offset -= limit;
-
- ops->var.xoffset = 0;
- ops->var.yoffset = offset * vc->vc_font.height;
- ops->update_start(info);
-
- if (!scrollback_current)
- fbcon_cursor(vc, CM_DRAW);
}
static int fbcon_set_origin(struct vc_data *vc)
{
- if (softback_lines)
- fbcon_scrolldelta(vc, softback_lines);
return 0;
}
@@ -3020,8 +2700,6 @@ static void fbcon_modechanged(struct fb_info *info)
fbcon_set_palette(vc, color_table);
update_screen(vc);
- if (softback_buf)
- fbcon_update_softback(vc);
}
}
@@ -3432,7 +3110,6 @@ static const struct consw fb_con = {
.con_font_default = fbcon_set_def_font,
.con_font_copy = fbcon_copy_font,
.con_set_palette = fbcon_set_palette,
- .con_scrolldelta = fbcon_scrolldelta,
.con_set_origin = fbcon_set_origin,
.con_invert_region = fbcon_invert_region,
.con_screen_pos = fbcon_screen_pos,
@@ -3667,9 +3344,6 @@ static void fbcon_exit(void)
}
#endif
- kvfree((void *)softback_buf);
- softback_buf = 0UL;
-
for_each_registered_fb(i) {
int pending = 0;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 20dea853765f..78bb14c03643 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -62,7 +62,7 @@ struct fbcon_ops {
void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
int color, int bottom_only);
void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg);
+ int fg, int bg);
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index 5b177131e062..bbd869efd03b 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -219,7 +219,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -236,15 +236,6 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 894d01a62f30..a34cbe8e9874 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -202,7 +202,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -219,15 +219,6 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 01b87f278d79..199cbc7abe35 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -249,7 +249,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_cursor cursor;
struct fbcon_ops *ops = info->fbcon_par;
@@ -267,15 +267,6 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.set = 0;
- if (softback_lines) {
- if (y + softback_lines >= vc->vc_rows) {
- mode = CM_ERASE;
- ops->cursor_flash = 0;
- return;
- } else
- y += softback_lines;
- }
-
c = scr_readw((u16 *) vc->vc_pos);
attribute = get_attribute(info, c);
src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 1dfaff0881fb..31b85b71cc37 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -80,7 +80,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
}
static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
- int softback_lines, int fg, int bg)
+ int fg, int bg)
{
struct fb_tilecursor cursor;
int use_sw = vc->vc_cursor_type & CUR_SW;
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index a20eeb8308ff..578d3541e3d6 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i
char oldop = setop(0);
char oldsr = setsr(0);
char oldmask = selectmask();
- const char *cdat = image->data;
+ const unsigned char *cdat = image->data;
u32 dx = image->dx;
char __iomem *where;
int y;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index ea6c1e7e3e42..41645fe6ad48 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -325,4 +325,14 @@ config XEN_HAVE_VPMU
config XEN_FRONT_PGDIR_SHBUF
tristate
+config XEN_UNPOPULATED_ALLOC
+ bool "Use unpopulated memory ranges for guest mappings"
+ depends on X86 && ZONE_DEVICE
+ default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
+ help
+ Use unpopulated memory ranges in order to create mappings for guest
+ memory regions, including grant maps and foreign pages. This avoids
+ having to balloon out RAM regions in order to obtain physical memory
+ space to create such mappings.
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index c25c9a699b48..babdca808861 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -41,3 +41,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
+obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37ffccda8bb8..51427c752b37 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -653,7 +653,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(free_xenballooned_pages);
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -707,7 +707,7 @@ static int __init balloon_init(void)
register_sysctl_table(xen_root);
#endif
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
{
int i;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8d06bf1cc347..523dcdf39cc9 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -801,7 +801,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
int ret;
- ret = alloc_xenballooned_pages(nr_pages, pages);
+ ret = xen_alloc_unpopulated_pages(nr_pages, pages);
if (ret < 0)
return ret;
@@ -836,7 +836,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
void gnttab_free_pages(int nr_pages, struct page **pages)
{
gnttab_pages_clear_private(nr_pages, pages);
- free_xenballooned_pages(nr_pages, pages);
+ xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 63abe6c3642b..b0c73c58f987 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -424,7 +424,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
if (pages == NULL)
return -ENOMEM;
- rc = alloc_xenballooned_pages(numpgs, pages);
+ rc = xen_alloc_unpopulated_pages(numpgs, pages);
if (rc != 0) {
pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
numpgs, rc);
@@ -895,7 +895,7 @@ static void privcmd_close(struct vm_area_struct *vma)
rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
if (rc == 0)
- free_xenballooned_pages(numpgs, pages);
+ xen_free_unpopulated_pages(numpgs, pages);
else
pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
numpgs, rc);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
new file mode 100644
index 000000000000..3b98dc921426
--- /dev/null
+++ b/drivers/xen/unpopulated-alloc.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+
+#include <xen/page.h>
+#include <xen/xen.h>
+
+static DEFINE_MUTEX(list_lock);
+static LIST_HEAD(page_list);
+static unsigned int list_count;
+
+static int fill_list(unsigned int nr_pages)
+{
+ struct dev_pagemap *pgmap;
+ void *vaddr;
+ unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
+ int ret;
+
+ pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->type = MEMORY_DEVICE_GENERIC;
+ pgmap->res.name = "Xen scratch";
+ pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ ret = allocate_resource(&iomem_resource, &pgmap->res,
+ alloc_pages * PAGE_SIZE, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ if (ret < 0) {
+ pr_err("Cannot allocate new IOMEM resource\n");
+ kfree(pgmap);
+ return ret;
+ }
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ /*
+ * memremap will build page tables for the new memory, so
+ * the p2m must contain invalid entries in order for the
+ * correct non-present PTEs to be written.
+ *
+ * If a failure occurs, the original (identity) p2m entries
+ * are not restored since this region is now known not to
+ * conflict with any devices.
+ */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+
+ for (i = 0; i < alloc_pages; i++) {
+ if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+ pr_warn("set_phys_to_machine() failed, no memory added\n");
+ release_resource(&pgmap->res);
+ kfree(pgmap);
+ return -ENOMEM;
+ }
+ }
+ }
+#endif
+
+ vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
+ if (IS_ERR(vaddr)) {
+ pr_err("Cannot remap memory range\n");
+ release_resource(&pgmap->res);
+ kfree(pgmap);
+ return PTR_ERR(vaddr);
+ }
+
+ for (i = 0; i < alloc_pages; i++) {
+ struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+
+ BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+ list_add(&pg->lru, &page_list);
+ list_count++;
+ }
+
+ return 0;
+}
+
+/**
+ * xen_alloc_unpopulated_pages - alloc unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&list_lock);
+ if (list_count < nr_pages) {
+ ret = fill_list(nr_pages - list_count);
+ if (ret)
+ goto out;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *pg = list_first_entry_or_null(&page_list,
+ struct page,
+ lru);
+
+ BUG_ON(!pg);
+ list_del(&pg->lru);
+ list_count--;
+ pages[i] = pg;
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ ret = xen_alloc_p2m_entry(page_to_pfn(pg));
+ if (ret < 0) {
+ unsigned int j;
+
+ for (j = 0; j <= i; j++) {
+ list_add(&pages[j]->lru, &page_list);
+ list_count++;
+ }
+ goto out;
+ }
+ }
+#endif
+ }
+
+out:
+ mutex_unlock(&list_lock);
+ return ret;
+}
+EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+
+/**
+ * xen_free_unpopulated_pages - return unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+
+ mutex_lock(&list_lock);
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&pages[i]->lru, &page_list);
+ list_count++;
+ }
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(xen_free_unpopulated_pages);
+
+#ifdef CONFIG_XEN_PV
+static int __init init(void)
+{
+ unsigned int i;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!xen_pv_domain())
+ return 0;
+
+ /*
+ * Initialize with pages from the extra memory regions (see
+ * arch/x86/xen/setup.c).
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ unsigned int j;
+
+ for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+ struct page *pg =
+ pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+ list_add(&pg->lru, &page_list);
+ list_count++;
+ }
+ }
+
+ return 0;
+}
+subsys_initcall(init);
+#endif
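
A single-threaded user-space model of the page-pool logic in
unpopulated-alloc.c: a free list refilled on demand in section-sized batches,
with allocation popping entries and freeing pushing them back. The mutex, p2m
handling and memremap_pages() machinery are omitted; SECTION and the plain
int "pages" are stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    #define SECTION 8 /* stands in for PAGES_PER_SECTION */

    static int pool[1024];
    static int count;

    static void fill(int need)
    {
        int batch = ((need + SECTION - 1) / SECTION) * SECTION; /* round_up() */

        while (batch-- && count < 1024)
            pool[count++] = rand(); /* stand-in for a fresh struct page */
    }

    static int alloc_unpopulated(int n, int *out)
    {
        if (count < n)
            fill(n - count);
        if (count < n)
            return -1; /* pool exhausted */
        for (int i = 0; i < n; i++)
            out[i] = pool[--count];
        return 0;
    }

    static void free_unpopulated(int n, const int *in)
    {
        for (int i = 0; i < n; i++)
            pool[count++] = in[i];
    }

    int main(void)
    {
        int pages[3];

        if (!alloc_unpopulated(3, pages))
            printf("allocated 3, pool now holds %d\n", count);
        free_unpopulated(3, pages);
        printf("freed 3, pool now holds %d\n", count);
        return 0;
    }
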
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 907bcbb93afb..2690318ad50f 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -621,7 +621,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
bool leaked = false;
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+ err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
@@ -662,7 +662,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
addr, nr_pages);
out_free_ballooned_pages:
if (!leaked)
- free_xenballooned_pages(nr_pages, node->hvm.pages);
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
out_err:
return err;
}
@@ -858,7 +858,7 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
info.addrs);
if (!rv) {
vunmap(vaddr);
- free_xenballooned_pages(nr_pages, node->hvm.pages);
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
}
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 7b1077f0abcb..34742c6e189e 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -232,7 +232,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
kfree(pages);
return -ENOMEM;
}
- rc = alloc_xenballooned_pages(nr_pages, pages);
+ rc = xen_alloc_unpopulated_pages(nr_pages, pages);
if (rc) {
pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
nr_pages, rc);
@@ -249,7 +249,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
if (!vaddr) {
pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
nr_pages, rc);
- free_xenballooned_pages(nr_pages, pages);
+ xen_free_unpopulated_pages(nr_pages, pages);
kfree(pages);
kfree(pfns);
return -ENOMEM;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f6bba7eb1fa1..abf86b202b43 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3418,6 +3418,8 @@ fail_block_groups:
btrfs_put_block_group_cache(fs_info);
fail_tree_roots:
+ if (fs_info->data_reloc_root)
+ btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
free_root_pointers(fs_info, true);
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e9eedc053fc5..780b9c9a98fe 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -400,12 +400,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
ASSERT(eb->fs_info);
/*
- * Every shared one has parent tree
- * block, which must be aligned to
- * nodesize.
+ * Every shared one has parent tree block,
+ * which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->nodesize))
+ IS_ALIGNED(offset, eb->fs_info->sectorsize))
return type;
}
} else if (is_data == BTRFS_REF_TYPE_DATA) {
@@ -414,12 +413,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_SHARED_DATA_REF_KEY) {
ASSERT(eb->fs_info);
/*
- * Every shared one has parent tree
- * block, which must be aligned to
- * nodesize.
+ * Every shared one has parent tree block,
+ * which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->nodesize))
+ IS_ALIGNED(offset, eb->fs_info->sectorsize))
return type;
}
} else {
@@ -429,8 +427,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
}
btrfs_print_leaf((struct extent_buffer *)eb);
- btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
- eb->start, type);
+ btrfs_err(eb->fs_info,
+ "eb %llu iref 0x%lx invalid extent inline ref type %d",
+ eb->start, (unsigned long)iref, type);
WARN_ON(1);
return BTRFS_REF_TYPE_INVALID;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ac45f022b495..2d9109d9e98f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2193,7 +2193,8 @@ static noinline int search_ioctl(struct inode *inode,
key.offset = sk->min_offset;
while (1) {
- ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+ ret = fault_in_pages_writeable(ubuf + sk_offset,
+ *buf_size - sk_offset);
if (ret)
break;
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 61f44e78e3c9..80567c11ec12 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
* offset is supposed to be a tree block which
* must be aligned to nodesize.
*/
- if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
- offset, (unsigned long long)eb->fs_info->nodesize);
+ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ pr_info(
+ "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ offset, eb->fs_info->sectorsize);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
* must be aligned to nodesize.
*/
if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
- offset, (unsigned long long)eb->fs_info->nodesize);
+ pr_info(
+ "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ offset, eb->fs_info->sectorsize);
break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 20c6ac1a5de7..d2fc292ac61b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1636,6 +1636,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
+ pending->snap = NULL;
btrfs_abort_transaction(trans, ret);
goto fail;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 214856c4ccb1..117b43367629 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
@@ -6484,8 +6485,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
+ unsigned int nofs_flag;
+ /*
+ * We call this under the chunk_mutex, so we want to use NOFS for this
+ * allocation, however we don't want to change btrfs_alloc_device() to
+ * always do NOFS because we use it in a lot of other GFP_KERNEL safe
+ * places.
+ */
+ nofs_flag = memalloc_nofs_save();
device = btrfs_alloc_device(NULL, &devid, dev_uuid);
+ memalloc_nofs_restore(nofs_flag);
if (IS_ERR(device))
return device;
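The save/restore pair above is the standard scoped-allocation idiom from <linux/sched/mm.h>; a hedged sketch of the general pattern (obj and size are illustrative):

unsigned int nofs_flag;

nofs_flag = memalloc_nofs_save();
/* every allocation in this window implicitly drops __GFP_FS, so
 * GFP_KERNEL callees behave as GFP_NOFS without being changed */
obj = kmalloc(size, GFP_KERNEL);
memalloc_nofs_restore(nofs_flag);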
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 3989d08396ac..1f75b25e559a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1017,6 +1017,8 @@ handle_mnt_opt:
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true,
full_path, fid);
+ if (rc == -EREMOTE)
+ rc = 0;
if (rc) {
cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
__func__, rc);
@@ -1025,6 +1027,8 @@ handle_mnt_opt:
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
full_path, fid);
+ if (rc == -EREMOTE)
+ rc = 0;
if (rc) {
cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
__func__, rc);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index b167d2d02148..a768a09430c3 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -177,7 +177,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
goto out;
if (!fops_get(real_fops)) {
-#ifdef MODULE
+#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING)
goto out;
@@ -312,7 +312,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
goto out;
if (!fops_get(real_fops)) {
-#ifdef MODULE
+#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING)
goto out;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 60378ddf1424..96044f5dbc0e 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ext2_inode_info *ei = EXT2_I(inode);
vm_fault_t ret;
+ bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
- if (vmf->flags & FAULT_FLAG_WRITE) {
+ if (write) {
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
}
@@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
up_read(&ei->dax_sem);
- if (vmf->flags & FAULT_FLAG_WRITE)
+ if (write)
sb_end_pagefault(inode->i_sb);
return ret;
}
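The same FAULT_FLAG_WRITE && VM_SHARED predicate reappears in the xfs change below as xfs_is_write_fault(); the reasoning, as a sketch (the helper name here is invented):

/* Only a write fault into a MAP_SHARED mapping can dirty the file:
 * writes to a MAP_PRIVATE mapping COW into anonymous pages and never
 * reach the filesystem, so no sb_start_pagefault() accounting is needed.
 */
static inline bool is_fs_write_fault(const struct vm_fault *vmf)
{
	return (vmf->flags & FAULT_FLAG_WRITE) &&
	       (vmf->vma->vm_flags & VM_SHARED);
}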
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ed2bca0fce92..73683e58a08d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3550,6 +3550,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
unsigned long align = offset | iov_iter_alignment(iter);
struct block_device *bdev = inode->i_sb->s_bdev;
+ if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
+ return 1;
+
if (align & blocksize_mask) {
if (bdev)
blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3ad7bdbda5ca..cb1b5b61a1da 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2373,6 +2373,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (unlikely(nid >= nm_i->max_nid))
nid = 0;
+ if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
+ nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
+
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
return 0;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a65d357f89a9..e247a5ef3713 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -799,7 +799,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (__is_large_section(sbi)) {
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
- unsigned short valid_blocks =
+ block_t valid_blocks =
get_valid_blocks(sbi, segno, true);
f2fs_bug_on(sbi, unlikely(!valid_blocks ||
@@ -815,7 +815,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
enum dirty_type dirty_type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned short valid_blocks;
+ block_t valid_blocks;
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]--;
@@ -4316,8 +4316,8 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno = 0, offset = 0, secno;
- unsigned short valid_blocks;
- unsigned short blks_per_sec = BLKS_PER_SEC(sbi);
+ block_t valid_blocks;
+ block_t blks_per_sec = BLKS_PER_SEC(sbi);
while (1) {
/* find dirty segment based on free segmap */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 149227160ff0..58b27e4070a3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2184,7 +2184,7 @@ static int __init start_dirtytime_writeback(void)
__initcall(start_dirtytime_writeback);
int dirtytime_interval_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6611ef3269a8..43c165e796da 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -3091,11 +3091,10 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret = 0;
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
- bool async_dio = ff->fc->async_dio;
loff_t pos = 0;
struct inode *inode;
loff_t i_size;
- size_t count = iov_iter_count(iter);
+ size_t count = iov_iter_count(iter), shortened = 0;
loff_t offset = iocb->ki_pos;
struct fuse_io_priv *io;
@@ -3103,17 +3102,9 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
inode = file->f_mapping->host;
i_size = i_size_read(inode);
- if ((iov_iter_rw(iter) == READ) && (offset > i_size))
+ if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
return 0;
- /* optimization for short read */
- if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
- if (offset >= i_size)
- return 0;
- iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
- count = iov_iter_count(iter);
- }
-
io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
if (!io)
return -ENOMEM;
@@ -3129,15 +3120,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* By default, we want to optimize all I/Os with async request
* submission to the client filesystem if supported.
*/
- io->async = async_dio;
+ io->async = ff->fc->async_dio;
io->iocb = iocb;
io->blocking = is_sync_kiocb(iocb);
+ /* optimization for short read */
+ if (io->async && !io->write && offset + count > i_size) {
+ iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset));
+ shortened = count - iov_iter_count(iter);
+ count -= shortened;
+ }
+
/*
* We cannot asynchronously extend the size of a file.
* In such case the aio will behave exactly like sync io.
*/
- if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
+ if ((offset + count > i_size) && io->write)
io->blocking = true;
if (io->async && io->blocking) {
@@ -3155,6 +3153,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else {
ret = __fuse_direct_read(io, iter, &pos);
}
+ iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
if (io->async) {
bool blocking = io->blocking;
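The fix hinges on pairing iov_iter_truncate() with iov_iter_reexpand() so the iterator leaves the function holding the byte count the caller handed in; schematically (want is a hypothetical short-read length, smaller than the current count):

size_t shortened = iov_iter_count(iter) - want;

iov_iter_truncate(iter, want);	/* shrink for the short read */
/* ... issue the (possibly async) I/O on the shortened iterator ... */
iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);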
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 522b891dd187..c9aea6c44372 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1753,6 +1753,9 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
struct io_ring_ctx *ctx = req->ctx;
int ret, notify;
+ if (tsk->flags & PF_EXITING)
+ return -ESRCH;
+
/*
* SQPOLL kernel thread doesn't need notification, just a wakeup. For
* all other cases, use TWA_SIGNAL unconditionally to ensure we're
@@ -1787,8 +1790,10 @@ static void __io_req_task_cancel(struct io_kiocb *req, int error)
static void io_req_task_cancel(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
__io_req_task_cancel(req, -ECANCELED);
+ percpu_ref_put(&ctx->refs);
}
static void __io_req_task_submit(struct io_kiocb *req)
@@ -2010,6 +2015,12 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
static inline bool io_run_task_work(void)
{
+ /*
+ * Not safe to run on exiting task, and the task_work handling will
+ * not add work to such a task.
+ */
+ if (unlikely(current->flags & PF_EXITING))
+ return false;
if (current->task_works) {
__set_current_state(TASK_RUNNING);
task_work_run();
@@ -2283,13 +2294,17 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
goto end_req;
}
- ret = io_import_iovec(rw, req, &iovec, &iter, false);
- if (ret < 0)
- goto end_req;
- ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
- if (!ret)
+ if (!req->io) {
+ ret = io_import_iovec(rw, req, &iovec, &iter, false);
+ if (ret < 0)
+ goto end_req;
+ ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
+ if (!ret)
+ return true;
+ kfree(iovec);
+ } else {
return true;
- kfree(iovec);
+ }
end_req:
req_set_fail_links(req);
io_req_complete(req, ret);
@@ -2980,14 +2995,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
bool force_nonblock)
{
struct io_async_rw *iorw = &req->io->rw;
+ struct iovec *iov;
ssize_t ret;
- iorw->iter.iov = iorw->fast_iov;
- ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
- &iorw->iter, !force_nonblock);
+ iorw->iter.iov = iov = iorw->fast_iov;
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
if (unlikely(ret < 0))
return ret;
+ iorw->iter.iov = iov;
io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
return 0;
}
@@ -3114,6 +3130,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
struct iov_iter __iter, *iter = &__iter;
ssize_t io_size, ret, ret2;
size_t iov_count;
+ bool no_async;
if (req->io)
iter = &req->io->rw.iter;
@@ -3131,7 +3148,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
kiocb->ki_flags &= ~IOCB_NOWAIT;
/* If the file doesn't support async, just async punt */
- if (force_nonblock && !io_file_supports_async(req->file, READ))
+ no_async = force_nonblock && !io_file_supports_async(req->file, READ);
+ if (no_async)
goto copy_iov;
ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
@@ -3175,6 +3193,8 @@ copy_iov:
ret = ret2;
goto out_free;
}
+ if (no_async)
+ return -EAGAIN;
/* it's copied and will be cleaned with ->io */
iovec = NULL;
/* now use our persistent iterator, if we aren't already */
@@ -3507,8 +3527,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
const char __user *fname;
int ret;
- if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
- return -EINVAL;
if (unlikely(sqe->ioprio || sqe->buf_index))
return -EINVAL;
if (unlikely(req->flags & REQ_F_FIXED_FILE))
@@ -3535,6 +3553,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
u64 flags, mode;
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
mode = READ_ONCE(sqe->len);
@@ -3549,6 +3569,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
size_t len;
int ret;
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
@@ -3766,7 +3788,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
#if defined(CONFIG_EPOLL)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
return -EINVAL;
req->epoll.epfd = READ_ONCE(sqe->fd);
@@ -3881,7 +3903,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
@@ -5404,6 +5426,8 @@ static int io_async_cancel(struct io_kiocb *req)
static int io_files_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
+ if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
+ return -EINVAL;
if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
return -EINVAL;
if (sqe->ioprio || sqe->rw_flags)
@@ -5454,6 +5478,8 @@ static int io_req_defer_prep(struct io_kiocb *req,
if (unlikely(ret))
return ret;
+ io_prep_async_work(req);
+
switch (req->opcode) {
case IORING_OP_NOP:
break;
@@ -8029,6 +8055,28 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
return false;
}
+static inline bool io_match_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
+}
+
+static bool io_match_link_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ struct io_kiocb *link;
+
+ if (io_match_files(req, files))
+ return true;
+ if (req->flags & REQ_F_LINK_HEAD) {
+ list_for_each_entry(link, &req->link_list, link_list) {
+ if (io_match_files(link, files))
+ return true;
+ }
+ }
+ return false;
+}
+
/*
* We're looking to cancel 'req' because it's holding on to our files, but
* 'req' could be a link to another request. See if it is, and cancel that
@@ -8103,12 +8151,38 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
io_timeout_remove_link(ctx, req);
}
+static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_defer_entry *de = NULL;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_link_files(de->req, files)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ req_set_fail_links(de->req);
+ io_put_req(de->req);
+ io_req_complete(de->req, -ECANCELED);
+ kfree(de);
+ }
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return;
+ io_cancel_defer_files(ctx, files);
/* cancel all at once, should be faster than doing it one by one*/
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
@@ -8137,6 +8211,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
/* cancel this request, or head link requests */
io_attempt_cancel(ctx, cancel_req);
io_put_req(cancel_req);
+ /* cancellations _may_ trigger task work */
+ io_run_task_work();
schedule();
finish_wait(&ctx->inflight_wait, &wait);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f8946b9468ef..6e95c85fe395 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3293,8 +3293,10 @@ static int _nfs4_do_setattr(struct inode *inode,
/* Servers should only apply open mode checks for file size changes */
truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
- if (!truncate)
+ if (!truncate) {
+ nfs4_inode_make_writeable(inode);
goto zero_stateid;
+ }
if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
@@ -7298,7 +7300,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
err = nfs4_set_lock_state(state, fl);
if (err != 0)
return err;
- err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ do {
+ err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ ssleep(1);
+ } while (err == -NFS4ERR_DELAY);
return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index 8fe03b4a0d2b..25aade344192 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -384,7 +384,7 @@ fail_nomem:
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
- char *options = data;
+ unsigned char *options = data;
if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 9c40d5971035..1b0a01b06a05 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -6226,7 +6226,7 @@ xfs_bmap_validate_extent(
isrt = XFS_IS_REALTIME_INODE(ip);
endfsb = irec->br_startblock + irec->br_blockcount - 1;
- if (isrt) {
+ if (isrt && whichfork == XFS_DATA_FORK) {
if (!xfs_verify_rtbno(mp, irec->br_startblock))
return __this_address;
if (!xfs_verify_rtbno(mp, endfsb))
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c31cd3be9fb2..a29f78a663ca 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1223,6 +1223,14 @@ __xfs_filemap_fault(
return ret;
}
+static inline bool
+xfs_is_write_fault(
+ struct vm_fault *vmf)
+{
+ return (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
+}
+
static vm_fault_t
xfs_filemap_fault(
struct vm_fault *vmf)
@@ -1230,7 +1238,7 @@ xfs_filemap_fault(
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
IS_DAX(file_inode(vmf->vma->vm_file)) &&
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
@@ -1243,7 +1251,7 @@ xfs_filemap_huge_fault(
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, pe_size,
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c6d9f2c444f4..fc5c901c7542 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -292,6 +292,7 @@ enum bpf_arg_type {
ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
+ __BPF_ARG_TYPE_MAX,
};
/* type of values returned from helper functions */
@@ -326,12 +327,16 @@ struct bpf_func_proto {
};
enum bpf_arg_type arg_type[5];
};
- int *btf_id; /* BTF ids of arguments */
- bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is
- * valid. Often used if more
- * than one btf id is permitted
- * for this argument.
- */
+ union {
+ struct {
+ u32 *arg1_btf_id;
+ u32 *arg2_btf_id;
+ u32 *arg3_btf_id;
+ u32 *arg4_btf_id;
+ u32 *arg5_btf_id;
+ };
+ u32 *arg_btf_id[5];
+ };
int *ret_btf_id; /* return value btf_id */
bool (*allowed)(const struct bpf_prog *prog);
};
@@ -697,16 +702,19 @@ enum bpf_jit_poke_reason {
/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
- void *ip;
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
union {
struct {
struct bpf_map *map;
u32 key;
} tail_call;
};
- bool ip_stable;
+ bool tailcall_target_stable;
u8 adj_off;
u16 reason;
+ u32 insn_idx;
};
/* reg_type info for ctx arguments */
@@ -737,6 +745,7 @@ struct bpf_prog_aux {
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
bool func_proto_unreliable;
bool sleepable;
+ bool tail_call_reachable;
enum bpf_tramp_prog_type trampoline_prog_type;
struct bpf_trampoline *trampoline;
struct hlist_node tramp_hlist;
@@ -751,6 +760,7 @@ struct bpf_prog_aux {
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
+ struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
@@ -1380,8 +1390,6 @@ int btf_struct_access(struct bpf_verifier_log *log,
u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
int off, u32 id, u32 need_type_id);
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
- const struct bpf_func_proto *fn, int);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
@@ -1900,6 +1908,6 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
struct btf_id_set;
-bool btf_id_set_contains(struct btf_id_set *set, u32 id);
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#endif /* _LINUX_BPF_H */
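With check_btf_id() gone, a helper proto now pins each pointer argument to exactly one BTF type via the new union; a hypothetical wiring of arg1 to struct task_struct (bpf_demo is invented; the macro comes from the btf_ids.h change below):

BTF_ID_LIST_SINGLE(bpf_demo_btf_ids, struct, task_struct)

static const struct bpf_func_proto bpf_demo_proto = {
	.func		= bpf_demo,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_demo_btf_ids[0],	/* one id per argument */
};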
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53c7bd568c5d..2bb48a2c4d08 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -358,6 +358,9 @@ struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
+ bool has_tail_call;
+ bool tail_call_reachable;
+ bool has_ld_abs;
};
/* single container for all structs
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 6ad4c000661a..d0bd226d6bd9 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -30,6 +30,7 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM89610 0x03625cd0
+#define PHY_ID_BCM72113 0x35905310
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 210b086188a3..57890b357f85 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -76,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl)
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry
@@ -140,6 +147,7 @@ extern struct btf_id_set name;
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name)
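The new convenience macro simply fuses the two existing ones; for example:

BTF_ID_LIST_SINGLE(btf_sock_ids, struct, sock)
/* expands, when BTF is available, to:
 *	BTF_ID_LIST(btf_sock_ids)
 *	BTF_ID(struct, sock)
 * and to 'static u32 btf_sock_ids[1];' otherwise.
 */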
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index e20a0cd09ba5..7da9f1f82e8e 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -2,7 +2,7 @@
/*
* linux/can/core.h
*
- * Protoypes and definitions for CAN protocol modules using the PF_CAN core
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5e3d45525bd3..ed0482b2f4b2 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -108,7 +108,7 @@ static inline bool can_skb_headroom_valid(struct net_device *dev,
skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* preform proper loopback on capable devices */
+ /* perform proper loopback on capable devices */
if (dev->flags & IFF_ECHO)
skb->pkt_type = PACKET_LOOPBACK;
else
@@ -201,8 +201,8 @@ void can_bus_off(struct net_device *dev);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 1b78a0cfb615..f1b38088b765 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -35,6 +35,9 @@ int can_rx_offload_add_timestamp(struct net_device *dev,
int can_rx_offload_add_fifo(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6122efdad6ad..ea7b756b1c8f 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -22,14 +22,8 @@
/*
* __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * In the meantime, to support gcc < 5, we implement __has_attribute
* by hand.
- *
- * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
- * depending on the compiler used to build it; however, these attributes have
- * no semantic effects for sparse, so it does not matter. Also note that,
- * in order to avoid sparse's warnings, even the unsupported ones must be
- * defined to 0.
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4b33cb385f96..6e390d58a9f8 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -11,8 +11,8 @@
# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __percpu __attribute__((noderef, address_space(__percpu)))
# define __rcu __attribute__((noderef, address_space(__rcu)))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 3215023d4852..bf9181cef444 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -142,7 +142,6 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
- CPUHP_AP_ARM_KVMPV_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 75895e6363b8..6175c77bf25e 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -82,6 +82,7 @@ struct cpuidle_state {
#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 6904d4e0b2e0..43b39ab9de1a 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -58,6 +58,8 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
__set_dax_synchronous(dax_dev);
}
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+ int blocksize, sector_t start, sector_t len);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -104,6 +106,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev)
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
+static inline bool dax_supported(struct dax_device *dax_dev,
+ struct block_device *bdev, int blocksize, sector_t start,
+ sector_t len)
+{
+ return false;
+}
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
struct dax_device *dax_dev)
{
@@ -189,14 +197,23 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
}
#endif
+#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
+#else
+static inline int dax_read_lock(void)
+{
+ return 0;
+}
+
+static inline void dax_read_unlock(int id)
+{
+}
+#endif /* CONFIG_DAX */
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
diff --git a/include/linux/device.h b/include/linux/device.h
index ca18da4768e3..9e6ea8931a52 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -454,6 +454,7 @@ struct dev_links_info {
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
+ * @em_pd: device's energy model performance domain
* @pins: For device pin management.
* See Documentation/driver-api/pinctl.rst for details.
* @msi_list: Hosts MSI descriptors
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 2b003ae9fb38..88cd72dfa4e0 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -31,6 +31,8 @@ struct dsa_8021q_context {
const struct dsa_8021q_ops *ops;
struct dsa_switch *ds;
struct list_head crosschip_links;
+ /* EtherType of RX VID, used for filtering on master interface */
+ __be16 proto;
};
#define DSA_8021Q_N_SUBVLAN 8
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index aa9ff9e1c0b3..8aa0c7c2608c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -49,6 +49,10 @@ struct _ddebug {
#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
+
+/* exported for module authors to exercise >control */
+int dynamic_debug_exec_queries(const char *query, const char *modname);
+
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
extern int ddebug_remove_module(const char *mod_name);
@@ -105,7 +109,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
static_branch_unlikely(&descriptor.key.dd_key_false)
#endif
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#define _DPRINTK_KEY_INIT
@@ -117,7 +121,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
#endif
-#endif
+#endif /* CONFIG_JUMP_LABEL */
#define __dynamic_func_call(id, fmt, func, ...) do { \
DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
@@ -172,10 +176,11 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
-#else
+#else /* !CONFIG_DYNAMIC_DEBUG_CORE */
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/printk.h>
static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname)
@@ -210,6 +215,13 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii); \
} while (0)
-#endif
+
+static inline int dynamic_debug_exec_queries(const char *query, const char *modname)
+{
+ pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n");
+ return 0;
+}
+
+#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */
#endif
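A sketch of what the new export enables: a module can apply the same query grammar that user space writes to <debugfs>/dynamic_debug/control (the module and function names below are illustrative):

static int __init mymod_init(void)
{
	/* enable pr_debug() in mymod_probe() at load time */
	dynamic_debug_exec_queries("func mymod_probe +p", KBUILD_MODNAME);
	return 0;
}
module_init(mymod_init);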
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
index 57eac5241303..a97a12bb2c9e 100644
--- a/include/linux/efi_embedded_fw.h
+++ b/include/linux/efi_embedded_fw.h
@@ -8,8 +8,8 @@
#define EFI_EMBEDDED_FW_PREFIX_LEN 8
/*
- * This struct and efi_embedded_fw_list are private to the efi-embedded fw
- * implementation they are in this header for use by lib/test_firmware.c only!
+ * This struct is private to the efi-embedded fw implementation.
+ * It is in this header for use by lib/test_firmware.c only!
*/
struct efi_embedded_fw {
struct list_head list;
@@ -18,8 +18,6 @@ struct efi_embedded_fw {
size_t length;
};
-extern struct list_head efi_embedded_fw_list;
-
/**
* struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw
* code to search for embedded firmwares.
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index efebbffcd5cc..159c7476b11b 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -110,15 +110,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
#endif
/**
- * syscall_enter_from_user_mode - Check and handle work before invoking
- * a syscall
+ * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs: Pointer to current's pt_regs
- * @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
* disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct and the subsequent functions can be
- * instrumented.
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This handles lockdep, RCU (context tracking) and tracing state.
+ *
+ * This is invoked when there is extra architecture specific functionality
+ * to be done between establishing state and handling user mode entry work.
+ */
+void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
+ * architecture specific work.
*
* Returns: The original or a modified syscall number
*
@@ -127,12 +142,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
* syscall_set_return_value() first. If neither of those are called and -1
* is returned, then the syscall will fail with ENOSYS.
*
- * The following functionality is handled here:
+ * It handles the following work items:
*
- * 1) Establish state (lockdep, RCU (context tracking), tracing)
- * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
+ * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
* __secure_computing(), trace_sys_enter()
- * 3) Invocation of audit_syscall_entry()
+ * 2) Invocation of audit_syscall_entry()
+ */
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is a combination of syscall_enter_from_user_mode_prepare() and
+ * syscall_enter_from_user_mode_work().
+ *
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
*/
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
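A hypothetical arch entry path showing how the split is meant to compose (invoke_syscall() stands in for the architecture's own dispatcher):

noinstr void arch_syscall_entry(struct pt_regs *regs, long nr)
{
	/* state first, interrupts still disabled on entry */
	syscall_enter_from_user_mode_prepare(regs);

	/* ... extra architecture specific work, interrupts enabled ... */

	nr = syscall_enter_from_user_mode_work(regs, nr);
	if (nr != -1)
		invoke_syscall(regs, nr);
}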
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 969a80211df6..060b20f0b20f 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -241,6 +241,27 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_STAT_NOT_SET (~0ULL)
+
+/**
+ * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @tx_pause_frames: transmitted pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
+ *
+ * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted`
+ * from the standard.
+ *
+ * @rx_pause_frames: received pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES.
+ *
+ * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
+ * from the standard.
+ */
+struct ethtool_pause_stats {
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+};
+
/**
* struct ethtool_ops - optional netdev operations
* @supported_coalesce_params: supported types of interrupt coalescing.
@@ -282,6 +303,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pause_stats: Report pause frame statistics. Drivers must not zero
+ * statistics which they don't report. The stats structure is initialized
+ * to ETHTOOL_STAT_NOT_SET, indicating that the driver does not report statistics.
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
@@ -418,6 +442,8 @@ struct ethtool_ops {
struct ethtool_ringparam *);
int (*set_ringparam)(struct net_device *,
struct ethtool_ringparam *);
+ void (*get_pause_stats)(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
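Driver side, the contract in the kerneldoc above reads roughly like this hypothetical callback (all mydrv_* names are invented):

static void mydrv_get_pause_stats(struct net_device *dev,
				  struct ethtool_pause_stats *stats)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	/* report only what the MAC counts; rx_pause_frames is deliberately
	 * left at ETHTOOL_STAT_NOT_SET so the core skips it */
	stats->tx_pause_frames = mydrv_read_counter(priv, MYDRV_TX_PAUSE);
}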
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 05b4052715b9..20fc24c9779a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1287,6 +1287,8 @@ int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
struct bpf_sk_lookup_kern {
u16 family;
u16 protocol;
+ __be16 sport;
+ u16 dport;
struct {
__be32 saddr;
__be32 daddr;
@@ -1295,8 +1297,6 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *saddr;
const struct in6_addr *daddr;
} v6;
- __be16 sport;
- u16 dport;
struct sock *selected_sk;
bool no_reuseport;
};
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index 2eab6d5f6736..aab0ffc6bac6 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -120,7 +120,7 @@ static inline bool fs_validate_description(const char *name,
#define fsparam_u32oct(NAME, OPT) \
__fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *16))
+ __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 884b8f8ca06d..01acebe37fab 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -136,6 +136,7 @@ struct ptp_qoriq_registers {
#define DEFAULT_TMR_PRSC 2
#define DEFAULT_FIPER1_PERIOD 1000000000
#define DEFAULT_FIPER2_PERIOD 1000000000
+#define DEFAULT_FIPER3_PERIOD 1000000000
struct ptp_qoriq {
void __iomem *base;
@@ -147,6 +148,7 @@ struct ptp_qoriq {
struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
+ bool fiper3_support;
int irq;
int phc_index;
u32 tclk_period; /* nanoseconds */
@@ -155,6 +157,7 @@ struct ptp_qoriq {
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
+ u32 tmr_fiper3;
u32 (*read)(unsigned __iomem *addr);
void (*write)(unsigned __iomem *addr, u32 val);
};
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index ce2c06f72e86..e5c2d5cc6e6a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -85,8 +85,7 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val
extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
struct ftrace_ops;
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index d03071732db4..7c522fdd9ea7 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -53,6 +53,20 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
@@ -64,6 +78,7 @@ struct i2c_algo_pca_data {
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c47f43e65a2f..53fba39d4ba6 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2330,84 +2330,84 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
}
/* S1G Capabilities Information field */
-#define S1G_CAPAB_B0_S1G_LONG BIT(0)
-#define S1G_CAPAB_B0_SGI_1MHZ BIT(1)
-#define S1G_CAPAB_B0_SGI_2MHZ BIT(2)
-#define S1G_CAPAB_B0_SGI_4MHZ BIT(3)
-#define S1G_CAPAB_B0_SGI_8MHZ BIT(4)
-#define S1G_CAPAB_B0_SGI_16MHZ BIT(5)
-#define S1G_CAPAB_B0_SUPP_CH_WIDTH_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B0_SUPP_CH_WIDTH_SHIFT 6
-
-#define S1G_CAPAB_B1_RX_LDPC BIT(0)
-#define S1G_CAPAB_B1_TX_STBC BIT(1)
-#define S1G_CAPAB_B1_RX_STBC BIT(2)
-#define S1G_CAPAB_B1_SU_BFER BIT(3)
-#define S1G_CAPAB_B1_SU_BFEE BIT(4)
-#define S1G_CAPAB_B1_BFEE_STS_MASK (BIT(5) | BIT(6) | BIT(7))
-#define S1G_CAPAB_B1_BFEE_STS_SHIFT 5
-
-#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_MASK (BIT(0) | BIT(1) | BIT(2))
-#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_SHIFT 0
-#define S1G_CAPAB_B2_MU_BFER BIT(3)
-#define S1G_CAPAB_B2_MU_BFEE BIT(4)
-#define S1G_CAPAB_B2_PLUS_HTC_VHT BIT(5)
-#define S1G_CAPAB_B2_TRAVELING_PILOT_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B2_TRAVELING_PILOT_SHIFT 6
-
-#define S1G_CAPAB_B3_RD_RESPONDER BIT(0)
-#define S1G_CAPAB_B3_HT_DELAYED_BA BIT(1)
-#define S1G_CAPAB_B3_MAX_MPDU_LEN BIT(2)
-#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_MASK (BIT(3) | BIT(4))
-#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_SHIFT 3
-#define S1G_CAPAB_B3_MIN_MPDU_START_MASK (BIT(5) | BIT(6) | BIT(7))
-#define S1G_CAPAB_B3_MIN_MPDU_START_SHIFT 5
-
-#define S1G_CAPAB_B4_UPLINK_SYNC BIT(0)
-#define S1G_CAPAB_B4_DYNAMIC_AID BIT(1)
-#define S1G_CAPAB_B4_BAT BIT(2)
-#define S1G_CAPAB_B4_TIME_ADE BIT(3)
-#define S1G_CAPAB_B4_NON_TIM BIT(4)
-#define S1G_CAPAB_B4_GROUP_AID BIT(5)
-#define S1G_CAPAB_B4_STA_TYPE_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B4_STA_TYPE_SHIFT 6
-
-#define S1G_CAPAB_B5_CENT_AUTH_CONTROL BIT(0)
-#define S1G_CAPAB_B5_DIST_AUTH_CONTROL BIT(1)
-#define S1G_CAPAB_B5_AMSDU BIT(2)
-#define S1G_CAPAB_B5_AMPDU BIT(3)
-#define S1G_CAPAB_B5_ASYMMETRIC_BA BIT(4)
-#define S1G_CAPAB_B5_FLOW_CONTROL BIT(5)
-#define S1G_CAPAB_B5_SECTORIZED_BEAM_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B5_SECTORIZED_BEAM_SHIFT 6
-
-#define S1G_CAPAB_B6_OBSS_MITIGATION BIT(0)
-#define S1G_CAPAB_B6_FRAGMENT_BA BIT(1)
-#define S1G_CAPAB_B6_NDP_PS_POLL BIT(2)
-#define S1G_CAPAB_B6_RAW_OPERATION BIT(3)
-#define S1G_CAPAB_B6_PAGE_SLICING BIT(4)
-#define S1G_CAPAB_B6_TXOP_SHARING_IMP_ACK BIT(5)
-#define S1G_CAPAB_B6_VHT_LINK_ADAPT_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B6_VHT_LINK_ADAPT_SHIFT 6
-
-#define S1G_CAPAB_B7_TACK_AS_PS_POLL BIT(0)
-#define S1G_CAPAB_B7_DUP_1MHZ BIT(1)
-#define S1G_CAPAB_B7_MCS_NEGOTIATION BIT(2)
-#define S1G_CAPAB_B7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
-#define S1G_CAPAB_B7_NDP_BFING_REPORT_POLL BIT(4)
-#define S1G_CAPAB_B7_UNSOLICITED_DYN_AID BIT(5)
-#define S1G_CAPAB_B7_SECTOR_TRAINING_OPERATION BIT(6)
-#define S1G_CAPAB_B7_TEMP_PS_MODE_SWITCH BIT(7)
-
-#define S1G_CAPAB_B8_TWT_GROUPING BIT(0)
-#define S1G_CAPAB_B8_BDT BIT(1)
-#define S1G_CAPAB_B8_COLOR_MASK (BIT(2) | BIT(3) | BIT(4))
-#define S1G_CAPAB_B8_COLOR_SHIFT 2
-#define S1G_CAPAB_B8_TWT_REQUEST BIT(5)
-#define S1G_CAPAB_B8_TWT_RESPOND BIT(6)
-#define S1G_CAPAB_B8_PV1_FRAME BIT(7)
-
-#define S1G_CAPAB_B9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
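Reading the renamed S1G fields now goes through FIELD_GET() from <linux/bitfield.h> instead of manual mask+shift pairs, as S1G_SUPP_CH_WIDTH_MAX() above already does; for instance (capab is a hypothetical capability byte array):

u8 sta_type = FIELD_GET(S1G_CAP4_STA_TYPE, capab[4]);
/* previously: (capab[4] & S1G_CAPAB_B4_STA_TYPE_MASK) >>
 *	       S1G_CAPAB_B4_STA_TYPE_SHIFT */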
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 6479a38e52fa..556caed00258 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -19,7 +19,13 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
- } u;
+ } src;
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ } dst;
__be16 proto;
__u16 vid;
};
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9be1bff4f586..8aab327b5539 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -373,6 +373,8 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
+void kprobe_free_init_mem(void);
+
int disable_kprobe(struct kprobe *kp);
int enable_kprobe(struct kprobe *kp);
@@ -435,6 +437,9 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
+static inline void kprobe_free_init_mem(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
return -ENOSYS;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a23076765b4c..05e3c2fb3ef7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -749,25 +749,46 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
-#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+#define __kvm_get_guest(kvm, gfn, offset, v) \
({ \
unsigned long __addr = gfn_to_hva(kvm, gfn); \
- type __user *__uaddr = (type __user *)(__addr + offset); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
int __ret = -EFAULT; \
\
if (!kvm_is_error_hva(__addr)) \
- __ret = put_user(value, __uaddr); \
+ __ret = get_user(v, __uaddr); \
+ __ret; \
+})
+
+#define kvm_get_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(v, __uaddr); \
if (!__ret) \
mark_page_dirty(kvm, gfn); \
__ret; \
})
-#define kvm_put_guest(kvm, gpa, value, type) \
+#define kvm_put_guest(kvm, gpa, v) \
({ \
gpa_t __gpa = gpa; \
struct kvm *__kvm = kvm; \
+ \
__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
- offset_in_page(__gpa), (value), type); \
+ offset_in_page(__gpa), v); \
})
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
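The reworked macros infer the access width from the value's type, so the old trailing 'type' argument disappears; a hedged usage sketch:

u32 val;

if (!kvm_get_guest(kvm, gpa, val)) {	/* 4-byte read: val is u32 */
	val |= 1;			/* hypothetical flag update */
	kvm_put_guest(kvm, gpa, val);	/* 4-byte write, marks page dirty */
}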
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 83a4a3ca3e8a..c619ec6eff4a 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
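The added parentheses close a real precedence hazard when the constant argument is itself an expression; for example:

#define NR_SLOTS (NR_A & NR_B)	/* hypothetical constant expression */
/* old expansion tested 'NR_A & NR_B == 1', which parses as
 * 'NR_A & (NR_B == 1)' because '==' binds tighter than '&';
 * the new '((n) == 1)' yields '((NR_A & NR_B) == 1)'. */
len = roundup_pow_of_two(NR_SLOTS);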
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 3a88b699b758..dbd69b3d170b 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -306,7 +306,7 @@ static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
/**
* mii_10gbt_stat_mod_linkmode_lpa_t
* @advertising: target the linkmode advertisement settings
- * @adv: value of the C45 10GBASE-T AN STATUS register
+ * @lpa: value of the C45 10GBASE-T AN STATUS register
*
* A small helper function that translates C45 10GBASE-T AN STATUS register bits
* to linkmode advertisement settings. Other bits in advertising aren't changed.
@@ -371,6 +371,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
* mdio_module_driver() - Helper macro for registering mdio drivers
+ * @_mdio_driver: driver to register
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 5f5b2df06e61..e5862746751b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -46,11 +46,10 @@ struct vmem_altmap {
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
- * MEMORY_DEVICE_DEVDAX:
+ * MEMORY_DEVICE_GENERIC:
* Host memory that has similar access semantics as System RAM i.e. DMA
- * coherent and supports page pinning. In contrast to
- * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
- * character device.
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
@@ -60,7 +59,7 @@ enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_FS_DAX,
- MEMORY_DEVICE_DEVDAX,
+ MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
};
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 75f880c25bb8..416ee6dd2574 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -27,6 +27,7 @@
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
+#define PHY_ID_LAN8814 0x00221660
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4d3376e20f5e..81ca5989009b 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -816,7 +816,7 @@ struct mlx5_mini_cqe8 {
__be32 rx_hash_result;
struct {
__be16 checksum;
- __be16 rsvd;
+ __be16 stridx;
};
struct {
__be16 wqe_counter;
@@ -836,6 +836,7 @@ enum {
enum {
MLX5_CQE_FORMAT_CSUM = 0x1,
+ MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};
#define MLX5_MINI_CQE_ARRAY_SIZE 8
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index c145de0473bc..8dc3da6e6480 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -643,7 +643,6 @@ struct mlx5_pps {
};
struct mlx5_clock {
- struct mlx5_core_dev *mdev;
struct mlx5_nb pps_nb;
seqlock_t lock;
struct cyclecounter cycles;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ca6e6a81576b..b2f370f0b420 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -41,6 +41,8 @@ struct writeback_control;
struct bdi_writeback;
struct pt_regs;
+extern int sysctl_page_lock_unfairness;
+
void init_mm_internals(void);
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2cc3cf80b49a..0b17c4322b09 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -193,7 +193,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
__NETIF_F_BIT(NETIF_F_GSO_SHIFT))
-/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be
+/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be
* set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set--
* this would be contradictory
*/
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 157e0242e9ee..a431c3229cbf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -634,8 +634,9 @@ extern int sysctl_devconf_inherit_init_net;
*/
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1) ||
- !sysctl_fb_tunnels_only_for_init_net;
+ return !IS_ENABLED(CONFIG_SYSCTL) ||
+ !sysctl_fb_tunnels_only_for_init_net ||
+ (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -1775,6 +1776,7 @@ enum netdev_priv_flags {
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
+ * @proto_down_reason: reason a netdev interface is held down
* @pcpu_refcnt: Number of references to this device
* @todo_list: Delayed register/unregister
* @link_watch_list: XXX: need comments on this one
@@ -1839,6 +1841,7 @@ enum netdev_priv_flags {
* @udp_tunnel_nic_info: static structure describing the UDP tunnel
* offload capabilities of the device
* @udp_tunnel_nic: UDP tunnel offload state
+ * @xdp_state: stores info on attached XDP BPF programs
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
@@ -4677,16 +4680,6 @@ int netdev_class_create_file_ns(const struct class_attribute *class_attr,
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(const struct class_attribute *class_attr)
-{
- return netdev_class_create_file_ns(class_attr, NULL);
-}
-
-static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
-{
- netdev_class_remove_file_ns(class_attr, NULL);
-}
-
extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 1efb88d9f892..cfe8c607a628 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -17,6 +17,7 @@ bool of_mdiobus_child_is_phy(struct device_node *child);
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
struct device_node *np);
+struct mdio_device *of_mdio_find_device(struct device_node *np);
struct phy_device *of_phy_find_device(struct device_node *phy_np);
struct phy_device *
of_phy_connect(struct net_device *dev, struct device_node *phy_np,
@@ -74,6 +75,11 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ return NULL;
+}
+
static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
return NULL;
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 5e033fe1ff4e..5fda40f97fe9 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* anything we did within this RCU-sched read-size critical section.
*/
if (likely(rcu_sync_is_idle(&sem->rss)))
- __this_cpu_inc(*sem->read_count);
+ this_cpu_inc(*sem->read_count);
else
__percpu_down_read(sem, false); /* Unconditional memory barrier */
/*
@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
* Same as in percpu_down_read().
*/
if (likely(rcu_sync_is_idle(&sem->rss)))
- __this_cpu_inc(*sem->read_count);
+ this_cpu_inc(*sem->read_count);
else
ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
preempt_enable();
@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
* Same as in percpu_down_read().
*/
if (likely(rcu_sync_is_idle(&sem->rss))) {
- __this_cpu_dec(*sem->read_count);
+ this_cpu_dec(*sem->read_count);
} else {
/*
* slowpath; reader will only ever wake a single blocked
@@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
* aggregate zero, as that is the only time it matters) they
* will also see our critical section.
*/
- __this_cpu_dec(*sem->read_count);
+ this_cpu_dec(*sem->read_count);
rcuwait_wake_up(&sem->writer);
}
preempt_enable();
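Context for the __this_cpu_inc() to this_cpu_inc() switch: the double-underscore forms may compile to a plain load/add/store on architectures without IRQ-safe per-CPU instructions, so an interrupt handler that also takes the same semaphore for read can tear the update. A schematic of the lost update this prevents (an assumed interleaving, not code from this patch):

    /*
     *   task context                     interrupt handler
     *   ------------                     -----------------
     *   tmp = *read_count;      // 5
     *                                    tmp = *read_count;      // 5
     *                                    *read_count = tmp + 1;  // 6
     *   *read_count = tmp + 1;  // 6, the interrupt's increment is lost
     *
     * this_cpu_inc() is specified to be IRQ-safe, so its read-modify-write
     * cannot be split by an interrupt on any architecture.
     */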
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3a09d2bf69ea..eb3cb1a98b45 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -82,7 +82,39 @@ extern const int phy_10gbit_features_array[1];
#define PHY_POLL_CABLE_TEST 0x00000004
#define MDIO_DEVICE_IS_PHY 0x80000000
-/* Interface Mode definitions */
+/**
+ * enum phy_interface_t - Interface Mode definitions
+ *
+ * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
+ * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
+ * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
+ * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
+ * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
+ * @PHY_INTERFACE_MODE_RTBI: Reduced TBI
+ * @PHY_INTERFACE_MODE_SMII: ??? MII
+ * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
+ * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
+ * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
+ * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_MAX: Bookkeeping
+ *
+ * Describes the interface between the MAC and PHY.
+ */
typedef enum {
PHY_INTERFACE_MODE_NA,
PHY_INTERFACE_MODE_INTERNAL,
@@ -116,8 +148,8 @@ typedef enum {
} phy_interface_t;
/**
- * phy_supported_speeds - return all speeds currently supported by a phy device
- * @phy: The phy device to return supported speeds of.
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
+ * @phy: The PHY device to return supported speeds of.
* @speeds: buffer to store supported speeds in.
* @size: size of speeds buffer.
*
@@ -134,9 +166,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
* phy_modes - map phy_interface_t enum to device tree binding of phy-mode
* @interface: enum phy_interface_t value
*
- * Description: maps 'enum phy_interface_t' defined in this file
+ * Description: maps enum &phy_interface_t defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
+ * device driver can get PHY interface from device tree.
*/
static inline const char *phy_modes(phy_interface_t interface)
{
@@ -215,6 +247,14 @@ struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
+/**
+ * struct mdio_bus_stats - Statistics counters for MDIO busses
+ * @transfers: Total number of transfers, i.e. @writes + @reads
+ * @errors: Number of MDIO transfers that returned an error
+ * @writes: Number of write transfers
+ * @reads: Number of read transfers
+ * @syncp: Synchronisation for incrementing statistics
+ */
struct mdio_bus_stats {
u64_stats_t transfers;
u64_stats_t errors;
@@ -224,7 +264,15 @@ struct mdio_bus_stats {
struct u64_stats_sync syncp;
};
-/* Represents a shared structure between different phydev's in the same
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @addr: Common PHY address used to combine PHYs in one package
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization flags for the PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydev's in the same
* package, for example a quad PHY. See phy_package_join() and
* phy_package_leave().
*/
@@ -247,7 +295,14 @@ struct phy_package_shared {
#define PHY_SHARED_F_INIT_DONE 0
#define PHY_SHARED_F_PROBE_DONE 1
-/*
+/**
+ * struct mii_bus - Represents an MDIO bus
+ *
+ * @owner: Who owns this device
+ * @name: User friendly name for this MDIO device, or driver name
+ * @id: Unique identifier for this bus, typically derived from the bus hierarchy
+ * @priv: Driver private data
+ *
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
*/
@@ -256,49 +311,58 @@ struct mii_bus {
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
+ /** @read: Perform a read transfer on the bus */
int (*read)(struct mii_bus *bus, int addr, int regnum);
+ /** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
+
+ /** @stats: Statistic counters per device on the bus */
struct mdio_bus_stats stats[PHY_MAX_ADDR];
- /*
- * A lock to ensure that only one thing can read/write
+ /**
+ * @mdio_lock: A lock to ensure that only one thing can read/write
* the MDIO bus at a time
*/
struct mutex mdio_lock;
+ /** @parent: Parent device of this bus */
struct device *parent;
+ /** @state: State of bus structure */
enum {
MDIOBUS_ALLOCATED = 1,
MDIOBUS_REGISTERED,
MDIOBUS_UNREGISTERED,
MDIOBUS_RELEASED,
} state;
+
+ /** @dev: Kernel device representation */
struct device dev;
- /* list of all PHYs on bus */
+ /** @mdio_map: list of all MDIO devices on bus */
struct mdio_device *mdio_map[PHY_MAX_ADDR];
- /* PHY addresses to be ignored when probing */
+ /** @phy_mask: PHY addresses to be ignored when probing */
u32 phy_mask;
- /* PHY addresses to ignore the TA/read failure */
+ /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */
u32 phy_ignore_ta_mask;
- /*
- * An array of interrupts, each PHY's interrupt at the index
+ /**
+ * @irq: An array of interrupts, each PHY's interrupt at the index
* matching its address
*/
int irq[PHY_MAX_ADDR];
- /* GPIO reset pulse width in microseconds */
+ /** @reset_delay_us: GPIO reset pulse width in microseconds */
int reset_delay_us;
- /* GPIO reset deassert delay in microseconds */
+ /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
int reset_post_delay_us;
- /* RESET GPIO descriptor pointer */
+ /** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
- /* bus capabilities, used for probing */
+ /** @probe_capabilities: bus capabilities, used for probing */
enum {
MDIOBUS_NO_CAP = 0,
MDIOBUS_C22,
@@ -306,15 +370,22 @@ struct mii_bus {
MDIOBUS_C22_C45,
} probe_capabilities;
- /* protect access to the shared element */
+ /** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
- /* shared state across different PHYs */
+ /** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
-struct mii_bus *mdiobus_alloc_size(size_t);
+struct mii_bus *mdiobus_alloc_size(size_t size);
+
+/**
+ * mdiobus_alloc - Allocate an MDIO bus structure
+ *
+ * The internal state of the MDIO bus will be set to MDIOBUS_ALLOCATED, ready
+ * for the driver to register the bus.
+ */
static inline struct mii_bus *mdiobus_alloc(void)
{
return mdiobus_alloc_size(0);
@@ -341,40 +412,41 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
-/* PHY state machine states:
+/**
+ * enum phy_state - PHY state machine states:
*
- * DOWN: PHY device and driver are not ready for anything. probe
+ * @PHY_DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will set the state to READY
+ * - PHY driver probe function will set the state to @PHY_READY
*
- * READY: PHY is ready to send and receive packets, but the
+ * @PHY_READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
* probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * UP: The PHY and attached device are ready to do work.
+ * @PHY_UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to NOLINK or RUNNING
+ * - timer moves to @PHY_NOLINK or @PHY_RUNNING
*
- * NOLINK: PHY is up, but not currently plugged in.
- * - irq or timer will set RUNNING if link comes back
- * - phy_stop moves to HALTED
+ * @PHY_NOLINK: PHY is up, but not currently plugged in.
+ * - irq or timer will set @PHY_RUNNING if link comes back
+ * - phy_stop moves to @PHY_HALTED
*
- * RUNNING: PHY is currently up, running, and possibly sending
+ * @PHY_RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - irq or timer will set NOLINK if link goes down
- * - phy_stop moves to HALTED
+ * - irq or timer will set @PHY_NOLINK if link goes down
+ * - phy_stop moves to @PHY_HALTED
*
- * CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending
* is not expected to work, carrier will be indicated as down. PHY will be
 * polled once per second, or on interrupt, for its current state.
* Once complete, move to UP to restart the PHY.
- * - phy_stop aborts the running test and moves to HALTED
+ * - phy_stop aborts the running test and moves to @PHY_HALTED
*
- * HALTED: PHY is up, but no polling or interrupts are done. Or
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or
* PHY is in an error state.
- * - phy_start moves to UP
+ * - phy_start moves to @PHY_UP
*/
enum phy_state {
PHY_DOWN = 0,
@@ -403,34 +475,67 @@ struct phy_c45_device_ids {
struct macsec_context;
struct macsec_ops;
-/* phy_device: An instance of a PHY
+/**
+ * struct phy_device - An instance of a PHY
*
- * drv: Pointer to the driver for this PHY instance
- * phy_id: UID for this device found during discovery
- * c45_ids: 802.3-c45 Device Identifers if is_c45.
- * is_c45: Set to true if this phy uses clause 45 addressing.
- * is_internal: Set to true if this phy is internal to a MAC.
- * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
- * is_gigabit_capable: Set to true if PHY supports 1000Mbps
- * has_fixups: Set to true if this phy has fixups/quirks.
- * suspended: Set to true if this phy has been suspended successfully.
- * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
- * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
- * loopback_enabled: Set true if this phy has been loopbacked successfully.
- * downshifted_rate: Set true if link speed has been downshifted.
- * state: state of the PHY for management purposes
- * dev_flags: Device-specific flags used by the PHY driver.
- * irq: IRQ number of the PHY's interrupt (-1 if none)
- * phy_timer: The timer for handling the state machine
- * sfp_bus_attached: flag indicating whether the SFP bus has been attached
- * sfp_bus: SFP bus attached to this PHY's fiber port
- * attached_dev: The attached enet driver's device instance ptr
- * adjust_link: Callback for the enet controller to respond to
- * changes in the link state.
- * macsec_ops: MACsec offloading ops.
+ * @mdio: MDIO bus this PHY is on
+ * @drv: Pointer to the driver for this PHY instance
+ * @phy_id: UID for this device found during discovery
+ * @c45_ids: 802.3-c45 Device Identifiers if is_c45.
+ * @is_c45: Set to true if this PHY uses clause 45 addressing.
+ * @is_internal: Set to true if this PHY is internal to a MAC.
+ * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc.
+ * @is_gigabit_capable: Set to true if PHY supports 1000Mbps
+ * @has_fixups: Set to true if this PHY has fixups/quirks.
+ * @suspended: Set to true if this PHY has been suspended successfully.
+ * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus.
+ * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
+ * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
+ * @downshifted_rate: Set true if link speed has been downshifted.
+ * @state: State of the PHY for management purposes
+ * @dev_flags: Device-specific flags used by the PHY driver.
+ * @irq: IRQ number of the PHY's interrupt (-1 if none)
+ * @phy_timer: The timer for handling the state machine
+ * @phylink: Pointer to phylink instance for this PHY
+ * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
+ * @sfp_bus: SFP bus attached to this PHY's fiber port
+ * @attached_dev: The attached enet driver's device instance ptr
+ * @adjust_link: Callback for the enet controller to respond to changes in the
+ * link state.
+ * @phy_link_change: Callback for phylink for notification of link change
+ * @macsec_ops: MACsec offloading ops.
*
- * speed, duplex, pause, supported, advertising, lp_advertising,
- * and autoneg are used like in mii_if_info
+ * @speed: Current link speed
+ * @duplex: Current duplex
+ * @pause: Current pause
+ * @asym_pause: Current asymmetric pause
+ * @supported: Combined MAC/PHY supported linkmodes
+ * @advertising: Currently advertised linkmodes
+ * @adv_old: Saved advertised while power saving for WoL
+ * @lp_advertising: Current link partner advertised linkmodes
+ * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @autoneg: Flag autoneg being used
+ * @link: Current link state
+ * @autoneg_complete: Flag auto negotiation of the link has completed
+ * @mdix: Current crossover
+ * @mdix_ctrl: User setting of crossover
+ * @interrupts: Flag interrupts have been enabled
+ * @interface: enum phy_interface_t value
+ * @skb: Netlink message for cable diagnostics
+ * @nest: Netlink nest used for cable diagnostics
+ * @ehdr: Netlink header for cable diagnostics
+ * @phy_led_triggers: Array of LED triggers
+ * @phy_num_led_triggers: Number of triggers in @phy_led_triggers
+ * @led_link_trigger: LED trigger for link up/down
+ * @last_triggered: last LED trigger for link speed
+ * @master_slave_set: User requested master/slave configuration
+ * @master_slave_get: Current master/slave advertisement
+ * @master_slave_state: Current master/slave configuration
+ * @mii_ts: Pointer to time stamper callbacks
+ * @lock: Mutex for serialization access to PHY
+ * @state_queue: Work queue for state machine
+ * @shared: Pointer to private data shared by PHYs in one package
+ * @priv: Pointer to driver private data
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -550,9 +655,18 @@ struct phy_device {
#define to_phy_device(d) container_of(to_mdio_device(d), \
struct phy_device, mdio)
-/* A structure containing possible configuration parameters
+/**
+ * struct phy_tdr_config - Configuration of a TDR raw test
+ *
+ * @first: Distance for first data collection point
+ * @last: Distance for last data collection point
+ * @step: Step between data collection points
+ * @pair: Bitmap of cable pairs to collect data for
+ *
+ * A structure containing possible configuration parameters
* for a TDR cable test. The driver does not need to implement
* all the parameters, but should report what is actually used.
+ * All distances are in centimeters.
*/
struct phy_tdr_config {
u32 first;
@@ -562,18 +676,20 @@ struct phy_tdr_config {
};
#define PHY_PAIR_ALL -1
-/* struct phy_driver: Driver structure for a particular PHY type
+/**
+ * struct phy_driver - Driver structure for a particular PHY type
*
- * driver_data: static driver data
- * phy_id: The result of reading the UID registers of this PHY
+ * @mdiodrv: Data common to all MDIO devices
+ * @phy_id: The result of reading the UID registers of this PHY
* type, and ANDing them with the phy_id_mask. This driver
* only works for PHYs with IDs which match this field
- * name: The friendly name of this PHY type
- * phy_id_mask: Defines the important bits of the phy_id
- * features: A mandatory list of features (speed, duplex, etc)
+ * @name: The friendly name of this PHY type
+ * @phy_id_mask: Defines the important bits of the phy_id
+ * @features: A mandatory list of features (speed, duplex, etc)
* supported by this PHY
- * flags: A bitfield defining certain other features this PHY
+ * @flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * @driver_data: Static driver data
*
* All functions are optional. If config_aneg or read_status
* are not implemented, the phy core uses the genphy versions.
@@ -592,151 +708,178 @@ struct phy_driver {
u32 flags;
const void *driver_data;
- /*
- * Called to issue a PHY software reset
+ /**
+ * @soft_reset: Called to issue a PHY software reset
*/
int (*soft_reset)(struct phy_device *phydev);
- /*
- * Called to initialize the PHY,
+ /**
+ * @config_init: Called to initialize the PHY,
* including after a reset
*/
int (*config_init)(struct phy_device *phydev);
- /*
- * Called during discovery. Used to set
+ /**
+ * @probe: Called during discovery. Used to set
* up device-specific structures, if any
*/
int (*probe)(struct phy_device *phydev);
- /*
- * Probe the hardware to determine what abilities it has.
- * Should only set phydev->supported.
+ /**
+ * @get_features: Probe the hardware to determine what
+ * abilities it has. Should only set phydev->supported.
*/
int (*get_features)(struct phy_device *phydev);
/* PHY Power Management */
+ /** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
+ /** @resume: Resume the hardware, restoring state if needed */
int (*resume)(struct phy_device *phydev);
- /*
- * Configures the advertisement and resets
+ /**
+ * @config_aneg: Configures the advertisement and resets
* autonegotiation if phydev->autoneg is on,
* forces the speed to the current settings in phydev
* if phydev->autoneg is off
*/
int (*config_aneg)(struct phy_device *phydev);
- /* Determines the auto negotiation result */
+ /** @aneg_done: Determines the auto negotiation result */
int (*aneg_done)(struct phy_device *phydev);
- /* Determines the negotiated speed and duplex */
+ /** @read_status: Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
- /* Clears any pending interrupts */
+ /** @ack_interrupt: Clears any pending interrupts */
int (*ack_interrupt)(struct phy_device *phydev);
- /* Enables or disables interrupts */
+ /** @config_intr: Enables or disables interrupts */
int (*config_intr)(struct phy_device *phydev);
- /*
- * Checks if the PHY generated an interrupt.
+ /**
+ * @did_interrupt: Checks if the PHY generated an interrupt.
 * For multi-PHY devices with a shared PHY interrupt pin,
 * the set interrupt bits have to be cleared.
*/
int (*did_interrupt)(struct phy_device *phydev);
- /* Override default interrupt handling */
+ /** @handle_interrupt: Override default interrupt handling */
irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
- /* Clears up any memory if needed */
+ /** @remove: Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
- /* Returns true if this is a suitable driver for the given
- * phydev. If NULL, matching is based on phy_id and
- * phy_id_mask.
+ /**
+ * @match_phy_device: Returns true if this is a suitable
+ * driver for the given phydev. If NULL, matching is based on
+ * phy_id and phy_id_mask.
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
- * enable Wake on LAN, so set_wol is provided to be called in the
- * ethernet driver's set_wol function. */
+ /**
+ * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
+ * register changes to enable Wake on LAN, so set_wol is
+ * provided to be called in the ethernet driver's set_wol
+ * function.
+ */
int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /* See set_wol, but for checking whether Wake on LAN is enabled. */
+ /**
+ * @get_wol: See set_wol, but for checking whether Wake on LAN
+ * is enabled.
+ */
void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /*
- * Called to inform a PHY device driver when the core is about to
- * change the link state. This callback is supposed to be used as
- * fixup hook for drivers that need to take action when the link
- * state changes. Drivers are by no means allowed to mess with the
+ /**
+ * @link_change_notify: Called to inform a PHY device driver
+ * when the core is about to change the link state. This
+ * callback is supposed to be used as fixup hook for drivers
+ * that need to take action when the link state
+ * changes. Drivers are by no means allowed to mess with the
* PHY device structure in their implementations.
*/
void (*link_change_notify)(struct phy_device *dev);
- /*
- * Phy specific driver override for reading a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD read function will be used
- * by phy_read_mmd(), which will use either a direct read for
- * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
+ /**
+ * @read_mmd: PHY specific driver override for reading a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD read function
+ * will be used by phy_read_mmd(), which will use either a
+ * direct read for Clause 45 PHYs or an indirect read for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device.
*/
int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
- /*
- * Phy specific driver override for writing a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD write function will be used
- * by phy_write_mmd(), which will use either a direct write for
- * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
- * val is the value to be written.
+ /**
+ * @write_mmd: PHY specific driver override for writing a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD write function
+ * will be used by phy_write_mmd(), which will use either a
+ * direct write for Clause 45 PHYs, or an indirect write for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device. val is the value to be written.
*/
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
+ /** @read_page: Return the current PHY register page number */
int (*read_page)(struct phy_device *dev);
+ /** @write_page: Set the current PHY register page number */
int (*write_page)(struct phy_device *dev, int page);
- /* Get the size and type of the eeprom contained within a plug-in
- * module */
+ /**
+ * @module_info: Get the size and type of the eeprom contained
+ * within a plug-in module
+ */
int (*module_info)(struct phy_device *dev,
struct ethtool_modinfo *modinfo);
- /* Get the eeprom information from the plug-in module */
+ /**
+ * @module_eeprom: Get the eeprom information from the plug-in
+ * module
+ */
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
- /* Start a cable test */
+ /** @cable_test_start: Start a cable test */
int (*cable_test_start)(struct phy_device *dev);
- /* Start a raw TDR cable test */
+ /** @cable_test_tdr_start: Start a raw TDR cable test */
int (*cable_test_tdr_start)(struct phy_device *dev,
const struct phy_tdr_config *config);
- /* Once per second, or on interrupt, request the status of the
- * test.
+ /**
+ * @cable_test_get_status: Once per second, or on interrupt,
+ * request the status of the test.
*/
int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
- /* Get statistics from the phy using ethtool */
+ /* Get statistics from the PHY using ethtool */
+ /** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
+ /** @get_strings: Names of the statistic counters */
void (*get_strings)(struct phy_device *dev, u8 *data);
+ /** @get_stats: Return the statistic counter values */
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
/* Get and Set PHY tunables */
+ /** @get_tunable: Return the value of a tunable */
int (*get_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna, void *data);
+ /** @set_tunable: Set the value of a tunable */
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
+ /** @set_loopback: Set the loopback mode of the PHY */
int (*set_loopback)(struct phy_device *dev, bool enable);
+ /** @get_sqi: Get the signal quality indication */
int (*get_sqi)(struct phy_device *dev);
+ /** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
@@ -890,6 +1033,24 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
*/
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+/**
+ * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a
+ * condition is met or a timeout occurs
+ *
+ * @phydev: The phy_device struct
+ * @devaddr: The MMD to read from
+ * @regnum: The register on the MMD to read
+ * @val: Variable to read the register into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep @sleep_us before the read.
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value is stored in @val. Must not
+ * be called from atomic context if @sleep_us or @timeout_us are used.
+ */
#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
sleep_us, timeout_us, sleep_before_read) \
({ \
@@ -1161,7 +1322,7 @@ static inline bool phy_is_internal(struct phy_device *phydev)
/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
- * @mode: the phy_interface_t enum
+ * @mode: the &phy_interface_t enum
*/
static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
{
@@ -1170,11 +1331,11 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
- * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z
* negotiation
* @mode: one of &enum phy_interface_t
*
- * Returns true if the phy interface mode uses the 16-bit negotiation
+ * Returns true if the PHY interface mode uses the 16-bit negotiation
* word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
*/
static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
@@ -1193,7 +1354,7 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
return phy_interface_mode_is_rgmii(phydev->interface);
};
-/*
+/**
* phy_is_pseudo_fixed_link - Convenience function for testing if this
* PHY is the CPU port facing side of an Ethernet switch, or similar.
* @phydev: the phy_device struct
@@ -1566,8 +1727,9 @@ static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
/**
- * module_phy_driver() - Helper macro for registering PHY drivers
+ * phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
+ * @__count: Number of members in the array
*
* Helper macro for PHY drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
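The newly documented phy_read_mmd_poll_timeout() is handy for self-clearing bits; a hedged driver sketch, where the register and bit names are hypothetical:

    #include <linux/mdio.h>
    #include <linux/phy.h>

    #define FOO_PMA_CTRL       0x0000   /* hypothetical register */
    #define FOO_PMA_CTRL_RESET BIT(15)  /* hypothetical self-clearing bit */

    /* Wait up to 100ms for a reset bit to self-clear, polling every 10ms. */
    static int foo_wait_reset(struct phy_device *phydev)
    {
            int val;

            return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PMAPMD,
                                             FOO_PMA_CTRL, val,
                                             !(val & FOO_PMA_CTRL_RESET),
                                             10000, 100000, false);
    }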
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
index 4537f57f9e42..3d557bbcd2c7 100644
--- a/include/linux/powercap.h
+++ b/include/linux/powercap.h
@@ -44,19 +44,18 @@ struct powercap_control_type_ops {
};
/**
- * struct powercap_control_type- Defines a powercap control_type
- * @name: name of control_type
+ * struct powercap_control_type - Defines a powercap control_type
* @dev: device for this control_type
* @idr: idr to have unique id for its child
- * @root_node: Root holding power zones for this control_type
+ * @nr_zones: counter for number of zones of this type
* @ops: Pointer to callback struct
- * @node_lock: mutex for control type
+ * @lock: mutex for control type
* @allocated: This is possible that client owns the memory
* used by this structure. In this case
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @ctrl_inst: link to the control_type list
+ * @node: linked-list node
*
* Defines powercap control_type. This acts as a container for power
* zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
@@ -129,7 +128,7 @@ struct powercap_zone_ops {
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @constraint_ptr: List of constraints for this zone.
+ * @constraints: List of constraints for this zone.
*
* This defines a power zone instance. The fields of this structure are
* private, and should not be used by client drivers.
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index 13eafebf3549..b83a3f944f28 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
+struct page;
/*
prefetch(x) attempts to pre-emptively get the memory pointed to
by address "x" into the CPU L1 cache.
@@ -62,4 +63,11 @@ static inline void prefetch_range(void *addr, size_t len)
#endif
}
+static inline void prefetch_page_address(struct page *page)
+{
+#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
+ prefetch(page);
+#endif
+}
+
#endif
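The new helper only touches memory on kernels where struct page carries a virtual address (WANT_PAGE_VIRTUAL/HASHED_PAGE_VIRTUAL) and is a no-op otherwise; a sketch of the intended pattern, prefetching the next page's struct while working on the current one (assumed usage, not from this patch):

    static void process_pages(struct page **pages, int nr)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    if (i + 1 < nr)
                            prefetch_page_address(pages[i + 1]);
                    /* ... work that will soon call page_address(pages[i]) ... */
            }
    }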
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 56fa55841d39..57fb295ea41a 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -624,6 +624,7 @@ struct qed_dev_info {
#define QED_MFW_VERSION_3_OFFSET 24
u32 flash_size;
+ bool b_arfs_capable;
bool b_inter_pf_switch;
bool tx_switching;
bool rdma_supported;
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 01fc4d9c9c54..8a99279a579b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -248,6 +248,7 @@ struct uart_port {
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
+ unsigned char console_reinit;
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ed9bea924dc3..04a18e01b362 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3223,8 +3223,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error if @free_on_error is true.
*/
-static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
- bool free_on_error)
+static inline int __must_check __skb_put_padto(struct sk_buff *skb,
+ unsigned int len,
+ bool free_on_error)
{
unsigned int size = skb->len;
@@ -3247,7 +3248,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
* is untouched. Otherwise it is extended. Returns zero on
* success. The skb is freed on error.
*/
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
return __skb_put_padto(skb, len, true);
}
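With __must_check, callers can no longer silently ignore that the skb has already been consumed on failure; the expected pattern in a transmit path looks roughly like this (hedged sketch, hypothetical driver):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* On failure the skb was freed by skb_put_padto(); it must not
             * be touched again, only accounted as dropped.
             */
            if (skb_put_padto(skb, ETH_ZLEN)) {
                    dev->stats.tx_dropped++;
                    return NETDEV_TX_OK;
            }

            /* ... queue the padded skb to hardware ... */
            return NETDEV_TX_OK;
    }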
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
index 3d5c3271a9a8..a59db2f08e76 100644
--- a/include/linux/stackleak.h
+++ b/include/linux/stackleak.h
@@ -25,7 +25,7 @@ static inline void stackleak_task_init(struct task_struct *t)
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
int stack_erasing_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
#endif
#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 898c890fc153..27fb99cfeb02 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -21,6 +21,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_BOOKMARK 0x04
#define WQ_FLAG_CUSTOM 0x08
+#define WQ_FLAG_DONE 0x10
/*
* A single wait-queue entry structure:
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index c9bce9bba511..10c2cc8f0efc 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,16 @@ struct wiphy;
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
+ * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
+ * on this channel.
*
*/
enum ieee80211_channel_flags {
@@ -113,6 +123,11 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
IEEE80211_CHAN_NO_HE = 1<<13,
+ IEEE80211_CHAN_1MHZ = 1<<14,
+ IEEE80211_CHAN_2MHZ = 1<<15,
+ IEEE80211_CHAN_4MHZ = 1<<16,
+ IEEE80211_CHAN_8MHZ = 1<<17,
+ IEEE80211_CHAN_16MHZ = 1<<18,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -450,6 +465,7 @@ struct ieee80211_sta_s1g_cap {
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
* @edmg_cap: EDMG capabilities in this band
+ * @s1g_cap: S1G capabilities in this band (S1G band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -1068,6 +1084,39 @@ struct cfg80211_acl_data {
};
/**
+ * struct cfg80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ * @tmpl_len: Template length
+ * @tmpl: Template data for FILS discovery frame including the action
+ * frame headers.
+ */
+struct cfg80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
+ * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
+ * response parameters in 6GHz.
+ *
+ * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
+ * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
+ * scanning
+ * @tmpl_len: Template length
+ * @tmpl: Template data for probe response
+ */
+struct cfg80211_unsol_bcast_probe_resp {
+ u32 interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
* enum cfg80211_ap_settings_flags - AP settings flags
*
* Used by cfg80211_ap_settings
@@ -1114,6 +1163,8 @@ enum cfg80211_ap_settings_flags {
* @he_obss_pd: OBSS Packet Detection settings
* @he_bss_color: BSS Color settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1144,6 +1195,8 @@ struct cfg80211_ap_settings {
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
};
/**
@@ -1787,6 +1840,7 @@ struct mpath_info {
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
+ * (0 = no, 1 = yes, -1 = do not change)
* @ht_opmode: HT Operation mode
* (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
@@ -5279,6 +5333,16 @@ ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
}
/**
+ * ieee80211_s1g_channel_width - get allowed channel width from @chan
+ *
+ * Only allowed for band NL80211_BAND_S1GHZ
+ * @chan: channel
+ * Return: The allowed channel width for this center_freq
+ */
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
+
+/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
diff --git a/include/net/devlink.h b/include/net/devlink.h
index eaec0a8cc5ef..4883dbae7faf 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -392,6 +392,25 @@ struct devlink_param_gset_ctx {
};
/**
+ * struct devlink_flash_notify - devlink dev flash notify data
+ * @status_msg: current status string
+ * @component: firmware component being updated
+ * @done: amount of work completed of total amount
+ * @total: amount of work expected to be done
+ * @timeout: expected max timeout in seconds
+ *
+ * These are values to be given to userland to be displayed in order
+ * to show current activity in a firmware update process.
+ */
+struct devlink_flash_notify {
+ const char *status_msg;
+ const char *component;
+ unsigned long done;
+ unsigned long total;
+ unsigned long timeout;
+};
+
+/**
* struct devlink_param - devlink configuration parameter data
* @name: name of the parameter
* @generic: indicates if the parameter is generic or driver specific
@@ -542,12 +561,16 @@ struct devlink_info_req;
* the data variable must be updated to point to the snapshot data.
* The function will be called while the devlink instance lock is
* held.
+ * @priv: Pointer to driver private data for the region operation
*/
struct devlink_region_ops {
const char *name;
void (*destructor)(const void *data);
- int (*snapshot)(struct devlink *devlink, struct netlink_ext_ack *extack,
+ int (*snapshot)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data);
+ void *priv;
};
struct devlink_fmsg;
@@ -566,6 +589,7 @@ enum devlink_health_reporter_state {
* @dump: callback to dump an object
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
+ * @test: callback to trigger a test event
*/
struct devlink_health_reporter_ops {
@@ -578,6 +602,8 @@ struct devlink_health_reporter_ops {
int (*diagnose)(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack);
+ int (*test)(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack);
};
/**
@@ -1400,6 +1426,10 @@ void devlink_flash_update_status_notify(struct devlink *devlink,
const char *component,
unsigned long done,
unsigned long total);
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout);
int devlink_traps_register(struct devlink *devlink,
const struct devlink_trap *traps,
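The snapshot callback now receives the ops structure itself, so per-region context can be carried in the new ->priv member instead of a global; a hedged sketch with hypothetical names:

    struct foo_region_ctx {
            size_t size;
    };

    static int foo_region_snapshot(struct devlink *devlink,
                                   const struct devlink_region_ops *ops,
                                   struct netlink_ext_ack *extack,
                                   u8 **data)
    {
            struct foo_region_ctx *ctx = ops->priv;
            u8 *buf;

            buf = kzalloc(ctx->size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* ... fill buf from device registers ... */
            *data = buf;    /* devlink takes ownership, frees via ->destructor */
            return 0;
    }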
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 75c8fac82017..d16057c5987a 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -612,11 +612,14 @@ struct dsa_switch_ops {
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /* Devlink parameters */
+ /* Devlink parameters, etc */
int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
+ int (*devlink_info_get)(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
/*
* MTU change functionality. Switches can also adjust their MRU through
@@ -658,12 +661,25 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void dsa_devlink_region_destroy(struct devlink_region *region);
+
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
struct dsa_devlink_priv {
struct dsa_switch *ds;
};
+static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
+{
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
struct dsa_switch_driver {
struct list_head list;
const struct dsa_switch_ops *ops;
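dsa_devlink_to_ds() maps a devlink instance back to its switch, which is useful in any raw devlink callback a DSA driver registers, for example with the new region helpers (hedged sketch, foo_read_setting() is hypothetical):

    static int foo_param_get(struct devlink *dl, u32 id,
                             struct devlink_param_gset_ctx *ctx)
    {
            struct dsa_switch *ds = dsa_devlink_to_ds(dl);

            /* ds->priv is the driver's own switch state */
            ctx->val.vu8 = foo_read_setting(ds->priv, id);
            return 0;
    }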
diff --git a/include/net/flow.h b/include/net/flow.h
index 929d3ca614d0..b2531df3f65f 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
+ fl4->flowi4_multipath_hash = 0;
}
/* Reset some input parameters after previous lookup */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 6e5f1e1aa822..b9eb92f3fe86 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -101,14 +101,6 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
-static inline int genl_err_attr(struct genl_info *info, int err,
- const struct nlattr *attr)
-{
- info->extack->bad_attr = attr;
-
- return err;
-}
-
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
GENL_DONT_VALIDATE_DUMP = BIT(1),
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index c738abeb3265..dc763ca9413c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -96,7 +96,8 @@ struct inet_connection_sock {
void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state:6,
+ __u8 icsk_ca_state:5,
+ icsk_ca_initialized:1,
icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ec148b3e9c41..e90089d104b0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -317,6 +317,9 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
* @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
+ * @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
+ * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
+ * status changed.
*
*/
enum ieee80211_bss_change {
@@ -350,6 +353,8 @@ enum ieee80211_bss_change {
BSS_CHANGED_TWT = 1<<27,
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
+ BSS_CHANGED_FILS_DISCOVERY = 1<<30,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -491,6 +496,18 @@ struct ieee80211_ftm_responder_params {
};
/**
+ * struct ieee80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ */
+struct ieee80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -607,6 +624,9 @@ struct ieee80211_ftm_responder_params {
* @he_oper: HE operation information of the AP we are connected to
* @he_obss_pd: OBSS Packet Detection parameters.
* @he_bss_color: BSS coloring settings, if BSS supports HE
+ * @fils_discovery: FILS discovery configuration
+ * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response
+ * interval.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -674,6 +694,8 @@ struct ieee80211_bss_conf {
} he_oper;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_fils_discovery fils_discovery;
+ u32 unsol_bcast_probe_resp_interval;
};
/**
@@ -720,9 +742,8 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
* that a frame can be transmitted while the queues are stopped for
* off-channel operation.
- * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
- * used to indicate that a pending frame requires TX processing before
- * it can be sent out.
+ * @IEEE80211_TX_CTL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ * (header conversion)
* @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211,
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
@@ -791,7 +812,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
- IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
@@ -823,8 +844,9 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
* @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
* @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
- * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation
- * (header conversion)
+ * @IEEE80211_TX_INTCFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
* @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
* has already been assigned to this frame.
*
@@ -837,7 +859,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
- IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6),
+ IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6),
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
};
@@ -1002,7 +1024,8 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @status.ampdu_ack_len: AMPDU ack length
* @status.ampdu_len: AMPDU length
* @status.antenna: (legacy, kept only for iwlegacy)
- * @status.tx_time: airtime consumed for transmission
+ * @status.tx_time: airtime consumed for transmission; note this is only
+ * used for WMM AC, not for airtime fairness
* @status.is_valid_ack_signal: ACK signal is valid
* @status.status_driver_data: driver use area
* @ack: union part for pure ACK data
@@ -1095,12 +1118,14 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
* @info: Basic tx status information
* @skb: Packet skb (can be NULL if not provided by the driver)
* @rate: The TX rate that was used when sending the packet
+ * @free_list: list where processed skbs are stored to be freed by the driver
*/
struct ieee80211_tx_status {
struct ieee80211_sta *sta;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
struct rate_info *rate;
+ struct list_head *free_list;
};
/**
@@ -1606,6 +1631,21 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
};
+
+/**
+ * enum ieee80211_offload_flags - virtual interface offload flags
+ *
+ * @IEEE80211_OFFLOAD_ENCAP_ENABLED: tx encapsulation offload is enabled
+ * The driver supports sending frames passed as 802.3 frames by mac80211.
+ * It must also support sending 802.11 packets for the same interface.
+ * @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload
+ */
+
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0),
+ IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1),
+};
+
/**
* struct ieee80211_vif - per-interface data
*
@@ -1626,6 +1666,11 @@ enum ieee80211_vif_flags {
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
* at runtime, mac80211 will never touch this field
+ * @offload_flags: hardware offload capabilities/flags for this interface.
+ * These are initialized by mac80211 before calling .add_interface,
+ * .change_interface or .update_vif_offload and updated by the driver
+ * within these ops, based on supported features or runtime change
+ * restrictions.
* @hw_queue: hardware queue for each AC
* @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -1645,6 +1690,8 @@ enum ieee80211_vif_flags {
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
* @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
* protected by fq->lock.
+ * @offload_flags: 802.3 -> 802.11 encapsulation offload flags, see
+ * &enum ieee80211_offload_flags.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1662,6 +1709,7 @@ struct ieee80211_vif {
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
u32 driver_flags;
+ u32 offload_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
@@ -2328,6 +2376,9 @@ struct ieee80211_txq {
* aggregating MPDUs with the same keyid, allowing mac80211 to keep Tx
* A-MPDU sessions active while rekeying with Extended Key ID.
*
+ * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation
+ * offload
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2380,6 +2431,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_MULTI_BSSID,
IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3814,6 +3866,10 @@ enum ieee80211_reconfig_type {
* @set_tid_config: Apply TID specific configurations. This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer.
* This callback may sleep.
+ * @update_vif_offload: Update virtual interface offload flags
+ * This callback may sleep.
+ * @sta_set_4addr: Called to notify the driver when a station starts/stops using
+ * 4-address mode
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4125,6 +4181,10 @@ struct ieee80211_ops {
int (*reset_tid_config)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 tids);
+ void (*update_vif_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
};
/**
@@ -5647,7 +5707,7 @@ void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
* ieee80211_sta_register_airtime - register airtime usage for a sta/tid
*
- * Register airtime usage for a given sta on a given tid. The driver can call
+ * Register airtime usage for a given sta on a given tid. The driver must call
* this function to notify mac80211 that a station used a certain amount of
* airtime. This information will be used by the TXQ scheduler to schedule
* stations in a way that ensures airtime fairness.
@@ -6593,4 +6653,29 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
*/
bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
+/**
+ * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: FILS discovery template. %NULL on error.
+ */
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
+ * probe response template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: Unsolicited broadcast probe response template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
#endif /* MAC80211_H */
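As an illustration of the offload plumbing added above, a minimal driver-side sketch of the new .update_vif_offload op might look as follows (hypothetical driver; everything beyond this header's symbols is assumed):

static void drv_update_vif_offload(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	/* mac80211 initializes vif->offload_flags before calling this op;
	 * the driver masks it down to what the hardware can honour.
	 */
	if (!ieee80211_hw_check(hw, SUPPORTS_TX_ENCAP_OFFLOAD))
		vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;

	/* assumption: this hardware cannot offload 4-address frames */
	vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_4ADDR;
}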
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 3525d2822abe..753ba7e755d6 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -85,8 +85,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx);
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
@@ -185,8 +184,7 @@ static inline bool mptcp_established_options(struct sock *sk,
}
static inline void mptcp_incoming_options(struct sock *sk,
- struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+ struct sk_buff *skb)
{
}
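For reference, a hedged sketch of the simplified call site after this signature change; sk_is_mptcp() is an existing helper and the surrounding receive path is assumed:

/* TCP receive path fragment: MPTCP now parses its own options from the
 * skb, so no tcp_options_received is threaded through anymore.
 */
if (sk_is_mptcp(sk))
	mptcp_incoming_options(sk, skb);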
diff --git a/include/net/netlink.h b/include/net/netlink.h
index fdd317f8fde4..b2cf34f53e55 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -730,7 +730,6 @@ static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* See nla_parse()
@@ -828,7 +827,6 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len,
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the specified attribute stream against the
diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h
index 1937476c94a0..1849e77eb68a 100644
--- a/include/net/netns/nexthop.h
+++ b/include/net/netns/nexthop.h
@@ -14,6 +14,6 @@ struct netns_nexthop {
unsigned int seq; /* protected by rtnl_mutex */
u32 last_id_allocated;
- struct atomic_notifier_head notifier_chain;
+ struct blocking_notifier_head notifier_chain;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index a1a8d45adb42..6c0806bd8d1e 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -8,6 +8,7 @@ struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
struct list_head module_list;
+ struct list_head notify_list;
struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3a4f9e3b91a5..2fd76a9b6dc8 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -105,13 +105,9 @@ struct nexthop {
};
enum nexthop_event_type {
- NEXTHOP_EVENT_ADD,
NEXTHOP_EVENT_DEL
};
-int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
- enum nexthop_event_type event_type,
- struct nexthop *nh);
int register_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
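Since the notifier chain above is now blocking rather than atomic, a consumer's callback may sleep. A minimal consumer sketch (hypothetical names):

static int my_nexthop_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	switch (event) {
	case NEXTHOP_EVENT_DEL:
		/* sleepable cleanup (mutexes, hardware I/O) is now safe */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_nexthop_nb = {
	.notifier_call = my_nexthop_event,
};

/* register_nexthop_notifier(net, &my_nexthop_nb); on init */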
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d60e7c39d60c..6c762457122f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1047,12 +1047,6 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
return 0;
}
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index b33f1aefad09..0bdff38eb4bb 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -226,12 +226,14 @@ struct sctp_sock {
data_ready_signalled:1;
atomic_t pd_mode;
+
+ /* Fields after this point will be skipped on copies, like on accept
+ * and peeloff operations
+ */
+
/* Receive to here while partial delivery is in effect. */
struct sk_buff_head pd_lobby;
- /* These must be the last fields, as they will skipped on copies,
- * like on accept and peeloff operations
- */
struct list_head auto_asconf_list;
int do_auto_asconf;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index 7dd3051551fb..a5c6ae78df77 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -845,7 +845,6 @@ enum sock_flags {
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
- SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
SOCK_FASYNC, /* fasync() active */
@@ -1526,7 +1525,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
@@ -2197,6 +2195,8 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
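A minimal teardown sketch for the new sk_stop_timer_sync() (hypothetical protocol state; presumably the _sync variant pairs del_timer_sync() with dropping the socket reference):

static void my_proto_destroy(struct sock *sk)
{
	struct my_proto_sock *mp = my_proto_sk(sk);	/* assumed cast helper */

	/* guarantee the timer handler has finished before the socket dies */
	sk_stop_timer_sync(sk, &mp->retry_timer);
}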
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index ff2246914301..53e8b4994296 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -203,6 +203,7 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
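A hedged driver-side sketch of raising the new notifier type, e.g. after a hardware-initiated FDB flush on a port:

static void my_port_notify_fdb_flush(struct net_device *dev)
{
	struct switchdev_notifier_info info = {};

	/* tell the bridge to drop entries it learned on this port */
	call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE, dev,
				 &info, NULL);
}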
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e85d564446c6..3601dea931a6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1104,7 +1104,7 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin);
+ bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1414,6 +1414,8 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
* If 87.5 % (7/8) of the space has been consumed, we want to override
* SO_RCVLOWAT constraint, since we are receiving skbs with too small
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 3a41627cbdfe..08537aa14f7c 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -121,6 +121,9 @@ struct vxlanhdr_gbp {
#define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16)
#define VXLAN_GBP_ID_MASK (0xFFFF)
+#define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \
+ VXLAN_GBP_ID_MASK)
+
/*
* VXLAN Generic Protocol Extension (VXLAN_F_GPE):
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
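A small sketch of the intended use of the new mask, assuming a parsed GBP header word:

static u32 vxlan_sanitize_gbp(u32 gbp_word)
{
	/* keep only DONT_LEARN, POLICY_APPLIED and the 16-bit ID; reserved
	 * bits from the wire are dropped rather than interpreted
	 */
	return gbp_word & VXLAN_GBP_MASK;
}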
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index da369b12005f..3105bbb6cdcf 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -566,6 +566,7 @@ struct ocelot_port {
u8 ptp_cmd;
struct sk_buff_head tx_skbs;
u8 ts_id;
+ spinlock_t ts_id_lock;
phy_interface_t phy_mode;
@@ -677,6 +678,7 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
+void ocelot_deinit_port(struct ocelot *ocelot, int port);
/* DSA callbacks */
void ocelot_port_enable(struct ocelot *ocelot, int port,
@@ -708,8 +710,8 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb);
+void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone);
void ocelot_get_txtstamp(struct ocelot *ocelot);
void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu);
int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
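A hypothetical TX-path sketch against the reworked two-step timestamping API (the clone is queued on the port and matched up later by ocelot_get_txtstamp()):

static void my_xmit_hwtstamp(struct ocelot *ocelot, int port,
			     struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone_sk(skb);

	if (clone)
		/* ts_id allocation is now serialized by ts_id_lock */
		ocelot_port_add_txtstamp_skb(ocelot, port, clone);
}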
diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h
index 4a6b2f71b6b2..6a7388fa7cc5 100644
--- a/include/soc/mscc/ocelot_ptp.h
+++ b/include/soc/mscc/ocelot_ptp.h
@@ -53,6 +53,7 @@ int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
int ocelot_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
-int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info);
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info);
int ocelot_deinit_timestamp(struct ocelot *ocelot);
#endif
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
index 9b1d43d671a3..8c18dc6d3fde 100644
--- a/include/soc/nps/common.h
+++ b/include/soc/nps/common.h
@@ -45,6 +45,12 @@
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
+#ifndef AUX_IENABLE
+#define AUX_IENABLE 0x40c
+#endif
+
+#define CTOP_AUX_IACK (0xFFFFF800 + 0x088)
+
#ifndef __ASSEMBLY__
/* In order to increase compilation test coverage */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 5e3919ffb00c..fc4fcac72cf7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1193,6 +1193,8 @@ struct snd_soc_pcm_runtime {
((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \
((dai) = (rtd)->dais[i]); \
(i)++)
+#define for_each_rtd_dais_rollback(rtd, i, dai) \
+ for (; (--(i) >= 0) && ((dai) = (rtd)->dais[i]);)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
@@ -1361,6 +1363,8 @@ void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
const struct snd_soc_dai_link_component *dlc);
+struct snd_soc_dai *snd_soc_find_dai_with_mutex(
+ const struct snd_soc_dai_link_component *dlc);
#include <sound/soc-dai.h>
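A sketch of the error-unwind pattern the new rollback iterator enables (assuming the snd_soc_dai_startup()/snd_soc_dai_shutdown() helpers of this era):

static int my_rtd_startup(struct snd_soc_pcm_runtime *rtd,
			  struct snd_pcm_substream *substream)
{
	struct snd_soc_dai *dai;
	int i, ret;

	for_each_rtd_dais(rtd, i, dai) {
		ret = snd_soc_dai_startup(dai, substream);
		if (ret < 0)
			goto err;
	}
	return 0;

err:
	/* walk back only over DAIs that were actually started */
	for_each_rtd_dais_rollback(rtd, i, dai)
		snd_soc_dai_shutdown(dai, substream);
	return ret;
}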
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 3b67d5981224..e70c90116eda 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -579,7 +579,7 @@ TRACE_EVENT(rxrpc_client,
__entry->channel = channel;
__entry->usage = conn ? atomic_read(&conn->usage) : -2;
__entry->op = op;
- __entry->cid = conn->proto.cid;
+ __entry->cid = conn ? conn->proto.cid : 0;
),
TP_printk("C=%08x h=%2d %s i=%08x u=%d",
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8dda13880957..a22812561064 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -124,6 +124,7 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
@@ -658,6 +659,12 @@ union bpf_attr {
__u32 flags;
} iter_create;
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -1447,8 +1454,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 0, if current task belongs to the cgroup2.
+ * * 1, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -3349,38 +3356,38 @@ union bpf_attr {
* Description
* Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *task*, which is a valid
- * pointer to struct task_struct. To store the stacktrace, the
- * bpf program provides *buf* with a nonnegative *size*.
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
*
* The last argument, *flags*, holds the number of stack frames to
* skip (from 0 to 255), masked with
@@ -3410,12 +3417,12 @@ union bpf_attr {
* long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
* Description
* Load header option. Support reading a particular TCP header
- * option for bpf program (BPF_PROG_TYPE_SOCK_OPS).
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
*
* If *flags* is 0, it will search the option from the
- * sock_ops->skb_data. The comment in "struct bpf_sock_ops"
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
* has details on what skb_data contains under different
- * sock_ops->op.
+ * *skops*\ **->op**.
*
* The first byte of the *searchby_res* specifies the
* kind that it wants to search.
@@ -3435,7 +3442,7 @@ union bpf_attr {
* [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
*
* To search for the standard window scale option (3),
- * the searchby_res should be [ 3, 0, 0, .... 0 ].
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
* Note, kind-length must be 0 for regular option.
*
* Searching for No-Op (0) and End-of-Option-List (1) are
@@ -3445,27 +3452,30 @@ union bpf_attr {
* of a header option.
*
* Supported flags:
+ *
* * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
* saved_syn packet or the just-received syn packet.
*
* Return
- * >0 when found, the header option is copied to *searchby_res*.
- * The return value is the total length copied.
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
*
- * **-EINVAL** If param is invalid
+ * **-EINVAL** if a parameter is invalid.
*
- * **-ENOMSG** The option is not found
+ * **-ENOMSG** if the option is not found.
*
- * **-ENOENT** No syn packet available when
- * **BPF_LOAD_HDR_OPT_TCP_SYN** is used
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
*
- * **-ENOSPC** Not enough space. Only *len* number of
- * bytes are copied.
+ * **-ENOSPC** if there is not enough space. Only *len* number of
+ * bytes are copied.
*
- * **-EFAULT** Cannot parse the header options in the packet
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
*
- * **-EPERM** This helper cannot be used under the
- * current sock_ops->op.
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
*
* long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
* Description
@@ -3483,44 +3493,44 @@ union bpf_attr {
* by searching the same option in the outgoing skb.
*
* This helper can only be called during
- * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
*
* Return
* 0 on success, or negative error in case of failure:
*
- * **-EINVAL** If param is invalid
+ * **-EINVAL** if a parameter is invalid.
*
- * **-ENOSPC** Not enough space in the header.
- * Nothing has been written
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
*
- * **-EEXIST** The option has already existed
+ * **-EEXIST** if the option already exists.
*
- * **-EFAULT** Cannot parse the existing header options
+ * **-EFAULT** on failure to parse the existing header options.
*
- * **-EPERM** This helper cannot be used under the
- * current sock_ops->op.
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
*
* long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
* Description
* Reserve *len* bytes for the bpf header option. The
- * space will be used by bpf_store_hdr_opt() later in
- * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
*
- * If bpf_reserve_hdr_opt() is called multiple times,
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
* the total number of bytes will be reserved.
*
* This helper can only be called during
- * BPF_SOCK_OPS_HDR_OPT_LEN_CB.
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
*
* Return
* 0 on success, or negative error in case of failure:
*
- * **-EINVAL** if param is invalid
+ * **-EINVAL** if a parameter is invalid.
*
- * **-ENOSPC** Not enough space in the header.
+ * **-ENOSPC** if there is not enough space in the header.
*
- * **-EPERM** This helper cannot be used under the
- * current sock_ops->op.
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
*
* void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
* Description
@@ -3560,9 +3570,9 @@ union bpf_attr {
*
* long bpf_d_path(struct path *path, char *buf, u32 sz)
* Description
- * Return full path for given 'struct path' object, which
- * needs to be the kernel BTF 'path' object. The path is
- * returned in the provided buffer 'buf' of size 'sz' and
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
* is zero terminated.
*
* Return
@@ -3573,7 +3583,7 @@ union bpf_attr {
* long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
* Description
* Read *size* bytes from user space address *user_ptr* and store
- * the data in *dst*. This is a wrapper of copy_from_user().
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
* Return
* 0 on success, or a negative error in case of failure.
*/
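A minimal userspace sketch of the new BPF_PROG_BIND_MAP command (flags must currently be zero, and the unused tail of bpf_attr must be zeroed to pass CHECK_ATTR):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_bind_map(int prog_fd, int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = 0;

	return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}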
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 40d35145c879..a2ecc8b00611 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -122,6 +122,8 @@ enum devlink_command {
DEVLINK_CMD_TRAP_POLICER_NEW,
DEVLINK_CMD_TRAP_POLICER_DEL,
+ DEVLINK_CMD_HEALTH_REPORTER_TEST,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -460,6 +462,9 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_EXTERNAL, /* u8 */
DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT, /* u64 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b4f2d134e713..9ca87bc73c44 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1617,6 +1617,8 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
+ ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
+ ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 5dcd24cb33ea..e2bf36e6964b 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -79,6 +79,7 @@ enum {
ETHTOOL_MSG_TSINFO_GET_REPLY,
ETHTOOL_MSG_CABLE_TEST_NTF,
ETHTOOL_MSG_CABLE_TEST_TDR_NTF,
+ ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -91,9 +92,12 @@ enum {
#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
/* provide optional reply for SET or ACT requests */
#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
+/* request statistics, if supported by the driver */
+#define ETHTOOL_FLAG_STATS (1 << 2)
#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
- ETHTOOL_FLAG_OMIT_REPLY)
+ ETHTOOL_FLAG_OMIT_REPLY | \
+ ETHTOOL_FLAG_STATS)
enum {
ETHTOOL_A_HEADER_UNSPEC,
@@ -376,12 +380,25 @@ enum {
ETHTOOL_A_PAUSE_AUTONEG, /* u8 */
ETHTOOL_A_PAUSE_RX, /* u8 */
ETHTOOL_A_PAUSE_TX, /* u8 */
+ ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */
/* add new constants above here */
__ETHTOOL_A_PAUSE_CNT,
ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1)
};
+enum {
+ ETHTOOL_A_PAUSE_STAT_UNSPEC,
+ ETHTOOL_A_PAUSE_STAT_PAD,
+
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
+ ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
+
+ /* add new constants above here */
+ __ETHTOOL_A_PAUSE_STAT_CNT,
+ ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
+};
+
/* EEE */
enum {
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 75a2ac479247..4c687686aa8f 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -457,6 +457,8 @@ enum {
MDBA_MDB_EATTR_TIMER,
MDBA_MDB_EATTR_SRC_LIST,
MDBA_MDB_EATTR_GROUP_MODE,
+ MDBA_MDB_EATTR_SOURCE,
+ MDBA_MDB_EATTR_RTPROT,
__MDBA_MDB_EATTR_MAX
};
#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
@@ -516,6 +518,8 @@ struct br_mdb_entry {
__u8 state;
#define MDB_FLAGS_OFFLOAD (1 << 0)
#define MDB_FLAGS_FAST_LEAVE (1 << 1)
+#define MDB_FLAGS_STAR_EXCL (1 << 2)
+#define MDB_FLAGS_BLOCKED (1 << 3)
__u8 flags;
__u16 vid;
struct {
@@ -530,10 +534,23 @@ struct br_mdb_entry {
enum {
MDBA_SET_ENTRY_UNSPEC,
MDBA_SET_ENTRY,
+ MDBA_SET_ENTRY_ATTRS,
__MDBA_SET_ENTRY_MAX,
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* [MDBA_SET_ENTRY_ATTRS] = {
+ * [MDBE_ATTR_xxx]
+ * ...
+ * }
+ */
+enum {
+ MDBE_ATTR_UNSPEC,
+ MDBE_ATTR_SOURCE,
+ __MDBE_ATTR_MAX,
+};
+#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
+
/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
enum {
BRIDGE_XSTATS_UNSPEC,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index bf4667403cab..c4b23f06f69e 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,7 +36,7 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
/**
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f6d86033c4fa..7d8eced6f459 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -790,9 +790,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
@@ -1035,6 +1036,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_LAST_CPU 184
#define KVM_CAP_SMALLER_MAXPHYADDR 185
#define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
#ifdef KVM_CAP_IRQ_ROUTING
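Userspace probes the new capability with the usual extension check, e.g.:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* returns non-zero if the kernel can expose steal time to this VM */
static int vm_has_steal_time(int vm_fd)
{
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME) > 0;
}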
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 0584e0d349f0..bdc90b8dfd24 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2513,6 +2513,14 @@ enum nl80211_commands {
* @NL80211_ATTR_HE_6GHZ_CAPABILITY: HE 6 GHz Band Capability element (from
* association request when used with NL80211_CMD_NEW_STATION).
*
+ * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS
+ * discovery. It is a nested attribute, see
+ * &enum nl80211_fils_discovery_attributes.
+ *
+ * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure
+ * unsolicited broadcast probe response. It is a nested attribute, see
+ * &enum nl80211_unsol_bcast_probe_resp_attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2995,6 +3003,10 @@ enum nl80211_attrs {
NL80211_ATTR_HE_6GHZ_CAPABILITY,
+ NL80211_ATTR_FILS_DISCOVERY,
+
+ NL80211_ATTR_UNSOL_BCAST_PROBE_RESP,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3737,6 +3749,16 @@ enum nl80211_wmm_rule {
* @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel
* in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_OFFSET: frequency offset in KHz
+ * @NL80211_FREQUENCY_ATTR_1MHZ: 1 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_2MHZ: 2 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_4MHZ: 4 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_8MHZ: 8 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed
+ * on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3768,6 +3790,11 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_WMM,
NL80211_FREQUENCY_ATTR_NO_HE,
NL80211_FREQUENCY_ATTR_OFFSET,
+ NL80211_FREQUENCY_ATTR_1MHZ,
+ NL80211_FREQUENCY_ATTR_2MHZ,
+ NL80211_FREQUENCY_ATTR_4MHZ,
+ NL80211_FREQUENCY_ATTR_8MHZ,
+ NL80211_FREQUENCY_ATTR_16MHZ,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -5852,6 +5879,12 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP: Device wants to do SAE authentication
* in AP mode (SAE password is passed as part of the start AP command).
*
+ * @NL80211_EXT_FEATURE_FILS_DISCOVERY: Driver/device supports FILS discovery
+ * frames transmission
+ *
+ * @NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP: Driver/device supports
+ * unsolicited broadcast probe response transmission
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5910,6 +5943,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION,
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -7004,4 +7039,64 @@ enum nl80211_iftype_akm_attributes {
NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_fils_discovery_attributes - FILS discovery configuration
+ * from IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_INVALID: Invalid
+ *
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action
+ * frame including the headers.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_LAST: Internal
+ * @NL80211_FILS_DISCOVERY_ATTR_MAX: highest attribute
+ */
+enum nl80211_fils_discovery_attributes {
+ __NL80211_FILS_DISCOVERY_ATTR_INVALID,
+
+ NL80211_FILS_DISCOVERY_ATTR_INT_MIN,
+ NL80211_FILS_DISCOVERY_ATTR_INT_MAX,
+ NL80211_FILS_DISCOVERY_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_FILS_DISCOVERY_ATTR_LAST,
+ NL80211_FILS_DISCOVERY_ATTR_MAX = __NL80211_FILS_DISCOVERY_ATTR_LAST - 1
+};
+
+/*
+ * FILS discovery template minimum length with action frame headers and
+ * mandatory fields.
+ */
+#define NL80211_FILS_DISCOVERY_TMPL_MIN_LEN 42
+
+/**
+ * enum nl80211_unsol_bcast_probe_resp_attributes - Unsolicited broadcast probe
+ * response configuration. Applicable only in the 6 GHz band.
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID: Invalid
+ *
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU).
+ * Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0
+ * 26.17.2.3.2 (AP behavior for fast passive scanning).
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response
+ * frame template (binary).
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST: Internal
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX: highest attribute
+ */
+enum nl80211_unsol_bcast_probe_resp_attributes {
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID,
+
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX =
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST - 1
+};
#endif /* __LINUX_NL80211_H */
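A hedged userspace sketch (libnl-style; msg, tmpl and tmpl_len are assumed to come from a prepared NL80211_CMD_START_AP request) of nesting the new FILS discovery attributes:

struct nlattr *fd = nla_nest_start(msg, NL80211_ATTR_FILS_DISCOVERY);

if (!fd)
	return -ENOBUFS;
nla_put_u32(msg, NL80211_FILS_DISCOVERY_ATTR_INT_MIN, 20);	/* TU */
nla_put_u32(msg, NL80211_FILS_DISCOVERY_ATTR_INT_MAX, 100);	/* TU */
nla_put(msg, NL80211_FILS_DISCOVERY_ATTR_TMPL, tmpl_len, tmpl);
nla_nest_end(msg, fd);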
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index add01db1daef..80ea15e12113 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -254,6 +254,8 @@ static inline int tipc_aead_key_size(struct tipc_aead_key *key)
return sizeof(*key) + key->keylen;
}
+#define TIPC_REKEYING_NOW (~0U)
+
/* The macros and functions below are deprecated:
*/
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index dc0d23a50e69..d847dd671d79 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -165,6 +165,8 @@ enum {
TIPC_NLA_NODE_UP, /* flag */
TIPC_NLA_NODE_ID, /* data */
TIPC_NLA_NODE_KEY, /* data */
+ TIPC_NLA_NODE_KEY_MASTER, /* flag */
+ TIPC_NLA_NODE_REKEYING, /* u32 */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 6fb95aa19405..6dbdb0b3fd03 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -2,6 +2,8 @@
/******************************************************************************
* Xen balloon functionality
*/
+#ifndef _XEN_BALLOON_H
+#define _XEN_BALLOON_H
#define RETRY_UNLIMITED 0
@@ -34,3 +36,5 @@ static inline void xen_balloon_init(void)
{
}
#endif
+
+#endif /* _XEN_BALLOON_H */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 19a72f591e2b..43efba045acc 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -52,4 +52,13 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#else
+#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
+#define xen_free_unpopulated_pages free_xenballooned_pages
+#include <xen/balloon.h>
+#endif
+
#endif /* _XEN_XEN_H */
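A kernel-side sketch of the new allocator; on !CONFIG_XEN_UNPOPULATED_ALLOC the #defines above fall back to the balloon API transparently:

static int my_map_grants(void)
{
	struct page *pages[16];
	int ret;

	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
	if (ret)
		return ret;

	/* ... map grant references onto these backing pages ... */

	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
	return 0;
}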
diff --git a/init/main.c b/init/main.c
index ae78fb68d231..e880b4ecb314 100644
--- a/init/main.c
+++ b/init/main.c
@@ -33,6 +33,7 @@
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
+#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
@@ -303,7 +304,7 @@ static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
#ifdef CONFIG_BOOT_CONFIG
-char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
+static char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
@@ -1402,6 +1403,7 @@ static int __ref kernel_init(void *unused)
kernel_init_freeable();
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
+ kprobe_free_init_mem();
ftrace_free_init_mem();
free_initmem();
mark_readonly();
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index d1b8644bfb88..3f312bf2b116 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -85,7 +85,7 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
}
static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, semmni;
struct ipc_namespace *ns = current->nsproxy->ipc_ns;
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index e046fb7d17cd..e5fd31268ae0 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -898,6 +898,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
struct bpf_prog *old,
struct bpf_prog *new)
{
+ u8 *old_addr, *new_addr, *old_bypass_addr;
struct prog_poke_elem *elem;
struct bpf_array_aux *aux;
@@ -918,12 +919,13 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
* there could be danger of use after free otherwise.
* 2) Initially when we start tracking aux, the program
* is not JITed yet and also does not have a kallsyms
- * entry. We skip these as poke->ip_stable is not
- * active yet. The JIT will do the final fixup before
- * setting it stable. The various poke->ip_stable are
- * successively activated, so tail call updates can
- * arrive from here while JIT is still finishing its
- * final fixup for non-activated poke entries.
+ * entry. We skip these as poke->tailcall_target_stable
+ * is not active yet. The JIT will do the final fixup
+ * before setting it stable. The various
+ * poke->tailcall_target_stable are successively
+ * activated, so tail call updates can arrive from here
+ * while JIT is still finishing its final fixup for
+ * non-activated poke entries.
* 3) On program teardown, the program's kallsym entry gets
* removed out of RCU callback, but we can only untrack
* from sleepable context, therefore bpf_arch_text_poke()
@@ -940,7 +942,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
* 5) Any other error happening below from bpf_arch_text_poke()
* is an unexpected bug.
*/
- if (!READ_ONCE(poke->ip_stable))
+ if (!READ_ONCE(poke->tailcall_target_stable))
continue;
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
@@ -948,12 +950,39 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
poke->tail_call.key != key)
continue;
- ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
- old ? (u8 *)old->bpf_func +
- poke->adj_off : NULL,
- new ? (u8 *)new->bpf_func +
- poke->adj_off : NULL);
- BUG_ON(ret < 0 && ret != -EINVAL);
+ old_bypass_addr = old ? NULL : poke->bypass_addr;
+ old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+ new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+
+ if (new) {
+ ret = bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP,
+ old_addr, new_addr);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ if (!old) {
+ ret = bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ poke->bypass_addr,
+ NULL);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ }
+ } else {
+ ret = bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ old_bypass_addr,
+ poke->bypass_addr);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ /* let other CPUs finish executing the program
+ * so that they cannot be exposed to an invalid
+ * nop, stack unwind or nop state
+ */
+ if (!ret)
+ synchronize_rcu();
+ ret = bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP,
+ old_addr, NULL);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ }
}
}
}
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
index 75be02799c0f..6edff97ad594 100644
--- a/kernel/bpf/bpf_inode_storage.c
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -249,9 +249,7 @@ const struct bpf_map_ops inode_storage_map_ops = {
.map_owner_storage_ptr = inode_storage_ptr,
};
-BTF_ID_LIST(bpf_inode_storage_btf_ids)
-BTF_ID_UNUSED
-BTF_ID(struct, inode)
+BTF_ID_LIST_SINGLE(bpf_inode_storage_btf_ids, struct, inode)
const struct bpf_func_proto bpf_inode_storage_get_proto = {
.func = bpf_inode_storage_get,
@@ -259,9 +257,9 @@ const struct bpf_func_proto bpf_inode_storage_get_proto = {
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &bpf_inode_storage_btf_ids[0],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
- .btf_id = bpf_inode_storage_btf_ids,
};
const struct bpf_func_proto bpf_inode_storage_delete_proto = {
@@ -270,5 +268,5 @@ const struct bpf_func_proto bpf_inode_storage_delete_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
- .btf_id = bpf_inode_storage_btf_ids,
+ .arg2_btf_id = &bpf_inode_storage_btf_ids[0],
};
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index ffa7d11fc2bd..5d3a7af9ba9b 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -159,7 +159,7 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
struct bpf_local_storage_elem *selem)
{
RCU_INIT_POINTER(selem->local_storage, local_storage);
- hlist_add_head(&selem->snode, &local_storage->list);
+ hlist_add_head_rcu(&selem->snode, &local_storage->list);
}
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index f9ac6935ab3c..5d3c36e13139 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -4193,19 +4193,6 @@ again:
return true;
}
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
- const struct bpf_func_proto *fn, int arg)
-{
- int id;
-
- if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux)
- return -EINVAL;
- id = fn->btf_id[arg];
- if (!id || id > btf_vmlinux->nr_types)
- return -EINVAL;
- return id;
-}
-
static int __get_type_size(struct btf *btf, u32 btf_id,
const struct btf_type **bad_type)
{
@@ -4772,7 +4759,7 @@ static int btf_id_cmp_func(const void *a, const void *b)
return *pa - *pb;
}
-bool btf_id_set_contains(struct btf_id_set *set, u32 id)
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
{
return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ed0b3578867c..c4811b139caa 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -98,6 +98,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
fp->jit_requested = ebpf_jit_enabled();
INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
+ mutex_init(&fp->aux->used_maps_mutex);
return fp;
}
@@ -253,6 +254,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
void __bpf_prog_free(struct bpf_prog *fp)
{
if (fp->aux) {
+ mutex_destroy(&fp->aux->used_maps_mutex);
free_percpu(fp->aux->stats);
kfree(fp->aux->poke_tab);
kfree(fp->aux);
@@ -773,7 +775,8 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
if (size > poke_tab_max)
return -ENOSPC;
- if (poke->ip || poke->ip_stable || poke->adj_off)
+ if (poke->tailcall_target || poke->tailcall_target_stable ||
+ poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
return -EINVAL;
switch (poke->reason) {
@@ -1747,8 +1750,9 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
struct bpf_prog_aux *aux = fp->aux;
- int i;
+ int i, ret = 0;
+ mutex_lock(&aux->used_maps_mutex);
for (i = 0; i < aux->used_map_cnt; i++) {
struct bpf_map *map = aux->used_maps[i];
struct bpf_array *array;
@@ -1757,11 +1761,15 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
continue;
array = container_of(map, struct bpf_array, map);
- if (!bpf_prog_array_compatible(array, fp))
- return -EINVAL;
+ if (!bpf_prog_array_compatible(array, fp)) {
+ ret = -EINVAL;
+ goto out;
+ }
}
- return 0;
+out:
+ mutex_unlock(&aux->used_maps_mutex);
+ return ret;
}
static void bpf_prog_select_func(struct bpf_prog *fp)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index fe0e06284d33..3395cf140d22 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1622,7 +1622,6 @@ struct bpf_iter_seq_hash_map_info {
struct bpf_map *map;
struct bpf_htab *htab;
void *percpu_value_buf; // non-zero means percpu hash
- unsigned long flags;
u32 bucket_id;
u32 skip_elems;
};
@@ -1632,7 +1631,6 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
struct htab_elem *prev_elem)
{
const struct bpf_htab *htab = info->htab;
- unsigned long flags = info->flags;
u32 skip_elems = info->skip_elems;
u32 bucket_id = info->bucket_id;
struct hlist_nulls_head *head;
@@ -1656,19 +1654,18 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
/* not found, unlock and go to the next bucket */
b = &htab->buckets[bucket_id++];
- htab_unlock_bucket(htab, b, flags);
+ rcu_read_unlock();
skip_elems = 0;
}
for (i = bucket_id; i < htab->n_buckets; i++) {
b = &htab->buckets[i];
- flags = htab_lock_bucket(htab, b);
+ rcu_read_lock();
count = 0;
head = &b->head;
hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
if (count >= skip_elems) {
- info->flags = flags;
info->bucket_id = i;
info->skip_elems = count;
return elem;
@@ -1676,7 +1673,7 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
count++;
}
- htab_unlock_bucket(htab, b, flags);
+ rcu_read_unlock();
skip_elems = 0;
}
@@ -1754,14 +1751,10 @@ static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
{
- struct bpf_iter_seq_hash_map_info *info = seq->private;
-
if (!v)
(void)__bpf_hash_map_seq_show(seq, NULL);
else
- htab_unlock_bucket(info->htab,
- &info->htab->buckets[info->bucket_id],
- info->flags);
+ rcu_read_unlock();
}
static int bpf_iter_init_hash_map(void *priv_data,
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index b48a56f53495..dd4b7fd60ee7 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -227,10 +227,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
else
prev_key = key;
+ rcu_read_lock();
if (map->ops->map_get_next_key(map, prev_key, key)) {
map_iter(m)->done = true;
- return NULL;
+ key = NULL;
}
+ rcu_read_unlock();
return key;
}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index a2fa006f430e..06065fa27124 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -665,18 +665,17 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
return __bpf_get_stack(regs, task, NULL, buf, size, flags);
}
-BTF_ID_LIST(bpf_get_task_stack_btf_ids)
-BTF_ID(struct, task_struct)
+BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
const struct bpf_func_proto bpf_get_task_stack_proto = {
.func = bpf_get_task_stack,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_get_task_stack_btf_ids[0],
.arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
- .btf_id = bpf_get_task_stack_btf_ids,
};
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 178c147350f5..34268491d2de 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2345,12 +2345,8 @@ void bpf_link_put(struct bpf_link *link)
if (!atomic64_dec_and_test(&link->refcnt))
return;
- if (in_atomic()) {
- INIT_WORK(&link->work, bpf_link_put_deferred);
- schedule_work(&link->work);
- } else {
- bpf_link_free(link);
- }
+ INIT_WORK(&link->work, bpf_link_put_deferred);
+ schedule_work(&link->work);
}
static int bpf_link_release(struct inode *inode, struct file *filp)
@@ -3162,21 +3158,25 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
const struct bpf_map *map;
int i;
+ mutex_lock(&prog->aux->used_maps_mutex);
for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
map = prog->aux->used_maps[i];
if (map == (void *)addr) {
*type = BPF_PSEUDO_MAP_FD;
- return map;
+ goto out;
}
if (!map->ops->map_direct_value_meta)
continue;
if (!map->ops->map_direct_value_meta(map, addr, off)) {
*type = BPF_PSEUDO_MAP_VALUE;
- return map;
+ goto out;
}
}
+ map = NULL;
- return NULL;
+out:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+ return map;
}
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
@@ -3294,6 +3294,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
memcpy(info.tag, prog->tag, sizeof(prog->tag));
memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
+ mutex_lock(&prog->aux->used_maps_mutex);
ulen = info.nr_map_ids;
info.nr_map_ids = prog->aux->used_map_cnt;
ulen = min_t(u32, info.nr_map_ids, ulen);
@@ -3303,9 +3304,12 @@ static int bpf_prog_get_info_by_fd(struct file *file,
for (i = 0; i < ulen; i++)
if (put_user(prog->aux->used_maps[i]->id,
- &user_map_ids[i]))
+ &user_map_ids[i])) {
+ mutex_unlock(&prog->aux->used_maps_mutex);
return -EFAULT;
+ }
}
+ mutex_unlock(&prog->aux->used_maps_mutex);
err = set_info_rec_size(&info);
if (err)
@@ -4153,6 +4157,66 @@ static int bpf_iter_create(union bpf_attr *attr)
return err;
}
+#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
+
+static int bpf_prog_bind_map(union bpf_attr *attr)
+{
+ struct bpf_prog *prog;
+ struct bpf_map *map;
+ struct bpf_map **used_maps_old, **used_maps_new;
+ int i, ret = 0;
+
+ if (CHECK_ATTR(BPF_PROG_BIND_MAP))
+ return -EINVAL;
+
+ if (attr->prog_bind_map.flags)
+ return -EINVAL;
+
+ prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ map = bpf_map_get(attr->prog_bind_map.map_fd);
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ goto out_prog_put;
+ }
+
+ mutex_lock(&prog->aux->used_maps_mutex);
+
+ used_maps_old = prog->aux->used_maps;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (used_maps_old[i] == map)
+ goto out_unlock;
+
+ used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
+ sizeof(used_maps_new[0]),
+ GFP_KERNEL);
+ if (!used_maps_new) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(used_maps_new, used_maps_old,
+ sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
+ used_maps_new[prog->aux->used_map_cnt] = map;
+
+ prog->aux->used_map_cnt++;
+ prog->aux->used_maps = used_maps_new;
+
+ kfree(used_maps_old);
+
+out_unlock:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+
+ if (ret)
+ bpf_map_put(map);
+out_prog_put:
+ bpf_prog_put(prog);
+ return ret;
+}
+
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr;
@@ -4286,6 +4350,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_LINK_DETACH:
err = link_detach(&attr);
break;
+ case BPF_PROG_BIND_MAP:
+ err = bpf_prog_bind_map(&attr);
+ break;
default:
err = -EINVAL;
break;
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 99af4cea1102..5b6af30bfbcd 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -22,7 +22,8 @@ struct bpf_iter_seq_task_info {
};
static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
- u32 *tid)
+ u32 *tid,
+ bool skip_if_dup_files)
{
struct task_struct *task = NULL;
struct pid *pid;
@@ -36,6 +37,12 @@ retry:
if (!task) {
++*tid;
goto retry;
+ } else if (skip_if_dup_files && task->tgid != task->pid &&
+ task->files == task->group_leader->files) {
+ put_task_struct(task);
+ task = NULL;
+ ++*tid;
+ goto retry;
}
}
rcu_read_unlock();
@@ -48,7 +55,7 @@ static void *task_seq_start(struct seq_file *seq, loff_t *pos)
struct bpf_iter_seq_task_info *info = seq->private;
struct task_struct *task;
- task = task_seq_get_next(info->common.ns, &info->tid);
+ task = task_seq_get_next(info->common.ns, &info->tid, false);
if (!task)
return NULL;
@@ -65,7 +72,7 @@ static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
++info->tid;
put_task_struct((struct task_struct *)v);
- task = task_seq_get_next(info->common.ns, &info->tid);
+ task = task_seq_get_next(info->common.ns, &info->tid, false);
if (!task)
return NULL;
@@ -148,7 +155,7 @@ again:
curr_files = *fstruct;
curr_fd = info->fd;
} else {
- curr_task = task_seq_get_next(ns, &curr_tid);
+ curr_task = task_seq_get_next(ns, &curr_tid, true);
if (!curr_task)
return NULL;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 86fdebb5ffd8..42dee5dcbc74 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -238,7 +238,6 @@ struct bpf_call_arg_meta {
u64 msize_max_value;
int ref_obj_id;
int func_id;
- u32 btf_id;
};
struct btf *btf_vmlinux;
@@ -436,6 +435,15 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
return type == ARG_PTR_TO_SOCK_COMMON;
}
+static bool arg_type_may_be_null(enum bpf_arg_type type)
+{
+ return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
+ type == ARG_PTR_TO_MEM_OR_NULL ||
+ type == ARG_PTR_TO_CTX_OR_NULL ||
+ type == ARG_PTR_TO_SOCKET_OR_NULL ||
+ type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
+}
+
/* Determine whether the function releases some resources allocated by another
* function call. The first reference type argument will be assumed to be
* released by release_reference().
@@ -1490,6 +1498,13 @@ static int check_subprogs(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
+ if (code == (BPF_JMP | BPF_CALL) &&
+ insn[i].imm == BPF_FUNC_tail_call &&
+ insn[i].src_reg != BPF_PSEUDO_CALL)
+ subprog[cur_subprog].has_tail_call = true;
+ if (BPF_CLASS(code) == BPF_LD &&
+ (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
+ subprog[cur_subprog].has_ld_abs = true;
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
@@ -2979,10 +2994,37 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
+ bool tail_call_reachable = false;
int ret_insn[MAX_CALL_FRAMES];
int ret_prog[MAX_CALL_FRAMES];
+ int j;
process_func:
+ /* protect against potential stack overflow that might happen when
+ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
+ * depth in that case to 256 so that the worst case scenario
+ * results in an 8k stack size (32, the tail call limit, * 256 = 8k).
+ *
+ * To get the idea what might happen, see an example:
+ * func1 -> sub rsp, 128
+ * subfunc1 -> sub rsp, 256
+ * tailcall1 -> add rsp, 256
+ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
+ * subfunc2 -> sub rsp, 64
+ * subfunc22 -> sub rsp, 128
+ * tailcall2 -> add rsp, 128
+ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
+ *
+ * tailcall will unwind the current stack frame but it will not get rid
+ * of the caller's stack, as shown in the example above.
+ */
+ if (idx && subprog[idx].has_tail_call && depth >= 256) {
+ verbose(env,
+ "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
+ depth);
+ return -EACCES;
+ }
/* round up to 32-bytes, since this is granularity
* of interpreter stack size
*/
@@ -3011,6 +3053,10 @@ continue_func:
i);
return -EFAULT;
}
+
+ if (subprog[idx].has_tail_call)
+ tail_call_reachable = true;
+
frame++;
if (frame >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep !\n",
@@ -3019,6 +3065,15 @@ continue_func:
}
goto process_func;
}
+ /* if a tail call was detected across bpf2bpf calls then mark each of the
+ * currently present subprog frames as tail call reachable subprogs;
+ * this info will be utilized by the JIT so that the tail call counter
+ * is preserved throughout bpf2bpf calls combined with tailcalls
+ */
+ if (tail_call_reachable)
+ for (j = 0; j < frame; j++)
+ subprog[ret_prog[j]].tail_call_reachable = true;
+
/* end of for() loop means the last insn of the 'subprog'
* was reached. Doesn't matter whether it was JA or EXIT
*/
@@ -3594,18 +3649,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
struct bpf_func_state *state = func(env, reg);
int err, min_off, max_off, i, j, slot, spi;
- if (reg->type != PTR_TO_STACK) {
- /* Allow zero-byte read from NULL, regardless of pointer type */
- if (zero_size_allowed && access_size == 0 &&
- register_is_null(reg))
- return 0;
-
- verbose(env, "R%d type=%s expected=%s\n", regno,
- reg_type_str[reg->type],
- reg_type_str[PTR_TO_STACK]);
- return -EACCES;
- }
-
if (tnum_is_const(reg->var_off)) {
min_off = max_off = reg->var_off.value + reg->off;
err = __check_stack_boundary(env, regno, min_off, access_size,
@@ -3750,9 +3793,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
access_size, zero_size_allowed,
"rdwr",
&env->prog->aux->max_rdwr_access);
- default: /* scalar_value|ptr_to_stack or invalid ptr */
+ case PTR_TO_STACK:
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
+ default: /* scalar_value or invalid ptr */
+ /* Allow zero-byte read from NULL, regardless of pointer type */
+ if (zero_size_allowed && access_size == 0 &&
+ register_is_null(reg))
+ return 0;
+
+ verbose(env, "R%d type=%s expected=%s\n", regno,
+ reg_type_str[reg->type],
+ reg_type_str[PTR_TO_STACK]);
+ return -EACCES;
}
}
@@ -3784,10 +3837,6 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
struct bpf_map *map = reg->map_ptr;
u64 val = reg->var_off.value;
- if (reg->type != PTR_TO_MAP_VALUE) {
- verbose(env, "R%d is not a pointer to map_value\n", regno);
- return -EINVAL;
- }
if (!is_const) {
verbose(env,
"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
@@ -3854,12 +3903,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO;
}
-static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
-{
- return type == ARG_PTR_TO_ALLOC_MEM ||
- type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
-}
-
static bool arg_type_is_alloc_size(enum bpf_arg_type type)
{
return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
@@ -3908,14 +3951,114 @@ static int resolve_map_arg_type(struct bpf_verifier_env *env,
return 0;
}
+struct bpf_reg_types {
+ const enum bpf_reg_type types[10];
+};
+
+static const struct bpf_reg_types map_key_value_types = {
+ .types = {
+ PTR_TO_STACK,
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_META,
+ PTR_TO_MAP_VALUE,
+ },
+};
+
+static const struct bpf_reg_types sock_types = {
+ .types = {
+ PTR_TO_SOCK_COMMON,
+ PTR_TO_SOCKET,
+ PTR_TO_TCP_SOCK,
+ PTR_TO_XDP_SOCK,
+ },
+};
+
+static const struct bpf_reg_types mem_types = {
+ .types = {
+ PTR_TO_STACK,
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_META,
+ PTR_TO_MAP_VALUE,
+ PTR_TO_MEM,
+ PTR_TO_RDONLY_BUF,
+ PTR_TO_RDWR_BUF,
+ },
+};
+
+static const struct bpf_reg_types int_ptr_types = {
+ .types = {
+ PTR_TO_STACK,
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_META,
+ PTR_TO_MAP_VALUE,
+ },
+};
+
+static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
+static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
+static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
+static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
+static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
+static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
+static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
+
+static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
+ [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
+ [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
+ [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
+ [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
+ [ARG_CONST_SIZE] = &scalar_types,
+ [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
+ [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
+ [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
+ [ARG_PTR_TO_CTX] = &context_types,
+ [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
+ [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
+ [ARG_PTR_TO_SOCKET] = &fullsock_types,
+ [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
+ [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
+ [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
+ [ARG_PTR_TO_MEM] = &mem_types,
+ [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
+ [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
+ [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
+ [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
+ [ARG_PTR_TO_INT] = &int_ptr_types,
+ [ARG_PTR_TO_LONG] = &int_ptr_types,
+};
+
+static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
+ const struct bpf_reg_types *compatible)
+{
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ enum bpf_reg_type expected, type = reg->type;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
+ expected = compatible->types[i];
+ if (expected == NOT_INIT)
+ break;
+
+ if (type == expected)
+ return 0;
+ }
+
+ verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
+ for (j = 0; j + 1 < i; j++)
+ verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
+ verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
+ return -EACCES;
+}
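/*
 * A sketch (using the names introduced above) of the table-driven flow
 * that replaces the old if/else chain: look the arg type up in
 * compatible_reg_types[] and let check_reg_type() walk the
 * NOT_INIT-terminated candidate list.
 */
static int example_arg_check(struct bpf_verifier_env *env, u32 regno,
			     enum bpf_arg_type arg_type)
{
	const struct bpf_reg_types *compatible = compatible_reg_types[arg_type];

	if (!compatible)	/* a gap in the table is an internal error */
		return -EFAULT;
	return check_reg_type(env, regno, compatible);
}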
+
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn)
{
u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
- enum bpf_reg_type expected_type, type = reg->type;
enum bpf_arg_type arg_type = fn->arg_type[arg];
+ const struct bpf_reg_types *compatible;
+ enum bpf_reg_type type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
@@ -3948,125 +4091,48 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return err;
}
- if (arg_type == ARG_PTR_TO_MAP_KEY ||
- arg_type == ARG_PTR_TO_MAP_VALUE ||
- arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
- arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
- expected_type = PTR_TO_STACK;
- if (register_is_null(reg) &&
- arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
- /* final test in check_stack_boundary() */;
- else if (!type_is_pkt_pointer(type) &&
- type != PTR_TO_MAP_VALUE &&
- type != expected_type)
- goto err_type;
- } else if (arg_type == ARG_CONST_SIZE ||
- arg_type == ARG_CONST_SIZE_OR_ZERO ||
- arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) {
- expected_type = SCALAR_VALUE;
- if (type != expected_type)
- goto err_type;
- } else if (arg_type == ARG_CONST_MAP_PTR) {
- expected_type = CONST_PTR_TO_MAP;
- if (type != expected_type)
- goto err_type;
- } else if (arg_type == ARG_PTR_TO_CTX ||
- arg_type == ARG_PTR_TO_CTX_OR_NULL) {
- expected_type = PTR_TO_CTX;
- if (!(register_is_null(reg) &&
- arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
- if (type != expected_type)
- goto err_type;
- err = check_ctx_reg(env, reg, regno);
- if (err < 0)
- return err;
+ if (register_is_null(reg) && arg_type_may_be_null(arg_type))
+ /* A NULL register has a SCALAR_VALUE type, so skip
+ * type checking.
+ */
+ goto skip_type_check;
+
+ compatible = compatible_reg_types[arg_type];
+ if (!compatible) {
+ verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
+ return -EFAULT;
+ }
+
+ err = check_reg_type(env, regno, compatible);
+ if (err)
+ return err;
+
+ if (type == PTR_TO_BTF_ID) {
+ const u32 *btf_id = fn->arg_btf_id[arg];
+
+ if (!btf_id) {
+ verbose(env, "verifier internal error: missing BTF ID\n");
+ return -EFAULT;
}
- } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
- expected_type = PTR_TO_SOCK_COMMON;
- /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
- if (!type_is_sk_pointer(type))
- goto err_type;
- if (reg->ref_obj_id) {
- if (meta->ref_obj_id) {
- verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
- regno, reg->ref_obj_id,
- meta->ref_obj_id);
- return -EFAULT;
- }
- meta->ref_obj_id = reg->ref_obj_id;
- }
- } else if (arg_type == ARG_PTR_TO_SOCKET ||
- arg_type == ARG_PTR_TO_SOCKET_OR_NULL) {
- expected_type = PTR_TO_SOCKET;
- if (!(register_is_null(reg) &&
- arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) {
- if (type != expected_type)
- goto err_type;
- }
- } else if (arg_type == ARG_PTR_TO_BTF_ID) {
- bool ids_match = false;
-
- expected_type = PTR_TO_BTF_ID;
- if (type != expected_type)
- goto err_type;
- if (!fn->check_btf_id) {
- if (reg->btf_id != meta->btf_id) {
- ids_match = btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
- meta->btf_id);
- if (!ids_match) {
- verbose(env, "Helper has type %s got %s in R%d\n",
- kernel_type_name(meta->btf_id),
- kernel_type_name(reg->btf_id), regno);
- return -EACCES;
- }
- }
- } else if (!fn->check_btf_id(reg->btf_id, arg)) {
- verbose(env, "Helper does not support %s in R%d\n",
- kernel_type_name(reg->btf_id), regno);
+ if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id, *btf_id)) {
+ verbose(env, "R%d is of type %s but %s is expected\n",
+ regno, kernel_type_name(reg->btf_id), kernel_type_name(*btf_id));
return -EACCES;
}
- if ((reg->off && !ids_match) || !tnum_is_const(reg->var_off) || reg->var_off.value) {
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
regno);
return -EACCES;
}
- } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
- if (meta->func_id == BPF_FUNC_spin_lock) {
- if (process_spin_lock(env, regno, true))
- return -EACCES;
- } else if (meta->func_id == BPF_FUNC_spin_unlock) {
- if (process_spin_lock(env, regno, false))
- return -EACCES;
- } else {
- verbose(env, "verifier internal error\n");
- return -EFAULT;
- }
- } else if (arg_type_is_mem_ptr(arg_type)) {
- expected_type = PTR_TO_STACK;
- /* One exception here. In case function allows for NULL to be
- * passed in as argument, it's a SCALAR_VALUE type. Final test
- * happens during stack boundary checking.
- */
- if (register_is_null(reg) &&
- (arg_type == ARG_PTR_TO_MEM_OR_NULL ||
- arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL))
- /* final test in check_stack_boundary() */;
- else if (!type_is_pkt_pointer(type) &&
- type != PTR_TO_MAP_VALUE &&
- type != PTR_TO_MEM &&
- type != PTR_TO_RDONLY_BUF &&
- type != PTR_TO_RDWR_BUF &&
- type != expected_type)
- goto err_type;
- meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
- } else if (arg_type_is_alloc_mem_ptr(arg_type)) {
- expected_type = PTR_TO_MEM;
- if (register_is_null(reg) &&
- arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)
- /* final test in check_stack_boundary() */;
- else if (type != expected_type)
- goto err_type;
+ } else if (type == PTR_TO_CTX) {
+ err = check_ctx_reg(env, reg, regno);
+ if (err < 0)
+ return err;
+ }
+
+skip_type_check:
+ if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
@@ -4074,15 +4140,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
- } else if (arg_type_is_int_ptr(arg_type)) {
- expected_type = PTR_TO_STACK;
- if (!type_is_pkt_pointer(type) &&
- type != PTR_TO_MAP_VALUE &&
- type != expected_type)
- goto err_type;
- } else {
- verbose(env, "unsupported arg_type %d\n", arg_type);
- return -EFAULT;
}
if (arg_type == ARG_CONST_MAP_PTR) {
@@ -4121,6 +4178,22 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
meta);
+ } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
+ if (meta->func_id == BPF_FUNC_spin_lock) {
+ if (process_spin_lock(env, regno, true))
+ return -EACCES;
+ } else if (meta->func_id == BPF_FUNC_spin_unlock) {
+ if (process_spin_lock(env, regno, false))
+ return -EACCES;
+ } else {
+ verbose(env, "verifier internal error\n");
+ return -EFAULT;
+ }
+ } else if (arg_type_is_mem_ptr(arg_type)) {
+ /* The access to this pointer is only checked when we hit the
+ * next is_mem_size argument below.
+ */
+ meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
@@ -4186,10 +4259,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
}
return err;
-err_type:
- verbose(env, "R%d type=%s expected=%s\n", regno,
- reg_type_str[type], reg_type_str[expected_type]);
- return -EACCES;
}
static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
@@ -4224,6 +4293,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
return false;
}
+static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
+{
+ return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
+}
+
static int check_map_func_compatibility(struct bpf_verifier_env *env,
struct bpf_map *map, int func_id)
{
@@ -4339,8 +4413,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
- if (env->subprog_cnt > 1) {
- verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
+ if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
+ verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;
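/*
 * A hedged example of a program shape this change newly accepts when the
 * prog is JITed on x86-64 (and still rejects elsewhere): a tail call
 * issued from inside a bpf2bpf-called subprogram. Map and section names
 * are illustrative libbpf-style BPF C.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subprog(struct xdp_md *ctx)
{
	bpf_tail_call(ctx, &jmp_table, 0);	/* tail call inside a subprog */
	return XDP_PASS;
}

SEC("xdp")
int entry(struct xdp_md *ctx)
{
	return subprog(ctx);			/* bpf2bpf call */
}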
@@ -4495,10 +4569,22 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
return count <= 1;
}
+static bool check_btf_id_ok(const struct bpf_func_proto *fn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++)
+ if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
+ return false;
+
+ return true;
+}
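/*
 * What the check above enforces, sketched with names from the
 * kernel/trace/bpf_trace.c hunk later in this patch: every
 * ARG_PTR_TO_BTF_ID slot must be paired with the BTF ID of the expected
 * type, e.g.
 *
 *	.arg1_type	= ARG_PTR_TO_BTF_ID,
 *	.arg1_btf_id	= &btf_seq_file_ids[0],
 */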
+
static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
+ check_btf_id_ok(fn) &&
check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}
@@ -4894,11 +4980,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
meta.func_id = func_id;
/* check args */
for (i = 0; i < 5; i++) {
- if (!fn->check_btf_id) {
- err = btf_resolve_helper_id(&env->log, fn, i);
- if (err > 0)
- meta.btf_id = err;
- }
err = check_func_arg(env, i, &meta, fn);
if (err)
return err;
@@ -5317,6 +5398,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case CONST_PTR_TO_MAP:
+ /* smin_val represents the known value */
+ if (known && smin_val == 0 && opcode == BPF_ADD)
+ break;
+ /* fall-through */
case PTR_TO_PACKET_END:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
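/*
 * The CONST_PTR_TO_MAP exception above presumably targets compiler
 * output of the following shape, where a known zero is added to a map
 * pointer before it is passed on (BPF assembly, illustrative):
 *
 *	r1 = map_ptr ll
 *	r1 += 0
 *	r2 = key_ptr
 *	call bpf_map_lookup_elem
 */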
@@ -7461,18 +7546,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
- if (env->subprog_cnt > 1) {
- /* when program has LD_ABS insn JITs and interpreter assume
- * that r1 == ctx == skb which is not the case for callees
- * that can have arbitrary arguments. It's problematic
- * for main prog as well since JITs would need to analyze
- * all functions in order to make proper register save/restore
- * decisions in the main prog. Hence disallow LD_ABS with calls
- */
- verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
- return -EINVAL;
- }
-
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
@@ -7883,6 +7956,23 @@ err_free:
return ret;
}
+static int check_abnormal_return(struct bpf_verifier_env *env)
+{
+ int i;
+
+ for (i = 1; i < env->subprog_cnt; i++) {
+ if (env->subprog_info[i].has_ld_abs) {
+ verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
+ return -EINVAL;
+ }
+ if (env->subprog_info[i].has_tail_call) {
+ verbose(env, "tail_call is not allowed in subprogs without BTF\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252
@@ -7891,20 +7981,24 @@ static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
+ const struct btf_type *type, *func_proto, *ret_type;
u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
struct bpf_func_info_aux *info_aux = NULL;
- const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
u32 prev_offset = 0;
+ bool scalar_return;
int ret = -ENOMEM;
nfuncs = attr->func_info_cnt;
- if (!nfuncs)
+ if (!nfuncs) {
+ if (check_abnormal_return(env))
+ return -EINVAL;
return 0;
+ }
if (nfuncs != env->subprog_cnt) {
verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
@@ -7952,25 +8046,23 @@ static int check_btf_func(struct bpf_verifier_env *env,
}
/* check insn_off */
+ ret = -EINVAL;
if (i == 0) {
if (krecord[i].insn_off) {
verbose(env,
"nonzero insn_off %u for the first func info record",
krecord[i].insn_off);
- ret = -EINVAL;
goto err_free;
}
} else if (krecord[i].insn_off <= prev_offset) {
verbose(env,
"same or smaller insn offset (%u) than previous func info record (%u)",
krecord[i].insn_off, prev_offset);
- ret = -EINVAL;
goto err_free;
}
if (env->subprog_info[i].start != krecord[i].insn_off) {
verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
- ret = -EINVAL;
goto err_free;
}
@@ -7979,10 +8071,26 @@ static int check_btf_func(struct bpf_verifier_env *env,
if (!type || !btf_type_is_func(type)) {
verbose(env, "invalid type id %d in func info",
krecord[i].type_id);
- ret = -EINVAL;
goto err_free;
}
info_aux[i].linkage = BTF_INFO_VLEN(type->info);
+
+ func_proto = btf_type_by_id(btf, type->type);
+ if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
+ /* btf_func_check() already verified it during BTF load */
+ goto err_free;
+ ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
+ scalar_return =
+ btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
+ if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
+ verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
+ goto err_free;
+ }
+ if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
+ verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
+ goto err_free;
+ }
+
prev_offset = krecord[i].insn_off;
urecord += urec_size;
}
@@ -8143,8 +8251,11 @@ static int check_btf_info(struct bpf_verifier_env *env,
struct btf *btf;
int err;
- if (!attr->func_info_cnt && !attr->line_info_cnt)
+ if (!attr->func_info_cnt && !attr->line_info_cnt) {
+ if (check_abnormal_return(env))
+ return -EINVAL;
return 0;
+ }
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))
@@ -9619,6 +9730,18 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
}
}
+static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
+{
+ struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
+ int i, sz = prog->aux->size_poke_tab;
+ struct bpf_jit_poke_descriptor *desc;
+
+ for (i = 0; i < sz; i++) {
+ desc = &tab[i];
+ desc->insn_idx += len - 1;
+ }
+}
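/*
 * Worked example of the adjustment above: patching one instruction into
 * a len == 3 replacement grows the image by len - 1 == 2 instructions,
 * so a tracked insn_idx of 10 becomes 12.
 */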
+
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
@@ -9635,6 +9758,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
if (adjust_insn_aux_data(env, new_prog, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
+ adjust_poke_descs(new_prog, len);
return new_prog;
}
@@ -10165,6 +10289,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
+ struct bpf_map *map_ptr;
struct bpf_insn *insn;
void *old_bpf_func;
int err, num_exentries;
@@ -10232,6 +10357,31 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;
+ for (j = 0; j < prog->aux->size_poke_tab; j++) {
+ u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
+ int ret;
+
+ if (!(insn_idx >= subprog_start &&
+ insn_idx <= subprog_end))
+ continue;
+
+ ret = bpf_jit_add_poke_descriptor(func[i],
+ &prog->aux->poke_tab[j]);
+ if (ret < 0) {
+ verbose(env, "adding tail call poke descriptor failed\n");
+ goto out_free;
+ }
+
+ func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
+
+ map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
+ ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
+ if (ret < 0) {
+ verbose(env, "tracking tail call prog failed\n");
+ goto out_free;
+ }
+ }
+
/* Use bpf_prog_F_tag to indicate functions in stack traces.
* Long term would need debug info to populate names
*/
@@ -10250,6 +10400,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
num_exentries++;
}
func[i]->aux->num_exentries = num_exentries;
+ func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
@@ -10257,6 +10408,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
}
cond_resched();
}
+
+ /* Untrack the main program's aux structs so that map_poke_run()
+ * does not stumble upon unfilled poke descriptors; each of the
+ * main program's poke descs was distributed across subprogs and
+ * tracked in its map, so none of them will be missed after
+ * the operation below
+ */
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ map_ptr = prog->aux->poke_tab[i].tail_call.map;
+
+ map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+ }
+
/* at this point all bpf functions were successfully JITed
* now populate all bpf_calls with correct addresses and
* run last pass of JIT
@@ -10325,9 +10489,16 @@ static int jit_subprogs(struct bpf_verifier_env *env)
bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
- for (i = 0; i < env->subprog_cnt; i++)
- if (func[i])
- bpf_jit_free(func[i]);
+ for (i = 0; i < env->subprog_cnt; i++) {
+ if (!func[i])
+ continue;
+
+ for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
+ map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
+ map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
+ }
+ bpf_jit_free(func[i]);
+ }
kfree(func);
out_undo_insn:
/* cleanup main prog to be interpreted */
@@ -10361,6 +10532,13 @@ static int fixup_call_args(struct bpf_verifier_env *env)
return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
+ /* When the JIT fails, progs with bpf2bpf calls and tail_calls
+ * have to be rejected, since the interpreter doesn't support them yet.
+ */
+ verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
+ return -EINVAL;
+ }
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
@@ -10524,8 +10702,9 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
* the program array.
*/
prog->cb_access = 1;
- env->prog->aux->stack_depth = MAX_BPF_STACK;
- env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
+ if (!allow_tail_call_in_subprogs(env))
+ prog->aux->stack_depth = MAX_BPF_STACK;
+ prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
* conditional branch in the interpreter for every normal
@@ -10545,6 +10724,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
.reason = BPF_POKE_REASON_TAIL_CALL,
.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
.tail_call.key = bpf_map_key_immediate(aux),
+ .insn_idx = i + delta,
};
ret = bpf_jit_add_poke_descriptor(prog, &desc);
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index fcae019158ca..6fdb6105e6d6 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -60,31 +60,56 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
return ret;
}
+ /* Either of the above might have changed the syscall number */
+ syscall = syscall_get_nr(current, regs);
+
if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall);
syscall_enter_audit(regs, syscall);
- /* The above might have changed the syscall number */
- return ret ? : syscall_get_nr(current, regs);
+ return ret ? : syscall;
}
-noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+static __always_inline long
+__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
unsigned long ti_work;
- enter_from_user_mode(regs);
- instrumentation_begin();
-
- local_irq_enable();
ti_work = READ_ONCE(current_thread_info()->flags);
if (ti_work & SYSCALL_ENTER_WORK)
syscall = syscall_trace_enter(regs, syscall, ti_work);
- instrumentation_end();
return syscall;
}
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ return __syscall_enter_from_user_work(regs, syscall);
+}
+
+noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = __syscall_enter_from_user_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
+
+noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ instrumentation_begin();
+ local_irq_enable();
+ instrumentation_end();
+}
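/*
 * A hedged sketch of how an arch entry path might consume the split API
 * above, assuming the existing syscall_exit_to_user_mode() counterpart;
 * the dispatch step is illustrative, not taken from any architecture.
 */
static void arch_syscall_entry(struct pt_regs *regs, long nr)
{
	nr = syscall_enter_from_user_mode(regs, nr);
	if (nr >= 0)
		; /* dispatch nr through the arch syscall table here */
	syscall_exit_to_user_mode(regs);
}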
+
/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
diff --git a/kernel/fork.c b/kernel/fork.c
index 4d32190861bd..49677d668de4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -3014,7 +3014,7 @@ int unshare_files(struct files_struct **displaced)
}
int sysctl_max_threads(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int ret;
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index bb4b680e8455..3110c77230c7 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -4,7 +4,6 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
- depends on !CC_IS_GCC || GCC_VERSION < 100000
select CONSTRUCTORS if !UML
default n
help
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 908fdf5098c3..53c67c87f141 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -19,7 +19,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if (__GNUC__ >= 7)
+#if (__GNUC__ >= 10)
+#define GCOV_COUNTERS 8
+#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 287b263c9cb9..e995541d277d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2140,6 +2140,9 @@ static void kill_kprobe(struct kprobe *p)
lockdep_assert_held(&kprobe_mutex);
+ if (WARN_ON_ONCE(kprobe_gone(p)))
+ return;
+
p->flags |= KPROBE_FLAG_GONE;
if (kprobe_aggrprobe(p)) {
/*
@@ -2159,9 +2162,10 @@ static void kill_kprobe(struct kprobe *p)
/*
* The module is going away. We should disarm the kprobe which
- * is using ftrace.
+ * is using ftrace, because the ftrace framework is still available
+ * at the MODULE_STATE_GOING notification.
*/
- if (kprobe_ftrace(p))
+ if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
disarm_kprobe_ftrace(p);
}
@@ -2419,7 +2423,10 @@ static int kprobes_module_callback(struct notifier_block *nb,
mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
- hlist_for_each_entry(p, head, hlist)
+ hlist_for_each_entry(p, head, hlist) {
+ if (kprobe_gone(p))
+ continue;
+
if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore &&
within_module_core((unsigned long)p->addr, mod))) {
@@ -2436,6 +2443,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
*/
kill_kprobe(p);
}
+ }
}
if (val == MODULE_STATE_GOING)
remove_module_kprobe_blacklist(mod);
@@ -2452,6 +2460,28 @@ static struct notifier_block kprobe_module_nb = {
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];
+void kprobe_free_init_mem(void)
+{
+ void *start = (void *)(&__init_begin);
+ void *end = (void *)(&__init_end);
+ struct hlist_head *head;
+ struct kprobe *p;
+ int i;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Kill all kprobes on initmem */
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+ hlist_for_each_entry(p, head, hlist) {
+ if (start <= (void *)p->addr && (void *)p->addr < end)
+ kill_kprobe(p);
+ }
+ }
+
+ mutex_unlock(&kprobe_mutex);
+}
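/*
 * Intent sketch: this is meant to run when initmem is torn down (the
 * call site is wired up outside this hunk, presumably next to
 * free_initmem()), so that no registered kprobe keeps pointing into
 * freed __init text.
 */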
+
static int __init init_kprobes(void)
{
int i, err = 0;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 54b74fabf40c..2facbbd146ec 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3969,13 +3969,18 @@ static int separate_irq_context(struct task_struct *curr,
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit)
{
- unsigned int new_mask = 1 << new_bit, ret = 1;
+ unsigned int old_mask, new_mask, ret = 1;
if (new_bit >= LOCK_USAGE_STATES) {
DEBUG_LOCKS_WARN_ON(1);
return 0;
}
+ if (new_bit == LOCK_USED && this->read)
+ new_bit = LOCK_USED_READ;
+
+ new_mask = 1 << new_bit;
+
/*
* If already set then do not dirty the cacheline,
* nor do any checks:
@@ -3988,13 +3993,22 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
* Make sure we didn't race:
*/
- if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
- graph_unlock();
- return 1;
- }
+ if (unlikely(hlock_class(this)->usage_mask & new_mask))
+ goto unlock;
+ old_mask = hlock_class(this)->usage_mask;
hlock_class(this)->usage_mask |= new_mask;
+ /*
+ * Save one usage_traces[] entry and map both LOCK_USED and
+ * LOCK_USED_READ onto the same entry.
+ */
+ if (new_bit == LOCK_USED || new_bit == LOCK_USED_READ) {
+ if (old_mask & (LOCKF_USED | LOCKF_USED_READ))
+ goto unlock;
+ new_bit = LOCK_USED;
+ }
+
if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
return 0;
@@ -4008,6 +4022,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return 0;
}
+unlock:
graph_unlock();
/*
@@ -4942,12 +4957,20 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
{
#ifdef CONFIG_PROVE_LOCKING
struct lock_class *class = look_up_lock_class(lock, subclass);
+ unsigned long mask = LOCKF_USED;
/* if it doesn't have a class (yet), it certainly hasn't been used yet */
if (!class)
return;
- if (!(class->usage_mask & LOCK_USED))
+ /*
+ * READ locks only conflict with USED, such that if we only ever use
+ * READ locks, there is no deadlock possible -- RCU.
+ */
+ if (!hlock->read)
+ mask |= LOCKF_USED_READ;
+
+ if (!(class->usage_mask & mask))
return;
hlock->class_idx = class - lock_classes;
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index baca699b94e9..b0be1560ed17 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -19,6 +19,7 @@ enum lock_usage_bit {
#include "lockdep_states.h"
#undef LOCKDEP_STATE
LOCK_USED,
+ LOCK_USED_READ,
LOCK_USAGE_STATES
};
@@ -40,6 +41,7 @@ enum {
#include "lockdep_states.h"
#undef LOCKDEP_STATE
__LOCKF(USED)
+ __LOCKF(USED_READ)
};
#define LOCKDEP_STATE(__STATE) LOCKF_ENABLED_##__STATE |
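/*
 * For reference, the __LOCKF() entries above expand (via the existing
 * macro in this header) to bit masks over the usage bits, e.g.
 *
 *	LOCKF_USED      = 1 << LOCK_USED,
 *	LOCKF_USED_READ = 1 << LOCK_USED_READ,
 *
 * which is what verify_lock_unused() tests against class->usage_mask.
 */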
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 8bbafe3e5203..70a32a576f3f 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(percpu_free_rwsem);
static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
- __this_cpu_inc(*sem->read_count);
+ this_cpu_inc(*sem->read_count);
/*
* Due to having preemption disabled the decrement happens on
@@ -71,7 +71,7 @@ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
if (likely(!atomic_read_acquire(&sem->block)))
return true;
- __this_cpu_dec(*sem->read_count);
+ this_cpu_dec(*sem->read_count);
/* Prod writer to re-evaluate readers_active_check() */
rcuwait_wake_up(&sem->writer);
diff --git a/kernel/padata.c b/kernel/padata.c
index 16cb894dc272..d4d3ba6e1728 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -215,12 +215,13 @@ int padata_do_parallel(struct padata_shell *ps,
padata->pd = pd;
padata->cb_cpu = *cb_cpu;
- rcu_read_unlock_bh();
-
spin_lock(&padata_works_lock);
padata->seq_nr = ++pd->seq_nr;
pw = padata_work_alloc();
spin_unlock(&padata_works_lock);
+
+ rcu_read_unlock_bh();
+
if (pw) {
padata_work_init(pw, padata_parallel_worker, padata, 0);
queue_work(pinst->parallel_wq, &pw->pw_work);
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 835e2df8590a..05d3e1375e4c 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -590,7 +590,7 @@ void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
}
#else /* #ifdef CONFIG_TASKS_RCU */
-static void show_rcu_tasks_classic_gp_kthread(void) { }
+static inline void show_rcu_tasks_classic_gp_kthread(void) { }
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 3ee59ce0a323..676d4af62103 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1109,13 +1109,18 @@ out:
}
#ifdef CONFIG_SECCOMP_FILTER
-static int seccomp_notify_release(struct inode *inode, struct file *file)
+static void seccomp_notify_free(struct seccomp_filter *filter)
+{
+ kfree(filter->notif);
+ filter->notif = NULL;
+}
+
+static void seccomp_notify_detach(struct seccomp_filter *filter)
{
- struct seccomp_filter *filter = file->private_data;
struct seccomp_knotif *knotif;
if (!filter)
- return 0;
+ return;
mutex_lock(&filter->notify_lock);
@@ -1139,9 +1144,15 @@ static int seccomp_notify_release(struct inode *inode, struct file *file)
complete(&knotif->ready);
}
- kfree(filter->notif);
- filter->notif = NULL;
+ seccomp_notify_free(filter);
mutex_unlock(&filter->notify_lock);
+}
+
+static int seccomp_notify_release(struct inode *inode, struct file *file)
+{
+ struct seccomp_filter *filter = file->private_data;
+
+ seccomp_notify_detach(filter);
__put_seccomp_filter(filter);
return 0;
}
@@ -1488,7 +1499,7 @@ static struct file *init_listener(struct seccomp_filter *filter)
out_notif:
if (IS_ERR(ret))
- kfree(filter->notif);
+ seccomp_notify_free(filter);
out:
return ret;
}
@@ -1581,6 +1592,7 @@ out_put_fd:
listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
+ seccomp_notify_detach(prepared);
} else {
fd_install(listener, listener_f);
ret = listener;
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index a8fc9ae1d03d..ce161a8e8d97 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -20,7 +20,7 @@
static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
int stack_erasing_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = 0;
int state = !static_branch_unlikely(&stack_erasing_bypass);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 09e70ee2332e..afad085960b8 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2912,6 +2912,14 @@ static struct ctl_table vm_table[] = {
.proc_handler = percpu_pagelist_fraction_sysctl_handler,
.extra1 = SYSCTL_ZERO,
},
+ {
+ .procname = "page_lock_unfairness",
+ .data = &sysctl_page_lock_unfairness,
+ .maxlen = sizeof(sysctl_page_lock_unfairness),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
#ifdef CONFIG_MMU
{
.procname = "max_map_count",
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index b2a5380eb187..36508f46a8db 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -743,19 +743,18 @@ out:
return err;
}
-BTF_ID_LIST(bpf_seq_printf_btf_ids)
-BTF_ID(struct, seq_file)
+BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
static const struct bpf_func_proto bpf_seq_printf_proto = {
.func = bpf_seq_printf,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_seq_printf_btf_ids,
};
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
@@ -763,17 +762,14 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
return seq_write(m, data, len) ? -EOVERFLOW : 0;
}
-BTF_ID_LIST(bpf_seq_write_btf_ids)
-BTF_ID(struct, seq_file)
-
static const struct bpf_func_proto bpf_seq_write_proto = {
.func = bpf_seq_write,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_seq_write_btf_ids,
};
static __always_inline int
@@ -1118,6 +1114,14 @@ BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
}
BTF_SET_START(btf_allowlist_d_path)
+#ifdef CONFIG_SECURITY
+BTF_ID(func, security_file_permission)
+BTF_ID(func, security_inode_getattr)
+BTF_ID(func, security_file_open)
+#endif
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, security_path_truncate)
+#endif
BTF_ID(func, vfs_truncate)
BTF_ID(func, vfs_fallocate)
BTF_ID(func, dentry_open)
@@ -1130,17 +1134,16 @@ static bool bpf_d_path_allowed(const struct bpf_prog *prog)
return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
}
-BTF_ID_LIST(bpf_d_path_btf_ids)
-BTF_ID(struct, path)
+BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
static const struct bpf_func_proto bpf_d_path_proto = {
.func = bpf_d_path,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_d_path_btf_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_d_path_btf_ids,
.allowed = bpf_d_path_allowed,
};
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 275441254bb5..603255f5f085 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2782,6 +2782,7 @@ static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
{
lockdep_assert_held(&ftrace_lock);
list_del_rcu(&ops->list);
+ synchronize_rcu();
}
/*
@@ -2862,6 +2863,8 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
__unregister_ftrace_function(ops);
ftrace_start_up--;
ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+ if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+ ftrace_trampoline_free(ops);
return ret;
}
@@ -7531,8 +7534,7 @@ static bool is_permanent_ops_registered(void)
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret = -ENODEV;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f40d850ebabc..2a7c26345e83 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3782,14 +3782,14 @@ unsigned long trace_total_entries(struct trace_array *tr)
static void print_lat_help_header(struct seq_file *m)
{
- seq_puts(m, "# _------=> CPU# \n"
- "# / _-----=> irqs-off \n"
- "# | / _----=> need-resched \n"
- "# || / _---=> hardirq/softirq \n"
- "# ||| / _--=> preempt-depth \n"
- "# |||| / delay \n"
- "# cmd pid ||||| time | caller \n"
- "# \\ / ||||| \\ | / \n");
+ seq_puts(m, "# _------=> CPU# \n"
+ "# / _-----=> irqs-off \n"
+ "# | / _----=> need-resched \n"
+ "# || / _---=> hardirq/softirq \n"
+ "# ||| / _--=> preempt-depth \n"
+ "# |||| / delay \n"
+ "# cmd pid ||||| time | caller \n"
+ "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
@@ -3810,26 +3810,26 @@ static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
print_event_info(buf, m);
- seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
- seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
+ seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
+ seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
}
static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
unsigned int flags)
{
bool tgid = flags & TRACE_ITER_RECORD_TGID;
- const char *space = " ";
- int prec = tgid ? 10 : 2;
+ const char *space = " ";
+ int prec = tgid ? 12 : 2;
print_event_info(buf, m);
- seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
- seq_printf(m, "# %.*s||| / delay\n", prec, space);
- seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
+ seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
+ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+ seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+ seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+ seq_printf(m, "# %.*s||| / delay\n", prec, space);
+ seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+ seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
}
void
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0b933546142e..1b2ef6490229 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -3865,7 +3865,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)
s = kstrdup(field_str, GFP_KERNEL);
if (!s) {
- kfree(hist_data->attrs->var_defs.name[n_vars]);
ret = -ENOMEM;
goto free;
}
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 4d1893564912..000e9dc224c6 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -497,7 +497,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
trace_find_cmdline(entry->pid, comm);
- trace_seq_printf(s, "%8.8s-%-5d %3d",
+ trace_seq_printf(s, "%8.8s-%-7d %3d",
comm, entry->pid, cpu);
return trace_print_lat_fmt(s, entry);
@@ -588,15 +588,15 @@ int trace_print_context(struct trace_iterator *iter)
trace_find_cmdline(entry->pid, comm);
- trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+ trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid);
if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
unsigned int tgid = trace_find_tgid(entry->pid);
if (!tgid)
- trace_seq_printf(s, "(-----) ");
+ trace_seq_printf(s, "(-------) ");
else
- trace_seq_printf(s, "(%5d) ", tgid);
+ trace_seq_printf(s, "(%7d) ", tgid);
}
trace_seq_printf(s, "[%03d] ", iter->cpu);
@@ -636,7 +636,7 @@ int trace_print_lat_context(struct trace_iterator *iter)
trace_find_cmdline(entry->pid, comm);
trace_seq_printf(
- s, "%16s %5d %3d %d %08x %08lx ",
+ s, "%16s %7d %3d %d %08x %08lx ",
comm, entry->pid, iter->cpu, entry->flags,
entry->preempt_count, iter->idx);
} else {
@@ -917,7 +917,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
S = task_index_to_char(field->prev_state);
trace_find_cmdline(field->next_pid, comm);
trace_seq_printf(&iter->seq,
- " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
+ " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n",
field->prev_pid,
field->prev_prio,
S, delim,
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index f10073e62603..f4938040c228 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -102,14 +102,14 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
if (!in_nmi())
trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
}
-
- lockdep_hardirqs_off(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e068c3c7189a..0c781f912f9f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -520,8 +520,8 @@ config DEBUG_FS_ALLOW_NONE
endchoice
source "lib/Kconfig.kgdb"
-
source "lib/Kconfig.ubsan"
+source "lib/Kconfig.kcsan"
endmenu
@@ -1620,8 +1620,6 @@ config PROVIDE_OHCI1394_DMA_INIT
source "samples/Kconfig"
-source "lib/Kconfig.kcsan"
-
config ARCH_HAS_DEVMEM_IS_ALLOWED
bool
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 1d012e597cc3..2d4dfd44b0fa 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -353,8 +353,7 @@ static int check_set(const char **dest, char *src, char *name)
/*
* Parse words[] as a ddebug query specification, which is a series
- * of (keyword, value) pairs or combined keyword=value terms,
- * chosen from these possibilities:
+ * of (keyword, value) pairs chosen from these possibilities:
*
* func <function-name>
* file <full-pathname>
@@ -373,34 +372,22 @@ static int ddebug_parse_query(char *words[], int nwords,
unsigned int i;
int rc = 0;
char *fline;
- char *keyword, *arg;
+
+ /* check we have an even number of words */
+ if (nwords % 2 != 0) {
+ pr_err("expecting pairs of match-spec <value>\n");
+ return -EINVAL;
+ }
if (modname)
/* support $modname.dyndbg=<multiple queries> */
query->module = modname;
- for (i = 0; i < nwords; i++) {
- /* accept keyword=arg */
- vpr_info("%d w:%s\n", i, words[i]);
-
- keyword = words[i];
- arg = strchr(keyword, '=');
- if (arg) {
- *arg++ = '\0';
- } else {
- i++; /* next word is arg */
- if (!(i < nwords)) {
- pr_err("missing arg to keyword: %s\n", keyword);
- return -EINVAL;
- }
- arg = words[i];
- }
- vpr_info("%d key:%s arg:%s\n", i, keyword, arg);
-
- if (!strcmp(keyword, "func")) {
- rc = check_set(&query->function, arg, "func");
- } else if (!strcmp(keyword, "file")) {
- if (check_set(&query->filename, arg, "file"))
+ for (i = 0; i < nwords; i += 2) {
+ if (!strcmp(words[i], "func")) {
+ rc = check_set(&query->function, words[i+1], "func");
+ } else if (!strcmp(words[i], "file")) {
+ if (check_set(&query->filename, words[i+1], "file"))
return -EINVAL;
/* tail :$info is function or line-range */
@@ -416,18 +403,18 @@ static int ddebug_parse_query(char *words[], int nwords,
if (parse_linerange(query, fline))
return -EINVAL;
}
- } else if (!strcmp(keyword, "module")) {
- rc = check_set(&query->module, arg, "module");
- } else if (!strcmp(keyword, "format")) {
- string_unescape_inplace(arg, UNESCAPE_SPACE |
+ } else if (!strcmp(words[i], "module")) {
+ rc = check_set(&query->module, words[i+1], "module");
+ } else if (!strcmp(words[i], "format")) {
+ string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
UNESCAPE_OCTAL |
UNESCAPE_SPECIAL);
- rc = check_set(&query->format, arg, "format");
- } else if (!strcmp(keyword, "line")) {
- if (parse_linerange(query, arg))
+ rc = check_set(&query->format, words[i+1], "format");
+ } else if (!strcmp(words[i], "line")) {
+ if (parse_linerange(query, words[i+1]))
return -EINVAL;
} else {
- pr_err("unknown keyword \"%s\"\n", keyword);
+ pr_err("unknown keyword \"%s\"\n", words[i]);
return -EINVAL;
}
if (rc)
@@ -525,7 +512,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
last error or number of matching callsites. Module name is either
in param (for boot arg) or perhaps in query string.
*/
-int ddebug_exec_queries(char *query, const char *modname)
+static int ddebug_exec_queries(char *query, const char *modname)
{
char *split;
int i, errs = 0, exitcode = 0, rc, nfound = 0;
@@ -557,7 +544,30 @@ int ddebug_exec_queries(char *query, const char *modname)
return exitcode;
return nfound;
}
-EXPORT_SYMBOL_GPL(ddebug_exec_queries);
+
+/**
+ * dynamic_debug_exec_queries - select and change dynamic-debug prints
+ * @query: query-string described in admin-guide/dynamic-debug-howto
+ * @modname: string containing module name, usually &module.mod_name
+ *
+ * This uses the >/proc/dynamic_debug/control interface, allowing module
+ * authors to modify their dynamic-debug callsites. The modname is
+ * canonically struct module.mod_name, but can also be null or a
+ * module-wildcard, for example: "drm*".
+ */
+int dynamic_debug_exec_queries(const char *query, const char *modname)
+{
+ int rc;
+ char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
+
+ if (!qry)
+ return -ENOMEM;
+
+ rc = ddebug_exec_queries(qry, modname);
+ kfree(qry);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dynamic_debug_exec_queries);
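/*
 * A usage sketch for the new export, based on the kernel-doc above; the
 * module name, the query string, and the assumption that the declaration
 * is available via <linux/dynamic_debug.h> are illustrative.
 */
#include <linux/module.h>
#include <linux/dynamic_debug.h>

static int __init mymod_init(void)
{
	int nfound = dynamic_debug_exec_queries("func *_probe +p", "mymod");

	if (nfound < 0)
		pr_warn("dyndbg query failed: %d\n", nfound);
	return 0;
}
module_init(mymod_init);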
#define PREFIX_SIZE 64
@@ -947,7 +957,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
list_add(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
- v2pr_info("%u debug prints in module %s\n", n, dt->mod_name);
+ v2pr_info("%3u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 3afb939f2a1c..ea53b30cf483 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -604,9 +604,6 @@ static void __kobject_del(struct kobject *kobj)
struct kernfs_node *sd;
const struct kobj_type *ktype;
- if (!kobj)
- return;
-
sd = kobj->sd;
ktype = get_ktype(kobj);
@@ -637,8 +634,12 @@ static void __kobject_del(struct kobject *kobj)
*/
void kobject_del(struct kobject *kobj)
{
- struct kobject *parent = kobj->parent;
+ struct kobject *parent;
+
+ if (!kobj)
+ return;
+ parent = kobj->parent;
__kobject_del(kobj);
kobject_put(parent);
}
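/*
 * A sketch of the calling pattern the NULL check above enables: error
 * paths may now call kobject_del() unconditionally, mirroring kfree().
 */
static void example_teardown(struct kobject *kobj)
{
	kobject_del(kobj);	/* a no-op when kobj is NULL after this patch */
	kobject_put(kobj);	/* kobject_put() already tolerated NULL */
}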
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 9fee2b93a8d1..06c955057756 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -26,6 +26,8 @@
#include <linux/vmalloc.h>
#include <linux/efi_embedded_fw.h>
+MODULE_IMPORT_NS(TEST_FIRMWARE);
+
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
#define TEST_FIRMWARE_BUF_SIZE SZ_1K
@@ -489,6 +491,9 @@ out:
static DEVICE_ATTR_WO(trigger_request);
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+extern struct list_head efi_embedded_fw_list;
+extern bool efi_embedded_fw_checked;
+
static ssize_t trigger_request_platform_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -501,6 +506,7 @@ static ssize_t trigger_request_platform_store(struct device *dev,
};
struct efi_embedded_fw efi_embedded_fw;
const struct firmware *firmware = NULL;
+ bool saved_efi_embedded_fw_checked;
char *name;
int rc;
@@ -513,6 +519,8 @@ static ssize_t trigger_request_platform_store(struct device *dev,
efi_embedded_fw.data = (void *)test_data;
efi_embedded_fw.length = sizeof(test_data);
list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
+ saved_efi_embedded_fw_checked = efi_embedded_fw_checked;
+ efi_embedded_fw_checked = true;
pr_info("loading '%s'\n", name);
rc = firmware_request_platform(&firmware, name, dev);
@@ -530,6 +538,7 @@ static ssize_t trigger_request_platform_store(struct device *dev,
rc = count;
out:
+ efi_embedded_fw_checked = saved_efi_embedded_fw_checked;
release_firmware(firmware);
list_del(&efi_embedded_fw.list);
kfree(name);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c5a6fef7b45d..76c607ee6db5 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -434,7 +434,7 @@ static int __init test_rhltable(unsigned int entries)
} else {
if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
err, -ENOENT))
- continue;
+ continue;
}
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 054d93a86f8a..8c8f2deee4e3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -988,9 +988,43 @@ void __init pagecache_init(void)
page_writeback_init();
}
+/*
+ * The page wait code treats the "wait->flags" somewhat unusually, because
+ * we have multiple different kinds of waits, not just the usual "exclusive"
+ * one.
+ *
+ * We have:
+ *
+ * (a) no special bits set:
+ *
+ * We're just waiting for the bit to be released, and when a waker
+ * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
+ * and remove it from the wait queue.
+ *
+ * Simple and straightforward.
+ *
+ * (b) WQ_FLAG_EXCLUSIVE:
+ *
+ * The waiter is waiting to get the lock, and only one waiter should
+ * be woken up to avoid any thundering herd behavior. We'll set the
+ * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
+ *
+ * This is the traditional exclusive wait.
+ *
+ * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
+ *
+ * The waiter is waiting to get the bit, and additionally wants the
+ * lock to be transferred to it for fair lock behavior. If the lock
+ * cannot be taken, we stop walking the wait queue without waking
+ * the waiter.
+ *
+ * This is the "fair lock handoff" case, and in addition to setting
+ * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
+ * that it now has the lock.
+ */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
- int ret;
+ unsigned int flags;
struct wait_page_key *key = arg;
struct wait_page_queue *wait_page
= container_of(wait, struct wait_page_queue, wait);
@@ -999,35 +1033,44 @@ static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync,
return 0;
/*
- * If it's an exclusive wait, we get the bit for it, and
- * stop walking if we can't.
- *
- * If it's a non-exclusive wait, then the fact that this
- * wake function was called means that the bit already
- * was cleared, and we don't care if somebody then
- * re-took it.
+ * If it's a lock handoff wait, we get the bit for it, and
+ * stop walking (and do not wake it up) if we can't.
*/
- ret = 0;
- if (wait->flags & WQ_FLAG_EXCLUSIVE) {
- if (test_and_set_bit(key->bit_nr, &key->page->flags))
+ flags = wait->flags;
+ if (flags & WQ_FLAG_EXCLUSIVE) {
+ if (test_bit(key->bit_nr, &key->page->flags))
return -1;
- ret = 1;
+ if (flags & WQ_FLAG_CUSTOM) {
+ if (test_and_set_bit(key->bit_nr, &key->page->flags))
+ return -1;
+ flags |= WQ_FLAG_DONE;
+ }
}
- wait->flags |= WQ_FLAG_WOKEN;
+ /*
+ * We are holding the wait-queue lock, but the waiter that
+ * is waiting for this will be checking the flags without
+ * any locking.
+ *
+ * So update the flags atomically, and wake up the waiter
+ * afterwards to avoid any races. This store-release pairs
+ * with the load-acquire in wait_on_page_bit_common().
+ */
+ smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
wake_up_state(wait->private, mode);
/*
* Ok, we have successfully done what we're waiting for,
* and we can unconditionally remove the wait entry.
*
- * Note that this has to be the absolute last thing we do,
- * since after list_del_init(&wait->entry) the wait entry
+ * Note that this pairs with the "finish_wait()" in the
+ * waiter, and has to be the absolute last thing we do.
+ * After this list_del_init(&wait->entry) the wait entry
* might be de-allocated and the process might even have
* exited.
*/
list_del_init_careful(&wait->entry);
- return ret;
+ return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}
static void wake_up_page_bit(struct page *page, int bit_nr)
@@ -1107,8 +1150,8 @@ enum behavior {
};
/*
- * Attempt to check (or get) the page bit, and mark the
- * waiter woken if successful.
+ * Attempt to check (or get) the page bit, and mark us done
+ * if successful.
*/
static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
struct wait_queue_entry *wait)
@@ -1119,13 +1162,17 @@ static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
} else if (test_bit(bit_nr, &page->flags))
return false;
- wait->flags |= WQ_FLAG_WOKEN;
+ wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
return true;
}
+/* How many times do we accept lock stealing from under a waiter? */
+int sysctl_page_lock_unfairness = 5;
+
static inline int wait_on_page_bit_common(wait_queue_head_t *q,
struct page *page, int bit_nr, int state, enum behavior behavior)
{
+ int unfairness = sysctl_page_lock_unfairness;
struct wait_page_queue wait_page;
wait_queue_entry_t *wait = &wait_page.wait;
bool thrashing = false;
@@ -1143,11 +1190,18 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
}
init_wait(wait);
- wait->flags = behavior == EXCLUSIVE ? WQ_FLAG_EXCLUSIVE : 0;
wait->func = wake_page_function;
wait_page.page = page;
wait_page.bit_nr = bit_nr;
+repeat:
+ wait->flags = 0;
+ if (behavior == EXCLUSIVE) {
+ wait->flags = WQ_FLAG_EXCLUSIVE;
+ if (--unfairness < 0)
+ wait->flags |= WQ_FLAG_CUSTOM;
+ }
+
/*
* Do one last check whether we can get the
* page bit synchronously.
@@ -1170,27 +1224,63 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
/*
* From now on, all the logic will be based on
- * the WQ_FLAG_WOKEN flag, and the and the page
- * bit testing (and setting) will be - or has
- * already been - done by the wake function.
+ * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flags, to
+ * see whether the page bit testing has already
+ * been done by the wake function.
*
* We can drop our reference to the page.
*/
if (behavior == DROP)
put_page(page);
+ /*
+ * Note that until the "finish_wait()", or until
+ * we see the WQ_FLAG_WOKEN flag, we need to
+ * be very careful with the 'wait->flags', because
+ * we may race with a waker that sets them.
+ */
for (;;) {
+ unsigned int flags;
+
set_current_state(state);
- if (signal_pending_state(state, current))
+ /* Loop until we've been woken or interrupted */
+ flags = smp_load_acquire(&wait->flags);
+ if (!(flags & WQ_FLAG_WOKEN)) {
+ if (signal_pending_state(state, current))
+ break;
+
+ io_schedule();
+ continue;
+ }
+
+ /* If we were non-exclusive, we're done */
+ if (behavior != EXCLUSIVE)
break;
- if (wait->flags & WQ_FLAG_WOKEN)
+ /* If the waker got the lock for us, we're done */
+ if (flags & WQ_FLAG_DONE)
break;
- io_schedule();
+ /*
+ * Otherwise, if we're getting the lock, we need to
+ * try to get it ourselves.
+ *
+ * And if that fails, we'll have to retry this all.
+ */
+ if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
+ goto repeat;
+
+ wait->flags |= WQ_FLAG_DONE;
+ break;
}
+ /*
+ * If a signal happened, this 'finish_wait()' may remove the last
+ * waiter from the wait-queues, but the PageWaiters bit will remain
+ * set. That's ok. The next wakeup will take care of it, and trying
+ * to do it here would be difficult and prone to races.
+ */
finish_wait(q, wait);
if (thrashing) {
@@ -1200,12 +1290,20 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
}
/*
- * A signal could leave PageWaiters set. Clearing it here if
- * !waitqueue_active would be possible (by open-coding finish_wait),
- * but still fail to catch it in the case of wait hash collision. We
- * already can fail to clear wait hash collision cases, so don't
- * bother with signals either.
+ * NOTE! The wait->flags weren't stable until we've done the
+ * 'finish_wait()', and we could have exited the loop above due
+ * to a signal, and had a wakeup event happen after the signal
+ * test but before the 'finish_wait()'.
+ *
+ * So only after the finish_wait() can we reliably determine
+ * if we got woken up or not, so we can now figure out the final
+ * return value based on that state without races.
+ *
+ * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
+ * waiter, but an exclusive one requires WQ_FLAG_DONE.
*/
+ if (behavior == EXCLUSIVE)
+ return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}
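
The filemap.c hunk above replaces the plain flag write with a store-release that pairs with a load-acquire in the waiter, and splits "woken" from "done" so an exclusive waiter knows whether the waker already took the page lock on its behalf. A minimal userspace sketch of that pairing follows (C11 atomics; the struct and flag values are illustrative stand-ins for the kernel's wait entry, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_DONE  0x20      /* illustrative values only */

struct wait_entry { _Atomic unsigned int flags; };

/* Waker side: publish the result with release semantics so everything
 * written before this store is visible to the acquiring reader. */
static void wake(struct wait_entry *w, unsigned int extra)
{
        atomic_store_explicit(&w->flags, extra | WQ_FLAG_WOKEN,
                              memory_order_release);
}

/* Waiter side: -1 = keep sleeping, 0 = lock handed over, 1 = retry. */
static int poll_wait(struct wait_entry *w)
{
        unsigned int f = atomic_load_explicit(&w->flags,
                                              memory_order_acquire);
        if (!(f & WQ_FLAG_WOKEN))
                return -1;
        return (f & WQ_FLAG_DONE) ? 0 : 1;
}

int main(void)
{
        struct wait_entry w = { 0 };
        printf("%d\n", poll_wait(&w));  /* -1: still waiting */
        wake(&w, WQ_FLAG_DONE);
        printf("%d\n", poll_wait(&w));  /* 0: waker took the lock for us */
        return 0;
}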
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7ff29cc3d55c..faadc449cca5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2022,7 +2022,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
put_page(page);
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (is_huge_zero_pmd(*pmd)) {
+ } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2116,30 +2116,34 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, addr, pte, entry);
- atomic_inc(&page[i]._mapcount);
- pte_unmap(pte);
- }
-
- /*
- * Set PG_double_map before dropping compound_mapcount to avoid
- * false-negative page_mapped().
- */
- if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
- for (i = 0; i < HPAGE_PMD_NR; i++)
+ if (!pmd_migration)
atomic_inc(&page[i]._mapcount);
+ pte_unmap(pte);
}
- lock_page_memcg(page);
- if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
- /* Last compound_mapcount is gone. */
- __dec_lruvec_page_state(page, NR_ANON_THPS);
- if (TestClearPageDoubleMap(page)) {
- /* No need in mapcount reference anymore */
+ if (!pmd_migration) {
+ /*
+ * Set PG_double_map before dropping compound_mapcount to avoid
+ * false-negative page_mapped().
+ */
+ if (compound_mapcount(page) > 1 &&
+ !TestSetPageDoubleMap(page)) {
for (i = 0; i < HPAGE_PMD_NR; i++)
- atomic_dec(&page[i]._mapcount);
+ atomic_inc(&page[i]._mapcount);
+ }
+
+ lock_page_memcg(page);
+ if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
+ /* Last compound_mapcount is gone. */
+ __dec_lruvec_page_state(page, NR_ANON_THPS);
+ if (TestClearPageDoubleMap(page)) {
+ /* No need in mapcount reference anymore */
+ for (i = 0; i < HPAGE_PMD_NR; i++)
+ atomic_dec(&page[i]._mapcount);
+ }
}
+ unlock_page_memcg(page);
}
- unlock_page_memcg(page);
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a301c2d672bf..67fc6383995b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1250,21 +1250,32 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
unsigned long nr_pages = 1UL << huge_page_order(h);
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
#ifdef CONFIG_CMA
{
struct page *page;
int node;
- for_each_node_mask(node, *nodemask) {
- if (!hugetlb_cma[node])
- continue;
-
- page = cma_alloc(hugetlb_cma[node], nr_pages,
- huge_page_order(h), true);
+ if (hugetlb_cma[nid]) {
+ page = cma_alloc(hugetlb_cma[nid], nr_pages,
+ huge_page_order(h), true);
if (page)
return page;
}
+
+ if (!(gfp_mask & __GFP_THISNODE)) {
+ for_each_node_mask(node, *nodemask) {
+ if (node == nid || !hugetlb_cma[node])
+ continue;
+
+ page = cma_alloc(hugetlb_cma[node], nr_pages,
+ huge_page_order(h), true);
+ if (page)
+ return page;
+ }
+ }
}
#endif
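
The alloc_gigantic_page() change above prefers the CMA pool of the requested (or local) node and only then walks the nodemask, skipping the fallback entirely under __GFP_THISNODE. Here is a userspace sketch of that allocation order; the pools and flag are hypothetical stand-ins for hugetlb_cma[] and the gfp flag:

#include <stdio.h>

#define MAX_NODES 4
#define THISNODE  0x1            /* stands in for __GFP_THISNODE */

/* pool[n] > 0 means node n's (hypothetical) CMA area has pages left */
static int pool[MAX_NODES] = { 0, 3, 0, 1 };

static int try_node(int node)
{
        if (pool[node] > 0) {
                pool[node]--;
                return node;     /* "allocated" from this node */
        }
        return -1;
}

static int alloc_preferred(int nid, unsigned int flags)
{
        int node, got = try_node(nid);  /* preferred node first */

        if (got >= 0 || (flags & THISNODE))
                return got;             /* no fallback if pinned */
        for (node = 0; node < MAX_NODES; node++)
                if (node != nid && (got = try_node(node)) >= 0)
                        break;
        return got;
}

int main(void)
{
        printf("pinned to node 0: %d\n", alloc_preferred(0, THISNODE)); /* -1 */
        printf("prefer node 0:    %d\n", alloc_preferred(0, 0));        /* 1 */
        return 0;
}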
@@ -3454,6 +3465,22 @@ static unsigned int allowed_mems_nr(struct hstate *h)
}
#ifdef CONFIG_SYSCTL
+static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *length,
+ loff_t *ppos, unsigned long *out)
+{
+ struct ctl_table dup_table;
+
+ /*
+ * In order to avoid races with __do_proc_doulongvec_minmax(), we
+ * duplicate @table and alter the duplicate instead.
+ */
+ dup_table = *table;
+ dup_table.data = out;
+
+ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
+}
+
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
@@ -3465,9 +3492,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!hugepages_supported())
return -EOPNOTSUPP;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
@@ -3510,9 +3536,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
if (write && hstate_is_gigantic(h))
return -EINVAL;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
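
The hugetlb sysctl fix above stops writing through the shared ctl_table (which races when two writers rewrite ->data concurrently) and instead parses through a stack-local copy. A userspace sketch of the same idea, with an illustrative struct and parser standing in for ctl_table and proc_doulongvec_minmax():

#include <stdio.h>
#include <string.h>

struct ctl_entry {
        const char *name;
        void *data;              /* shared -> racy if two threads rewrite it */
        size_t maxlen;
};

static int parse_ulong(struct ctl_entry *e, const char *buf)
{
        return sscanf(buf, "%lu", (unsigned long *)e->data) == 1 ? 0 : -1;
}

static int handle_write(const struct ctl_entry *shared, const char *buf,
                        unsigned long *out)
{
        struct ctl_entry dup = *shared; /* private copy, no shared writes */

        dup.data = out;
        dup.maxlen = sizeof(*out);
        return parse_ulong(&dup, buf);
}

int main(void)
{
        struct ctl_entry shared = { "nr_hugepages", NULL, 0 };
        unsigned long tmp;

        if (!handle_write(&shared, "128", &tmp))
                printf("parsed %lu without touching the shared entry\n", tmp);
        return 0;
}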
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e749e568e1ea..cfa0dba5fd3b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1709,7 +1709,7 @@ static void collapse_file(struct mm_struct *mm,
xas_unlock_irq(&xas);
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
- PAGE_SIZE);
+ end - index);
/* drain pagevecs to help isolate_lru_page() */
lru_add_drain();
page = find_lock_page(mapping, index);
diff --git a/mm/ksm.c b/mm/ksm.c
index 235f55d01541..9afccc36dbd2 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2586,6 +2586,10 @@ struct page *ksm_might_need_to_copy(struct page *page,
return page; /* let do_swap_page report the error */
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
+ put_page(new_page);
+ new_page = NULL;
+ }
if (new_page) {
copy_user_highpage(new_page, page, address, vma);
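
The ksm fix applies the allocate-then-charge pattern: the new page is kept only if its memcg charge succeeds, otherwise the reference is dropped and NULL signals a plain allocation failure. A minimal sketch with hypothetical helpers (not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct page { int charged; };    /* illustrative stand-in */

static struct page *alloc_page_sketch(void)
{
        return calloc(1, sizeof(struct page));
}

static int charge_sketch(struct page *p, int over_limit)
{
        if (over_limit)
                return -1;       /* charge refused */
        p->charged = 1;
        return 0;
}

static void put_page_sketch(struct page *p)
{
        free(p);
}

/* Keep the page only if the charge succeeds; otherwise drop it so the
 * caller sees a clean allocation failure, never an uncharged page. */
static struct page *alloc_charged(int over_limit)
{
        struct page *p = alloc_page_sketch();

        if (p && charge_sketch(p, over_limit)) {
                put_page_sketch(p);
                p = NULL;
        }
        return p;
}

int main(void)
{
        struct page *p = alloc_charged(0);

        printf("within limit: %s\n", p ? "page" : "NULL");
        if (p)
                put_page_sketch(p);
        printf("over limit:   %s\n", alloc_charged(1) ? "page" : "NULL");
        return 0;
}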
diff --git a/mm/madvise.c b/mm/madvise.c
index dd1d43cf026d..d4aa5f776543 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -289,9 +289,9 @@ static long madvise_willneed(struct vm_area_struct *vma,
*/
*prev = NULL; /* tell sys_madvise we drop mmap_lock */
get_file(file);
- mmap_read_unlock(current->mm);
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ mmap_read_unlock(current->mm);
vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
fput(file);
mmap_read_lock(current->mm);
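
The madvise_willneed() fix moves the offset computation before mmap_read_unlock(): once the lock is dropped, the vma may be freed or resized, so every field needed later must be copied out first. A userspace sketch of the snapshot-then-unlock pattern (the pthread lock and struct names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct vma_s {                   /* hypothetical stand-in for a vma */
        long vm_start;
        long vm_pgoff;
};

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

static long willneed(struct vma_s *vma, long start)
{
        long offset;

        pthread_mutex_lock(&map_lock);
        /* Snapshot everything derived from the vma while it is stable. */
        offset = (start - vma->vm_start) + (vma->vm_pgoff << 12);
        pthread_mutex_unlock(&map_lock);
        /* From here on, 'vma' must not be dereferenced again: another
         * thread may unmap it now that the lock is dropped. */
        return offset;           /* would be passed to vfs_fadvise() */
}

int main(void)
{
        struct vma_s vma = { .vm_start = 0x1000, .vm_pgoff = 2 };

        printf("offset = %ld\n", willneed(&vma, 0x3000));
        return 0;
}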
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b807952b4d43..cfa6cbad21d5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6774,6 +6774,9 @@ static void uncharge_batch(const struct uncharge_gather *ug)
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
local_irq_restore(flags);
+
+ /* drop reference from uncharge_page */
+ css_put(&ug->memcg->css);
}
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
@@ -6797,6 +6800,9 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
uncharge_gather_clear(ug);
}
ug->memcg = page->mem_cgroup;
+
+ /* pairs with css_put in uncharge_batch */
+ css_get(&ug->memcg->css);
}
nr_pages = compound_nr(page);
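
The memcontrol.c hunks pair a css_get() when a batch adopts a memcg with a css_put() when the batch is flushed, so the group cannot be freed while uncharges for it are still queued. A sketch of that hold-across-batch pattern, with a plain counter standing in for the css refcount (all names illustrative):

#include <assert.h>
#include <stdio.h>

struct group { int refs; };

struct batch {
        struct group *memcg;     /* group the queued work belongs to */
        int nr_pages;
};

static void get_ref(struct group *g) { g->refs++; }
static void put_ref(struct group *g) { g->refs--; }

static void batch_flush(struct batch *b)
{
        if (!b->memcg)
                return;
        /* ... apply b->nr_pages worth of uncharge to b->memcg ... */
        put_ref(b->memcg);       /* drop the reference taken on adopt */
        b->memcg = NULL;
        b->nr_pages = 0;
}

static void batch_add(struct batch *b, struct group *g, int pages)
{
        if (b->memcg != g) {
                batch_flush(b);  /* different group: flush first */
                b->memcg = g;
                get_ref(g);      /* keep the group alive while queued */
        }
        b->nr_pages += pages;
}

int main(void)
{
        struct group g = { .refs = 1 };
        struct batch b = { 0 };

        batch_add(&b, &g, 4);
        assert(g.refs == 2);     /* held by caller and by the batch */
        batch_flush(&b);
        assert(g.refs == 1);
        printf("refs balanced\n");
        return 0;
}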
diff --git a/mm/memory.c b/mm/memory.c
index 148eafb8cbb1..469af373ae76 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -73,6 +73,7 @@
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
+#include <linux/vmalloc.h>
#include <trace/events/kmem.h>
@@ -83,6 +84,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include "pgalloc-track.h"
#include "internal.h"
#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
@@ -2206,7 +2208,8 @@ EXPORT_SYMBOL(vm_iomap_memory);
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pte_t *pte;
int err = 0;
@@ -2214,7 +2217,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
if (create) {
pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
+ pte_alloc_kernel_track(pmd, addr, mask) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
@@ -2235,6 +2238,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
break;
}
} while (addr += PAGE_SIZE, addr != end);
+ *mask |= PGTBL_PTE_MODIFIED;
arch_leave_lazy_mmu_mode();
@@ -2245,7 +2249,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
@@ -2254,7 +2259,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
if (create) {
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc_track(mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
} else {
@@ -2264,7 +2269,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
next = pmd_addr_end(addr, end);
if (create || !pmd_none_or_clear_bad(pmd)) {
err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
@@ -2274,14 +2279,15 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
int err = 0;
if (create) {
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc_track(mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
} else {
@@ -2291,7 +2297,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
next = pud_addr_end(addr, end);
if (create || !pud_none_or_clear_bad(pud)) {
err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
@@ -2301,14 +2307,15 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
int err = 0;
if (create) {
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc_track(mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
} else {
@@ -2318,7 +2325,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
next = p4d_addr_end(addr, end);
if (create || !p4d_none_or_clear_bad(p4d)) {
err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
@@ -2331,8 +2338,9 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
void *data, bool create)
{
pgd_t *pgd;
- unsigned long next;
+ unsigned long start = addr, next;
unsigned long end = addr + size;
+ pgtbl_mod_mask mask = 0;
int err = 0;
if (WARN_ON(addr >= end))
@@ -2343,11 +2351,14 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
next = pgd_addr_end(addr, end);
if (!create && pgd_none_or_clear_bad(pgd))
continue;
- err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
+ err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
if (err)
break;
} while (pgd++, addr = next, addr != end);
+ if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ arch_sync_kernel_mappings(start, start + size);
+
return err;
}
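
The apply_to_page_range() rework threads a pgtbl_mod_mask through every level of the walk and performs a single arch_sync_kernel_mappings() at the end when any level allocated a new table. A userspace sketch of the accumulate-then-sync-once idea (the bit values and helpers are hypothetical):

#include <stdio.h>

#define MOD_PTE 0x1
#define MOD_PMD 0x2
#define SYNC_MASK (MOD_PMD)      /* arch cares about upper levels only */

/* Pretend walker: returns which levels it had to allocate/modify. */
static unsigned int walk_range(int need_new_pmd)
{
        unsigned int mask = MOD_PTE;    /* PTEs always touched */

        if (need_new_pmd)
                mask |= MOD_PMD;        /* a table was allocated */
        return mask;
}

static void apply_range(int need_new_pmd)
{
        unsigned int mask = walk_range(need_new_pmd);

        /* One sync for the whole range, not one per modified entry. */
        if (mask & SYNC_MASK)
                printf("sync kernel mappings once for this range\n");
        else
                printf("no sync needed\n");
}

int main(void)
{
        apply_range(0);
        apply_range(1);
        return 0;
}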
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e9d5ab5d3ca0..b11a269e2356 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1575,6 +1575,20 @@ static int __ref __offline_pages(unsigned long start_pfn,
/* check again */
ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
NULL, check_pages_isolated_cb);
+ /*
+ * per-cpu pages are drained in start_isolate_page_range, but if
+ * there are still pages that are not free, make sure that we
+ * drain again, because when we isolated the range we might have
+ * raced with another thread that was adding pages to the pcp
+ * list.
+ *
+ * Forward progress should still be guaranteed because pages on
+ * the pcp list can only belong to MOVABLE_ZONE, since
+ * has_unmovable_pages explicitly checks for PageBuddy on freed
+ * pages in other zones.
+ */
+ if (ret)
+ drain_all_pages(zone);
} while (ret);
/* Ok, all of our target is isolated.
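
The offline loop above re-drains the per-cpu lists whenever the isolation check fails, because a racing free can park a page on a pcp list after start_isolate_page_range() has already drained. A compact sketch of the check-and-drain retry loop (all state is simulated):

#include <stdio.h>

static int pcp_pages = 3;        /* pages stuck on a simulated pcp list */

static int pages_isolated(void)
{
        return pcp_pages == 0;   /* only fully isolated once pcp is empty */
}

static void drain_all_pages(void)
{
        if (pcp_pages)
                pcp_pages--;     /* each drain flushes some stragglers */
}

int main(void)
{
        int ret, tries = 0;

        do {
                ret = !pages_isolated();     /* "check again" step */
                if (ret)
                        drain_all_pages();   /* re-drain before retrying */
                tries++;
        } while (ret && tries < 100);

        printf("isolated after %d tries\n", tries);
        return 0;
}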
diff --git a/mm/memremap.c b/mm/memremap.c
index 03e38b7a38f1..006dace60b1a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -216,7 +216,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
return ERR_PTR(-EINVAL);
}
break;
- case MEMORY_DEVICE_DEVDAX:
+ case MEMORY_DEVICE_GENERIC:
need_devmap_managed = false;
break;
case MEMORY_DEVICE_PCI_P2PDMA:
diff --git a/mm/migrate.c b/mm/migrate.c
index 34a842a8eb6a..aecb1433cf3c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -246,13 +246,13 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
else if (pte_swp_uffd_wp(*pvmw.pte))
pte = pte_mkuffd_wp(pte);
- if (unlikely(is_zone_device_page(new))) {
- if (is_device_private_page(new)) {
- entry = make_device_private_entry(new, pte_write(pte));
- pte = swp_entry_to_pte(entry);
- if (pte_swp_uffd_wp(*pvmw.pte))
- pte = pte_mkuffd_wp(pte);
- }
+ if (unlikely(is_device_private_page(new))) {
+ entry = make_device_private_entry(new, pte_write(pte));
+ pte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(*pvmw.pte))
+ pte = pte_swp_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(*pvmw.pte))
+ pte = pte_swp_mkuffd_wp(pte);
}
#ifdef CONFIG_HUGETLB_PAGE
@@ -668,7 +668,8 @@ void migrate_page_states(struct page *newpage, struct page *page)
copy_page_owner(page, newpage);
- mem_cgroup_migrate(page, newpage);
+ if (!PageHuge(page))
+ mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);
@@ -2427,10 +2428,17 @@ again:
entry = make_migration_entry(page, mpfn &
MIGRATE_PFN_WRITE);
swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pte))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pte))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ if (pte_present(pte)) {
+ if (pte_soft_dirty(pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ } else {
+ if (pte_swp_soft_dirty(pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_swp_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ }
set_pte_at(mm, addr, ptep, swp_pte);
/*
diff --git a/mm/mlock.c b/mm/mlock.c
index 93ca2bf30b4f..884b1216da6a 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -58,11 +58,14 @@ EXPORT_SYMBOL(can_do_mlock);
*/
void clear_page_mlock(struct page *page)
{
+ int nr_pages;
+
if (!TestClearPageMlocked(page))
return;
- mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
- count_vm_event(UNEVICTABLE_PGCLEARED);
+ nr_pages = thp_nr_pages(page);
+ mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+ count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
/*
* The previous TestClearPageMlocked() corresponds to the smp_mb()
* in __pagevec_lru_add_fn().
@@ -76,7 +79,7 @@ void clear_page_mlock(struct page *page)
* We lost the race. the page already moved to evictable list.
*/
if (PageUnevictable(page))
- count_vm_event(UNEVICTABLE_PGSTRANDED);
+ count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
}
}
@@ -93,9 +96,10 @@ void mlock_vma_page(struct page *page)
VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
if (!TestSetPageMlocked(page)) {
- mod_zone_page_state(page_zone(page), NR_MLOCK,
- thp_nr_pages(page));
- count_vm_event(UNEVICTABLE_PGMLOCKED);
+ int nr_pages = thp_nr_pages(page);
+
+ mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+ count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
if (!isolate_lru_page(page))
putback_lru_page(page);
}
@@ -138,7 +142,7 @@ static void __munlock_isolated_page(struct page *page)
/* Did try_to_unlock() succeed or punt? */
if (!PageMlocked(page))
- count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+ count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));
putback_lru_page(page);
}
@@ -154,10 +158,12 @@ static void __munlock_isolated_page(struct page *page)
*/
static void __munlock_isolation_failed(struct page *page)
{
+ int nr_pages = thp_nr_pages(page);
+
if (PageUnevictable(page))
- __count_vm_event(UNEVICTABLE_PGSTRANDED);
+ __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
else
- __count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+ __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
}
/**
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 242c03121d73..63a3db10a8c0 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -170,6 +170,14 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* pageblocks we may have modified and return -EBUSY to caller. This
* prevents two threads from simultaneously working on overlapping ranges.
*
+ * Please note that there is no strong synchronization with the page allocator
+ * either. Pages might be freed while their page blocks are marked ISOLATED.
+ * In some cases pages might still end up on pcp lists and that would allow
+ * for their allocation even though they are in fact isolated already.
+ * Depending on how strong a guarantee the caller needs, drain_all_pages
+ * might be required (e.g. __offline_pages needs to call it after the check
+ * for an isolated range, before the next retry).
+ *
* Return: the number of isolated pageblocks on success and -EBUSY if any part
* of range cannot be isolated.
*/
diff --git a/mm/percpu.c b/mm/percpu.c
index f4709629e6de..1ed1a349eab8 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1316,7 +1316,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
/* allocate chunk */
alloc_size = sizeof(struct pcpu_chunk) +
- BITS_TO_LONGS(region_size >> PAGE_SHIFT);
+ BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
if (!chunk)
panic("%s: Failed to allocate %zu bytes\n", __func__,
diff --git a/mm/rmap.c b/mm/rmap.c
index 83cc459edc40..9425260774a1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1511,9 +1511,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
*/
entry = make_migration_entry(page, 0);
swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
+
+ /*
+ * pteval maps a zone device page and is therefore
+ * a swap pte.
+ */
+ if (pte_swp_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
+ if (pte_swp_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
/*
diff --git a/mm/shmem.c b/mm/shmem.c
index 271548ca20f3..8e2b35ba93ad 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -279,11 +279,13 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
if (!(sb->s_flags & SB_KERNMOUNT)) {
spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return -ENOSPC;
+ if (sbinfo->max_inodes) {
+ if (!sbinfo->free_inodes) {
+ spin_unlock(&sbinfo->stat_lock);
+ return -ENOSPC;
+ }
+ sbinfo->free_inodes--;
}
- sbinfo->free_inodes--;
if (inop) {
ino = sbinfo->next_ino++;
if (unlikely(is_zero_ino(ino)))
diff --git a/mm/slub.c b/mm/slub.c
index 68c02b2eecd9..d4177aecedf6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -672,12 +672,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
- void *freelist, void *nextfree)
+ void **freelist, void *nextfree)
{
if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
- !check_valid_pointer(s, page, nextfree)) {
- object_err(s, page, freelist, "Freechain corrupt");
- freelist = NULL;
+ !check_valid_pointer(s, page, nextfree) && freelist) {
+ object_err(s, page, *freelist, "Freechain corrupt");
+ *freelist = NULL;
slab_fix(s, "Isolate corrupted freechain");
return true;
}
@@ -1494,7 +1494,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
- void *freelist, void *nextfree)
+ void **freelist, void *nextfree)
{
return false;
}
@@ -2184,7 +2184,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* 'freelist' is already corrupted. So isolate all objects
* starting at 'freelist'.
*/
- if (freelist_corrupted(s, page, freelist, nextfree))
+ if (freelist_corrupted(s, page, &freelist, nextfree))
break;
do {
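
The slub change works because freelist_corrupted() now receives the caller's freelist by address: assigning NULL through a double pointer actually isolates the corrupted chain, whereas the old code only overwrote a local copy. A minimal sketch of the by-value vs by-address difference:

#include <stdio.h>
#include <stddef.h>

/* Old, broken shape: NULL-ing the parameter changes a local copy. */
static void isolate_by_value(int *list)
{
        list = NULL;             /* invisible to the caller */
}

/* Fixed shape: write through the pointer the caller handed in. */
static void isolate_by_address(int **list)
{
        *list = NULL;            /* caller's freelist really dropped */
}

int main(void)
{
        int obj = 42;
        int *freelist = &obj;

        isolate_by_value(freelist);
        printf("by value:   %s\n", freelist ? "still set" : "NULL");
        isolate_by_address(&freelist);
        printf("by address: %s\n", freelist ? "still set" : "NULL");
        return 0;
}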
diff --git a/mm/swap.c b/mm/swap.c
index d16d65d9b4e0..e7bdf094f76a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,14 +494,14 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+ int nr_pages = thp_nr_pages(page);
/*
* We use the irq-unsafe __mod_zone_page_stat because this
* counter is not modified from interrupt context, and the pte
* lock is held(spinlock), which implies preemption disabled.
*/
- __mod_zone_page_state(page_zone(page), NR_MLOCK,
- thp_nr_pages(page));
- count_vm_event(UNEVICTABLE_PGMLOCKED);
+ __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+ count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
}
lru_cache_add(page);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99e1796eb833..466fc3144fff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2615,6 +2615,14 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
+ /*
+ * This loop can become CPU-bound when target memcgs
+ * aren't eligible for reclaim - either because they
+ * don't have any reclaimable pages, or because their
+ * memory is explicitly protected. Avoid soft lockups.
+ */
+ cond_resched();
+
mem_cgroup_calculate_protection(target_memcg, memcg);
if (mem_cgroup_below_min(memcg)) {
@@ -4260,8 +4268,14 @@ void check_move_unevictable_pages(struct pagevec *pvec)
for (i = 0; i < pvec->nr; i++) {
struct page *page = pvec->pages[i];
struct pglist_data *pagepgdat = page_pgdat(page);
+ int nr_pages;
+
+ if (PageTransTail(page))
+ continue;
+
+ nr_pages = thp_nr_pages(page);
+ pgscanned += nr_pages;
- pgscanned++;
if (pagepgdat != pgdat) {
if (pgdat)
spin_unlock_irq(&pgdat->lru_lock);
@@ -4280,7 +4294,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
ClearPageUnevictable(page);
del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
add_page_to_lru_list(page, lruvec, lru);
- pgrescued++;
+ pgrescued += nr_pages;
}
}
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b570ef919c28..dbabb65d8b67 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1070,7 +1070,7 @@ module_exit(lane_module_cleanup);
/*
* LANE2: 3.1.3, LE_RESOLVE.request
* Non force allocates memory and fills in *tlvs, fills in *sizeoftlvs.
- * If sizeoftlvs == NULL the default TLVs associated with with this
+ * If sizeoftlvs == NULL the default TLVs associated with this
* lec will be used.
* If dst_mac == NULL, targetless LE_ARP will be sent
*/
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index fbd0c5e7b299..5de06ab8ed75 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -52,7 +52,7 @@ static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
msg->type = as_okay;
}
/*
- * Should probably just turn around the old skb. But the, the buffer
+ * Should probably just turn around the old skb. But then, the buffer
* space accounting needs to follow the change too. Maybe later.
*/
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ab6cec3c7586..ba0027d1f2df 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -25,6 +25,7 @@
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
+#include <linux/preempt.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
@@ -83,11 +84,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
*/
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
- const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
+ const struct batadv_bla_backbone_gw *gw;
u32 hash = 0;
- hash = jhash(&claim->addr, sizeof(claim->addr), hash);
- hash = jhash(&claim->vid, sizeof(claim->vid), hash);
+ gw = (struct batadv_bla_backbone_gw *)data;
+ hash = jhash(&gw->orig, sizeof(gw->orig), hash);
+ hash = jhash(&gw->vid, sizeof(gw->vid), hash);
return hash % size;
}
@@ -1579,13 +1581,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
}
/**
- * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
+ * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
* @bat_priv: the bat priv with all the soft interface information
- * @skb: contains the bcast_packet to be checked
+ * @skb: contains the multicast packet to be checked
+ * @payload_ptr: pointer to position inside the head buffer of the skb
+ * marking the start of the data to be CRC'ed
+ * @orig: originator mac address, NULL if unknown
*
- * check if it is on our broadcast list. Another gateway might
- * have sent the same packet because it is connected to the same backbone,
- * so we have to remove this duplicate.
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
*
* This is performed by checking the CRC, which will tell us
* with a good chance that it is the same packet. If it is furthermore
@@ -1594,19 +1599,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
*
* Return: true if a packet is in the duplicate list, false otherwise.
*/
-bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
- struct sk_buff *skb)
+static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, u8 *payload_ptr,
+ const u8 *orig)
{
- int i, curr;
- __be32 crc;
- struct batadv_bcast_packet *bcast_packet;
struct batadv_bcast_duplist_entry *entry;
bool ret = false;
-
- bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ int i, curr;
+ __be32 crc;
/* calculate the crc ... */
- crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
+ crc = batadv_skb_crc32(skb, payload_ptr);
spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
@@ -1625,8 +1628,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
if (entry->crc != crc)
continue;
- if (batadv_compare_eth(entry->orig, bcast_packet->orig))
- continue;
+ /* are the originators both known and not anonymous? */
+ if (orig && !is_zero_ether_addr(orig) &&
+ !is_zero_ether_addr(entry->orig)) {
+ /* If known, check if the new frame came from
+ * the same originator:
+ * We are safe to take identical frames from the
+ * same orig, if known, as multiplications in
+ * the mesh are detected via the (orig, seqno) pair.
+ * So we can be a bit more liberal here and allow
+ * identical frames from the same orig which the source
+ * host might have sent multiple times on purpose.
+ */
+ if (batadv_compare_eth(entry->orig, orig))
+ continue;
+ }
/* this entry seems to match: same crc, not too old,
* and from another gw. therefore return true to forbid it.
@@ -1642,7 +1658,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
entry = &bat_priv->bla.bcast_duplist[curr];
entry->crc = crc;
entry->entrytime = jiffies;
- ether_addr_copy(entry->orig, bcast_packet->orig);
+
+ /* known originator */
+ if (orig)
+ ether_addr_copy(entry->orig, orig);
+ /* anonymous originator */
+ else
+ eth_zero_addr(entry->orig);
+
bat_priv->bla.bcast_duplist_curr = curr;
out:
@@ -1652,6 +1675,48 @@ out:
}
/**
+ * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the multicast packet to be checked, decapsulated from a
+ * unicast_packet
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
+}
+
+/**
+ * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: contains the bcast_packet to be checked
+ *
+ * Check if it is on our broadcast list. Another gateway might have sent the
+ * same packet because it is connected to the same backbone, so we have to
+ * remove this duplicate.
+ *
+ * Return: true if a packet is in the duplicate list, false otherwise.
+ */
+bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
+{
+ struct batadv_bcast_packet *bcast_packet;
+ u8 *payload_ptr;
+
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ payload_ptr = (u8 *)(bcast_packet + 1);
+
+ return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
+ bcast_packet->orig);
+}
+
+/**
* batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
* the VLAN identified by vid.
* @bat_priv: the bat priv with all the soft interface information
@@ -1812,7 +1877,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
- * @is_bcast: the packet came in a broadcast packet type.
+ * @packet_type: the batman packet type this frame came in
*
* batadv_bla_rx avoidance checks if:
* * we have to race for a claim
@@ -1824,7 +1889,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
* further process the skb.
*/
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid, bool is_bcast)
+ unsigned short vid, int packet_type)
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
@@ -1846,9 +1911,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto handled;
if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
- /* don't allow broadcasts while requests are in flight */
- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
- goto handled;
+ /* don't allow multicast packets while requests are in flight */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ /* Both broadcast flooding and multicast-via-unicasts
+ * delivery might send to multiple backbone gateways
+ * sharing the same LAN and therefore need to coordinate
+ * which backbone gateway forwards into the LAN,
+ * by claiming the payload source address.
+ *
+ * Broadcast flooding and multicast-via-unicasts
+ * delivery use the following two batman packet types.
+ * Note: explicitly exclude BATADV_UNICAST_4ADDR,
+ * as the DHCP gateway feature will send explicitly
+ * to only one BLA gateway, so the claiming process
+ * should be avoided there.
+ */
+ if (packet_type == BATADV_BCAST ||
+ packet_type == BATADV_UNICAST)
+ goto handled;
+
+ /* potential duplicates from foreign BLA backbone gateways via
+ * multicast-in-unicast packets
+ */
+ if (is_multicast_ether_addr(ethhdr->h_dest) &&
+ packet_type == BATADV_UNICAST &&
+ batadv_bla_check_ucast_duplist(bat_priv, skb))
+ goto handled;
ether_addr_copy(search_claim.addr, ethhdr->h_source);
search_claim.vid = vid;
@@ -1883,13 +1971,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
goto allow;
}
- /* if it is a broadcast ... */
- if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+ /* if it is a multicast ... */
+ if (is_multicast_ether_addr(ethhdr->h_dest) &&
+ (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
/* ... drop it. the responsible gateway is in charge.
*
- * We need to check is_bcast because with the gateway
+ * We need to check packet type because with the gateway
* feature, broadcasts (like DHCP requests) may be sent
- * using a unicast packet type.
+ * using a unicast 4 address packet type. See comment above.
*/
goto handled;
} else {
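
batadv_bla_check_duplist() above generalizes the old broadcast-only check: a small ring of recent {CRC, originator} entries, where a frame counts as a duplicate if a recent entry carries the same payload CRC from a different (or unknown) originator. A userspace sketch with a toy checksum standing in for batadv_skb_crc32() (all names illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DUPLIST_SIZE 4

struct dup_entry {
        unsigned int crc;
        char orig[6];            /* all-zero means "originator unknown" */
};

static struct dup_entry ring[DUPLIST_SIZE];
static int ring_curr;

static unsigned int toy_crc(const char *payload)  /* stand-in checksum */
{
        unsigned int crc = 0;

        while (*payload)
                crc = crc * 31 + (unsigned char)*payload++;
        return crc;
}

static bool check_duplist(const char *payload, const char *orig)
{
        unsigned int crc = toy_crc(payload);
        static const char zero[6];
        int i;

        for (i = 0; i < DUPLIST_SIZE; i++) {
                struct dup_entry *e = &ring[i];

                if (e->crc != crc)
                        continue;
                /* the same known orig may legitimately repeat a frame */
                if (orig && memcmp(e->orig, zero, 6) &&
                    !memcmp(e->orig, orig, 6))
                        continue;
                return true;     /* duplicate from another gateway */
        }
        ring_curr = (ring_curr + 1) % DUPLIST_SIZE;
        ring[ring_curr].crc = crc;
        if (orig)
                memcpy(ring[ring_curr].orig, orig, 6);
        else
                memset(ring[ring_curr].orig, 0, 6);
        return false;
}

int main(void)
{
        char gw1[6] = { 1 }, gw2[6] = { 2 };

        printf("%d\n", check_duplist("hello", gw1)); /* 0: first sighting */
        printf("%d\n", check_duplist("hello", gw1)); /* 0: same orig, ok */
        printf("%d\n", check_duplist("hello", gw2)); /* 1: other gateway */
        return 0;
}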
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 41edb2c4a327..a81c41b636f9 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -35,7 +35,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
#ifdef CONFIG_BATMAN_ADV_BLA
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid, bool is_bcast);
+ unsigned short vid, int packet_type);
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid);
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
@@ -66,7 +66,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr,
static inline bool batadv_bla_rx(struct batadv_priv *bat_priv,
struct sk_buff *skb, unsigned short vid,
- bool is_bcast)
+ int packet_type)
{
return false;
}
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 1622c3f5898f..9af99c39b9fd 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -51,6 +51,7 @@
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
+#include "bridge_loop_avoidance.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
@@ -220,7 +221,7 @@ static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
* address here, only IPv6 ones
*/
if (br_ip_entry->addr.proto == htons(ETH_P_IPV6) &&
- ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.u.ip6))
+ ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.dst.ip6))
flags &= ~BATADV_MCAST_WANT_NO_RTR6;
list_del(&br_ip_entry->list);
@@ -561,10 +562,10 @@ out:
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
if (src->proto == htons(ETH_P_IP))
- ip_eth_mc_map(src->u.ip4, dst);
+ ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
else if (src->proto == htons(ETH_P_IPV6))
- ipv6_eth_mc_map(&src->u.ip6, dst);
+ ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
else
eth_zero_addr(dst);
@@ -608,11 +609,11 @@ static int batadv_mcast_mla_bridge_get(struct net_device *dev,
continue;
if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
- ipv4_is_local_multicast(br_ip_entry->addr.u.ip4))
+ ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
continue;
if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
- !ipv4_is_local_multicast(br_ip_entry->addr.u.ip4))
+ !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
continue;
}
@@ -622,11 +623,11 @@ static int batadv_mcast_mla_bridge_get(struct net_device *dev,
continue;
if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
- ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.u.ip6))
+ ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
continue;
if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
- IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.u.ip6) >
+ IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
IPV6_ADDR_SCOPE_LINKLOCAL)
continue;
}
@@ -1435,6 +1436,35 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
/**
+ * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the multicast packet to send
+ * @vid: the vlan identifier
+ * @orig_node: the originator to send the packet to
+ *
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node)
+{
+ /* Avoid sending multicast-in-unicast packets to other BLA
+ * gateways - they already got the frame from the LAN side
+ * we share with them.
+ * TODO: Refactor to take BLA into account earlier, to avoid
+ * reducing the mcast_fanout count.
+ */
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
+ dev_kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+ orig_node, vid);
+}
+
+/**
* batadv_mcast_forw_tt() - forwards a packet to multicast listeners
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
@@ -1471,8 +1501,8 @@ batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
break;
}
- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
- orig_entry->orig_node, vid);
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
+ orig_entry->orig_node);
}
rcu_read_unlock();
@@ -1513,8 +1543,7 @@ batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
break;
}
- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
- orig_node, vid);
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
@@ -1551,8 +1580,7 @@ batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
break;
}
- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
- orig_node, vid);
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
@@ -1618,8 +1646,7 @@ batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
break;
}
- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
- orig_node, vid);
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
@@ -1656,8 +1683,7 @@ batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
break;
}
- batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0,
- orig_node, vid);
+ batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
}
rcu_read_unlock();
return ret;
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index ebf825991ecd..3e114bc5ca3b 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -46,6 +46,11 @@ enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
struct batadv_orig_node **mcast_single_orig);
+int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node);
+
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid);
@@ -72,6 +77,16 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
}
static inline int
+batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
+ struct sk_buff *skb,
+ unsigned short vid,
+ struct batadv_orig_node *orig_node)
+{
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
+static inline int
batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
{
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 27cdf5e4349a..9e5c71e406ff 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -826,6 +826,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
vid = batadv_get_vid(skb, hdr_len);
ethhdr = (struct ethhdr *)(skb->data + hdr_len);
+ /* do not reroute multicast frames in a unicast header */
+ if (is_multicast_ether_addr(ethhdr->h_dest))
+ return true;
+
/* check if the destination client was served by this node and it is now
* roaming. In this case, it means that the node has got a ROAM_ADV
* message and that it knows the new destination in the mesh to re-route
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9d3974ba11ed..82e7ca886605 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -364,9 +364,8 @@ send:
goto dropped;
ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
} else if (mcast_single_orig) {
- ret = batadv_send_skb_unicast(bat_priv, skb,
- BATADV_UNICAST, 0,
- mcast_single_orig, vid);
+ ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
+ mcast_single_orig);
} else if (forw_mode == BATADV_FORW_SOME) {
ret = batadv_mcast_forw_send(bat_priv, skb, vid);
} else {
@@ -425,10 +424,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct vlan_ethhdr *vhdr;
struct ethhdr *ethhdr;
unsigned short vid;
- bool is_bcast;
+ int packet_type;
batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
- is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
+ packet_type = batadv_bcast_packet->packet_type;
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
@@ -471,7 +470,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
/* Let the bridge loop avoidance check the packet. If it will
* not handle it, we can safely push it up.
*/
- if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
+ if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
goto out;
if (orig_node)
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 9832f8445d43..d0c1024bf600 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1388,7 +1388,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
return 0;
}
-/* Encrypt the the link */
+/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
BT_DBG("hcon %p", conn);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 68bfe57b6625..b0209e35284a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -808,7 +808,7 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
* Delete Stored Link Key command. They are clearly indicating its
* absence in the bit mask of supported commands.
*
- * Check the supported commands and only if the the command is marked
+ * Check the supported commands and only if the command is marked
* as supported send it. If not supported assume that the controller
* does not have actual support for stored link keys which makes this
* command redundant anyway.
diff --git a/net/bridge/br.c b/net/bridge/br.c
index b6fe30e3768f..401eeb9142eb 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -183,6 +183,11 @@ static int br_switchdev_event(struct notifier_block *unused,
br_fdb_offloaded_set(br, p, fdb_info->addr,
fdb_info->vid, fdb_info->offloaded);
break;
+ case SWITCHDEV_FDB_FLUSH_TO_BRIDGE:
+ fdb_info = ptr;
+ /* Don't delete static entries */
+ br_fdb_delete_by_port(br, p, fdb_info->vid, 0);
+ break;
}
out:
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7629b63f6f30..e28ffadd1371 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -274,14 +274,23 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
+ bool allow_mode_include = true;
struct hlist_node *rp;
rp = rcu_dereference(hlist_first_rcu(&br->router_list));
- p = mdst ? rcu_dereference(mdst->ports) : NULL;
+ if (mdst) {
+ p = rcu_dereference(mdst->ports);
+ if (br_multicast_should_handle_mode(br, mdst->addr.proto) &&
+ br_multicast_is_star_g(&mdst->addr))
+ allow_mode_include = false;
+ } else {
+ p = NULL;
+ }
+
while (p || rp) {
struct net_bridge_port *port, *lport, *rport;
- lport = p ? p->port : NULL;
+ lport = p ? p->key.port : NULL;
rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);
if ((unsigned long)lport > (unsigned long)rport) {
@@ -292,6 +301,10 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
local_orig);
goto delivered;
}
+ if ((!allow_mode_include &&
+ p->filter_mode == MCAST_INCLUDE) ||
+ (p->flags & MDB_PG_FLAGS_BLOCKED))
+ goto delivered;
} else {
port = rport;
}
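
The br_multicast_flood() change skips ports whose group entry is in INCLUDE mode when the destination is a (*,G) group the bridge handles, since such ports receive traffic via their own (S,G) entries; blocked entries are skipped the same way. A small sketch of that per-port decision (flag and struct names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define MCAST_INCLUDE 0
#define MCAST_EXCLUDE 1
#define PG_BLOCKED    0x1

struct port_group {
        int filter_mode;
        unsigned int flags;
};

/* Deliver on this port? star_g means the matched entry is (*,G). */
static bool should_deliver(const struct port_group *p, bool star_g)
{
        if (star_g && p->filter_mode == MCAST_INCLUDE)
                return false;    /* served by its own (S,G) entries */
        if (p->flags & PG_BLOCKED)
                return false;
        return true;
}

int main(void)
{
        struct port_group incl = { MCAST_INCLUDE, 0 };
        struct port_group excl = { MCAST_EXCLUDE, 0 };
        struct port_group blocked = { MCAST_EXCLUDE, PG_BLOCKED };

        printf("%d %d %d\n",
               should_deliver(&incl, true),      /* 0 */
               should_deliver(&excl, true),      /* 1 */
               should_deliver(&blocked, true));  /* 0 */
        return 0;
}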
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 5e71fc8b826f..2db800fc27ca 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -103,7 +103,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
/*
* Legacy ioctl's through SIOCDEVPRIVATE
- * This interface is deprecated because it was too difficult to
+ * This interface is deprecated because it was too difficult
* to do the translation for 32/64bit ioctl compatibility.
*/
static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 00f1651a6aba..e15bab19a012 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -62,19 +62,33 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
e->flags |= MDB_FLAGS_OFFLOAD;
if (flags & MDB_PG_FLAGS_FAST_LEAVE)
e->flags |= MDB_FLAGS_FAST_LEAVE;
+ if (flags & MDB_PG_FLAGS_STAR_EXCL)
+ e->flags |= MDB_FLAGS_STAR_EXCL;
+ if (flags & MDB_PG_FLAGS_BLOCKED)
+ e->flags |= MDB_FLAGS_BLOCKED;
}
-static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
+ struct nlattr **mdb_attrs)
{
memset(ip, 0, sizeof(struct br_ip));
ip->vid = entry->vid;
ip->proto = entry->addr.proto;
- if (ip->proto == htons(ETH_P_IP))
- ip->u.ip4 = entry->addr.u.ip4;
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ ip->dst.ip4 = entry->addr.u.ip4;
+ if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
+ ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- else
- ip->u.ip6 = entry->addr.u.ip6;
+ case htons(ETH_P_IPV6):
+ ip->dst.ip6 = entry->addr.u.ip6;
+ if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
+ ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
+ break;
#endif
+ }
+
}
static int __mdb_fill_srcs(struct sk_buff *skb,
@@ -91,14 +105,14 @@ static int __mdb_fill_srcs(struct sk_buff *skb,
return -EMSGSIZE;
hlist_for_each_entry_rcu(ent, &p->src_list, node,
- lockdep_is_held(&p->port->br->multicast_lock)) {
+ lockdep_is_held(&p->key.port->br->multicast_lock)) {
nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
if (!nest_ent)
goto out_cancel_err;
switch (ent->addr.proto) {
case htons(ETH_P_IP):
if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
- ent->addr.u.ip4)) {
+ ent->addr.src.ip4)) {
nla_nest_cancel(skb, nest_ent);
goto out_cancel_err;
}
@@ -106,7 +120,7 @@ static int __mdb_fill_srcs(struct sk_buff *skb,
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
- &ent->addr.u.ip6)) {
+ &ent->addr.src.ip6)) {
nla_nest_cancel(skb, nest_ent);
goto out_cancel_err;
}
@@ -146,7 +160,7 @@ static int __mdb_fill_info(struct sk_buff *skb,
memset(&e, 0, sizeof(e));
if (p) {
- ifindex = p->port->dev->ifindex;
+ ifindex = p->key.port->dev->ifindex;
mtimer = &p->timer;
flags = p->flags;
} else {
@@ -158,10 +172,10 @@ static int __mdb_fill_info(struct sk_buff *skb,
e.ifindex = ifindex;
e.vid = mp->addr.vid;
if (mp->addr.proto == htons(ETH_P_IP))
- e.addr.u.ip4 = mp->addr.u.ip4;
+ e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
if (mp->addr.proto == htons(ETH_P_IPV6))
- e.addr.u.ip6 = mp->addr.u.ip6;
+ e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
@@ -172,30 +186,47 @@ static int __mdb_fill_info(struct sk_buff *skb,
if (nla_put_nohdr(skb, sizeof(e), &e) ||
nla_put_u32(skb,
MDBA_MDB_EATTR_TIMER,
- br_timer_value(mtimer))) {
- nla_nest_cancel(skb, nest_ent);
- return -EMSGSIZE;
- }
+ br_timer_value(mtimer)))
+ goto nest_err;
+
switch (mp->addr.proto) {
case htons(ETH_P_IP):
- dump_srcs_mode = !!(p && mp->br->multicast_igmp_version == 3);
+ dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
+ if (mp->addr.src.ip4) {
+ if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
+ mp->addr.src.ip4))
+ goto nest_err;
+ break;
+ }
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- dump_srcs_mode = !!(p && mp->br->multicast_mld_version == 2);
+ dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
+ if (!ipv6_addr_any(&mp->addr.src.ip6)) {
+ if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
+ &mp->addr.src.ip6))
+ goto nest_err;
+ break;
+ }
break;
#endif
}
- if (dump_srcs_mode &&
- (__mdb_fill_srcs(skb, p) ||
- nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE, p->filter_mode))) {
- nla_nest_cancel(skb, nest_ent);
- return -EMSGSIZE;
+ if (p) {
+ if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
+ goto nest_err;
+ if (dump_srcs_mode &&
+ (__mdb_fill_srcs(skb, p) ||
+ nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
+ p->filter_mode)))
+ goto nest_err;
}
-
nla_nest_end(skb, nest_ent);
return 0;
+
+nest_err:
+ nla_nest_cancel(skb, nest_ent);
+ return -EMSGSIZE;
}
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
@@ -236,7 +267,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
- if (!p->port)
+ if (!p->key.port)
continue;
if (pidx < s_pidx)
goto skip_pg;
@@ -393,15 +424,24 @@ static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
if (!pg)
goto out;
- switch (pg->addr.proto) {
+ /* MDBA_MDB_EATTR_RTPROT */
+ nlmsg_size += nla_total_size(sizeof(u8));
+
+ switch (pg->key.addr.proto) {
case htons(ETH_P_IP):
- if (pg->port->br->multicast_igmp_version == 2)
+ /* MDBA_MDB_EATTR_SOURCE */
+ if (pg->key.addr.src.ip4)
+ nlmsg_size += nla_total_size(sizeof(__be32));
+ if (pg->key.port->br->multicast_igmp_version == 2)
goto out;
addr_size = sizeof(__be32);
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- if (pg->port->br->multicast_mld_version == 1)
+ /* MDBA_MDB_EATTR_SOURCE */
+ if (!ipv6_addr_any(&pg->key.addr.src.ip6))
+ nlmsg_size += nla_total_size(sizeof(struct in6_addr));
+ if (pg->key.port->br->multicast_mld_version == 1)
goto out;
addr_size = sizeof(struct in6_addr);
break;
@@ -450,7 +490,7 @@ static void br_mdb_complete(struct net_device *dev, int err, void *priv)
goto out;
for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port != port)
+ if (p->key.port != port)
continue;
p->flags |= MDB_PG_FLAGS_OFFLOAD;
}
@@ -474,10 +514,10 @@ static void br_mdb_switchdev_host_port(struct net_device *dev,
};
if (mp->addr.proto == htons(ETH_P_IP))
- ip_eth_mc_map(mp->addr.u.ip4, mdb.addr);
+ ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
else
- ipv6_eth_mc_map(&mp->addr.u.ip6, mdb.addr);
+ ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
mdb.obj.orig_dev = dev;
@@ -520,26 +560,26 @@ void br_mdb_notify(struct net_device *dev,
if (pg) {
if (mp->addr.proto == htons(ETH_P_IP))
- ip_eth_mc_map(mp->addr.u.ip4, mdb.addr);
+ ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
else
- ipv6_eth_mc_map(&mp->addr.u.ip6, mdb.addr);
+ ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
- mdb.obj.orig_dev = pg->port->dev;
+ mdb.obj.orig_dev = pg->key.port->dev;
switch (type) {
case RTM_NEWMDB:
complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
if (!complete_info)
break;
- complete_info->port = pg->port;
+ complete_info->port = pg->key.port;
complete_info->ip = mp->addr;
mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_mdb_complete;
- if (switchdev_port_obj_add(pg->port->dev, &mdb.obj, NULL))
+ if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
kfree(complete_info);
break;
case RTM_DELMDB:
- switchdev_port_obj_del(pg->port->dev, &mdb.obj);
+ switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
break;
}
} else {
@@ -629,33 +669,94 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
-static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
+static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
+ struct netlink_ext_ack *extack)
{
- if (entry->ifindex == 0)
+ if (entry->ifindex == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
return false;
+ }
if (entry->addr.proto == htons(ETH_P_IP)) {
- if (!ipv4_is_multicast(entry->addr.u.ip4))
+ if (!ipv4_is_multicast(entry->addr.u.ip4)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
return false;
- if (ipv4_is_local_multicast(entry->addr.u.ip4))
+ }
+ if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
return false;
+ }
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
- if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
+ if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
return false;
+ }
#endif
- } else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
return false;
- if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
+ }
+
+ if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
return false;
- if (entry->vid >= VLAN_VID_MASK)
+ }
+ if (entry->vid >= VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
return false;
+ }
+
+ return true;
+}
+
+static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
+ struct netlink_ext_ack *extack)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ if (nla_len(attr) != sizeof(struct in_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
+ return false;
+ }
+ if (ipv4_is_multicast(nla_get_in_addr(attr))) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
+ return false;
+ }
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6): {
+ struct in6_addr src;
+
+ if (nla_len(attr) != sizeof(struct in6_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
+ return false;
+ }
+ src = nla_get_in6_addr(attr);
+ if (ipv6_addr_is_multicast(&src)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
+ return false;
+ }
+ break;
+ }
+#endif
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
+ return false;
+ }
return true;
}
+static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
+ [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
+ sizeof(struct in_addr),
+ sizeof(struct in6_addr)),
+};
+
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct net_device **pdev, struct br_mdb_entry **pentry)
+ struct net_device **pdev, struct br_mdb_entry **pentry,
+ struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct br_mdb_entry *entry;
@@ -671,51 +772,86 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
bpm = nlmsg_data(nlh);
if (bpm->ifindex == 0) {
- pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
return -EINVAL;
}
dev = __dev_get_by_index(net, bpm->ifindex);
if (dev == NULL) {
- pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
+ NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
return -ENODEV;
}
if (!(dev->priv_flags & IFF_EBRIDGE)) {
- pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
+ NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
return -EOPNOTSUPP;
}
*pdev = dev;
- if (!tb[MDBA_SET_ENTRY] ||
- nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
- pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
+ if (!tb[MDBA_SET_ENTRY]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
+ return -EINVAL;
+ }
+ if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
return -EINVAL;
}
entry = nla_data(tb[MDBA_SET_ENTRY]);
- if (!is_valid_mdb_entry(entry)) {
- pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
+ if (!is_valid_mdb_entry(entry, extack))
return -EINVAL;
+ *pentry = entry;
+
+ if (tb[MDBA_SET_ENTRY_ATTRS]) {
+ err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
+ tb[MDBA_SET_ENTRY_ATTRS],
+ br_mdbe_attrs_pol, extack);
+ if (err)
+ return err;
+ if (mdb_attrs[MDBE_ATTR_SOURCE] &&
+ !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
+ entry->addr.proto, extack))
+ return -EINVAL;
+ } else {
+ memset(mdb_attrs, 0,
+ sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
}
- *pentry = entry;
return 0;
}
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
- struct br_ip *group, struct br_mdb_entry *entry)
+ struct br_mdb_entry *entry,
+ struct nlattr **mdb_attrs,
+ struct netlink_ext_ack *extack)
{
- struct net_bridge_mdb_entry *mp;
+ struct net_bridge_mdb_entry *mp, *star_mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
+ struct br_ip group, star_group;
unsigned long now = jiffies;
+ u8 filter_mode;
int err;
- mp = br_mdb_ip_get(br, group);
+ __mdb_entry_to_br_ip(entry, &group, mdb_attrs);
+
+ /* host join errors which can happen before creating the group */
+ if (!port) {
+ /* don't allow any flags for host-joined groups */
+ if (entry->state) {
+ NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
+ return -EINVAL;
+ }
+ if (!br_multicast_is_star_g(&group)) {
+ NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
+ return -EINVAL;
+ }
+ }
+
+ mp = br_mdb_ip_get(br, &group);
if (!mp) {
- mp = br_multicast_new_group(br, group);
+ mp = br_multicast_new_group(br, &group);
err = PTR_ERR_OR_ZERO(mp);
if (err)
return err;
@@ -723,11 +859,10 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
/* host join */
if (!port) {
- /* don't allow any flags for host-joined groups */
- if (entry->state)
- return -EINVAL;
- if (mp->host_joined)
+ if (mp->host_joined) {
+ NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
return -EEXIST;
+ }
br_multicast_host_join(mp, false);
br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
@@ -738,56 +873,69 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port == port)
+ if (p->key.port == port) {
+ NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
return -EEXIST;
- if ((unsigned long)p->port < (unsigned long)port)
+ }
+ if ((unsigned long)p->key.port < (unsigned long)port)
break;
}
- p = br_multicast_new_port_group(port, group, *pp, entry->state, NULL,
- MCAST_EXCLUDE);
- if (unlikely(!p))
+ filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
+ MCAST_INCLUDE;
+
+ p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
+ filter_mode, RTPROT_STATIC);
+ if (unlikely(!p)) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
return -ENOMEM;
+ }
rcu_assign_pointer(*pp, p);
if (entry->state == MDB_TEMPORARY)
mod_timer(&p->timer, now + br->multicast_membership_interval);
br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
+ /* if we are adding a new EXCLUDE port group (*,G), it also needs to be
+ * added to all S,G entries for proper replication; if we are adding
+ * a new INCLUDE port (S,G), then all of the *,G EXCLUDE ports need to be
+ * added to it for proper replication
+ */
+ if (br_multicast_should_handle_mode(br, group.proto)) {
+ switch (filter_mode) {
+ case MCAST_EXCLUDE:
+ br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
+ break;
+ case MCAST_INCLUDE:
+ star_group = p->key.addr;
+ memset(&star_group.src, 0, sizeof(star_group.src));
+ star_mp = br_mdb_ip_get(br, &star_group);
+ if (star_mp)
+ br_multicast_sg_add_exclude_ports(star_mp, p);
+ break;
+ }
+ }
return 0;
}
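
The filter mode chosen above follows the IGMPv3/MLDv2 convention: a (*,G) entry (no source address) defaults to EXCLUDE, an (S,G) entry to INCLUDE. A freestanding illustration of that selection, with br_ip simplified to an IPv4-only pair (names here are illustrative, not kernel code):

    #include <stdio.h>

    /* simplified model of struct br_ip: group plus optional source,
     * where a zero source denotes a (*,G) entry
     */
    struct mini_br_ip {
            unsigned int dst; /* group address */
            unsigned int src; /* 0 => (*,G) */
    };

    enum filter_mode { FM_INCLUDE, FM_EXCLUDE }; /* stand-ins for MCAST_* */

    static enum filter_mode pick_filter_mode(const struct mini_br_ip *ip)
    {
            /* (*,G) joins default to EXCLUDE {}; (S,G) joins are INCLUDE {S} */
            return ip->src == 0 ? FM_EXCLUDE : FM_INCLUDE;
    }

    int main(void)
    {
            struct mini_br_ip star_g = { 0xef010101, 0 };
            struct mini_br_ip s_g = { 0xef010101, 0xc0000201 };

            printf("(*,G) -> %s\n", pick_filter_mode(&star_g) == FM_EXCLUDE ? "EXCLUDE" : "INCLUDE");
            printf("(S,G) -> %s\n", pick_filter_mode(&s_g) == FM_EXCLUDE ? "EXCLUDE" : "INCLUDE");
            return 0;
    }

Assuming an iproute2 build that understands the src keyword, this path would be exercised by something like "bridge mdb add dev br0 port swp1 grp 239.1.1.1 src 192.0.2.1 permanent".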
static int __br_mdb_add(struct net *net, struct net_bridge *br,
- struct br_mdb_entry *entry)
+ struct net_bridge_port *p,
+ struct br_mdb_entry *entry,
+ struct nlattr **mdb_attrs,
+ struct netlink_ext_ack *extack)
{
- struct br_ip ip;
- struct net_device *dev;
- struct net_bridge_port *p = NULL;
int ret;
- if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
- return -EINVAL;
-
- if (entry->ifindex != br->dev->ifindex) {
- dev = __dev_get_by_index(net, entry->ifindex);
- if (!dev)
- return -ENODEV;
-
- p = br_port_get_rtnl(dev);
- if (!p || p->br != br || p->state == BR_STATE_DISABLED)
- return -EINVAL;
- }
-
- __mdb_entry_to_br_ip(entry, &ip);
-
spin_lock_bh(&br->multicast_lock);
- ret = br_mdb_add_group(br, p, &ip, entry);
+ ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
spin_unlock_bh(&br->multicast_lock);
+
return ret;
}
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
struct net *net = sock_net(skb->sk);
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
@@ -797,20 +945,43 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_bridge *br;
int err;
- err = br_mdb_parse(skb, nlh, &dev, &entry);
+ err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
if (err < 0)
return err;
br = netdev_priv(dev);
+ if (!netif_running(br->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
+ return -EINVAL;
+ }
+
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
+ NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
+ return -EINVAL;
+ }
+
if (entry->ifindex != br->dev->ifindex) {
pdev = __dev_get_by_index(net, entry->ifindex);
- if (!pdev)
+ if (!pdev) {
+ NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
return -ENODEV;
+ }
p = br_port_get_rtnl(pdev);
- if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
+ return -EINVAL;
+ }
+
+ if (p->br != br) {
+ NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
return -EINVAL;
+ }
+ if (p->state == BR_STATE_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
+ return -EINVAL;
+ }
vg = nbp_vlan_group(p);
} else {
vg = br_vlan_group(br);
@@ -822,18 +993,19 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
- err = __br_mdb_add(net, br, entry);
+ err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
if (err)
break;
}
} else {
- err = __br_mdb_add(net, br, entry);
+ err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
}
return err;
}
-static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
+ struct nlattr **mdb_attrs)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -844,7 +1016,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
return -EINVAL;
- __mdb_entry_to_br_ip(entry, &ip);
+ __mdb_entry_to_br_ip(entry, &ip, mdb_attrs);
spin_lock_bh(&br->multicast_lock);
mp = br_mdb_ip_get(br, &ip);
@@ -864,10 +1036,10 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (!p->port || p->port->dev->ifindex != entry->ifindex)
+ if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
continue;
- if (p->port->state == BR_STATE_DISABLED)
+ if (p->key.port->state == BR_STATE_DISABLED)
goto unlock;
br_multicast_del_pg(mp, p, pp);
@@ -883,6 +1055,7 @@ unlock:
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
struct net *net = sock_net(skb->sk);
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
@@ -892,7 +1065,7 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_bridge *br;
int err;
- err = br_mdb_parse(skb, nlh, &dev, &entry);
+ err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
if (err < 0)
return err;
@@ -917,10 +1090,10 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
- err = __br_mdb_del(br, entry);
+ err = __br_mdb_del(br, entry, mdb_attrs);
}
} else {
- err = __br_mdb_del(br, entry);
+ err = __br_mdb_del(br, entry, mdb_attrs);
}
return err;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 33adf44ef7ec..66eb62ded192 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -41,6 +41,13 @@ static const struct rhashtable_params br_mdb_rht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params br_sg_port_rht_params = {
+ .head_offset = offsetof(struct net_bridge_port_group, rhnode),
+ .key_offset = offsetof(struct net_bridge_port_group, key),
+ .key_len = sizeof(struct net_bridge_port_group_sg_key),
+ .automatic_shrinking = true,
+};
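
head_offset, key_offset and key_len tell the rhashtable where, inside each net_bridge_port_group, the linkage node and the {port, address} composite key live. A freestanding sketch of how such offsets work (standalone C; the mini_* types are hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    struct mini_sg_key {
            void *port;
            unsigned int addr;
    };

    struct mini_port_group {
            struct mini_port_group *next;
            struct mini_sg_key key; /* composite hash key: {port, addr} */
            void *rhnode;           /* stands in for struct rhash_head */
    };

    int main(void)
    {
            /* the table hashes key_len bytes found key_offset into each
             * object and links objects through the node at head_offset
             */
            printf("key_offset  = %zu\n", offsetof(struct mini_port_group, key));
            printf("key_len     = %zu\n", sizeof(struct mini_sg_key));
            printf("head_offset = %zu\n", offsetof(struct mini_port_group, rhnode));
            return 0;
    }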
+
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
@@ -59,6 +66,26 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
const struct in6_addr *group,
__u16 vid, const unsigned char *src);
#endif
+static struct net_bridge_port_group *
+__br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *group,
+ const unsigned char *src,
+ u8 filter_mode,
+ bool igmpv2_mldv1,
+ bool blocked);
+static void br_multicast_find_del_pg(struct net_bridge *br,
+ struct net_bridge_port_group *pg);
+
+static struct net_bridge_port_group *
+br_sg_port_find(struct net_bridge *br,
+ struct net_bridge_port_group_sg_key *sg_p)
+{
+ lockdep_assert_held_once(&br->multicast_lock);
+
+ return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
+ br_sg_port_rht_params);
+}
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
struct br_ip *dst)
@@ -86,7 +113,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
struct br_ip br_dst;
memset(&br_dst, 0, sizeof(br_dst));
- br_dst.u.ip4 = dst;
+ br_dst.dst.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
br_dst.vid = vid;
@@ -101,7 +128,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
struct br_ip br_dst;
memset(&br_dst, 0, sizeof(br_dst));
- br_dst.u.ip6 = *dst;
+ br_dst.dst.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
br_dst.vid = vid;
@@ -126,11 +153,29 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
switch (skb->protocol) {
case htons(ETH_P_IP):
- ip.u.ip4 = ip_hdr(skb)->daddr;
+ ip.dst.ip4 = ip_hdr(skb)->daddr;
+ if (br->multicast_igmp_version == 3) {
+ struct net_bridge_mdb_entry *mdb;
+
+ ip.src.ip4 = ip_hdr(skb)->saddr;
+ mdb = br_mdb_ip_get_rcu(br, &ip);
+ if (mdb)
+ return mdb;
+ ip.src.ip4 = 0;
+ }
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- ip.u.ip6 = ipv6_hdr(skb)->daddr;
+ ip.dst.ip6 = ipv6_hdr(skb)->daddr;
+ if (br->multicast_mld_version == 2) {
+ struct net_bridge_mdb_entry *mdb;
+
+ ip.src.ip6 = ipv6_hdr(skb)->saddr;
+ mdb = br_mdb_ip_get_rcu(br, &ip);
+ if (mdb)
+ return mdb;
+ memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
+ }
break;
#endif
default:
@@ -140,6 +185,326 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
return br_mdb_ip_get_rcu(br, &ip);
}
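
The IGMPv3/MLDv2 branches above try the most specific match first: an (S,G) lookup with the packet's source filled in, falling back to (*,G) by zeroing the source. A standalone model of that two-stage lookup (illustrative only; the kernel uses an rhashtable, not a linear scan):

    #include <stdio.h>

    struct entry {
            unsigned int dst;
            unsigned int src; /* 0 => (*,G) */
    };

    static const struct entry *lookup(const struct entry *tbl, int n,
                                      unsigned int dst, unsigned int src)
    {
            for (int i = 0; i < n; i++)
                    if (tbl[i].dst == dst && tbl[i].src == src)
                            return &tbl[i];
            return NULL;
    }

    int main(void)
    {
            const struct entry mdb[] = {
                    { 0xef010101, 0 },          /* (*,G) */
                    { 0xef010101, 0xc0000201 }, /* (S,G) */
            };
            const struct entry *e;

            /* 1. most specific first: (S,G) with the packet's source */
            e = lookup(mdb, 2, 0xef010101, 0xc0000202);
            /* 2. fall back to (*,G) by zeroing the source */
            if (!e)
                    e = lookup(mdb, 2, 0xef010101, 0);
            printf("matched %s entry\n", e && e->src ? "(S,G)" : "(*,G)");
            return 0;
    }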
+static bool br_port_group_equal(struct net_bridge_port_group *p,
+ struct net_bridge_port *port,
+ const unsigned char *src)
+{
+ if (p->key.port != port)
+ return false;
+
+ if (!(port->flags & BR_MULTICAST_TO_UNICAST))
+ return true;
+
+ return ether_addr_equal(src, p->eth_addr);
+}
+
+static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
+ struct br_ip *sg_ip)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_port_group *src_pg;
+
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.port = pg->key.port;
+ sg_key.addr = *sg_ip;
+ if (br_sg_port_find(br, &sg_key))
+ return;
+
+ src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
+ MCAST_INCLUDE, false, false);
+ if (IS_ERR_OR_NULL(src_pg) ||
+ src_pg->rt_protocol != RTPROT_KERNEL)
+ return;
+
+ src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
+}
+
+static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
+ struct br_ip *sg_ip)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_port_group *src_pg;
+
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.port = pg->key.port;
+ sg_key.addr = *sg_ip;
+ src_pg = br_sg_port_find(br, &sg_key);
+ if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
+ src_pg->rt_protocol != RTPROT_KERNEL)
+ return;
+
+ br_multicast_find_del_pg(br, src_pg);
+}
+
+/* When a port group transitions to (or is added as) EXCLUDE, we need to add
+ * it to all other ports' S,G entries which are not blocked by the current
+ * group, for proper replication; the assumption is that any blocked S,G
+ * entries are already added, so the S,G,port lookup should skip them.
+ * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
+ * deleted, we need to remove it from all ports' S,G entries where it was
+ * automatically installed before (i.e. where its MDB_PG_FLAGS_STAR_EXCL flag
+ * is set).
+ */
+void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
+ u8 filter_mode)
+{
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_port_group *pg_lst;
+ struct net_bridge_mdb_entry *mp;
+ struct br_ip sg_ip;
+
+ if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
+ return;
+
+ mp = br_mdb_ip_get(br, &pg->key.addr);
+ if (!mp)
+ return;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = pg->key.addr;
+ for (pg_lst = mlock_dereference(mp->ports, br);
+ pg_lst;
+ pg_lst = mlock_dereference(pg_lst->next, br)) {
+ struct net_bridge_group_src *src_ent;
+
+ if (pg_lst == pg)
+ continue;
+ hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
+ if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
+ continue;
+ sg_ip.src = src_ent->addr.src;
+ switch (filter_mode) {
+ case MCAST_INCLUDE:
+ __fwd_del_star_excl(pg, &sg_ip);
+ break;
+ case MCAST_EXCLUDE:
+ __fwd_add_star_excl(pg, &sg_ip);
+ break;
+ }
+ }
+ }
+}
+
+/* called when adding a new S,G with host_joined == false by default */
+static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
+ struct net_bridge_port_group *sg)
+{
+ struct net_bridge_mdb_entry *sg_mp;
+
+ if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
+ return;
+ if (!star_mp->host_joined)
+ return;
+
+ sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
+ if (!sg_mp)
+ return;
+ sg_mp->host_joined = true;
+}
+
+/* set the host_joined state of all of *,G's S,G entries */
+static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
+{
+ struct net_bridge *br = star_mp->br;
+ struct net_bridge_mdb_entry *sg_mp;
+ struct net_bridge_port_group *pg;
+ struct br_ip sg_ip;
+
+ if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
+ return;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = star_mp->addr;
+ for (pg = mlock_dereference(star_mp->ports, br);
+ pg;
+ pg = mlock_dereference(pg->next, br)) {
+ struct net_bridge_group_src *src_ent;
+
+ hlist_for_each_entry(src_ent, &pg->src_list, node) {
+ if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
+ continue;
+ sg_ip.src = src_ent->addr.src;
+ sg_mp = br_mdb_ip_get(br, &sg_ip);
+ if (!sg_mp)
+ continue;
+ sg_mp->host_joined = star_mp->host_joined;
+ }
+ }
+}
+
+static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
+{
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_port_group *p;
+
+ /* *,G exclude ports are only added to S,G entries */
+ if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
+ return;
+
+ /* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports;
+ * we should ignore perm entries since they're managed by user-space
+ */
+ for (pp = &sgmp->ports;
+ (p = mlock_dereference(*pp, sgmp->br)) != NULL;
+ pp = &p->next)
+ if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
+ MDB_PG_FLAGS_PERMANENT)))
+ return;
+
+ /* currently the host can only have joined the *,G, which means
+ * we treat it as EXCLUDE {}, so for an S,G it's considered a
+ * STAR_EXCLUDE entry and we can safely leave it
+ */
+ sgmp->host_joined = false;
+
+ for (pp = &sgmp->ports;
+ (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
+ if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
+ br_multicast_del_pg(sgmp, p, pp);
+ else
+ pp = &p->next;
+ }
+}
+
+void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
+ struct net_bridge_port_group *sg)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge *br = star_mp->br;
+ struct net_bridge_port_group *pg;
+
+ if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
+ return;
+ if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
+ return;
+
+ br_multicast_sg_host_state(star_mp, sg);
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.addr = sg->key.addr;
+ /* we need to add all exclude ports to the S,G */
+ for (pg = mlock_dereference(star_mp->ports, br);
+ pg;
+ pg = mlock_dereference(pg->next, br)) {
+ struct net_bridge_port_group *src_pg;
+
+ if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
+ continue;
+
+ sg_key.port = pg->key.port;
+ if (br_sg_port_find(br, &sg_key))
+ continue;
+
+ src_pg = __br_multicast_add_group(br, pg->key.port,
+ &sg->key.addr,
+ sg->eth_addr,
+ MCAST_INCLUDE, false, false);
+ if (IS_ERR_OR_NULL(src_pg) ||
+ src_pg->rt_protocol != RTPROT_KERNEL)
+ continue;
+ src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
+ }
+}
+
+static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
+{
+ struct net_bridge_mdb_entry *star_mp;
+ struct net_bridge_port_group *sg;
+ struct br_ip sg_ip;
+
+ if (src->flags & BR_SGRP_F_INSTALLED)
+ return;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = src->pg->key.addr;
+ sg_ip.src = src->addr.src;
+ sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
+ src->pg->eth_addr, MCAST_INCLUDE, false,
+ !timer_pending(&src->timer));
+ if (IS_ERR_OR_NULL(sg))
+ return;
+ src->flags |= BR_SGRP_F_INSTALLED;
+ sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
+
+ /* if it was added by user-space as perm, we can skip the next steps */
+ if (sg->rt_protocol != RTPROT_KERNEL &&
+ (sg->flags & MDB_PG_FLAGS_PERMANENT))
+ return;
+
+ /* the kernel is now responsible for removing this S,G */
+ del_timer(&sg->timer);
+ star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
+ if (!star_mp)
+ return;
+
+ br_multicast_sg_add_exclude_ports(star_mp, sg);
+}
+
+static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src)
+{
+ struct net_bridge_port_group *p, *pg = src->pg;
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_mdb_entry *mp;
+ struct br_ip sg_ip;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = pg->key.addr;
+ sg_ip.src = src->addr.src;
+
+ mp = br_mdb_ip_get(src->br, &sg_ip);
+ if (!mp)
+ return;
+
+ for (pp = &mp->ports;
+ (p = mlock_dereference(*pp, src->br)) != NULL;
+ pp = &p->next) {
+ if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
+ continue;
+
+ if (p->rt_protocol != RTPROT_KERNEL &&
+ (p->flags & MDB_PG_FLAGS_PERMANENT))
+ break;
+
+ br_multicast_del_pg(mp, p, pp);
+ break;
+ }
+ src->flags &= ~BR_SGRP_F_INSTALLED;
+}
+
+/* install the S,G and, based on src's timer, enable or disable forwarding */
+static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge_port_group *sg;
+ u8 old_flags;
+
+ br_multicast_fwd_src_add(src);
+
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.addr = src->pg->key.addr;
+ sg_key.addr.src = src->addr.src;
+ sg_key.port = src->pg->key.port;
+
+ sg = br_sg_port_find(src->br, &sg_key);
+ if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
+ return;
+
+ old_flags = sg->flags;
+ if (timer_pending(&src->timer))
+ sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
+ else
+ sg->flags |= MDB_PG_FLAGS_BLOCKED;
+
+ if (old_flags != sg->flags) {
+ struct net_bridge_mdb_entry *sg_mp;
+
+ sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
+ if (!sg_mp)
+ return;
+ br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
+ }
+}
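
The pending/expired source timer maps directly onto the BLOCKED flag of the matching S,G entry: a live timer unblocks forwarding, an expired one blocks it without deleting the entry. A minimal model of that mapping (standalone C; the flag value mirrors MDB_PG_FLAGS_BLOCKED):

    #include <stdbool.h>
    #include <stdio.h>

    #define PG_FLAGS_BLOCKED 0x10 /* mirrors MDB_PG_FLAGS_BLOCKED, BIT(4) */

    static unsigned char sync_blocked(unsigned char flags, bool timer_pending)
    {
            /* a live source timer unblocks the S,G; an expired one blocks it */
            if (timer_pending)
                    flags &= ~PG_FLAGS_BLOCKED;
            else
                    flags |= PG_FLAGS_BLOCKED;
            return flags;
    }

    int main(void)
    {
            printf("expired: %#x\n", sync_blocked(0, false));   /* 0x10 */
            printf("pending: %#x\n", sync_blocked(0x10, true)); /* 0 */
            return 0;
    }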
+
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
struct net_bridge_mdb_entry *mp;
@@ -169,7 +534,8 @@ static void br_multicast_group_expired(struct timer_list *t)
struct net_bridge *br = mp->br;
spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || timer_pending(&mp->timer))
+ if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
+ timer_pending(&mp->timer))
goto out;
br_multicast_host_leave(mp, true);
@@ -194,8 +560,9 @@ static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
- struct net_bridge *br = src->pg->port->br;
+ struct net_bridge *br = src->pg->key.port->br;
+ br_multicast_fwd_src_remove(src);
hlist_del_init_rcu(&src->node);
src->pg->src_ents--;
hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
@@ -219,15 +586,21 @@ void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *pg,
struct net_bridge_port_group __rcu **pp)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
struct net_bridge_group_src *ent;
struct hlist_node *tmp;
+ rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
+ br_sg_port_rht_params);
rcu_assign_pointer(*pp, pg->next);
hlist_del_init(&pg->mglist);
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
br_multicast_del_group_src(ent);
br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
+ if (!br_multicast_is_star_g(&mp->addr))
+ br_multicast_sg_del_exclude_ports(mp);
+ else
+ br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
queue_work(system_long_wq, &br->mcast_gc_work);
@@ -242,7 +615,7 @@ static void br_multicast_find_del_pg(struct net_bridge *br,
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
- mp = br_mdb_ip_get(br, &pg->addr);
+ mp = br_mdb_ip_get(br, &pg->key.addr);
if (WARN_ON(!mp))
return;
@@ -263,7 +636,7 @@ static void br_multicast_port_group_expired(struct timer_list *t)
{
struct net_bridge_port_group *pg = from_timer(pg, t, timer);
struct net_bridge_group_src *src_ent;
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
struct hlist_node *tmp;
bool changed;
@@ -284,7 +657,10 @@ static void br_multicast_port_group_expired(struct timer_list *t)
if (hlist_empty(&pg->src_list)) {
br_multicast_find_del_pg(br, pg);
} else if (changed) {
- struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->addr);
+ struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
+
+ if (changed && br_multicast_is_star_g(&pg->key.addr))
+ br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
if (WARN_ON(!mp))
goto out;
@@ -312,7 +688,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
u8 sflag, u8 *igmp_type,
bool *need_rexmit)
{
- struct net_bridge_port *p = pg ? pg->port : NULL;
+ struct net_bridge_port *p = pg ? pg->key.port : NULL;
struct net_bridge_group_src *ent;
size_t pkt_size, igmp_hdr_size;
unsigned long now = jiffies;
@@ -423,7 +799,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
if (over_lmqt == time_after(ent->timer.expires,
lmqt) &&
ent->src_query_rexmit_cnt > 0) {
- ihv3->srcs[lmqt_srcs++] = ent->addr.u.ip4;
+ ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
ent->src_query_rexmit_cnt--;
if (need_rexmit && ent->src_query_rexmit_cnt)
*need_rexmit = true;
@@ -458,7 +834,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
u8 sflag, u8 *igmp_type,
bool *need_rexmit)
{
- struct net_bridge_port *p = pg ? pg->port : NULL;
+ struct net_bridge_port *p = pg ? pg->key.port : NULL;
struct net_bridge_group_src *ent;
size_t pkt_size, mld_hdr_size;
unsigned long now = jiffies;
@@ -584,7 +960,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
if (over_llqt == time_after(ent->timer.expires,
llqt) &&
ent->src_query_rexmit_cnt > 0) {
- mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.u.ip6;
+ mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
ent->src_query_rexmit_cnt--;
if (need_rexmit && ent->src_query_rexmit_cnt)
*need_rexmit = true;
@@ -625,9 +1001,9 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
switch (group->proto) {
case htons(ETH_P_IP):
- ip4_dst = ip_dst ? ip_dst->u.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
+ ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
return br_ip4_multicast_alloc_query(br, pg,
- ip4_dst, group->u.ip4,
+ ip4_dst, group->dst.ip4,
with_srcs, over_lmqt,
sflag, igmp_type,
need_rexmit);
@@ -636,13 +1012,13 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
struct in6_addr ip6_dst;
if (ip_dst)
- ip6_dst = ip_dst->u.ip6;
+ ip6_dst = ip_dst->dst.ip6;
else
ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
htonl(1));
return br_ip6_multicast_alloc_query(br, pg,
- &ip6_dst, &group->u.ip6,
+ &ip6_dst, &group->dst.ip6,
with_srcs, over_lmqt,
sflag, igmp_type,
need_rexmit);
@@ -704,7 +1080,10 @@ static void br_multicast_group_src_expired(struct timer_list *t)
if (!hlist_empty(&pg->src_list))
goto out;
br_multicast_find_del_pg(br, pg);
+ } else {
+ br_multicast_fwd_src_handle(src);
}
+
out:
spin_unlock(&br->multicast_lock);
}
@@ -717,13 +1096,13 @@ br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
switch (ip->proto) {
case htons(ETH_P_IP):
hlist_for_each_entry(ent, &pg->src_list, node)
- if (ip->u.ip4 == ent->addr.u.ip4)
+ if (ip->src.ip4 == ent->addr.src.ip4)
return ent;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
hlist_for_each_entry(ent, &pg->src_list, node)
- if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
+ if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
return ent;
break;
#endif
@@ -742,14 +1121,14 @@ br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_i
switch (src_ip->proto) {
case htons(ETH_P_IP):
- if (ipv4_is_zeronet(src_ip->u.ip4) ||
- ipv4_is_multicast(src_ip->u.ip4))
+ if (ipv4_is_zeronet(src_ip->src.ip4) ||
+ ipv4_is_multicast(src_ip->src.ip4))
return NULL;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- if (ipv6_addr_any(&src_ip->u.ip6) ||
- ipv6_addr_is_multicast(&src_ip->u.ip6))
+ if (ipv6_addr_any(&src_ip->src.ip6) ||
+ ipv6_addr_is_multicast(&src_ip->src.ip6))
return NULL;
break;
#endif
@@ -760,7 +1139,7 @@ br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_i
return NULL;
grp_src->pg = pg;
- grp_src->br = pg->port->br;
+ grp_src->br = pg->key.port->br;
grp_src->addr = *src_ip;
grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
@@ -777,7 +1156,8 @@ struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port_group __rcu *next,
unsigned char flags,
const unsigned char *src,
- u8 filter_mode)
+ u8 filter_mode,
+ u8 rt_protocol)
{
struct net_bridge_port_group *p;
@@ -785,12 +1165,21 @@ struct net_bridge_port_group *br_multicast_new_port_group(
if (unlikely(!p))
return NULL;
- p->addr = *group;
- p->port = port;
+ p->key.addr = *group;
+ p->key.port = port;
p->flags = flags;
p->filter_mode = filter_mode;
+ p->rt_protocol = rt_protocol;
p->mcast_gc.destroy = br_multicast_destroy_port_group;
INIT_HLIST_HEAD(&p->src_list);
+
+ if (!br_multicast_is_star_g(group) &&
+ rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
+ br_sg_port_rht_params)) {
+ kfree(p);
+ return NULL;
+ }
+
rcu_assign_pointer(p->next, next);
timer_setup(&p->timer, br_multicast_port_group_expired, 0);
timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
@@ -804,23 +1193,12 @@ struct net_bridge_port_group *br_multicast_new_port_group(
return p;
}
-static bool br_port_group_equal(struct net_bridge_port_group *p,
- struct net_bridge_port *port,
- const unsigned char *src)
-{
- if (p->port != port)
- return false;
-
- if (!(port->flags & BR_MULTICAST_TO_UNICAST))
- return true;
-
- return ether_addr_equal(src, p->eth_addr);
-}
-
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
if (!mp->host_joined) {
mp->host_joined = true;
+ if (br_multicast_is_star_g(&mp->addr))
+ br_multicast_star_g_host_state(mp);
if (notify)
br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
}
@@ -833,32 +1211,33 @@ void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
return;
mp->host_joined = false;
+ if (br_multicast_is_star_g(&mp->addr))
+ br_multicast_star_g_host_state(mp);
if (notify)
br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
-static int br_multicast_add_group(struct net_bridge *br,
- struct net_bridge_port *port,
- struct br_ip *group,
- const unsigned char *src,
- u8 filter_mode,
- bool igmpv2_mldv1)
+static struct net_bridge_port_group *
+__br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *group,
+ const unsigned char *src,
+ u8 filter_mode,
+ bool igmpv2_mldv1,
+ bool blocked)
{
struct net_bridge_port_group __rcu **pp;
- struct net_bridge_port_group *p;
+ struct net_bridge_port_group *p = NULL;
struct net_bridge_mdb_entry *mp;
unsigned long now = jiffies;
- int err;
- spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
mp = br_multicast_new_group(br, group);
- err = PTR_ERR(mp);
if (IS_ERR(mp))
- goto err;
+ return ERR_PTR(PTR_ERR(mp));
if (!port) {
br_multicast_host_join(mp, true);
@@ -870,14 +1249,19 @@ static int br_multicast_add_group(struct net_bridge *br,
pp = &p->next) {
if (br_port_group_equal(p, port, src))
goto found;
- if ((unsigned long)p->port < (unsigned long)port)
+ if ((unsigned long)p->key.port < (unsigned long)port)
break;
}
- p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode);
- if (unlikely(!p))
- goto err;
+ p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode,
+ RTPROT_KERNEL);
+ if (unlikely(!p)) {
+ p = ERR_PTR(-ENOMEM);
+ goto out;
+ }
rcu_assign_pointer(*pp, p);
+ if (blocked)
+ p->flags |= MDB_PG_FLAGS_BLOCKED;
br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
found:
@@ -885,10 +1269,26 @@ found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
- err = 0;
+ return p;
+}
-err:
+static int br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *group,
+ const unsigned char *src,
+ u8 filter_mode,
+ bool igmpv2_mldv1)
+{
+ struct net_bridge_port_group *pg;
+ int err;
+
+ spin_lock(&br->multicast_lock);
+ pg = __br_multicast_add_group(br, port, group, src, filter_mode,
+ igmpv2_mldv1, false);
+ /* NULL is considered valid for host joined groups */
+ err = IS_ERR(pg) ? PTR_ERR(pg) : 0;
spin_unlock(&br->multicast_lock);
+
return err;
}
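
__br_multicast_add_group() now returns a pointer that can be a real port group, NULL (valid for host joins), or an ERR_PTR-encoded errno, and this wrapper folds it back into an int. A freestanding sketch of the ERR_PTR convention (simplified re-implementations of the kernel macros; assumes sizeof(long) == sizeof(void *)):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* simplified forms of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers */
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *pg = ERR_PTR(-12); /* -ENOMEM */
            int err;

            /* NULL is not an error here: it means a host join created no
             * port group, so only real ERR_PTR values become an errno
             */
            err = IS_ERR(pg) ? (int)PTR_ERR(pg) : 0;
            printf("err = %d\n", err);
            return 0;
    }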
@@ -906,7 +1306,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
return 0;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip4 = group;
+ br_group.dst.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
@@ -930,7 +1330,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
return 0;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip6 = *group;
+ br_group.dst.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
@@ -1019,10 +1419,10 @@ static void br_multicast_select_own_querier(struct net_bridge *br,
struct sk_buff *skb)
{
if (ip->proto == htons(ETH_P_IP))
- br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
+ br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
else
- br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
+ br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
@@ -1079,7 +1479,7 @@ static void br_multicast_send_query(struct net_bridge *br,
!br_opt_get(br, BROPT_MULTICAST_QUERIER))
return;
- memset(&br_group.u, 0, sizeof(br_group.u));
+ memset(&br_group.dst, 0, sizeof(br_group.dst));
if (port ? (own_query == &port->ip4_own_query) :
(own_query == &br->ip4_own_query)) {
@@ -1145,7 +1545,7 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
{
struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
struct bridge_mcast_other_query *other_query = NULL;
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
bool need_rexmit = false;
spin_lock(&br->multicast_lock);
@@ -1154,7 +1554,7 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
!br_opt_get(br, BROPT_MULTICAST_QUERIER))
goto out;
- if (pg->addr.proto == htons(ETH_P_IP))
+ if (pg->key.addr.proto == htons(ETH_P_IP))
other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
else
@@ -1166,11 +1566,11 @@ static void br_multicast_port_group_rexmit(struct timer_list *t)
if (pg->grp_query_rexmit_cnt) {
pg->grp_query_rexmit_cnt--;
- __br_multicast_send_query(br, pg->port, pg, &pg->addr,
- &pg->addr, false, 1, NULL);
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, false, 1, NULL);
}
- __br_multicast_send_query(br, pg->port, pg, &pg->addr,
- &pg->addr, true, 0, &need_rexmit);
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, true, 0, &need_rexmit);
if (pg->grp_query_rexmit_cnt || need_rexmit)
mod_timer(&pg->rexmit_timer, jiffies +
@@ -1301,10 +1701,17 @@ static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
return deleted;
}
+static void __grp_src_mod_timer(struct net_bridge_group_src *src,
+ unsigned long expires)
+{
+ mod_timer(&src->timer, expires);
+ br_multicast_fwd_src_handle(src);
+}
+
static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
{
struct bridge_mcast_other_query *other_query = NULL;
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
u32 lmqc = br->multicast_last_member_count;
unsigned long lmqt, lmi, now = jiffies;
struct net_bridge_group_src *ent;
@@ -1313,7 +1720,7 @@ static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return;
- if (pg->addr.proto == htons(ETH_P_IP))
+ if (pg->key.addr.proto == htons(ETH_P_IP))
other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
else
@@ -1329,7 +1736,7 @@ static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
other_query &&
!timer_pending(&other_query->timer))
ent->src_query_rexmit_cnt = lmqc;
- mod_timer(&ent->timer, lmqt);
+ __grp_src_mod_timer(ent, lmqt);
}
}
}
@@ -1338,8 +1745,8 @@ static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
!other_query || timer_pending(&other_query->timer))
return;
- __br_multicast_send_query(br, pg->port, pg, &pg->addr,
- &pg->addr, true, 1, NULL);
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, true, 1, NULL);
lmi = now + br->multicast_last_member_interval;
if (!timer_pending(&pg->rexmit_timer) ||
@@ -1350,14 +1757,14 @@ static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
{
struct bridge_mcast_other_query *other_query = NULL;
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
unsigned long now = jiffies, lmi;
if (!netif_running(br->dev) ||
!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return;
- if (pg->addr.proto == htons(ETH_P_IP))
+ if (pg->key.addr.proto == htons(ETH_P_IP))
other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
else
@@ -1368,8 +1775,8 @@ static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
other_query && !timer_pending(&other_query->timer)) {
lmi = now + br->multicast_last_member_interval;
pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
- __br_multicast_send_query(br, pg->port, pg, &pg->addr,
- &pg->addr, false, 0, NULL);
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, false, 0, NULL);
if (!timer_pending(&pg->rexmit_timer) ||
time_after(pg->rexmit_timer.expires, lmi))
mod_timer(&pg->rexmit_timer, lmi);
@@ -1389,7 +1796,7 @@ static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
void *srcs, u32 nsrcs, size_t src_size)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
bool changed = false;
@@ -1397,9 +1804,9 @@ static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
u32 src_idx;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (!ent) {
ent = br_multicast_new_group_src(pg, &src_ip);
@@ -1408,7 +1815,7 @@ static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
}
if (ent)
- mod_timer(&ent->timer, now + br_multicast_gmi(br));
+ __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
srcs += src_size;
}
@@ -1431,14 +1838,16 @@ static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
ent->flags |= BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent)
ent->flags &= ~BR_SGRP_F_DELETE;
else
- br_multicast_new_group_src(pg, &src_ip);
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent)
+ br_multicast_fwd_src_handle(ent);
srcs += src_size;
}
@@ -1454,7 +1863,7 @@ static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
void *srcs, u32 nsrcs, size_t src_size)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
bool changed = false;
@@ -1465,17 +1874,17 @@ static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
ent->flags |= BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags &= ~BR_SGRP_F_DELETE;
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent) {
- mod_timer(&ent->timer,
- now + br_multicast_gmi(br));
+ __grp_src_mod_timer(ent,
+ now + br_multicast_gmi(br));
changed = true;
}
}
@@ -1491,12 +1900,13 @@ static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
static bool br_multicast_isexc(struct net_bridge_port_group *pg,
void *srcs, u32 nsrcs, size_t src_size)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
bool changed = false;
switch (pg->filter_mode) {
case MCAST_INCLUDE:
__grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
+ br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
changed = true;
break;
case MCAST_EXCLUDE:
@@ -1517,7 +1927,7 @@ static bool br_multicast_isexc(struct net_bridge_port_group *pg,
static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
void *srcs, u32 nsrcs, size_t src_size)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
u32 src_idx, to_send = pg->src_ents;
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
@@ -1528,9 +1938,9 @@ static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
ent->flags |= BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags &= ~BR_SGRP_F_SEND;
@@ -1541,7 +1951,7 @@ static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
changed = true;
}
if (ent)
- mod_timer(&ent->timer, now + br_multicast_gmi(br));
+ __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
srcs += src_size;
}
@@ -1559,7 +1969,7 @@ static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
void *srcs, u32 nsrcs, size_t src_size)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
u32 src_idx, to_send = pg->src_ents;
struct net_bridge_group_src *ent;
unsigned long now = jiffies;
@@ -1571,9 +1981,9 @@ static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
ent->flags |= BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
if (timer_pending(&ent->timer)) {
@@ -1586,7 +1996,7 @@ static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
changed = true;
}
if (ent)
- mod_timer(&ent->timer, now + br_multicast_gmi(br));
+ __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
srcs += src_size;
}
@@ -1632,17 +2042,19 @@ static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
BR_SGRP_F_SEND;
to_send++;
} else {
- br_multicast_new_group_src(pg, &src_ip);
+ ent = br_multicast_new_group_src(pg, &src_ip);
}
+ if (ent)
+ br_multicast_fwd_src_handle(ent);
srcs += src_size;
}
@@ -1670,16 +2082,16 @@ static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags &= ~BR_SGRP_F_DELETE;
} else {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent) {
- mod_timer(&ent->timer, pg->timer.expires);
+ __grp_src_mod_timer(ent, pg->timer.expires);
changed = true;
}
}
@@ -1701,16 +2113,17 @@ static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
static bool br_multicast_toex(struct net_bridge_port_group *pg,
void *srcs, u32 nsrcs, size_t src_size)
{
- struct net_bridge *br = pg->port->br;
+ struct net_bridge *br = pg->key.port->br;
bool changed = false;
switch (pg->filter_mode) {
case MCAST_INCLUDE:
__grp_src_toex_incl(pg, srcs, nsrcs, src_size);
+ br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
changed = true;
break;
case MCAST_EXCLUDE:
- __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
+ changed = __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
break;
}
@@ -1734,9 +2147,9 @@ static void __grp_src_block_incl(struct net_bridge_port_group *pg,
ent->flags &= ~BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (ent) {
ent->flags |= BR_SGRP_F_SEND;
@@ -1749,7 +2162,7 @@ static void __grp_src_block_incl(struct net_bridge_port_group *pg,
__grp_src_query_marked_and_rexmit(pg);
if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
- br_multicast_find_del_pg(pg->port->br, pg);
+ br_multicast_find_del_pg(pg->key.port->br, pg);
}
/* State Msg type New state Actions
@@ -1768,14 +2181,14 @@ static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
ent->flags &= ~BR_SGRP_F_SEND;
memset(&src_ip, 0, sizeof(src_ip));
- src_ip.proto = pg->addr.proto;
+ src_ip.proto = pg->key.addr.proto;
for (src_idx = 0; src_idx < nsrcs; src_idx++) {
- memcpy(&src_ip.u, srcs, src_size);
+ memcpy(&src_ip.src, srcs, src_size);
ent = br_multicast_find_group_src(pg, &src_ip);
if (!ent) {
ent = br_multicast_new_group_src(pg, &src_ip);
if (ent) {
- mod_timer(&ent->timer, pg->timer.expires);
+ __grp_src_mod_timer(ent, pg->timer.expires);
changed = true;
}
}
@@ -2071,16 +2484,16 @@ static bool br_ip4_multicast_select_querier(struct net_bridge *br,
!timer_pending(&br->ip4_other_query.timer))
goto update;
- if (!br->ip4_querier.addr.u.ip4)
+ if (!br->ip4_querier.addr.src.ip4)
goto update;
- if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
+ if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
goto update;
return false;
update:
- br->ip4_querier.addr.u.ip4 = saddr;
+ br->ip4_querier.addr.src.ip4 = saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip4_querier.port, port);
@@ -2097,13 +2510,13 @@ static bool br_ip6_multicast_select_querier(struct net_bridge *br,
!timer_pending(&br->ip6_other_query.timer))
goto update;
- if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
+ if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
goto update;
return false;
update:
- br->ip6_querier.addr.u.ip6 = *saddr;
+ br->ip6_querier.addr.src.ip6 = *saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip6_querier.port, port);
@@ -2118,10 +2531,10 @@ static bool br_multicast_select_querier(struct net_bridge *br,
{
switch (saddr->proto) {
case htons(ETH_P_IP):
- return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
+ return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
+ return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
#endif
}
@@ -2263,7 +2676,7 @@ static void br_ip4_multicast_query(struct net_bridge *br,
if (!group) {
saddr.proto = htons(ETH_P_IP);
- saddr.u.ip4 = iph->saddr;
+ saddr.src.ip4 = iph->saddr;
br_multicast_query_received(br, port, &br->ip4_other_query,
&saddr, max_delay);
@@ -2351,7 +2764,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
- saddr.u.ip6 = ipv6_hdr(skb)->saddr;
+ saddr.src.ip6 = ipv6_hdr(skb)->saddr;
br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay);
@@ -2475,7 +2888,7 @@ br_multicast_leave_group(struct net_bridge *br,
for (p = mlock_dereference(mp->ports, br);
p != NULL;
p = mlock_dereference(p->next, br)) {
- if (p->port != port)
+ if (p->key.port != port)
continue;
if (!hlist_unhashed(&p->mglist) &&
@@ -2506,7 +2919,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip4 = group;
+ br_group.dst.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
@@ -2530,7 +2943,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip6 = *group;
+ br_group.dst.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
@@ -3235,7 +3648,7 @@ int br_multicast_list_adjacent(struct net_device *dev,
if (!entry)
goto unlock;
- entry->addr = group->addr;
+ entry->addr = group->key.addr;
list_add(&entry->list, br_ip_list);
count++;
}
@@ -3492,10 +3905,23 @@ void br_multicast_get_stats(const struct net_bridge *br,
int br_mdb_hash_init(struct net_bridge *br)
{
- return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
+ int err;
+
+ err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
+ if (err) {
+ rhashtable_destroy(&br->sg_port_tbl);
+ return err;
+ }
+
+ return 0;
}
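
With two tables, init must unwind the first if the second fails, and fini tears both down. The same init/unwind ordering in a freestanding sketch (hypothetical resource names):

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static int init_b(void) { return -1; /* simulate failure */ }
    static void destroy_a(void) { puts("destroy a"); }

    static int init_both(void)
    {
            int err = init_a();

            if (err)
                    return err;

            err = init_b();
            if (err) {
                    /* unwind the part that already succeeded */
                    destroy_a();
                    return err;
            }
            return 0;
    }

    int main(void)
    {
            printf("init_both() = %d\n", init_both());
            return 0;
    }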
void br_mdb_hash_fini(struct net_bridge *br)
{
+ rhashtable_destroy(&br->sg_port_tbl);
rhashtable_destroy(&br->mdb_hash_tbl);
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index a23d2bae56e1..345118e35c42 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -213,11 +213,14 @@ struct net_bridge_fdb_entry {
#define MDB_PG_FLAGS_PERMANENT BIT(0)
#define MDB_PG_FLAGS_OFFLOAD BIT(1)
#define MDB_PG_FLAGS_FAST_LEAVE BIT(2)
+#define MDB_PG_FLAGS_STAR_EXCL BIT(3)
+#define MDB_PG_FLAGS_BLOCKED BIT(4)
#define PG_SRC_ENT_LIMIT 32
#define BR_SGRP_F_DELETE BIT(0)
#define BR_SGRP_F_SEND BIT(1)
+#define BR_SGRP_F_INSTALLED BIT(2)
struct net_bridge_mcast_gc {
struct hlist_node gc_node;
@@ -238,14 +241,19 @@ struct net_bridge_group_src {
struct rcu_head rcu;
};
-struct net_bridge_port_group {
+struct net_bridge_port_group_sg_key {
struct net_bridge_port *port;
- struct net_bridge_port_group __rcu *next;
struct br_ip addr;
+};
+
+struct net_bridge_port_group {
+ struct net_bridge_port_group __rcu *next;
+ struct net_bridge_port_group_sg_key key;
unsigned char eth_addr[ETH_ALEN] __aligned(2);
unsigned char flags;
unsigned char filter_mode;
unsigned char grp_query_rexmit_cnt;
+ unsigned char rt_protocol;
struct hlist_head src_list;
unsigned int src_ents;
@@ -253,6 +261,7 @@ struct net_bridge_port_group {
struct timer_list rexmit_timer;
struct hlist_node mglist;
+ struct rhash_head rhnode;
struct net_bridge_mcast_gc mcast_gc;
struct rcu_head rcu;
};
@@ -440,6 +449,7 @@ struct net_bridge {
unsigned long multicast_startup_query_interval;
struct rhashtable mdb_hash_tbl;
+ struct rhashtable sg_port_tbl;
struct hlist_head mcast_gc_list;
struct hlist_head mdb_list;
@@ -804,7 +814,7 @@ struct net_bridge_port_group *
br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char flags, const unsigned char *src,
- u8 filter_mode);
+ u8 filter_mode, u8 rt_protocol);
int br_mdb_hash_init(struct net_bridge *br);
void br_mdb_hash_fini(struct net_bridge *br);
void br_mdb_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp,
@@ -825,6 +835,10 @@ void br_mdb_init(void);
void br_mdb_uninit(void);
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify);
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify);
+void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
+ u8 filter_mode);
+void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
+ struct net_bridge_port_group *sg);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -873,6 +887,35 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
}
}
+static inline bool br_multicast_is_star_g(const struct br_ip *ip)
+{
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ return ipv4_is_zeronet(ip->src.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ return ipv6_addr_any(&ip->src.ip6);
+#endif
+ default:
+ return false;
+ }
+}
+
+static inline bool br_multicast_should_handle_mode(const struct net_bridge *br,
+ __be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ return !!(br->multicast_igmp_version == 3);
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ return !!(br->multicast_mld_version == 2);
+#endif
+ default:
+ return false;
+ }
+}
+
static inline int br_multicast_igmp_type(const struct sk_buff *skb)
{
return BR_INPUT_SKB_CB(skb)->igmp;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index d2b8737f9fc0..002bbc93209d 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -140,7 +140,7 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
return err == -EOPNOTSUPP ? 0 : err;
}
-/* Returns a master vlan, if it didn't exist it gets created. In all cases a
+/* Returns a master vlan; if it didn't exist it gets created. In all cases
* a reference is taken to the master vlan before returning.
*/
static struct net_bridge_vlan *
@@ -1288,11 +1288,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
}
}
-static int __br_vlan_get_pvid(const struct net_device *dev,
- struct net_bridge_port *p, u16 *p_pvid)
+int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
struct net_bridge_vlan_group *vg;
+ struct net_bridge_port *p;
+ ASSERT_RTNL();
+ p = br_port_get_check_rtnl(dev);
if (p)
vg = nbp_vlan_group(p);
else if (netif_is_bridge_master(dev))
@@ -1303,18 +1305,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev,
*p_pvid = br_get_pvid(vg);
return 0;
}
-
-int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
-{
- ASSERT_RTNL();
-
- return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
-}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
- return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
+ struct net_bridge_vlan_group *vg;
+ struct net_bridge_port *p;
+
+ p = br_port_get_check_rcu(dev);
+ if (p)
+ vg = nbp_vlan_group_rcu(p);
+ else if (netif_is_bridge_master(dev))
+ vg = br_vlan_group_rcu(netdev_priv(dev));
+ else
+ return -EINVAL;
+
+ *p_pvid = br_get_pvid(vg);
+ return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 5c06404bdf3e..ea29a6d97ef5 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -358,7 +358,7 @@ static unsigned int effhash(canid_t can_id)
*
* Return:
* Pointer to optimal filterlist for the given can_id/mask pair.
- * Constistency checked mask.
+ * Consistency checked mask.
* Reduced can_id to have a preprocessed filter compare value.
*/
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
@@ -411,7 +411,7 @@ static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
/**
* can_rx_register - subscribe CAN frames from a specific interface
* @net: the applicable net namespace
- * @dev: pointer to netdevice (NULL => subcribe from 'all' CAN devices list)
+ * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
* @can_id: CAN identifier (see description)
* @mask: CAN mask (see description)
* @func: callback function on filter match
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d14ea12affb1..4253915800e6 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
*
diff --git a/net/can/gw.c b/net/can/gw.c
index 65d60c93af29..49b4e3d91ad6 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
*
* Copyright (c) 2019 Volkswagen Group Electronic Research
diff --git a/net/can/proc.c b/net/can/proc.c
index e6881bfc3ed1..a4eb06c9eb70 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* proc.c - procfs support for Protocol family CAN core module
*
diff --git a/net/can/raw.c b/net/can/raw.c
index 94a9405658dc..24db4b4afdc7 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
*
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
@@ -154,16 +154,16 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (!skb)
return;
- /* Put the datagram to the queue so that raw_recvmsg() can
- * get it from there. We need to pass the interface index to
- * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
- * containing the interface index.
+ /* Put the datagram to the queue so that raw_recvmsg() can get
+ * it from there. We need to pass the interface index to
+ * raw_recvmsg(). We pass a whole struct sockaddr_can in
+ * skb->cb containing the interface index.
*/
sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
- addr->can_family = AF_CAN;
+ addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
/* add CAN specific message flags for raw_recvmsg() */
@@ -290,8 +290,8 @@ static int raw_notifier(struct notifier_block *nb,
kfree(ro->filter);
ro->ifindex = 0;
- ro->bound = 0;
- ro->count = 0;
+ ro->bound = 0;
+ ro->count = 0;
release_sock(sk);
sk->sk_err = ENODEV;
@@ -374,8 +374,8 @@ static int raw_release(struct socket *sock)
kfree(ro->filter);
ro->ifindex = 0;
- ro->bound = 0;
- ro->count = 0;
+ ro->bound = 0;
+ ro->count = 0;
free_percpu(ro->uniq);
sock_orphan(sk);
@@ -773,7 +773,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
skb_setup_tx_timestamp(skb, sk->sk_tsflags);
skb->dev = dev;
- skb->sk = sk;
+ skb->sk = sk;
skb->priority = sk->sk_priority;
err = can_send(skb, ro->loopback);
@@ -801,8 +801,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int err = 0;
int noblock;
- noblock = flags & MSG_DONTWAIT;
- flags &= ~MSG_DONTWAIT;
+ noblock = flags & MSG_DONTWAIT;
+ flags &= ~MSG_DONTWAIT;
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index a0d1a3265b71..838efc682cff 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -12,7 +12,6 @@
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
-#include <linux/btf_ids.h>
DEFINE_BPF_STORAGE_CACHE(sk_cache);
@@ -379,19 +378,15 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg2_type = ARG_PTR_TO_SOCKET,
};
-BTF_ID_LIST(sk_storage_btf_ids)
-BTF_ID_UNUSED
-BTF_ID(struct, sock)
-
const struct bpf_func_proto sk_storage_get_btf_proto = {
.func = bpf_sk_storage_get,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
- .btf_id = sk_storage_btf_ids,
};
const struct bpf_func_proto sk_storage_delete_btf_proto = {
@@ -400,7 +395,7 @@ const struct bpf_func_proto sk_storage_delete_btf_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
- .btf_id = sk_storage_btf_ids,
+ .arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
};
struct bpf_sk_storage_diag {
@@ -679,6 +674,7 @@ struct bpf_iter_seq_sk_storage_map_info {
static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
struct bpf_local_storage_elem *prev_selem)
+ __acquires(RCU) __releases(RCU)
{
struct bpf_local_storage *sk_storage;
struct bpf_local_storage_elem *selem;
@@ -697,16 +693,16 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
selem = prev_selem;
count = 0;
while (selem) {
- selem = hlist_entry_safe(selem->map_node.next,
+ selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
struct bpf_local_storage_elem, map_node);
if (!selem) {
/* not found, unlock and go to the next bucket */
b = &smap->buckets[bucket_id++];
- raw_spin_unlock_bh(&b->lock);
+ rcu_read_unlock();
skip_elems = 0;
break;
}
- sk_storage = rcu_dereference_raw(selem->local_storage);
+ sk_storage = rcu_dereference(selem->local_storage);
if (sk_storage) {
info->skip_elems = skip_elems + count;
return selem;
@@ -716,10 +712,10 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
b = &smap->buckets[i];
- raw_spin_lock_bh(&b->lock);
+ rcu_read_lock();
count = 0;
- hlist_for_each_entry(selem, &b->list, map_node) {
- sk_storage = rcu_dereference_raw(selem->local_storage);
+ hlist_for_each_entry_rcu(selem, &b->list, map_node) {
+ sk_storage = rcu_dereference(selem->local_storage);
if (sk_storage && count >= skip_elems) {
info->bucket_id = i;
info->skip_elems = count;
@@ -727,7 +723,7 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
}
count++;
}
- raw_spin_unlock_bh(&b->lock);
+ rcu_read_unlock();
skip_elems = 0;
}
@@ -786,7 +782,7 @@ static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
ctx.meta = &meta;
ctx.map = info->map;
if (selem) {
- sk_storage = rcu_dereference_raw(selem->local_storage);
+ sk_storage = rcu_dereference(selem->local_storage);
ctx.sk = sk_storage->owner;
ctx.value = SDATA(selem)->data;
}
@@ -802,18 +798,12 @@ static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
}
static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU)
{
- struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
- struct bpf_local_storage_map *smap;
- struct bpf_local_storage_map_bucket *b;
-
- if (!v) {
+ if (!v)
(void)__bpf_sk_storage_map_seq_show(seq, v);
- } else {
- smap = (struct bpf_local_storage_map *)info->map;
- b = &smap->buckets[info->bucket_id];
- raw_spin_unlock_bh(&b->lock);
- }
+ else
+ rcu_read_unlock();
}
static int bpf_iter_init_sk_storage_map(void *priv_data,
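Editor's note: the hunks above trade the per-bucket raw spinlock for plain RCU read-side protection while the seq iterator walks the storage buckets. A hedged sketch of the underlying pattern ("struct item" and process() are placeholders, not kernel APIs):

struct item {
	struct hlist_node node;
};

static void walk_bucket_rcu(struct hlist_head *head)
{
	struct item *it;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, head, node)
		process(it);	/* must tolerate concurrent removal */
	rcu_read_unlock();
}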
diff --git a/net/core/dev.c b/net/core/dev.c
index 03624192862a..873b50ac9668 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -98,6 +98,7 @@
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
+#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
@@ -1130,7 +1131,7 @@ EXPORT_SYMBOL(__dev_get_by_flags);
* @name: name string
*
* Network device names need to be valid file names to
- * to allow sysfs to work. We also disallow any kind of
+ * allow sysfs to work. We also disallow any kind of
* whitespace.
*/
bool dev_valid_name(const char *name)
@@ -5192,7 +5193,7 @@ skip_classify:
}
}
- if (unlikely(skb_vlan_tag_present(skb))) {
+ if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
check_vlan_id:
if (skb_vlan_tag_get_id(skb)) {
/* Vlan id is non 0 and vlan_do_receive() above couldn't
@@ -5441,15 +5442,20 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
if (new) {
u32 i;
+ mutex_lock(&new->aux->used_maps_mutex);
+
/* generic XDP does not work with DEVMAPs that can
* have a bpf_prog installed on an entry
*/
for (i = 0; i < new->aux->used_map_cnt; i++) {
- if (dev_map_can_have_prog(new->aux->used_maps[i]))
- return -EINVAL;
- if (cpu_map_prog_allowed(new->aux->used_maps[i]))
+ if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
+ cpu_map_prog_allowed(new->aux->used_maps[i])) {
+ mutex_unlock(&new->aux->used_maps_mutex);
return -EINVAL;
+ }
}
+
+ mutex_unlock(&new->aux->used_maps_mutex);
}
switch (xdp->command) {
@@ -5621,17 +5627,60 @@ static void flush_backlog(struct work_struct *work)
local_bh_enable();
}
+static bool flush_required(int cpu)
+{
+#if IS_ENABLED(CONFIG_RPS)
+ struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+ bool do_flush;
+
+ local_irq_disable();
+ rps_lock(sd);
+
+	/* since insertion into process_queue happens with the rps lock held,
+	 * process_queue access may race only with dequeue
+	 */
+ do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
+ !skb_queue_empty_lockless(&sd->process_queue);
+ rps_unlock(sd);
+ local_irq_enable();
+
+ return do_flush;
+#endif
+	/* without RPS we can't safely check input_pkt_queue: during a
+	 * concurrent remote skb_queue_splice() we may observe both
+	 * input_pkt_queue and process_queue as empty even though the
+	 * latter could end up containing a lot of packets.
+ */
+ return true;
+}
+
static void flush_all_backlogs(void)
{
+ static cpumask_t flush_cpus;
unsigned int cpu;
+	/* since we are under rtnl lock protection we can use static data
+	 * for the cpumask and avoid allocating the possibly large mask
+	 * on the stack
+ */
+ ASSERT_RTNL();
+
get_online_cpus();
- for_each_online_cpu(cpu)
- queue_work_on(cpu, system_highpri_wq,
- per_cpu_ptr(&flush_works, cpu));
+ cpumask_clear(&flush_cpus);
+ for_each_online_cpu(cpu) {
+ if (flush_required(cpu)) {
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&flush_works, cpu));
+ cpumask_set_cpu(cpu, &flush_cpus);
+ }
+ }
- for_each_online_cpu(cpu)
+	/* we can have in-flight packets on the CPUs we are not flushing;
+	 * synchronize_net() in rollback_registered_many() will take care
+	 * of them
+	 */
+ for_each_cpu(cpu, &flush_cpus)
flush_work(per_cpu_ptr(&flush_works, cpu));
put_online_cpus();
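Editor's note: because flush_cpus is function-static, correctness relies on every caller holding RTNL, which the new ASSERT_RTNL() documents. A sketch of the expected calling context (the surrounding teardown work is illustrative):

rtnl_lock();
/* ... unregister/teardown work ... */
flush_all_backlogs();	/* at most one instance at a time, by RTNL */
rtnl_unlock();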
@@ -8645,7 +8694,7 @@ int dev_get_port_parent_id(struct net_device *dev,
if (!first.id_len)
first = *ppid;
else if (memcmp(&first, ppid, sizeof(*ppid)))
- return -ENODATA;
+ return -EOPNOTSUPP;
}
return err;
@@ -9468,7 +9517,7 @@ int __netdev_update_features(struct net_device *dev)
/* driver might be less strict about feature dependencies */
features = netdev_fix_features(dev, features);
- /* some features can't be enabled if they're off an an upper device */
+ /* some features can't be enabled if they're off on an upper device */
netdev_for_each_upper_dev_rcu(dev, upper, iter)
features = netdev_sync_upper_features(dev, upper, features);
@@ -9972,6 +10021,8 @@ int netdev_refcnt_read(const struct net_device *dev)
}
EXPORT_SYMBOL(netdev_refcnt_read);
+#define WAIT_REFS_MIN_MSECS 1
+#define WAIT_REFS_MAX_MSECS 250
/**
* netdev_wait_allrefs - wait until all references are gone.
* @dev: target net_device
@@ -9987,7 +10038,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
static void netdev_wait_allrefs(struct net_device *dev)
{
unsigned long rebroadcast_time, warning_time;
- int refcnt;
+ int wait = 0, refcnt;
linkwatch_forget_dev(dev);
@@ -10021,7 +10072,13 @@ static void netdev_wait_allrefs(struct net_device *dev)
rebroadcast_time = jiffies;
}
- msleep(250);
+ if (!wait) {
+ rcu_barrier();
+ wait = WAIT_REFS_MIN_MSECS;
+ } else {
+ msleep(wait);
+ wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
+ }
refcnt = netdev_refcnt_read(dev);
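Editor's note: instead of a flat msleep(250) per pass, the loop above now tries an rcu_barrier() first and then backs off exponentially, 1ms, 2ms, 4ms, ... capped at 250ms. A standalone sketch of the progression, with busy() as a placeholder condition and min() being the kernel macro:

int wait = 0;

while (busy()) {
	if (!wait) {
		rcu_barrier();		/* often enough on its own */
		wait = 1;		/* WAIT_REFS_MIN_MSECS */
	} else {
		msleep(wait);
		wait = min(wait << 1, 250);	/* WAIT_REFS_MAX_MSECS */
	}
}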
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 19037f114307..ac32b672a04b 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -811,8 +811,6 @@ static int devlink_port_type_set(struct devlink *devlink,
int err;
if (devlink->ops->port_type_set) {
- if (port_type == DEVLINK_PORT_TYPE_NOTSET)
- return -EINVAL;
if (port_type == devlink_port->type)
return 0;
err = devlink->ops->port_type_set(devlink_port, port_type);
@@ -3022,9 +3020,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
static int devlink_nl_flash_update_fill(struct sk_buff *msg,
struct devlink *devlink,
enum devlink_command cmd,
- const char *status_msg,
- const char *component,
- unsigned long done, unsigned long total)
+ struct devlink_flash_notify *params)
{
void *hdr;
@@ -3038,19 +3034,22 @@ static int devlink_nl_flash_update_fill(struct sk_buff *msg,
if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
goto out;
- if (status_msg &&
+ if (params->status_msg &&
nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
- status_msg))
+ params->status_msg))
goto nla_put_failure;
- if (component &&
+ if (params->component &&
nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
- component))
+ params->component))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
- done, DEVLINK_ATTR_PAD))
+ params->done, DEVLINK_ATTR_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
- total, DEVLINK_ATTR_PAD))
+ params->total, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
+ params->timeout, DEVLINK_ATTR_PAD))
goto nla_put_failure;
out:
@@ -3064,10 +3063,7 @@ nla_put_failure:
static void __devlink_flash_update_notify(struct devlink *devlink,
enum devlink_command cmd,
- const char *status_msg,
- const char *component,
- unsigned long done,
- unsigned long total)
+ struct devlink_flash_notify *params)
{
struct sk_buff *msg;
int err;
@@ -3080,8 +3076,7 @@ static void __devlink_flash_update_notify(struct devlink *devlink,
if (!msg)
return;
- err = devlink_nl_flash_update_fill(msg, devlink, cmd, status_msg,
- component, done, total);
+ err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
if (err)
goto out_free_msg;
@@ -3095,17 +3090,21 @@ out_free_msg:
void devlink_flash_update_begin_notify(struct devlink *devlink)
{
+ struct devlink_flash_notify params = { 0 };
+
__devlink_flash_update_notify(devlink,
DEVLINK_CMD_FLASH_UPDATE,
- NULL, NULL, 0, 0);
+ &params);
}
EXPORT_SYMBOL_GPL(devlink_flash_update_begin_notify);
void devlink_flash_update_end_notify(struct devlink *devlink)
{
+ struct devlink_flash_notify params = { 0 };
+
__devlink_flash_update_notify(devlink,
DEVLINK_CMD_FLASH_UPDATE_END,
- NULL, NULL, 0, 0);
+ &params);
}
EXPORT_SYMBOL_GPL(devlink_flash_update_end_notify);
@@ -3115,12 +3114,36 @@ void devlink_flash_update_status_notify(struct devlink *devlink,
unsigned long done,
unsigned long total)
{
+ struct devlink_flash_notify params = {
+ .status_msg = status_msg,
+ .component = component,
+ .done = done,
+ .total = total,
+ };
+
__devlink_flash_update_notify(devlink,
DEVLINK_CMD_FLASH_UPDATE_STATUS,
- status_msg, component, done, total);
+ &params);
}
EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout)
+{
+ struct devlink_flash_notify params = {
+ .status_msg = status_msg,
+ .component = component,
+ .timeout = timeout,
+ };
+
+ __devlink_flash_update_notify(devlink,
+ DEVLINK_CMD_FLASH_UPDATE_STATUS,
+ &params);
+}
+EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
+
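Editor's note: the new devlink_flash_update_timeout_notify() reuses the STATUS command but carries a timeout instead of done/total, so user-space can show a countdown while the device is busy. A hedged driver-side sketch; the component name, the 60-second budget, and the done/total variables are illustrative, and the timeout unit is whatever driver and user-space agree on (seconds assumed here):

devlink_flash_update_begin_notify(devlink);
/* device erases flash; no progress available, only a worst case */
devlink_flash_update_timeout_notify(devlink, "Erasing", "fw.mgmt", 60);
/* ... later, normal progress reporting resumes ... */
devlink_flash_update_status_notify(devlink, "Flashing", "fw.mgmt",
				   done, total);
devlink_flash_update_end_notify(devlink);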
static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
struct genl_info *info)
{
@@ -4322,7 +4345,7 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
}
}
- err = region->ops->snapshot(devlink, info->extack, &data);
+ err = region->ops->snapshot(devlink, region->ops, info->extack, &data);
if (err)
goto err_snapshot_capture;
@@ -6096,6 +6119,28 @@ devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
return 0;
}
+static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->test) {
+ devlink_health_reporter_put(reporter);
+ return -EOPNOTSUPP;
+ }
+
+ err = reporter->ops->test(reporter, info->extack);
+
+ devlink_health_reporter_put(reporter);
+ return err;
+}
+
struct devlink_stats {
u64 rx_bytes;
u64 rx_packets;
@@ -7008,7 +7053,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
+ DEVLINK_PORT_TYPE_IB),
[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
@@ -7017,7 +7063,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
- [DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
+ DEVLINK_ESWITCH_MODE_SWITCHDEV),
[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
@@ -7317,6 +7364,14 @@ static const struct genl_ops devlink_nl_ops[] = {
DEVLINK_NL_FLAG_NO_LOCK,
},
{
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_test_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
+ {
.cmd = DEVLINK_CMD_FLASH_UPDATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_flash_update,
diff --git a/net/core/dst.c b/net/core/dst.c
index d6b6ced0d451..0c01bd8d9d81 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -144,7 +144,7 @@ static void dst_destroy_rcu(struct rcu_head *head)
/* Operations to mark dst as DEAD and clean up the net device referenced
* by dst:
- * 1. put the dst under loopback interface and discard all tx/rx packets
+ * 1. put the dst under the blackhole interface and discard all tx/rx packets
* on this route.
* 2. release the net_device
* This function should be called when removing routes from the fib tree
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 51678a528f85..7bcfb16854cb 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -16,7 +16,7 @@
#include <net/ip_tunnels.h>
#include <linux/indirect_call_wrapper.h>
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES)
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define INDIRECT_CALL_MT(f, f2, f1, ...) \
INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__)
diff --git a/net/core/filter.c b/net/core/filter.c
index 2ad9c0ef1946..706f8db0ccf8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3803,19 +3803,18 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
-BTF_ID_LIST(bpf_skb_output_btf_ids)
-BTF_ID(struct, sk_buff)
+BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
const struct bpf_func_proto bpf_skb_output_proto = {
.func = bpf_skb_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_skb_output_btf_ids,
};
static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -4199,19 +4198,18 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
-BTF_ID_LIST(bpf_xdp_output_btf_ids)
-BTF_ID(struct, xdp_buff)
+BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_output_proto = {
.func = bpf_xdp_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_xdp_output_btf_ids,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
@@ -4313,10 +4311,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-#define SOCKOPT_CC_REINIT (1 << 0)
-
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
- char *optval, int optlen, u32 flags)
+ char *optval, int optlen)
{
char devname[IFNAMSIZ];
int val, valbool;
@@ -4449,13 +4445,11 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
sk->sk_prot->setsockopt == tcp_setsockopt) {
if (optname == TCP_CONGESTION) {
char name[TCP_CA_NAME_MAX];
- bool reinit = flags & SOCKOPT_CC_REINIT;
strncpy(name, optval, min_t(long, optlen,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
- ret = tcp_set_congestion_control(sk, name, false,
- reinit, true);
+ ret = tcp_set_congestion_control(sk, name, false, true);
} else {
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -4615,9 +4609,7 @@ err_clear:
BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{
- u32 flags = 0;
- return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen,
- flags);
+ return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
@@ -4651,11 +4643,7 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
- u32 flags = 0;
- if (bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
- flags |= SOCKOPT_CC_REINIT;
- return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen,
- flags);
+ return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
@@ -4943,6 +4931,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
fl4.saddr = params->ipv4_src;
fl4.fl4_sport = params->sport;
fl4.fl4_dport = params->dport;
+ fl4.flowi4_multipath_hash = 0;
if (flags & BPF_FIB_LOOKUP_DIRECT) {
u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
@@ -7405,8 +7394,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
bool indirect = BPF_MODE(orig->code) == BPF_IND;
struct bpf_insn *insn = insn_buf;
- /* We're guaranteed here that CTX is in R6. */
- *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
if (!indirect) {
*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
} else {
@@ -7414,6 +7401,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig,
if (orig->imm)
*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
}
+ /* We're guaranteed here that CTX is in R6. */
+ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
switch (BPF_SIZE(orig->code)) {
case BPF_B:
@@ -9907,24 +9896,13 @@ BTF_SOCK_TYPE_xxx
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif
-static bool check_arg_btf_id(u32 btf_id, u32 arg)
-{
- int i;
-
- /* only one argument, no need to check arg */
- for (i = 0; i < MAX_BTF_SOCK_TYPE; i++)
- if (btf_sock_ids[i] == btf_id)
- return true;
- return false;
-}
-
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
/* tcp6_sock type is not generated in dwarf and hence btf,
* trigger an explicit type generation here.
*/
BTF_TYPE_EMIT(struct tcp6_sock);
- if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
+ if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
sk->sk_family == AF_INET6)
return (unsigned long)sk;
@@ -9936,13 +9914,13 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
};
BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
{
- if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
+ if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
return (unsigned long)sk;
return (unsigned long)NULL;
@@ -9953,19 +9931,19 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
};
BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
{
#ifdef CONFIG_INET
- if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
+ if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
return (unsigned long)sk;
#endif
#if IS_BUILTIN(CONFIG_IPV6)
- if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
+ if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
return (unsigned long)sk;
#endif
@@ -9977,19 +9955,19 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
};
BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
{
#ifdef CONFIG_INET
- if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV)
return (unsigned long)sk;
#endif
#if IS_BUILTIN(CONFIG_IPV6)
- if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
+ if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
return (unsigned long)sk;
#endif
@@ -10001,7 +9979,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
};
@@ -10011,7 +9989,7 @@ BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
* trigger an explicit type generation here.
*/
BTF_TYPE_EMIT(struct udp6_sock);
- if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
+ if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6)
return (unsigned long)sk;
@@ -10023,6 +10001,6 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
.arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};
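Editor's note: across these hunks the per-call check_btf_id callback is dropped in favor of a direct per-argument BTF id, so the verifier matches the argument type without a helper-specific check. A hedged sketch of declaring a new helper proto under this scheme (the helper and example_func are made up):

BTF_ID_LIST_SINGLE(example_btf_ids, struct, sock)

const struct bpf_func_proto example_proto = {
	.func		= example_func,		/* placeholder helper */
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &example_btf_ids[0],
};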
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 6bbd06f7dc7d..c714e6a9dad4 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -116,6 +116,12 @@ static int dev_seq_show(struct seq_file *seq, void *v)
return 0;
}
+static u32 softnet_backlog_len(struct softnet_data *sd)
+{
+ return skb_queue_len_lockless(&sd->input_pkt_queue) +
+ skb_queue_len_lockless(&sd->process_queue);
+}
+
static struct softnet_data *softnet_get_online(loff_t *pos)
{
struct softnet_data *sd = NULL;
@@ -159,12 +165,17 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
rcu_read_unlock();
#endif
+	/* the index is the CPU id owning this sd. Since offline CPUs are not
+	 * displayed, it would otherwise not be trivial for user-space to
+	 * map the data to a specific CPU
+ */
seq_printf(seq,
- "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
0, /* was cpu_collision */
- sd->received_rps, flow_limit_count);
+ sd->received_rps, flow_limit_count,
+ softnet_backlog_len(sd), (int)seq->index);
return 0;
}
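Editor's note: each /proc/net/softnet_stat row now ends with two extra hex fields, the lockless backlog length and the owning CPU id, so user-space no longer has to infer the CPU from the row position (offline CPUs are skipped). A minimal user-space reader, assuming the 13-column format produced above:

#include <stdio.h>

int main(void)
{
	unsigned int v[13];
	FILE *f = fopen("/proc/net/softnet_stat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%x %x %x %x %x %x %x %x %x %x %x %x %x",
		      &v[0], &v[1], &v[2], &v[3], &v[4], &v[5], &v[6],
		      &v[7], &v[8], &v[9], &v[10], &v[11], &v[12]) == 13)
		printf("cpu %u: backlog %u\n", v[12], v[11]);
	fclose(f);
	return 0;
}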
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index dcd61aca343e..944ab214e5ae 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -251,10 +251,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
if (refcount_read(&net->count) == 0)
return NETNSA_NSID_NOT_ASSIGNED;
- spin_lock(&net->nsid_lock);
+ spin_lock_bh(&net->nsid_lock);
id = __peernet2id(net, peer);
if (id >= 0) {
- spin_unlock(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
return id;
}
@@ -264,12 +264,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
* just been idr_remove()'d from there in cleanup_net().
*/
if (!maybe_get_net(peer)) {
- spin_unlock(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
return NETNSA_NSID_NOT_ASSIGNED;
}
id = alloc_netid(net, peer, -1);
- spin_unlock(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
put_net(peer);
if (id < 0)
@@ -534,20 +534,20 @@ static void unhash_nsid(struct net *net, struct net *last)
for_each_net(tmp) {
int id;
- spin_lock(&tmp->nsid_lock);
+ spin_lock_bh(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock(&tmp->nsid_lock);
+ spin_unlock_bh(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
GFP_KERNEL);
if (tmp == last)
break;
}
- spin_lock(&net->nsid_lock);
+ spin_lock_bh(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
}
static LLIST_HEAD(cleanup_list);
@@ -760,9 +760,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(peer);
}
- spin_lock(&net->nsid_lock);
+ spin_lock_bh(&net->nsid_lock);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
err = -EEXIST;
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack,
@@ -771,7 +771,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
}
err = alloc_netid(net, peer, nsid);
- spin_unlock(&net->nsid_lock);
+ spin_unlock_bh(&net->nsid_lock);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
nlh, GFP_KERNEL);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bfd748346f20..e0774471f56d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -895,9 +895,6 @@ void __kfree_skb_defer(struct sk_buff *skb)
void napi_consume_skb(struct sk_buff *skb, int budget)
{
- if (unlikely(!skb))
- return;
-
/* Zero budget indicate non-NAPI context called us, like netpoll */
if (unlikely(!budget)) {
dev_consume_skb_any(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index ba9e7d91e2ef..d9a537e6876a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2947,6 +2947,13 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer)
}
EXPORT_SYMBOL(sk_stop_timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
+{
+ if (del_timer_sync(timer))
+ __sock_put(sk);
+}
+EXPORT_SYMBOL(sk_stop_timer_sync);
+
void sock_init_data(struct socket *sock, struct sock *sk)
{
sk_init_common(sk);
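Editor's note: sk_stop_timer_sync() mirrors sk_stop_timer() but uses del_timer_sync(), so the caller is guaranteed the timer handler has finished before proceeding, which matters on teardown paths about to free state the handler touches. A hedged sketch; "struct example_sock" and its retry_timer are illustrative:

struct example_sock {
	struct sock		sk;	/* must be first for the cast */
	struct timer_list	retry_timer;
};

static void example_sock_destroy(struct sock *sk)
{
	struct example_sock *es = (struct example_sock *)sk;

	/* drops the ref taken by sk_reset_timer() and waits for a
	 * running handler to complete
	 */
	sk_stop_timer_sync(sk, &es->retry_timer);
}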
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 078386d7d9a2..e1f05e3fa1d0 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
#include <linux/bpf.h>
+#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
@@ -382,7 +383,7 @@ static void *sock_map_lookup(struct bpf_map *map, void *key)
struct sock *sk;
sk = __sock_map_lookup_elem(map, *(u32 *)key);
- if (!sk || !sk_fullsock(sk))
+ if (!sk)
return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;
@@ -703,6 +704,109 @@ const struct bpf_func_proto bpf_msg_redirect_map_proto = {
.arg4_type = ARG_ANYTHING,
};
+struct sock_map_seq_info {
+ struct bpf_map *map;
+ struct sock *sk;
+ u32 index;
+};
+
+struct bpf_iter__sockmap {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct bpf_map *, map);
+ __bpf_md_ptr(void *, key);
+ __bpf_md_ptr(struct sock *, sk);
+};
+
+DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
+ struct bpf_map *map, void *key,
+ struct sock *sk)
+
+static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
+{
+ if (unlikely(info->index >= info->map->max_entries))
+ return NULL;
+
+ info->sk = __sock_map_lookup_elem(info->map, info->index);
+
+ /* can't return sk directly, since that might be NULL */
+ return info;
+}
+
+static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct sock_map_seq_info *info = seq->private;
+
+ if (*pos == 0)
+ ++*pos;
+
+ /* pairs with sock_map_seq_stop */
+ rcu_read_lock();
+ return sock_map_seq_lookup_elem(info);
+}
+
+static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock_map_seq_info *info = seq->private;
+
+ ++*pos;
+ ++info->index;
+
+ return sock_map_seq_lookup_elem(info);
+}
+
+static int sock_map_seq_show(struct seq_file *seq, void *v)
+{
+ struct sock_map_seq_info *info = seq->private;
+ struct bpf_iter__sockmap ctx = {};
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, !v);
+ if (!prog)
+ return 0;
+
+ ctx.meta = &meta;
+ ctx.map = info->map;
+ if (v) {
+ ctx.key = &info->index;
+ ctx.sk = info->sk;
+ }
+
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static void sock_map_seq_stop(struct seq_file *seq, void *v)
+{
+ if (!v)
+ (void)sock_map_seq_show(seq, NULL);
+
+ /* pairs with sock_map_seq_start */
+ rcu_read_unlock();
+}
+
+static const struct seq_operations sock_map_seq_ops = {
+ .start = sock_map_seq_start,
+ .next = sock_map_seq_next,
+ .stop = sock_map_seq_stop,
+ .show = sock_map_seq_show,
+};
+
+static int sock_map_init_seq_private(void *priv_data,
+ struct bpf_iter_aux_info *aux)
+{
+ struct sock_map_seq_info *info = priv_data;
+
+ info->map = aux->map;
+ return 0;
+}
+
+static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
+ .seq_ops = &sock_map_seq_ops,
+ .init_seq_private = sock_map_init_seq_private,
+ .seq_priv_size = sizeof(struct sock_map_seq_info),
+};
+
static int sock_map_btf_id;
const struct bpf_map_ops sock_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
@@ -717,6 +821,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_stab",
.map_btf_id = &sock_map_btf_id,
+ .iter_seq_info = &sock_map_iter_seq_info,
};
struct bpf_shtab_elem {
@@ -953,7 +1058,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
if (!elem)
goto find_first_elem;
- elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
+ elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
struct bpf_shtab_elem, node);
if (elem_next) {
memcpy(key_next, elem_next->key, key_size);
@@ -965,7 +1070,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
find_first_elem:
for (; i < htab->buckets_num; i++) {
head = &sock_hash_select_bucket(htab, i)->head;
- elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
struct bpf_shtab_elem, node);
if (elem_next) {
memcpy(key_next, elem_next->key, key_size);
@@ -1110,7 +1215,7 @@ static void *sock_hash_lookup(struct bpf_map *map, void *key)
struct sock *sk;
sk = __sock_hash_lookup_elem(map, key);
- if (!sk || !sk_fullsock(sk))
+ if (!sk)
return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;
@@ -1199,6 +1304,117 @@ const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
.arg4_type = ARG_ANYTHING,
};
+struct sock_hash_seq_info {
+ struct bpf_map *map;
+ struct bpf_shtab *htab;
+ u32 bucket_id;
+};
+
+static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
+ struct bpf_shtab_elem *prev_elem)
+{
+ const struct bpf_shtab *htab = info->htab;
+ struct bpf_shtab_bucket *bucket;
+ struct bpf_shtab_elem *elem;
+ struct hlist_node *node;
+
+ /* try to find next elem in the same bucket */
+ if (prev_elem) {
+ node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
+ elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
+ if (elem)
+ return elem;
+
+ /* no more elements, continue in the next bucket */
+ info->bucket_id++;
+ }
+
+ for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
+ bucket = &htab->buckets[info->bucket_id];
+ node = rcu_dereference(hlist_first_rcu(&bucket->head));
+ elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
+ if (elem)
+ return elem;
+ }
+
+ return NULL;
+}
+
+static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct sock_hash_seq_info *info = seq->private;
+
+ if (*pos == 0)
+ ++*pos;
+
+ /* pairs with sock_hash_seq_stop */
+ rcu_read_lock();
+ return sock_hash_seq_find_next(info, NULL);
+}
+
+static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock_hash_seq_info *info = seq->private;
+
+ ++*pos;
+ return sock_hash_seq_find_next(info, v);
+}
+
+static int sock_hash_seq_show(struct seq_file *seq, void *v)
+{
+ struct sock_hash_seq_info *info = seq->private;
+ struct bpf_iter__sockmap ctx = {};
+ struct bpf_shtab_elem *elem = v;
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, !elem);
+ if (!prog)
+ return 0;
+
+ ctx.meta = &meta;
+ ctx.map = info->map;
+ if (elem) {
+ ctx.key = elem->key;
+ ctx.sk = elem->sk;
+ }
+
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static void sock_hash_seq_stop(struct seq_file *seq, void *v)
+{
+ if (!v)
+ (void)sock_hash_seq_show(seq, NULL);
+
+ /* pairs with sock_hash_seq_start */
+ rcu_read_unlock();
+}
+
+static const struct seq_operations sock_hash_seq_ops = {
+ .start = sock_hash_seq_start,
+ .next = sock_hash_seq_next,
+ .stop = sock_hash_seq_stop,
+ .show = sock_hash_seq_show,
+};
+
+static int sock_hash_init_seq_private(void *priv_data,
+ struct bpf_iter_aux_info *aux)
+{
+ struct sock_hash_seq_info *info = priv_data;
+
+ info->map = aux->map;
+ info->htab = container_of(aux->map, struct bpf_shtab, map);
+ return 0;
+}
+
+static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
+ .seq_ops = &sock_hash_seq_ops,
+ .init_seq_private = sock_hash_init_seq_private,
+ .seq_priv_size = sizeof(struct sock_hash_seq_info),
+};
+
static int sock_hash_map_btf_id;
const struct bpf_map_ops sock_hash_ops = {
.map_meta_equal = bpf_map_meta_equal,
@@ -1213,6 +1429,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_shtab",
.map_btf_id = &sock_hash_map_btf_id,
+ .iter_seq_info = &sock_hash_iter_seq_info,
};
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
@@ -1323,3 +1540,62 @@ void sock_map_close(struct sock *sk, long timeout)
release_sock(sk);
saved_close(sk, timeout);
}
+
+static int sock_map_iter_attach_target(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux)
+{
+ struct bpf_map *map;
+ int err = -EINVAL;
+
+ if (!linfo->map.map_fd)
+ return -EBADF;
+
+ map = bpf_map_get_with_uref(linfo->map.map_fd);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
+ map->map_type != BPF_MAP_TYPE_SOCKHASH)
+ goto put_map;
+
+ if (prog->aux->max_rdonly_access > map->key_size) {
+ err = -EACCES;
+ goto put_map;
+ }
+
+ aux->map = map;
+ return 0;
+
+put_map:
+ bpf_map_put_with_uref(map);
+ return err;
+}
+
+static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
+{
+ bpf_map_put_with_uref(aux->map);
+}
+
+static struct bpf_iter_reg sock_map_iter_reg = {
+ .target = "sockmap",
+ .attach_target = sock_map_iter_attach_target,
+ .detach_target = sock_map_iter_detach_target,
+ .show_fdinfo = bpf_iter_map_show_fdinfo,
+ .fill_link_info = bpf_iter_map_fill_link_info,
+ .ctx_arg_info_size = 2,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__sockmap, key),
+ PTR_TO_RDONLY_BUF_OR_NULL },
+ { offsetof(struct bpf_iter__sockmap, sk),
+ PTR_TO_BTF_ID_OR_NULL },
+ },
+};
+
+static int __init bpf_sockmap_iter_init(void)
+{
+ sock_map_iter_reg.ctx_arg_info[1].btf_id =
+ btf_sock_ids[BTF_SOCK_TYPE_SOCK];
+ return bpf_iter_reg_target(&sock_map_iter_reg);
+}
+late_initcall(bpf_sockmap_iter_init);
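Editor's note: with the "sockmap" iterator target registered, a BPF program attached to it runs once per map element with (map, key, sk) in its context. A hedged sketch of such a program, assuming vmlinux BTF and a libbpf with global-variable support; the counter is illustrative and mirrors the shape of the kernel's own selftests rather than being part of this patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

__u32 elems;	/* read back from user-space via the skeleton */

SEC("iter/sockmap")
int count_elems(struct bpf_iter__sockmap *ctx)
{
	struct sock *sk = ctx->sk;

	if (sk)	/* NULL sk marks the end-of-iteration call */
		elems++;
	return 0;
}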
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 84dde5a2066e..16014ad19406 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+ int prio;
int err;
if (!ops)
@@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
struct dcbnl_buffer *buffer =
nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
+ for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
+ if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
err = ops->dcbnl_setbuffer(netdev, buffer);
if (err)
goto err;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 1ce9ba8cf545..5c18c0214aac 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -330,11 +330,7 @@ EXPORT_SYMBOL_GPL(call_dsa_notifiers);
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
- struct dsa_devlink_priv *dl_priv;
- struct dsa_switch *ds;
-
- dl_priv = devlink_priv(dl);
- ds = dl_priv->ds;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_param_get)
return -EOPNOTSUPP;
@@ -346,11 +342,7 @@ EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
- struct dsa_devlink_priv *dl_priv;
- struct dsa_switch *ds;
-
- dl_priv = devlink_priv(dl);
- ds = dl_priv->ds;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_param_set)
return -EOPNOTSUPP;
@@ -412,6 +404,22 @@ void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ return devlink_region_create(ds->devlink, ops, region_max_snapshots,
+ region_size);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_region_create);
+
+void dsa_devlink_region_destroy(struct devlink_region *region)
+{
+ devlink_region_destroy(region);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);
+
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
if (!netdev || !dsa_slave_dev_check(netdev))
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index c0ffc7a2b65f..3cf67f5fe54a 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -21,9 +21,6 @@
static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);
-static const struct devlink_ops dsa_devlink_ops = {
-};
-
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
struct dsa_switch_tree *dst;
@@ -382,6 +379,22 @@ static void dsa_port_teardown(struct dsa_port *dp)
dp->setup = false;
}
+static int dsa_devlink_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+
+ if (ds->ops->devlink_info_get)
+ return ds->ops->devlink_info_get(ds, req, extack);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct devlink_ops dsa_devlink_ops = {
+ .info_get = dsa_devlink_info_get,
+};
+
static int dsa_switch_setup(struct dsa_switch *ds)
{
struct dsa_devlink_priv *dl_priv;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 2da656d984ef..0348dbab4131 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -7,6 +7,7 @@
#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H
+#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
@@ -194,6 +195,71 @@ dsa_slave_to_master(const struct net_device *dev)
return dp->cpu_dp->master;
}
+/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
+ * frames as untagged, since the bridge will not untag them.
+ */
+static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+ struct net_device *br = dp->bridge_dev;
+ struct net_device *dev = skb->dev;
+ struct net_device *upper_dev;
+ struct list_head *iter;
+ u16 vid, pvid, proto;
+ int err;
+
+ if (!br || br_vlan_enabled(br))
+ return skb;
+
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb) && hdr->h_vlan_proto == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
+ return skb;
+
+ vid = skb_vlan_tag_get_id(skb);
+
+ /* We already run under an RCU read-side critical section since
+ * we are called from netif_receive_skb_list_internal().
+ */
+ err = br_vlan_get_pvid_rcu(dev, &pvid);
+ if (err)
+ return skb;
+
+ if (vid != pvid)
+ return skb;
+
+ /* The sad part about attempting to untag from DSA is that we
+ * don't know, unless we check, if the skb will end up in
+ * the bridge's data path - br_allowed_ingress() - or not.
+ * For example, there might be an 8021q upper for the
+ * default_pvid of the bridge, which will steal VLAN-tagged traffic
+ * from the bridge's data path. This is a configuration that DSA
+ * supports because vlan_filtering is 0. In that case, we should
+ * definitely keep the tag, to make sure it keeps working.
+ */
+ netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ if (vid == vlan_dev_vlan_id(upper_dev))
+ return skb;
+ }
+
+ __vlan_hwaccel_clear_tag(skb);
+
+ return skb;
+}
+
/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
diff --git a/net/dsa/port.c b/net/dsa/port.c
index 46c9bf709683..9a4fb80d2731 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -193,11 +193,44 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}
+/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
bool vlan_filtering)
{
struct dsa_switch *ds = dp->ds;
- int i;
+ int err, i;
+
+ /* VLAN awareness was off, so the question is "can we turn it on".
+	 * We may have 8021q uppers; those need to go first. Make sure we don't
+ * enter an inconsistent state: deny changing the VLAN awareness state
+ * as long as we have 8021q uppers.
+ */
+ if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
+ struct net_device *upper_dev, *slave = dp->slave;
+ struct net_device *br = dp->bridge_dev;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ struct bridge_vlan_info br_info;
+ u16 vid;
+
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+
+			/* br_vlan_get_info() returns -EINVAL or -ENOENT if
+			 * the device or the VID, respectively, is not found;
+			 * returning 0 means success, which is a failure for
+			 * us here.
+ */
+ err = br_vlan_get_info(br, vid, &br_info);
+ if (err == 0) {
+ dev_err(ds->dev, "Must remove upper %s first\n",
+ upper_dev->name);
+ return false;
+ }
+ }
+ }
if (!ds->vlan_filtering_is_global)
return true;
@@ -232,15 +265,24 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
struct dsa_switch *ds = dp->ds;
int err;
- /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (switchdev_trans_ph_prepare(trans)) {
+ bool apply;
- if (!ds->ops->port_vlan_filtering)
- return 0;
+ if (!ds->ops->port_vlan_filtering)
+ return -EOPNOTSUPP;
- if (!dsa_port_can_apply_vlan_filtering(dp, vlan_filtering))
- return -EINVAL;
+ /* We are called from dsa_slave_switchdev_blocking_event(),
+ * which is not under rcu_read_lock(), unlike
+ * dsa_slave_switchdev_event().
+ */
+ rcu_read_lock();
+ apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering);
+ rcu_read_unlock();
+ if (!apply)
+ return -EINVAL;
+
+ return 0;
+ }
if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
return 0;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 66a5268398a5..e7c1d62fde99 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -303,13 +303,36 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
return ret;
}
+/* Must be called under rcu_read_lock() */
+static int
+dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ u16 vid;
+
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ if (vid >= vlan->vid_begin && vid <= vlan->vid_end)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static int dsa_slave_vlan_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan;
- int err;
+ int vid, err;
if (obj->orig_dev != dev)
return -EOPNOTSUPP;
@@ -319,6 +342,17 @@ static int dsa_slave_vlan_add(struct net_device *dev,
vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+ /* Deny adding a bridge VLAN when there is already an 802.1Q upper with
+ * the same VID.
+ */
+ if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) {
+ rcu_read_lock();
+ err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
+ rcu_read_unlock();
+ if (err)
+ return err;
+ }
+
err = dsa_port_vlan_add(dp, &vlan, trans);
if (err)
return err;
@@ -333,6 +367,12 @@ static int dsa_slave_vlan_add(struct net_device *dev,
if (err)
return err;
+ for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) {
+ err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -376,7 +416,10 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
static int dsa_slave_vlan_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan *vlan;
+ int vid, err;
if (obj->orig_dev != dev)
return -EOPNOTSUPP;
@@ -384,10 +427,19 @@ static int dsa_slave_vlan_del(struct net_device *dev,
if (dsa_port_skip_vlan_configuration(dp))
return 0;
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+
/* Do not deprogram the CPU port as it may be shared with other user
* ports which can be members of this VLAN as well.
*/
- return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ err = dsa_port_vlan_del(dp, vlan);
+ if (err)
+ return err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ vlan_vid_del(master, htons(ETH_P_8021Q), vid);
+
+ return 0;
}
static int dsa_slave_port_obj_del(struct net_device *dev,
@@ -1232,6 +1284,7 @@ static int dsa_slave_get_ts_info(struct net_device *dev,
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
@@ -1240,26 +1293,9 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
/* This API only allows programming tagged, non-PVID VIDs */
.flags = 0,
};
- struct bridge_vlan_info info;
struct switchdev_trans trans;
int ret;
- /* Check for a possible bridge VLAN entry now since there is no
- * need to emulate the switchdev prepare + commit phase.
- */
- if (dp->bridge_dev) {
- if (dsa_port_skip_vlan_configuration(dp))
- return 0;
-
- /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
- * device, respectively the VID is not found, returning
- * 0 means success, which is a failure for us here.
- */
- ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
- if (ret == 0)
- return -EBUSY;
- }
-
/* User port... */
trans.ph_prepare = true;
ret = dsa_port_vlan_add(dp, &vlan, &trans);
@@ -1282,12 +1318,13 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
if (ret)
return ret;
- return 0;
+ return vlan_vid_add(master, proto, vid);
}
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.vid_begin = vid,
@@ -1295,29 +1332,18 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
/* This API only allows programming tagged, non-PVID VIDs */
.flags = 0,
};
- struct bridge_vlan_info info;
- int ret;
-
- /* Check for a possible bridge VLAN entry now since there is no
- * need to emulate the switchdev prepare + commit phase.
- */
- if (dp->bridge_dev) {
- if (dsa_port_skip_vlan_configuration(dp))
- return 0;
-
- /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
- * device, respectively the VID is not found, returning
- * 0 means success, which is a failure for us here.
- */
- ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
- if (ret == 0)
- return -EBUSY;
- }
+ int err;
/* Do not deprogram the CPU port as it may be shared with other user
* ports which can be members of this VLAN as well.
*/
- return dsa_port_vlan_del(dp, &vlan);
+ err = dsa_port_vlan_del(dp, &vlan);
+ if (err)
+ return err;
+
+ vlan_vid_del(master, proto, vid);
+
+ return 0;
}
struct dsa_hw_port {
@@ -1828,15 +1854,27 @@ int dsa_slave_create(struct dsa_port *port)
dsa_slave_notify(slave_dev, DSA_PORT_REGISTER);
- ret = register_netdev(slave_dev);
+ rtnl_lock();
+
+ ret = register_netdevice(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
ret, slave_dev->name);
+ rtnl_unlock();
goto out_phy;
}
+ ret = netdev_upper_dev_link(master, slave_dev, NULL);
+
+ rtnl_unlock();
+
+ if (ret)
+ goto out_unregister;
+
return 0;
+out_unregister:
+ unregister_netdev(slave_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
@@ -1853,16 +1891,18 @@ out_free:
void dsa_slave_destroy(struct net_device *slave_dev)
{
+ struct net_device *master = dsa_slave_to_master(slave_dev);
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct dsa_slave_priv *p = netdev_priv(slave_dev);
netif_carrier_off(slave_dev);
rtnl_lock();
+ netdev_upper_dev_unlink(master, slave_dev);
+ unregister_netdevice(slave_dev);
phylink_disconnect_phy(dp->pl);
rtnl_unlock();
dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
- unregister_netdev(slave_dev);
phylink_destroy(dp->pl);
gro_cells_destroy(&p->gcells);
free_percpu(p->stats64);
@@ -1895,9 +1935,9 @@ static int dsa_slave_changeupper(struct net_device *dev,
return err;
}
-static int dsa_slave_upper_vlan_check(struct net_device *dev,
- struct netdev_notifier_changeupper_info *
- info)
+static int
+dsa_prevent_bridging_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
struct net_device *slave;
@@ -1927,14 +1967,56 @@ static int dsa_slave_upper_vlan_check(struct net_device *dev,
return NOTIFY_DONE;
}
+static int
+dsa_slave_check_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *br = dp->bridge_dev;
+ struct bridge_vlan_info br_info;
+ struct netlink_ext_ack *extack;
+ int err = NOTIFY_DONE;
+ u16 vid;
+
+ if (!br || !br_vlan_enabled(br))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+	/* br_vlan_get_info() returns -EINVAL or -ENOENT if the device or
+	 * the VID, respectively, is not found; returning 0 means success,
+	 * which is a failure for us here.
+ */
+ err = br_vlan_get_info(br, vid, &br_info);
+ if (err == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This VLAN is already configured by the bridge");
+ return notifier_from_errno(-EBUSY);
+ }
+
+ return NOTIFY_DONE;
+}
+
static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_CHANGEUPPER) {
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (!dsa_slave_dev_check(dev))
+ return dsa_prevent_bridging_8021q_upper(dev, ptr);
+
+ if (is_vlan_dev(info->upper_dev))
+ return dsa_slave_check_8021q_upper(dev, ptr);
+ break;
+ }
+ case NETDEV_CHANGEUPPER:
if (!dsa_slave_dev_check(dev))
- return dsa_slave_upper_vlan_check(dev, ptr);
+ return NOTIFY_DONE;
return dsa_slave_changeupper(dev, ptr);
}
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 86c8dc5c32a0..9afef6f0f9df 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -232,43 +232,6 @@ static int dsa_switch_mdb_del(struct dsa_switch *ds,
return 0;
}
-static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
- int vlan_dev_vid,
- void *arg)
-{
- struct switchdev_obj_port_vlan *vlan = arg;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- if (vid == vlan_dev_vid)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- const struct dsa_port *dp = dsa_to_port(ds, port);
- int err = 0;
-
- /* Device is not bridged, let it proceed with the VLAN device
- * creation.
- */
- if (!dp->bridge_dev)
- return err;
-
- /* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
- * already checks whether there is an overlapping bridge VLAN entry
- * with the same VID, so here we only need to check that if we are
- * adding a bridge VLAN entry there is not an overlapping VLAN device
- * claiming that VID.
- */
- return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
- (void *)vlan);
-}
-
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
struct dsa_notifier_vlan_info *info)
{
@@ -291,10 +254,6 @@ static int dsa_switch_vlan_prepare(struct dsa_switch *ds,
for (port = 0; port < ds->num_ports; port++) {
if (dsa_switch_vlan_match(ds, port, info)) {
- err = dsa_port_vlan_check(ds, port, info->vlan);
- if (err)
- return err;
-
err = ds->ops->port_vlan_prepare(ds, port, info->vlan);
if (err)
return err;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 5baeb0893950..8e3e8a5b8559 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -215,7 +215,8 @@ static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
int upstream = dsa_upstream_port(ctx->ds, port);
u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
u16 tx_vid = dsa_8021q_tx_vid(ctx->ds, port);
- int i, err;
+ struct net_device *master;
+ int i, err, subvlan;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
@@ -223,6 +224,8 @@ static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
if (!dsa_is_user_port(ctx->ds, port))
return 0;
+ master = dsa_to_port(ctx->ds, port)->cpu_dp->master;
+
/* Add this user port's RX VID to the membership list of all others
* (including itself). This is so that bridging will not be hindered.
* L2 forwarding rules still take precedence when there are no VLAN
@@ -261,6 +264,19 @@ static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
return err;
}
+ /* Add to the master's RX filter not only @rx_vid, but in fact
+ * the entire subvlan range, just in case this DSA switch might
+ * want to use sub-VLANs.
+ */
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) {
+ u16 vid = dsa_8021q_rx_vid_subvlan(ctx->ds, port, subvlan);
+
+ if (enabled)
+ vlan_vid_add(master, ctx->proto, vid);
+ else
+ vlan_vid_del(master, ctx->proto, vid);
+ }
+
/* Finally apply the TX VID on this port and on the CPU port */
err = dsa_8021q_vid_apply(ctx, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED,
enabled);
@@ -285,6 +301,8 @@ int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
{
int rc, port;
+ ASSERT_RTNL();
+
for (port = 0; port < ctx->ds->num_ports; port++) {
rc = dsa_8021q_setup_port(ctx, port, enabled);
if (rc < 0) {
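
vlan_vid_add() and vlan_vid_del() must stay paired across enable and disable, as the loop above does for the sub-VLAN range. A standalone sketch of the same pairing, using a made-up VID range rather than the dsa_8021q encoding:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static int example_filter_vids(struct net_device *master, bool enable)
{
	u16 vid;
	int err;

	for (vid = 100; vid < 108; vid++) {
		if (enable) {
			err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
			if (err)
				return err;
		} else {
			/* removal must mirror every successful add */
			vlan_vid_del(master, htons(ETH_P_8021Q), vid);
		}
	}

	return 0;
}
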
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index cc8512b5f9e2..1dab212a294f 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -140,6 +140,11 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_TAG_LEN);
+ /* Set the MAC header to where it should point for
+ * dsa_untag_bridge_pvid() to parse the correct VLAN header.
+ */
+ skb_set_mac_header(skb, -ETH_HLEN);
+
skb->offload_fwd_mark = 1;
return skb;
@@ -191,7 +196,7 @@ static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
nskb->data - ETH_HLEN - BRCM_TAG_LEN,
2 * ETH_ALEN);
- return nskb;
+ return dsa_untag_bridge_pvid(nskb);
}
static const struct dsa_device_ops brcm_netdev_ops = {
@@ -219,8 +224,14 @@ static struct sk_buff *brcm_tag_rcv_prepend(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt)
{
+ struct sk_buff *nskb;
+
/* tag is prepended to the packet */
- return brcm_tag_rcv_ll(skb, dev, pt, ETH_HLEN);
+ nskb = brcm_tag_rcv_ll(skb, dev, pt, ETH_HLEN);
+ if (!nskb)
+ return nskb;
+
+ return dsa_untag_bridge_pvid(nskb);
}
static const struct dsa_device_ops brcm_prepend_netdev_ops = {
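
Both receive paths now converge on dsa_untag_bridge_pvid(), and both depend on the MAC header set earlier with a negative offset. The offset is negative because after skb_pull_rcsum() the data pointer sits just past the Ethernet header. An illustration with an arbitrary tag length:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void example_strip_tag(struct sk_buff *skb, unsigned int tag_len)
{
	/* remove the switch tag and keep the checksum consistent */
	skb_pull_rcsum(skb, tag_len);

	/* skb->data now points past the Ethernet header, so the
	 * MAC header starts ETH_HLEN bytes *before* it
	 */
	skb_set_mac_header(skb, -ETH_HLEN);
}
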
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 42f327c06dca..d1a7e224adff 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -137,6 +137,7 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
struct dsa_switch *ds = dp->ds;
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port;
@@ -159,12 +160,14 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
qos_class = skb->priority;
packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
- if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ /* TX timestamping was requested */
+ if (clone) {
rew_op = ocelot_port->ptp_cmd;
- if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- rew_op |= (ocelot_port->ts_id % 4) << 3;
- ocelot_port->ts_id++;
- }
+ /* Retrieve timestamp ID populated inside skb->cb[0] of the
+ * clone by ocelot_port_add_txtstamp_skb
+ */
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ rew_op |= clone->cb[0] << 3;
packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0);
}
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 9b4a4d719291..3710f9daa46d 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -72,14 +72,21 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
static bool sja1105_can_use_vlan_as_tags(const struct sk_buff *skb)
{
struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+ u16 vlan_tci;
if (hdr->h_vlan_proto == htons(ETH_P_SJA1105))
return true;
- if (hdr->h_vlan_proto != htons(ETH_P_8021Q))
+ if (hdr->h_vlan_proto != htons(ETH_P_8021Q) &&
+ !skb_vlan_tag_present(skb))
return false;
- return vid_is_dsa_8021q(ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK);
+ if (skb_vlan_tag_present(skb))
+ vlan_tci = skb_vlan_tag_get(skb);
+ else
+ vlan_tci = ntohs(hdr->h_vlan_TCI);
+
+ return vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK);
}
/* This is the first time the tagger sees the frame on RX.
@@ -283,7 +290,8 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
hdr = eth_hdr(skb);
tpid = ntohs(hdr->h_proto);
- is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q);
+ is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
+ skb_vlan_tag_present(skb));
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
@@ -292,7 +300,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
if (is_tagged) {
/* Normal traffic path. */
skb_push_rcsum(skb, ETH_HLEN);
- __skb_vlan_pop(skb, &tci);
+ if (skb_vlan_tag_present(skb)) {
+ tci = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ __skb_vlan_pop(skb, &tci);
+ }
skb_pull_rcsum(skb, ETH_HLEN);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
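
The tagger now accepts the VLAN TCI from either of the two places it can live: the skb's hardware-accelerated tag or the in-band 802.1Q header. A compact sketch of that extraction logic on its own:

#include <linux/if_vlan.h>

static u16 example_get_vlan_tci(const struct sk_buff *skb)
{
	/* tag offloaded by the NIC into skb metadata */
	if (skb_vlan_tag_present(skb))
		return skb_vlan_tag_get(skb);

	/* otherwise read the in-band 802.1Q header */
	return ntohs(vlan_eth_hdr(skb)->h_vlan_TCI);
}
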
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index ed19573fccd7..24036e3055a1 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -192,6 +192,8 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full),
__DEFINE_LINK_MODE_NAME(400000, DR4, Full),
__DEFINE_LINK_MODE_NAME(400000, CR4, Full),
+ __DEFINE_LINK_MODE_NAME(100, FX, Half),
+ __DEFINE_LINK_MODE_NAME(100, FX, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index 7044a2853886..29dcd675b65a 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -272,6 +272,8 @@ static const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, DR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, CR4, Full),
+ __DEFINE_LINK_MODE_PARAMS(100, FX, Half),
+ __DEFINE_LINK_MODE_PARAMS(100, FX, Full),
};
static const struct nla_policy
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index 7aea35d1e8a5..1980aa7eb2b6 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -10,6 +10,7 @@ struct pause_req_info {
struct pause_reply_data {
struct ethnl_reply_data base;
struct ethtool_pauseparam pauseparam;
+ struct ethtool_pause_stats pausestat;
};
#define PAUSE_REPDATA(__reply_base) \
@@ -22,8 +23,15 @@ pause_get_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
[ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_REJECT },
[ETHTOOL_A_PAUSE_RX] = { .type = NLA_REJECT },
[ETHTOOL_A_PAUSE_TX] = { .type = NLA_REJECT },
+ [ETHTOOL_A_PAUSE_STATS] = { .type = NLA_REJECT },
};
+static void ethtool_stats_init(u64 *stats, unsigned int n)
+{
+ while (n--)
+ stats[n] = ETHTOOL_STAT_NOT_SET;
+}
+
static int pause_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
struct genl_info *info)
@@ -34,10 +42,17 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
if (!dev->ethtool_ops->get_pauseparam)
return -EOPNOTSUPP;
+
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
+ if (req_base->flags & ETHTOOL_FLAG_STATS &&
+ dev->ethtool_ops->get_pause_stats) {
+ ethtool_stats_init((u64 *)&data->pausestat,
+ sizeof(data->pausestat) / 8);
+ dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
+ }
ethnl_ops_complete(dev);
return 0;
@@ -46,9 +61,50 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
static int pause_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
- return nla_total_size(sizeof(u8)) + /* _PAUSE_AUTONEG */
+ int n = nla_total_size(sizeof(u8)) + /* _PAUSE_AUTONEG */
nla_total_size(sizeof(u8)) + /* _PAUSE_RX */
nla_total_size(sizeof(u8)); /* _PAUSE_TX */
+
+ if (req_base->flags & ETHTOOL_FLAG_STATS)
+ n += nla_total_size(0) + /* _PAUSE_STATS */
+ nla_total_size_64bit(sizeof(u64)) *
+ (ETHTOOL_A_PAUSE_STAT_MAX - 2);
+ return n;
+}
+
+static int ethtool_put_stat(struct sk_buff *skb, u64 val, u16 attrtype,
+ u16 padtype)
+{
+ if (val == ETHTOOL_STAT_NOT_SET)
+ return 0;
+ if (nla_put_u64_64bit(skb, attrtype, val, padtype))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int pause_put_stats(struct sk_buff *skb,
+ const struct ethtool_pause_stats *pause_stats)
+{
+ const u16 pad = ETHTOOL_A_PAUSE_STAT_PAD;
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_PAUSE_STATS);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (ethtool_put_stat(skb, pause_stats->tx_pause_frames,
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES, pad) ||
+ ethtool_put_stat(skb, pause_stats->rx_pause_frames,
+ ETHTOOL_A_PAUSE_STAT_RX_FRAMES, pad))
+ goto err_cancel;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
}
static int pause_fill_reply(struct sk_buff *skb,
@@ -63,6 +119,10 @@ static int pause_fill_reply(struct sk_buff *skb,
nla_put_u8(skb, ETHTOOL_A_PAUSE_TX, !!pauseparam->tx_pause))
return -EMSGSIZE;
+ if (req_base->flags & ETHTOOL_FLAG_STATS &&
+ pause_put_stats(skb, &data->pausestat))
+ return -EMSGSIZE;
+
return 0;
}
@@ -89,6 +149,7 @@ pause_set_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
[ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_U8 },
[ETHTOOL_A_PAUSE_RX] = { .type = NLA_U8 },
[ETHTOOL_A_PAUSE_TX] = { .type = NLA_U8 },
+ [ETHTOOL_A_PAUSE_STATS] = { .type = NLA_REJECT },
};
int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info)
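
On the driver side the contract is simple: pause_prepare_data() pre-fills every counter with ETHTOOL_STAT_NOT_SET, the driver overwrites what its hardware can report, and ethtool_put_stat() silently skips anything left untouched. A minimal sketch of a get_pause_stats() implementation — my_hw_rd64() and the counter names are hypothetical:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* hypothetical register accessor and counter IDs */
enum { TX_PAUSE_CNT, RX_PAUSE_CNT };
static u64 my_hw_rd64(struct net_device *dev, int counter);

static void example_get_pause_stats(struct net_device *dev,
				    struct ethtool_pause_stats *stats)
{
	/* counters left at ETHTOOL_STAT_NOT_SET are not reported */
	stats->tx_pause_frames = my_hw_rd64(dev, TX_PAUSE_CNT);
	stats->rx_pause_frames = my_hw_rd64(dev, RX_PAUSE_CNT);
}
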
diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c
index 84f23289475b..d93bf2da0f34 100644
--- a/net/ethtool/tunnels.c
+++ b/net/ethtool/tunnels.c
@@ -200,7 +200,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
reply_len = ret + ethnl_reply_header_size();
rskb = ethnl_reply_init(reply_len, req_info.dev,
- ETHTOOL_MSG_TUNNEL_INFO_GET,
+ ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
ETHTOOL_A_TUNNEL_INFO_HEADER,
info, &reply_payload);
if (!rskb) {
@@ -273,7 +273,7 @@ int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
goto cont;
ehdr = ethnl_dump_put(skb, cb,
- ETHTOOL_MSG_TUNNEL_INFO_GET);
+ ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
if (!ehdr) {
ret = -EMSGSIZE;
goto out;
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index 7e11a6c35bc3..4cfd9e829c7b 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -60,17 +60,7 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
return 0;
}
-/* hsr_node_table_open - Open the node_table file
- *
- * Description:
- * This routine opens a debugfs file node_table of specific hsr
- * or prp device
- */
-static int
-hsr_node_table_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hsr_node_table_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
void hsr_debugfs_rename(struct net_device *dev)
{
@@ -85,13 +75,6 @@ void hsr_debugfs_rename(struct net_device *dev)
priv->node_tbl_root = d;
}
-static const struct file_operations hsr_fops = {
- .open = hsr_node_table_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
/* hsr_debugfs_init - create hsr node_table file for dumping
* the node table
*
@@ -113,7 +96,7 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
de = debugfs_create_file("node_table", S_IFREG | 0444,
priv->node_tbl_root, priv,
- &hsr_fops);
+ &hsr_node_table_fops);
if (IS_ERR(de)) {
pr_err("Cannot create hsr node_table file\n");
debugfs_remove(priv->node_tbl_root);
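
DEFINE_SHOW_ATTRIBUTE() generates both the open() wrapper and the file_operations that the removed code spelled out by hand. Paraphrased from include/linux/seq_file.h, the macro expands to roughly:

static int hsr_node_table_open(struct inode *inode, struct file *file)
{
	return single_open(file, hsr_node_table_show, inode->i_private);
}

static const struct file_operations hsr_node_table_fops = {
	.owner		= THIS_MODULE,
	.open		= hsr_node_table_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
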
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 06c3cd988760..0e4681cf71db 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -76,7 +76,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);
if (proto >= HSR_PROTOCOL_MAX) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
return -EINVAL;
}
@@ -84,14 +84,14 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
proto_version = HSR_V0;
} else {
if (proto == HSR_PROTOCOL_PRP) {
- NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported\n");
+ NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
return -EINVAL;
}
proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
if (proto_version > HSR_V1) {
NL_SET_ERR_MSG_MOD(extack,
- "Only HSR version 0/1 supported\n");
+ "Only HSR version 0/1 supported");
return -EINVAL;
}
}
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index e3939f76b024..74a2ef598c31 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -28,23 +28,18 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
-static int btf_sk_storage_get_ids[5];
static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;
-
-static int btf_sk_storage_delete_ids[5];
static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;
-static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids,
- const struct bpf_func_proto *from)
+static void convert_sk_func_proto(struct bpf_func_proto *to, const struct bpf_func_proto *from)
{
int i;
*to = *from;
- to->btf_id = to_btf_ids;
for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
to->arg_type[i] = ARG_PTR_TO_BTF_ID;
- to->btf_id[i] = tcp_sock_id;
+ to->arg_btf_id[i] = &tcp_sock_id;
}
}
}
@@ -64,12 +59,8 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
- convert_sk_func_proto(&btf_sk_storage_get_proto,
- btf_sk_storage_get_ids,
- &bpf_sk_storage_get_proto);
- convert_sk_func_proto(&btf_sk_storage_delete_proto,
- btf_sk_storage_delete_ids,
- &bpf_sk_storage_delete_proto);
+ convert_sk_func_proto(&btf_sk_storage_get_proto, &bpf_sk_storage_get_proto);
+ convert_sk_func_proto(&btf_sk_storage_delete_proto, &bpf_sk_storage_delete_proto);
return 0;
}
@@ -185,8 +176,8 @@ static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
/* In case we want to report error later */
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &tcp_sock_id,
.arg2_type = ARG_ANYTHING,
- .btf_id = &tcp_sock_id,
};
static const struct bpf_func_proto *
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 41079490a118..86a23e4a6a50 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -362,6 +362,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
fl4.flowi4_uid = sock_net_uid(net, NULL);
+ fl4.flowi4_multipath_hash = 0;
no_addr = idev->ifa_list == NULL;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 93816d47e55a..366a4507b5a3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -203,8 +203,8 @@ errout:
}
EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
-static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen,
- struct nlattr **req_nlas)
+static int inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr **req_nlas)
{
struct nlattr *nla;
int remaining;
@@ -212,9 +212,13 @@ static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen,
nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) {
int type = nla_type(nla);
+ if (type == INET_DIAG_REQ_PROTOCOL && nla_len(nla) != sizeof(u32))
+ return -EINVAL;
+
if (type < __INET_DIAG_REQ_MAX)
req_nlas[type] = nla;
}
+ return 0;
}
static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req,
@@ -591,7 +595,10 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
int err, protocol;
memset(&dump_data, 0, sizeof(dump_data));
- inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas);
+ err = inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas);
+ if (err)
+ return err;
+
protocol = inet_diag_get_protocol(req, &dump_data);
handler = inet_diag_lock_handler(protocol);
@@ -1197,8 +1204,11 @@ static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
if (!cb_data)
return -ENOMEM;
- inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas);
-
+ err = inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas);
+ if (err) {
+ kfree(cb_data);
+ return err;
+ }
nla = cb_data->inet_diag_nla_bc;
if (nla) {
err = inet_diag_bc_audit(nla, skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5fb536ff51f0..8b200252c655 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -74,6 +74,7 @@
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
+#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
@@ -1704,7 +1705,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
if (IS_ERR(rt))
return;
- inet_sk(sk)->tos = arg->tos;
+ inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 75c6013ff9a4..b2ea1a8c5fd6 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -554,6 +554,7 @@ static int ip_tun_parse_opts_vxlan(struct nlattr *attr,
attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP];
md->gbp = nla_get_u32(attr);
+ md->gbp &= VXLAN_GBP_MASK;
info->key.tun_flags |= TUNNEL_VXLAN_OPT;
}
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index bf9d4cd2d6e5..8c0f17c6863c 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -42,8 +42,8 @@ static int call_nexthop_notifiers(struct net *net,
{
int err;
- err = atomic_notifier_call_chain(&net->nexthop.notifier_chain,
- event_type, nh);
+ err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
+ event_type, nh);
return notifier_to_errno(err);
}
@@ -870,8 +870,6 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
bool do_flush = false;
struct fib_info *fi;
- call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
-
list_for_each_entry(fi, &nh->fi_list, nh_list) {
fi->fib_flags |= RTNH_F_DEAD;
do_flush = true;
@@ -909,6 +907,8 @@ static void __remove_nexthop(struct net *net, struct nexthop *nh,
static void remove_nexthop(struct net *net, struct nexthop *nh,
struct nl_info *nlinfo)
{
+ call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
+
/* remove from the tree */
rb_erase(&nh->rb_node, &net->nexthop.rb_root);
@@ -1959,14 +1959,15 @@ static struct notifier_block nh_netdev_notifier = {
int register_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return atomic_notifier_chain_register(&net->nexthop.notifier_chain, nb);
+ return blocking_notifier_chain_register(&net->nexthop.notifier_chain,
+ nb);
}
EXPORT_SYMBOL(register_nexthop_notifier);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&net->nexthop.notifier_chain,
- nb);
+ return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+ nb);
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
@@ -1986,7 +1987,7 @@ static int __net_init nexthop_net_init(struct net *net)
net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
if (!net->nexthop.devhash)
return -ENOMEM;
- ATOMIC_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
+ BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
return 0;
}
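
Switching the chain from atomic to blocking lets nexthop notifier callbacks sleep, which matters for consumers that push state to hardware. A sketch of such a consumer under that assumption, with error handling and the surrounding driver omitted:

#include <linux/notifier.h>
#include <net/nexthop.h>

static int example_nh_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	if (event == NEXTHOP_EVENT_DEL) {
		/* blocking chain: sleeping (e.g. firmware I/O) is
		 * allowed here, which an atomic chain forbids
		 */
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_nh_nb = {
	.notifier_call = example_nh_event,
};

/* from an init path: register_nexthop_notifier(net, &example_nh_nb); */
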
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2c05b863ae43..d15a78b26dfa 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -786,8 +786,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
neigh_event_send(n, NULL);
} else {
if (fib_lookup(net, fl4, &res, 0) == 0) {
- struct fib_nh_common *nhc = FIB_RES_NHC(res);
+ struct fib_nh_common *nhc;
+ fib_select_path(net, &res, fl4, skb);
+ nhc = FIB_RES_NHC(res);
update_or_create_fnhe(nhc, fl4->daddr, new_gw,
0, false,
jiffies + ip_rt_gc_timeout);
@@ -1013,6 +1015,7 @@ out: kfree_skb(skb);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
+ struct net *net = dev_net(dst->dev);
struct fib_result res;
bool lock = false;
u32 old_mtu;
@@ -1034,9 +1037,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
return;
rcu_read_lock();
- if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
- struct fib_nh_common *nhc = FIB_RES_NHC(res);
+ if (fib_lookup(net, fl4, &res, 0) == 0) {
+ struct fib_nh_common *nhc;
+ fib_select_path(net, &res, fl4, NULL);
+ nhc = FIB_RES_NHC(res);
update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
jiffies + ip_rt_mtu_expires);
}
@@ -2148,6 +2153,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_uid = sock_net_uid(net, NULL);
+ fl4.flowi4_multipath_hash = 0;
if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
flkeys = &_flkeys;
@@ -2668,8 +2674,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
fib_select_path(net, res, fl4, skb);
dev_out = FIB_RES_DEV(*res);
- fl4->flowi4_oif = dev_out->ifindex;
-
make_route:
rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 57a568875539..2a8bfa89a515 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -564,7 +564,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask |= EPOLLIN | EPOLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_is_writeable(sk)) {
+ if (__sk_stream_is_writeable(sk, 1)) {
mask |= EPOLLOUT | EPOLLWRNORM;
} else { /* send SIGIO later */
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -576,7 +576,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* pairs with the input side.
*/
smp_mb__after_atomic();
- if (sk_stream_is_writeable(sk))
+ if (__sk_stream_is_writeable(sk, 1))
mask |= EPOLLOUT | EPOLLWRNORM;
}
} else
@@ -1004,12 +1004,12 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
!tcp_skb_can_collapse_to(skb)) {
new_segment:
if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
+ goto wait_for_space;
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
tcp_rtx_and_write_queues_empty(sk));
if (!skb)
- goto wait_for_memory;
+ goto wait_for_space;
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
@@ -1028,7 +1028,7 @@ new_segment:
goto new_segment;
}
if (!sk_wmem_schedule(sk, copy))
- goto wait_for_memory;
+ goto wait_for_space;
if (can_coalesce) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
@@ -1069,9 +1069,8 @@ new_segment:
tcp_push_one(sk, mss_now);
continue;
-wait_for_sndbuf:
+wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -1282,7 +1281,7 @@ restart:
new_segment:
if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
+ goto wait_for_space;
if (unlikely(process_backlog >= 16)) {
process_backlog = 0;
@@ -1293,7 +1292,7 @@ new_segment:
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
first_skb);
if (!skb)
- goto wait_for_memory;
+ goto wait_for_space;
process_backlog++;
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1326,7 +1325,7 @@ new_segment:
struct page_frag *pfrag = sk_page_frag(sk);
if (!sk_page_frag_refill(sk, pfrag))
- goto wait_for_memory;
+ goto wait_for_space;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
@@ -1340,7 +1339,7 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, copy))
- goto wait_for_memory;
+ goto wait_for_space;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
pfrag->page,
@@ -1393,9 +1392,8 @@ new_segment:
tcp_push_one(sk, mss_now);
continue;
-wait_for_sndbuf:
+wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -1527,7 +1525,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-static void tcp_cleanup_rbuf(struct sock *sk, int copied)
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_sock *tp = tcp_sk(sk);
bool time_to_ack = false;
@@ -2698,6 +2696,7 @@ int tcp_disconnect(struct sock *sk, int flags)
if (icsk->icsk_ca_ops->release)
icsk->icsk_ca_ops->release(sk);
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ icsk->icsk_ca_initialized = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
@@ -3049,7 +3048,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
name[val] = 0;
lock_sock(sk);
- err = tcp_set_congestion_control(sk, name, true, true,
+ err = tcp_set_congestion_control(sk, name, true,
ns_capable(sock_net(sk)->user_ns,
CAP_NET_ADMIN));
release_sock(sk);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 62878cf26d9c..db47ac24d057 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -176,7 +176,7 @@ void tcp_assign_congestion_control(struct sock *sk)
void tcp_init_congestion_control(struct sock *sk)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
tcp_sk(sk)->prior_ssthresh = 0;
if (icsk->icsk_ca_ops->init)
@@ -185,6 +185,7 @@ void tcp_init_congestion_control(struct sock *sk)
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
+ icsk->icsk_ca_initialized = 1;
}
static void tcp_reinit_congestion_control(struct sock *sk,
@@ -340,7 +341,7 @@ out:
* already initialized.
*/
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin)
+ bool cap_net_admin)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
@@ -361,28 +362,14 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
goto out;
}
- if (!ca) {
+ if (!ca)
err = -ENOENT;
- } else if (!load) {
- const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
-
- if (bpf_try_module_get(ca, ca->owner)) {
- if (reinit) {
- tcp_reinit_congestion_control(sk, ca);
- } else {
- icsk->icsk_ca_ops = ca;
- bpf_module_put(old_ca, old_ca->owner);
- }
- } else {
- err = -EBUSY;
- }
- } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
+ else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
err = -EPERM;
- } else if (!bpf_try_module_get(ca, ca->owner)) {
+ else if (!bpf_try_module_get(ca, ca->owner))
err = -EBUSY;
- } else {
+ else
tcp_reinit_congestion_control(sk, ca);
- }
out:
rcu_read_unlock();
return err;
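
For context, the usual path into tcp_set_congestion_control() is the TCP_CONGESTION socket option. A short userspace example exercising it; CAP_NET_ADMIN is needed only for algorithms not flagged TCP_CONG_NON_RESTRICTED:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	const char *ca = "cubic";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       ca, strlen(ca)) < 0)
		perror("TCP_CONGESTION");

	return 0;
}
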
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3658ad84f0c6..748980862a09 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4908,7 +4908,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
int eaten;
if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb, &tp->rx_opt);
+ mptcp_incoming_options(sk, skb);
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
__kfree_skb(skb);
@@ -5332,12 +5332,6 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
return true;
}
-/* When incoming ACK allowed to free some skb from write_queue,
- * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket
- * on the exit from tcp input handler.
- *
- * PROBLEM: sndbuf expansion does not work well with largesend.
- */
static void tcp_new_space(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5352,16 +5346,13 @@ static void tcp_new_space(struct sock *sk)
static void tcp_check_space(struct sock *sk)
{
- if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
- sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
- /* pairs with tcp_poll() */
- smp_mb();
- if (sk->sk_socket &&
- test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
- tcp_new_space(sk);
- if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
- tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
- }
+ /* pairs with tcp_poll() */
+ smp_mb();
+ if (sk->sk_socket &&
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+ tcp_new_space(sk);
+ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}
}
@@ -5894,8 +5885,10 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
tp->snd_cwnd_stamp = tcp_jiffies32;
+ icsk->icsk_ca_initialized = 0;
bpf_skops_established(sk, bpf_op, skb);
- tcp_init_congestion_control(sk);
+ if (!icsk->icsk_ca_initialized)
+ tcp_init_congestion_control(sk);
tcp_init_buffer_space(sk);
}
@@ -6496,7 +6489,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_LAST_ACK:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb, &tp->rx_opt);
+ mptcp_incoming_options(sk, skb);
break;
}
fallthrough;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index ab79d36ed07f..386978dcd318 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1682,7 +1682,6 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
skb->truesize -= delta_truesize;
sk_wmem_queued_add(sk, -delta_truesize);
sk_mem_uncharge(sk, delta_truesize);
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
}
/* Any change of skb->len requires recalculation of tso factor. */
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 76bff79d6fed..747f56e0c636 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -303,6 +303,7 @@ config IPV6_SEG6_LWTUNNEL
config IPV6_SEG6_HMAC
bool "IPv6: Segment Routing HMAC support"
depends on IPV6
+ select CRYPTO
select CRYPTO_HMAC
select CRYPTO_SHA1
select CRYPTO_SHA256
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 44d68ed70f24..141c0a4c569a 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1997,14 +1997,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
/* Need to own table->tb6_lock */
int fib6_del(struct fib6_info *rt, struct nl_info *info)
{
- struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node,
- lockdep_is_held(&rt->fib6_table->tb6_lock));
- struct fib6_table *table = rt->fib6_table;
struct net *net = info->nl_net;
struct fib6_info __rcu **rtp;
struct fib6_info __rcu **rtp_next;
+ struct fib6_table *table;
+ struct fib6_node *fn;
+
+ if (rt == net->ipv6.fib6_null_entry)
+ return -ENOENT;
- if (!fn || rt == net->ipv6.fib6_null_entry)
+ table = rt->fib6_table;
+ fn = rcu_dereference_protected(rt->fib6_node,
+ lockdep_is_held(&table->tb6_lock));
+ if (!fn)
return -ENOENT;
WARN_ON(!(fn->fn_flags & RTN_RTINFO));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index c78e67d7747f..a2a65e327f49 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1492,7 +1492,7 @@ emsgsize:
* Otherwise, we need to reserve fragment header and
 * fragment alignment (= 8-15 octets, in total).
*
- * Note that we may need to "move" the data from the tail of
+ * Note that we may need to "move" the data from the tail
* of the buffer to the new fragment when we split
* the message.
*
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e8ee20720fe0..bde6d48a9942 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4202,7 +4202,7 @@ static struct fib6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
+ cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 862058dce6d0..8db59f4e5f13 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -458,7 +458,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
- * is already accepted it is treated as a connected one below.
+ * already accepted it is treated as a connected one below.
*/
if (fastopen && !fastopen->sk)
break;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 7de05be4fc33..7be5103ff2a8 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1006,7 +1006,7 @@ static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, stru
return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
-static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
struct l2tp_tunnel *tunnel = session->tunnel;
unsigned int data_len = skb->len;
@@ -1054,6 +1054,11 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb)
goto out_unlock;
}
+ /* Report transmitted length before we add encap header, which keeps
+ * statistics consistent for both UDP and IP encap tx/rx paths.
+ */
+ *len = skb->len;
+
inet = inet_sk(sk);
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
@@ -1095,10 +1100,10 @@ out_unlock:
*/
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
{
- unsigned int len = skb->len;
+ unsigned int len = 0;
int ret;
- ret = l2tp_xmit_core(session, skb);
+ ret = l2tp_xmit_core(session, skb, &len);
if (ret == NET_XMIT_SUCCESS) {
atomic_long_inc(&session->tunnel->stats.tx_packets);
atomic_long_add(len, &session->tunnel->stats.tx_bytes);
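
The fix reports the length captured before encapsulation, so tx byte counters describe the L2TP payload rather than payload plus headers. The general pattern, reduced to a sketch with an arbitrary 8-byte header:

#include <linux/skbuff.h>

static unsigned int example_xmit(struct sk_buff *skb)
{
	/* snapshot the payload length first ... */
	unsigned int len = skb->len;

	/* ... then push the (here: 8-byte) encap header; headroom
	 * is assumed to have been reserved by the caller
	 */
	skb_push(skb, 8);

	/* hand skb to the IP/UDP stack, then account "len" */
	return len;
}
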
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 314973033d03..26d2f8ba7029 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -560,7 +560,9 @@ static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat,
if (rate->idx < 0 || !rate->count)
return -1;
- if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ stat->bw = RATE_INFO_BW_160;
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
stat->bw = RATE_INFO_BW_80;
else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
stat->bw = RATE_INFO_BW_40;
@@ -668,20 +670,26 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
* This will not be very accurate, but much better than simply
* assuming un-aggregated tx in all cases.
*/
- if (duration > 400) /* <= VHT20 MCS2 1S */
+ if (duration > 400 * 1024) /* <= VHT20 MCS2 1S */
agg_shift = 1;
- else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */
+ else if (duration > 250 * 1024) /* <= VHT20 MCS3 1S or MCS1 2S */
agg_shift = 2;
- else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */
+ else if (duration > 150 * 1024) /* <= VHT20 MCS5 1S or MCS2 2S */
agg_shift = 3;
- else
+ else if (duration > 70 * 1024) /* <= VHT20 MCS5 2S */
agg_shift = 4;
+ else if (stat.encoding != RX_ENC_HE ||
+ duration > 20 * 1024) /* <= HE40 MCS6 2S */
+ agg_shift = 5;
+ else
+ agg_shift = 6;
duration *= len;
duration /= AVG_PKT_SIZE;
duration /= 1024;
+ duration += (overhead >> agg_shift);
- return duration + (overhead >> agg_shift);
+ return max_t(u32, duration, 4);
}
if (!conf)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b4e39e31a985..8d75a4045d6e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -837,6 +837,59 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_fils_discovery *params)
+{
+ struct fils_discovery_data *new, *old = NULL;
+ struct ieee80211_fils_discovery *fd;
+
+ if (!params->tmpl || !params->tmpl_len)
+ return -EINVAL;
+
+ fd = &sdata->vif.bss_conf.fils_discovery;
+ fd->min_interval = params->min_interval;
+ fd->max_interval = params->max_interval;
+
+ old = sdata_dereference(sdata->u.ap.fils_discovery, sdata);
+ new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->len = params->tmpl_len;
+ memcpy(new->data, params->tmpl, params->tmpl_len);
+ rcu_assign_pointer(sdata->u.ap.fils_discovery, new);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+ return 0;
+}
+
+static int
+ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_unsol_bcast_probe_resp *params)
+{
+ struct unsol_bcast_probe_resp_data *new, *old = NULL;
+
+ if (!params->tmpl || !params->tmpl_len)
+ return -EINVAL;
+
+ old = sdata_dereference(sdata->u.ap.unsol_bcast_probe_resp, sdata);
+ new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->len = params->tmpl_len;
+ memcpy(new->data, params->tmpl, params->tmpl_len);
+ rcu_assign_pointer(sdata->u.ap.unsol_bcast_probe_resp, new);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+ sdata->vif.bss_conf.unsol_bcast_probe_resp_interval =
+ params->interval;
+
+ return 0;
+}
+
static int ieee80211_set_ftm_responder_params(
struct ieee80211_sub_if_data *sdata,
const u8 *lci, size_t lci_len,
@@ -1099,12 +1152,26 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
err = ieee80211_assign_beacon(sdata, &params->beacon, NULL);
- if (err < 0) {
- ieee80211_vif_release_channel(sdata);
- return err;
- }
+ if (err < 0)
+ goto error;
changed |= err;
+ if (params->fils_discovery.max_interval) {
+ err = ieee80211_set_fils_discovery(sdata,
+ &params->fils_discovery);
+ if (err < 0)
+ goto error;
+ changed |= BSS_CHANGED_FILS_DISCOVERY;
+ }
+
+ if (params->unsol_bcast_probe_resp.interval) {
+ err = ieee80211_set_unsol_bcast_probe_resp(sdata,
+ &params->unsol_bcast_probe_resp);
+ if (err < 0)
+ goto error;
+ changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
+ }
+
err = drv_start_ap(sdata->local, sdata);
if (err) {
old = sdata_dereference(sdata->u.ap.beacon, sdata);
@@ -1112,8 +1179,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
kfree_rcu(old, rcu_head);
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
- ieee80211_vif_release_channel(sdata);
- return err;
+ goto error;
}
ieee80211_recalc_dtim(local, sdata);
@@ -1124,6 +1190,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
netif_carrier_on(vlan->dev);
return 0;
+
+error:
+ ieee80211_vif_release_channel(sdata);
+ return err;
}
static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -1160,6 +1230,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct ieee80211_local *local = sdata->local;
struct beacon_data *old_beacon;
struct probe_resp *old_probe_resp;
+ struct fils_discovery_data *old_fils_discovery;
+ struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp;
struct cfg80211_chan_def chandef;
sdata_assert_lock(sdata);
@@ -1168,6 +1240,11 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (!old_beacon)
return -ENOENT;
old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata);
+ old_fils_discovery = sdata_dereference(sdata->u.ap.fils_discovery,
+ sdata);
+ old_unsol_bcast_probe_resp =
+ sdata_dereference(sdata->u.ap.unsol_bcast_probe_resp,
+ sdata);
/* abort any running channel switch */
mutex_lock(&local->mtx);
@@ -1191,9 +1268,15 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
/* remove beacon and probe response */
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.fils_discovery, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.unsol_bcast_probe_resp, NULL);
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
+ if (old_fils_discovery)
+ kfree_rcu(old_fils_discovery, rcu_head);
+ if (old_unsol_bcast_probe_resp)
+ kfree_rcu(old_unsol_bcast_probe_resp, rcu_head);
kfree(sdata->vif.bss_conf.ftmr_params);
sdata->vif.bss_conf.ftmr_params = NULL;
@@ -1696,6 +1779,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
__ieee80211_check_fast_rx_iface(vlansdata);
+ drv_sta_set_4addr(local, sta->sdata, &sta->sta, true);
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
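
Both ieee80211_set_fils_discovery() and ieee80211_set_unsol_bcast_probe_resp() above follow the same RCU publish/retire idiom: allocate the new template, rcu_assign_pointer() it into place, then kfree_rcu() the old one. Stripped of mac80211 specifics, the idiom looks like this — cfg_mutex stands in for whatever lock serializes writers:

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

static DEFINE_MUTEX(cfg_mutex);		/* serializes writers */

struct blob {
	struct rcu_head rcu_head;
	int len;
	u8 data[];
};

static int example_update(struct blob __rcu **slot,
			  const u8 *tmpl, int len)
{
	struct blob *new, *old;

	old = rcu_dereference_protected(*slot,
					lockdep_is_held(&cfg_mutex));
	new = kzalloc(sizeof(*new) + len, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->len = len;
	memcpy(new->data, tmpl, len);
	rcu_assign_pointer(*slot, new);		/* publish */

	if (old)
		kfree_rcu(old, rcu_head);	/* retire after grace period */

	return 0;
}
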
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 54080290d6e2..90470392fdaa 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -408,6 +408,7 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_MULTI_BSSID),
FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID),
FLAG(AMPDU_KEYBORDER_SUPPORT),
+ FLAG(SUPPORTS_TX_ENCAP_OFFLOAD),
#undef FLAG
};
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 41d495d73d3a..bcdfd19a596b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1384,4 +1384,33 @@ static inline int drv_reset_tid_config(struct ieee80211_local *local,
return ret;
}
+
+static inline void drv_update_vif_offload(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ if (!local->ops->update_vif_offload)
+ return;
+
+ trace_drv_update_vif_offload(local, sdata);
+ local->ops->update_vif_offload(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
+
+static inline void drv_sta_set_4addr(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_sta_set_4addr(local, sdata, sta, enabled);
+ if (local->ops->sta_set_4addr)
+ local->ops->sta_set_4addr(&local->hw, &sdata->vif, sta, enabled);
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 6bf879660a93..ecd9229012bf 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -271,6 +271,18 @@ struct probe_resp {
u8 data[];
};
+struct fils_discovery_data {
+ struct rcu_head rcu_head;
+ int len;
+ u8 data[];
+};
+
+struct unsol_bcast_probe_resp_data {
+ struct rcu_head rcu_head;
+ int len;
+ u8 data[];
+};
+
struct ps_data {
/* yes, this looks ugly, but guarantees that we can later use
* bitmap_empty :)
@@ -286,6 +298,8 @@ struct ps_data {
struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
struct probe_resp __rcu *probe_resp;
+ struct fils_discovery_data __rcu *fils_discovery;
+ struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp;
/* to be used after channel switch. */
struct cfg80211_beacon_data *next_beacon;
@@ -989,8 +1003,6 @@ struct ieee80211_sub_if_data {
} debugfs;
#endif
- bool hw_80211_encap;
-
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
};
@@ -1767,6 +1779,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
bool update_bss);
+void ieee80211_recalc_offload(struct ieee80211_local *local);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -2040,8 +2053,6 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave);
-void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_hdr *hdr);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 9740ae8fa697..7ac9af66f545 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -348,439 +348,6 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
- const int offset)
-{
- struct ieee80211_local *local = sdata->local;
- u32 flags = sdata->u.mntr.flags;
-
-#define ADJUST(_f, _s) do { \
- if (flags & MONITOR_FLAG_##_f) \
- local->fif_##_s += offset; \
- } while (0)
-
- ADJUST(FCSFAIL, fcsfail);
- ADJUST(PLCPFAIL, plcpfail);
- ADJUST(CONTROL, control);
- ADJUST(CONTROL, pspoll);
- ADJUST(OTHER_BSS, other_bss);
-
-#undef ADJUST
-}
-
-static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
- int i;
-
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
- sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
- else if (local->hw.queues >= IEEE80211_NUM_ACS)
- sdata->vif.hw_queue[i] = i;
- else
- sdata->vif.hw_queue[i] = 0;
- }
- sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
-}
-
-int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
- int ret;
-
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return 0;
-
- ASSERT_RTNL();
-
- if (local->monitor_sdata)
- return 0;
-
- sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
- if (!sdata)
- return -ENOMEM;
-
- /* set up data */
- sdata->local = local;
- sdata->vif.type = NL80211_IFTYPE_MONITOR;
- snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
- wiphy_name(local->hw.wiphy));
- sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
-
- sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
-
- ieee80211_set_default_queues(sdata);
-
- ret = drv_add_interface(local, sdata);
- if (WARN_ON(ret)) {
- /* ok .. stupid driver, it asked for this! */
- kfree(sdata);
- return ret;
- }
-
- ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
- if (ret) {
- kfree(sdata);
- return ret;
- }
-
- mutex_lock(&local->iflist_mtx);
- rcu_assign_pointer(local->monitor_sdata, sdata);
- mutex_unlock(&local->iflist_mtx);
-
- mutex_lock(&local->mtx);
- ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
- mutex_unlock(&local->mtx);
- if (ret) {
- mutex_lock(&local->iflist_mtx);
- RCU_INIT_POINTER(local->monitor_sdata, NULL);
- mutex_unlock(&local->iflist_mtx);
- synchronize_net();
- drv_remove_interface(local, sdata);
- kfree(sdata);
- return ret;
- }
-
- skb_queue_head_init(&sdata->skb_queue);
- INIT_WORK(&sdata->work, ieee80211_iface_work);
-
- return 0;
-}
-
-void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
-
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return;
-
- ASSERT_RTNL();
-
- mutex_lock(&local->iflist_mtx);
-
- sdata = rcu_dereference_protected(local->monitor_sdata,
- lockdep_is_held(&local->iflist_mtx));
- if (!sdata) {
- mutex_unlock(&local->iflist_mtx);
- return;
- }
-
- RCU_INIT_POINTER(local->monitor_sdata, NULL);
- mutex_unlock(&local->iflist_mtx);
-
- synchronize_net();
-
- mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
- mutex_unlock(&local->mtx);
-
- drv_remove_interface(local, sdata);
-
- kfree(sdata);
-}
-
-/*
- * NOTE: Be very careful when changing this function, it must NOT return
- * an error on interface type changes that have been pre-checked, so most
- * checks should be in ieee80211_check_concurrent_iface.
- */
-int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- struct net_device *dev = wdev->netdev;
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- u32 changed = 0;
- int res;
- u32 hw_reconf_flags = 0;
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_WDS:
- if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
- return -ENOLINK;
- break;
- case NL80211_IFTYPE_AP_VLAN: {
- struct ieee80211_sub_if_data *master;
-
- if (!sdata->bss)
- return -ENOLINK;
-
- mutex_lock(&local->mtx);
- list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
- mutex_unlock(&local->mtx);
-
- master = container_of(sdata->bss,
- struct ieee80211_sub_if_data, u.ap);
- sdata->control_port_protocol =
- master->control_port_protocol;
- sdata->control_port_no_encrypt =
- master->control_port_no_encrypt;
- sdata->control_port_over_nl80211 =
- master->control_port_over_nl80211;
- sdata->control_port_no_preauth =
- master->control_port_no_preauth;
- sdata->vif.cab_queue = master->vif.cab_queue;
- memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
- sizeof(sdata->vif.hw_queue));
- sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
-
- mutex_lock(&local->key_mtx);
- sdata->crypto_tx_tailroom_needed_cnt +=
- master->crypto_tx_tailroom_needed_cnt;
- mutex_unlock(&local->key_mtx);
-
- break;
- }
- case NL80211_IFTYPE_AP:
- sdata->bss = &sdata->u.ap;
- break;
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_P2P_DEVICE:
- case NL80211_IFTYPE_OCB:
- case NL80211_IFTYPE_NAN:
- /* no special treatment */
- break;
- case NL80211_IFTYPE_UNSPECIFIED:
- case NUM_NL80211_IFTYPES:
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- /* cannot happen */
- WARN_ON(1);
- break;
- }
-
- if (local->open_count == 0) {
- res = drv_start(local);
- if (res)
- goto err_del_bss;
- /* we're brought up, everything changes */
- hw_reconf_flags = ~0;
- ieee80211_led_radio(local, true);
- ieee80211_mod_tpt_led_trig(local,
- IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
- }
-
- /*
- * Copy the hopefully now-present MAC address to
- * this interface, if it has the special null one.
- */
- if (dev && is_zero_ether_addr(dev->dev_addr)) {
- memcpy(dev->dev_addr,
- local->hw.wiphy->perm_addr,
- ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
-
- if (!is_valid_ether_addr(dev->dev_addr)) {
- res = -EADDRNOTAVAIL;
- goto err_stop;
- }
- }
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP_VLAN:
- /* no need to tell driver, but set carrier and chanctx */
- if (rtnl_dereference(sdata->bss->beacon)) {
- ieee80211_vif_vlan_copy_chanctx(sdata);
- netif_carrier_on(dev);
- } else {
- netif_carrier_off(dev);
- }
- break;
- case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
- local->cooked_mntrs++;
- break;
- }
-
- if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
- res = drv_add_interface(local, sdata);
- if (res)
- goto err_stop;
- } else if (local->monitors == 0 && local->open_count == 0) {
- res = ieee80211_add_virtual_monitor(local);
- if (res)
- goto err_stop;
- }
-
- /* must be before the call to ieee80211_configure_filter */
- local->monitors++;
- if (local->monitors == 1) {
- local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
- hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
- }
-
- ieee80211_adjust_monitor_flags(sdata, 1);
- ieee80211_configure_filter(local);
- mutex_lock(&local->mtx);
- ieee80211_recalc_idle(local);
- mutex_unlock(&local->mtx);
-
- netif_carrier_on(dev);
- break;
- default:
- if (coming_up) {
- ieee80211_del_virtual_monitor(local);
-
- res = drv_add_interface(local, sdata);
- if (res)
- goto err_stop;
- res = ieee80211_check_queues(sdata,
- ieee80211_vif_type_p2p(&sdata->vif));
- if (res)
- goto err_del_interface;
- }
-
- if (sdata->vif.type == NL80211_IFTYPE_AP) {
- local->fif_pspoll++;
- local->fif_probe_req++;
-
- ieee80211_configure_filter(local);
- } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- local->fif_probe_req++;
- }
-
- if (sdata->vif.probe_req_reg)
- drv_config_iface_filter(local, sdata,
- FIF_PROBE_REQ,
- FIF_PROBE_REQ);
-
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
- sdata->vif.type != NL80211_IFTYPE_NAN)
- changed |= ieee80211_reset_erp_info(sdata);
- ieee80211_bss_info_change_notify(sdata, changed);
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_OCB:
- netif_carrier_off(dev);
- break;
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_P2P_DEVICE:
- case NL80211_IFTYPE_NAN:
- break;
- default:
- /* not reached */
- WARN_ON(1);
- }
-
- /*
- * Set default queue parameters so drivers don't
- * need to initialise the hardware if the hardware
- * doesn't start up with sane defaults.
- * Enable QoS for anything but station interfaces.
- */
- ieee80211_set_wmm_default(sdata, true,
- sdata->vif.type != NL80211_IFTYPE_STATION);
- }
-
- set_bit(SDATA_STATE_RUNNING, &sdata->state);
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_WDS:
- /* Create STA entry for the WDS peer */
- sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
- GFP_KERNEL);
- if (!sta) {
- res = -ENOMEM;
- goto err_del_interface;
- }
-
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
-
- res = sta_info_insert(sta);
- if (res) {
- /* STA has been freed */
- goto err_del_interface;
- }
-
- rate_control_rate_init(sta);
- netif_carrier_on(dev);
- break;
- case NL80211_IFTYPE_P2P_DEVICE:
- rcu_assign_pointer(local->p2p_sdata, sdata);
- break;
- case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
- break;
- list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
- break;
- default:
- break;
- }
-
- /*
- * set_multicast_list will be invoked by the networking core
- * which will check whether any increments here were done in
- * error and sync them down to the hardware as filter flags.
- */
- if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
- atomic_inc(&local->iff_allmultis);
-
- if (coming_up)
- local->open_count++;
-
- if (hw_reconf_flags)
- ieee80211_hw_config(local, hw_reconf_flags);
-
- ieee80211_recalc_ps(local);
-
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- local->ops->wake_tx_queue) {
- /* XXX: for AP_VLAN, actually track AP queues */
- if (dev)
- netif_tx_start_all_queues(dev);
- } else if (dev) {
- unsigned long flags;
- int n_acs = IEEE80211_NUM_ACS;
- int ac;
-
- if (local->hw.queues < IEEE80211_NUM_ACS)
- n_acs = 1;
-
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- if (sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE ||
- (local->queue_stop_reasons[sdata->vif.cab_queue] == 0 &&
- skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) {
- for (ac = 0; ac < n_acs; ac++) {
- int ac_queue = sdata->vif.hw_queue[ac];
-
- if (local->queue_stop_reasons[ac_queue] == 0 &&
- skb_queue_empty(&local->pending[ac_queue]))
- netif_start_subqueue(dev, ac);
- }
- }
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-
- return 0;
- err_del_interface:
- drv_remove_interface(local, sdata);
- err_stop:
- if (!local->open_count)
- drv_stop(local);
- err_del_bss:
- sdata->bss = NULL;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
- mutex_lock(&local->mtx);
- list_del(&sdata->u.vlan.list);
- mutex_unlock(&local->mtx);
- }
- /* might already be clear but that doesn't matter */
- clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- return res;
-}
-
static int ieee80211_open(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1227,60 +794,547 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_get_stats64 = ieee80211_get_stats64,
};
-static void __ieee80211_set_hw_80211_encap(struct ieee80211_sub_if_data *sdata,
- bool enable)
+static bool ieee80211_iftype_supports_encap_offload(enum nl80211_iftype iftype)
{
- sdata->dev->netdev_ops = enable ? &ieee80211_dataif_8023_ops :
- &ieee80211_dataif_ops;
- sdata->hw_80211_encap = enable;
+ switch (iftype) {
+ /* P2P GO and client are mapped to AP/STATION types */
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ default:
+ return false;
+ }
}
-bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable)
+static bool ieee80211_set_sdata_offload_flags(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_sub_if_data *iter;
- struct ieee80211_key *key;
+ u32 flags;
+
+ flags = sdata->vif.offload_flags;
+
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) &&
+ ieee80211_iftype_supports_encap_offload(sdata->vif.type)) {
+ flags |= IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) &&
+ local->hw.wiphy->frag_threshold != (u32)-1)
+ flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (local->monitors)
+ flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ } else {
+ flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+
+ if (sdata->vif.offload_flags == flags)
+ return false;
+
+ sdata->vif.offload_flags = flags;
+ return true;
+}
+
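ieee80211_set_sdata_offload_flags() recomputes the entire offload flag word from the current constraints (hardware capability, fragmentation threshold, active monitors) and reports only whether anything changed, so callers can skip the driver notification when nothing moved. A minimal stand-alone sketch of that recompute-and-compare idiom — the names below are illustrative, not mac80211 API:

#include <stdbool.h>
#include <stdint.h>

#define OFFLOAD_ENCAP_ENABLED 0x1

struct vif_state {
	uint32_t offload_flags;
	bool hw_supports_encap;
	bool monitors_active;
};

/* Recompute flags from constraints; return true only on change, so the
 * caller knows whether the driver needs to be told. */
static bool recalc_offload_flags(struct vif_state *vif)
{
	uint32_t flags = vif->offload_flags;

	if (vif->hw_supports_encap && !vif->monitors_active)
		flags |= OFFLOAD_ENCAP_ENABLED;
	else
		flags &= ~OFFLOAD_ENCAP_ENABLED;

	if (vif->offload_flags == flags)
		return false;

	vif->offload_flags = flags;
	return true;
}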
+static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *bss = sdata;
+ bool enabled;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ if (!sdata->bss)
+ return;
+
+ bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
+ }
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) ||
+ !ieee80211_iftype_supports_encap_offload(bss->vif.type))
+ return;
+
+ enabled = bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ if (sdata->wdev.use_4addr &&
+ !(bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR))
+ enabled = false;
+
+ sdata->dev->netdev_ops = enabled ? &ieee80211_dataif_8023_ops :
+ &ieee80211_dataif_ops;
+}
+
+static void ieee80211_recalc_sdata_offload(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *vsdata;
+
+ if (ieee80211_set_sdata_offload_flags(sdata)) {
+ drv_update_vif_offload(local, sdata);
+ ieee80211_set_vif_encap_ops(sdata);
+ }
+
+ list_for_each_entry(vsdata, &local->interfaces, list) {
+ if (vsdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
+ vsdata->bss != &sdata->u.ap)
+ continue;
+
+ ieee80211_set_vif_encap_ops(vsdata);
+ }
+}
+
+void ieee80211_recalc_offload(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD))
+ return;
mutex_lock(&local->iflist_mtx);
- list_for_each_entry(iter, &local->interfaces, list) {
- struct ieee80211_sub_if_data *disable = NULL;
-
- if (vif->type == NL80211_IFTYPE_MONITOR) {
- disable = iter;
- __ieee80211_set_hw_80211_encap(iter, false);
- } else if (iter->vif.type == NL80211_IFTYPE_MONITOR) {
- disable = sdata;
- enable = false;
- }
- if (disable)
- sdata_dbg(disable,
- "disable hw 80211 encap due to mon co-exist\n");
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ ieee80211_recalc_sdata_offload(sdata);
}
+
mutex_unlock(&local->iflist_mtx);
+}
- if (enable == sdata->hw_80211_encap)
- return enable;
+void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
+ const int offset)
+{
+ struct ieee80211_local *local = sdata->local;
+ u32 flags = sdata->u.mntr.flags;
- if (!sdata->dev)
- return false;
+#define ADJUST(_f, _s) do { \
+ if (flags & MONITOR_FLAG_##_f) \
+ local->fif_##_s += offset; \
+ } while (0)
+
+ ADJUST(FCSFAIL, fcsfail);
+ ADJUST(PLCPFAIL, plcpfail);
+ ADJUST(CONTROL, control);
+ ADJUST(CONTROL, pspoll);
+ ADJUST(OTHER_BSS, other_bss);
+
+#undef ADJUST
+}
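The ADJUST() macro applies a single signed offset to every filter counter whose monitor flag is set, so the same function serves both interface-up (+1) and interface-down (-1) without duplicating the list; note that MONITOR_FLAG_CONTROL deliberately bumps both fif_control and fif_pspoll. A self-contained sketch of the same macro pattern:

#include <stdio.h>

#define MONITOR_FLAG_FCSFAIL 0x1
#define MONITOR_FLAG_CONTROL 0x2

struct filters { int fcsfail, control, pspoll; };

static void adjust_monitor_flags(struct filters *f, unsigned int flags,
				 int offset)
{
/* One expansion per (flag, counter) pair keeps the increment and
 * decrement paths from ever drifting apart. */
#define ADJUST(_f, _s) do { \
		if (flags & MONITOR_FLAG_##_f) \
			f->_s += offset; \
	} while (0)

	ADJUST(FCSFAIL, fcsfail);
	ADJUST(CONTROL, control);
	ADJUST(CONTROL, pspoll);	/* control frames imply PS-Poll too */

#undef ADJUST
}

int main(void)
{
	struct filters f = {0};

	adjust_monitor_flags(&f, MONITOR_FLAG_CONTROL, 1);	/* up */
	adjust_monitor_flags(&f, MONITOR_FLAG_CONTROL, -1);	/* down */
	printf("%d %d %d\n", f.fcsfail, f.control, f.pspoll);	/* 0 0 0 */
	return 0;
}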
+
+static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
+ sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
+ else if (local->hw.queues >= IEEE80211_NUM_ACS)
+ sdata->vif.hw_queue[i] = i;
+ else
+ sdata->vif.hw_queue[i] = 0;
+ }
+ sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
+}
+
+int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return 0;
+
+ ASSERT_RTNL();
+
+ if (local->monitor_sdata)
+ return 0;
+
+ sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ /* set up data */
+ sdata->local = local;
+ sdata->vif.type = NL80211_IFTYPE_MONITOR;
+ snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
+ wiphy_name(local->hw.wiphy));
+ sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
+
+ sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
+
+ ieee80211_set_default_queues(sdata);
+
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
+
+ ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
+ if (ret) {
+ kfree(sdata);
+ return ret;
+ }
+
+ mutex_lock(&local->iflist_mtx);
+ rcu_assign_pointer(local->monitor_sdata, sdata);
+ mutex_unlock(&local->iflist_mtx);
+
+ mutex_lock(&local->mtx);
+ ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
+ IEEE80211_CHANCTX_EXCLUSIVE);
+ mutex_unlock(&local->mtx);
+ if (ret) {
+ mutex_lock(&local->iflist_mtx);
+ RCU_INIT_POINTER(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+ synchronize_net();
+ drv_remove_interface(local, sdata);
+ kfree(sdata);
+ return ret;
+ }
+
+ skb_queue_head_init(&sdata->skb_queue);
+ INIT_WORK(&sdata->work, ieee80211_iface_work);
+
+ return 0;
+}
+
+void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+
+ ASSERT_RTNL();
+
+ mutex_lock(&local->iflist_mtx);
+
+ sdata = rcu_dereference_protected(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx));
+ if (!sdata) {
+ mutex_unlock(&local->iflist_mtx);
+ return;
+ }
+
+ RCU_INIT_POINTER(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+
+ synchronize_net();
+
+ mutex_lock(&local->mtx);
+ ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
+
+ drv_remove_interface(local, sdata);
+
+ kfree(sdata);
+}
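ieee80211_del_virtual_monitor() shows the canonical RCU unpublish order: clear the published pointer under the list mutex, wait for readers (synchronize_net()), and only then remove the interface and free the memory. A rough user-space analogue of the ordering only — wait_for_readers() is a stand-in for a real grace-period primitive such as liburcu's synchronize_rcu():

#include <pthread.h>
#include <stdlib.h>

struct monitor { int id; };

static struct monitor *monitor_sdata;	/* published pointer */
static pthread_mutex_t iflist_mtx = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_readers(void)
{
	/* stand-in for synchronize_net()/synchronize_rcu() */
}

static void del_virtual_monitor(void)
{
	struct monitor *m;

	pthread_mutex_lock(&iflist_mtx);
	m = monitor_sdata;	/* lock holder may read plainly; the kernel
				 * uses rcu_dereference_protected() here */
	if (!m) {
		pthread_mutex_unlock(&iflist_mtx);
		return;
	}
	monitor_sdata = NULL;	/* unpublish first (RCU_INIT_POINTER)... */
	pthread_mutex_unlock(&iflist_mtx);

	wait_for_readers();	/* ...then wait out everyone still using 'm'... */

	free(m);		/* ...and only now is freeing safe */
}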
+
+/*
+ * NOTE: Be very careful when changing this function, it must NOT return
+ * an error on interface type changes that have been pre-checked, so most
+ * checks should be in ieee80211_check_concurrent_iface.
+ */
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct net_device *dev = wdev->netdev;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ u32 changed = 0;
+ int res;
+ u32 hw_reconf_flags = 0;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_WDS:
+ if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
+ return -ENOLINK;
+ break;
+ case NL80211_IFTYPE_AP_VLAN: {
+ struct ieee80211_sub_if_data *master;
+
+ if (!sdata->bss)
+ return -ENOLINK;
+
+ mutex_lock(&local->mtx);
+ list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+ mutex_unlock(&local->mtx);
+
+ master = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ sdata->control_port_protocol =
+ master->control_port_protocol;
+ sdata->control_port_no_encrypt =
+ master->control_port_no_encrypt;
+ sdata->control_port_over_nl80211 =
+ master->control_port_over_nl80211;
+ sdata->control_port_no_preauth =
+ master->control_port_no_preauth;
+ sdata->vif.cab_queue = master->vif.cab_queue;
+ memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
+ sizeof(sdata->vif.hw_queue));
+ sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+ mutex_lock(&local->key_mtx);
+ sdata->crypto_tx_tailroom_needed_cnt +=
+ master->crypto_tx_tailroom_needed_cnt;
+ mutex_unlock(&local->key_mtx);
+
+ break;
+ }
+ case NL80211_IFTYPE_AP:
+ sdata->bss = &sdata->u.ap;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_OCB:
+ case NL80211_IFTYPE_NAN:
+ /* no special treatment */
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NUM_NL80211_IFTYPES:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ /* cannot happen */
+ WARN_ON(1);
+ break;
+ }
+
+ if (local->open_count == 0) {
+ res = drv_start(local);
+ if (res)
+ goto err_del_bss;
+ /* we're brought up, everything changes */
+ hw_reconf_flags = ~0;
+ ieee80211_led_radio(local, true);
+ ieee80211_mod_tpt_led_trig(local,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
+ }
+
+ /*
+ * Copy the hopefully now-present MAC address to
+ * this interface, if it has the special null one.
+ */
+ if (dev && is_zero_ether_addr(dev->dev_addr)) {
+ memcpy(dev->dev_addr,
+ local->hw.wiphy->perm_addr,
+ ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ res = -EADDRNOTAVAIL;
+ goto err_stop;
+ }
+ }
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ /* no need to tell driver, but set carrier and chanctx */
+ if (rtnl_dereference(sdata->bss->beacon)) {
+ ieee80211_vif_vlan_copy_chanctx(sdata);
+ netif_carrier_on(dev);
+ ieee80211_set_vif_encap_ops(sdata);
+ } else {
+ netif_carrier_off(dev);
+ }
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
+ local->cooked_mntrs++;
+ break;
+ }
+
+ if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+ res = drv_add_interface(local, sdata);
+ if (res)
+ goto err_stop;
+ } else if (local->monitors == 0 && local->open_count == 0) {
+ res = ieee80211_add_virtual_monitor(local);
+ if (res)
+ goto err_stop;
+ }
+
+ /* must be before the call to ieee80211_configure_filter */
+ local->monitors++;
+ if (local->monitors == 1) {
+ local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
+ hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
+ }
+
+ ieee80211_adjust_monitor_flags(sdata, 1);
+ ieee80211_configure_filter(local);
+ ieee80211_recalc_offload(local);
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
+
+ netif_carrier_on(dev);
+ break;
+ default:
+ if (coming_up) {
+ ieee80211_del_virtual_monitor(local);
+ ieee80211_set_sdata_offload_flags(sdata);
+
+ res = drv_add_interface(local, sdata);
+ if (res)
+ goto err_stop;
+
+ ieee80211_set_vif_encap_ops(sdata);
+ res = ieee80211_check_queues(sdata,
+ ieee80211_vif_type_p2p(&sdata->vif));
+ if (res)
+ goto err_del_interface;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ local->fif_pspoll++;
+ local->fif_probe_req++;
+
+ ieee80211_configure_filter(local);
+ } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ local->fif_probe_req++;
+ }
+
+ if (sdata->vif.probe_req_reg)
+ drv_config_iface_filter(local, sdata,
+ FIF_PROBE_REQ,
+ FIF_PROBE_REQ);
+
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_NAN)
+ changed |= ieee80211_reset_erp_info(sdata);
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
+ netif_carrier_off(dev);
+ break;
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
+ break;
+ default:
+ /* not reached */
+ WARN_ON(1);
+ }
- if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) &&
- (local->hw.wiphy->frag_threshold != (u32)-1))
- enable = false;
+ /*
+ * Set default queue parameters so drivers don't
+ * need to initialise the hardware if the hardware
+ * doesn't start up with sane defaults.
+ * Enable QoS for anything but station interfaces.
+ */
+ ieee80211_set_wmm_default(sdata, true,
+ sdata->vif.type != NL80211_IFTYPE_STATION);
+ }
- mutex_lock(&sdata->local->key_mtx);
- list_for_each_entry(key, &sdata->key_list, list) {
- if (key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
- enable = false;
+ set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_WDS:
+ /* Create STA entry for the WDS peer */
+ sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
+ GFP_KERNEL);
+ if (!sta) {
+ res = -ENOMEM;
+ goto err_del_interface;
+ }
+
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+ res = sta_info_insert(sta);
+ if (res) {
+ /* STA has been freed */
+ goto err_del_interface;
+ }
+
+ rate_control_rate_init(sta);
+ netif_carrier_on(dev);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ rcu_assign_pointer(local->p2p_sdata, sdata);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
+ break;
+ list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
+ break;
+ default:
+ break;
}
- mutex_unlock(&sdata->local->key_mtx);
- __ieee80211_set_hw_80211_encap(sdata, enable);
+ /*
+ * set_multicast_list will be invoked by the networking core
+ * which will check whether any increments here were done in
+ * error and sync them down to the hardware as filter flags.
+ */
+ if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
+ atomic_inc(&local->iff_allmultis);
+
+ if (coming_up)
+ local->open_count++;
- return enable;
+ if (hw_reconf_flags)
+ ieee80211_hw_config(local, hw_reconf_flags);
+
+ ieee80211_recalc_ps(local);
+
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ local->ops->wake_tx_queue) {
+ /* XXX: for AP_VLAN, actually track AP queues */
+ if (dev)
+ netif_tx_start_all_queues(dev);
+ } else if (dev) {
+ unsigned long flags;
+ int n_acs = IEEE80211_NUM_ACS;
+ int ac;
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ if (sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE ||
+ (local->queue_stop_reasons[sdata->vif.cab_queue] == 0 &&
+ skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) {
+ for (ac = 0; ac < n_acs; ac++) {
+ int ac_queue = sdata->vif.hw_queue[ac];
+
+ if (local->queue_stop_reasons[ac_queue] == 0 &&
+ skb_queue_empty(&local->pending[ac_queue]))
+ netif_start_subqueue(dev, ac);
+ }
+ }
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+
+ return 0;
+ err_del_interface:
+ drv_remove_interface(local, sdata);
+ err_stop:
+ if (!local->open_count)
+ drv_stop(local);
+ err_del_bss:
+ sdata->bss = NULL;
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ mutex_lock(&local->mtx);
+ list_del(&sdata->u.vlan.list);
+ mutex_unlock(&local->mtx);
+ }
+ /* might already be clear but that doesn't matter */
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+ return res;
}
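ieee80211_do_open() ends with the usual kernel error ladder: forward jumps into a chain of labels that unwind in exact reverse order of setup (interface added, hardware started, bss linked), so each failure point releases exactly what was acquired before it. A generic sketch of that shape, with trivial stand-in steps:

static int start_hw(void)      { return 0; }
static int add_iface(void)     { return 0; }
static int check_queues(void)  { return 0; }
static void remove_iface(void) { }
static void stop_hw(void)      { }
static void unlink_bss(void)   { }

static int do_open(void)
{
	int res;

	res = start_hw();
	if (res)
		goto err_del_bss;	/* nothing else to undo yet */

	res = add_iface();
	if (res)
		goto err_stop;

	res = check_queues();
	if (res)
		goto err_del_interface;

	return 0;

err_del_interface:
	remove_iface();
err_stop:
	stop_hw();
err_del_bss:
	unlink_bss();
	return res;
}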
-EXPORT_SYMBOL(ieee80211_set_hw_80211_encap);
static void ieee80211_if_free(struct net_device *dev)
{
@@ -1484,7 +1538,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.txpower = INT_MIN; /* unset */
sdata->noack_map = 0;
- sdata->hw_80211_encap = false;
/* only monitor/p2p-device differ */
if (sdata->dev) {
@@ -1619,6 +1672,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ieee80211_teardown_sdata(sdata);
+ ieee80211_set_sdata_offload_flags(sdata);
ret = drv_change_interface(local, sdata, internal_type, p2p);
if (ret)
type = ieee80211_vif_type_p2p(&sdata->vif);
@@ -1631,6 +1685,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ieee80211_check_queues(sdata, type);
ieee80211_setup_sdata(sdata, type);
+ ieee80211_set_vif_encap_ops(sdata);
err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 2df636c32432..8c5f829ff6d7 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -177,13 +177,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
}
}
- /* TKIP countermeasures don't work in encap offload mode */
- if (key->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
- sdata->hw_80211_encap) {
- sdata_dbg(sdata, "TKIP is not allowed in hw 80211 encap mode\n");
- return -EINVAL;
- }
-
ret = drv_set_key(key->local, SET_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
@@ -219,14 +212,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- /* We cannot do software crypto of data frames with
- * encapsulation offload enabled. However for 802.11w to
- * function properly we need cmac/gmac keys.
- */
- if (sdata->hw_80211_encap)
- return -EINVAL;
- fallthrough;
-
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index bec23d2eee7a..313eee12410e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -212,7 +212,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
skb->priority = 7;
info->control.vif = &sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
}
@@ -1163,7 +1163,7 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
skb_to_free = skb_dequeue(&mpath->frame_queue);
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
skb_queue_tail(&mpath->frame_queue, skb);
if (skb_to_free)
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 031e905f684a..76d19c09d26e 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -432,7 +432,7 @@ static void mpsp_qos_null_append(struct sta_info *sta,
info = IEEE80211_SKB_CB(new_skb);
info->control.vif = &sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
__skb_queue_tail(frames, new_skb);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ac870309b911..2489c6c64c2d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2432,23 +2432,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
}
-void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_hdr *hdr)
-{
- /*
- * We can postpone the mgd.timer whenever receiving unicast frames
- * from AP because we know that the connection is working both ways
- * at that time. But multicast frames (and hence also beacons) must
- * be ignored here, because we need to trigger the timer during
- * data idle periods for sending the periodic probe request to the
- * AP we're connected to.
- */
- if (is_multicast_ether_addr(hdr->addr1))
- return;
-
- ieee80211_sta_reset_conn_monitor(sdata);
-}
-
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -2521,21 +2504,13 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
{
ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time);
- if (!ieee80211_is_data(hdr->frame_control))
- return;
-
- if (ieee80211_is_any_nullfunc(hdr->frame_control) &&
- sdata->u.mgd.probe_send_count > 0) {
- if (ack)
- ieee80211_sta_reset_conn_monitor(sdata);
- else
- sdata->u.mgd.nullfunc_failed = true;
- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ if (!ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ !sdata->u.mgd.probe_send_count)
return;
- }
- if (ack)
- ieee80211_sta_reset_conn_monitor(sdata);
+ if (!ack)
+ sdata->u.mgd.nullfunc_failed = true;
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -3548,6 +3523,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out;
}
+ if (sdata->wdev.use_4addr)
+ drv_sta_set_4addr(local, sdata, &sta->sta, true);
+
mutex_unlock(&sdata->local->sta_mtx);
/*
@@ -3605,8 +3583,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* Start timer to probe the connection to the AP now.
* Also start the timer that will detect beacon loss.
*/
- ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
ieee80211_sta_reset_beacon_monitor(sdata);
+ ieee80211_sta_reset_conn_monitor(sdata);
ret = true;
out:
@@ -4577,10 +4555,26 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
from_timer(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ unsigned long timeout;
if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
return;
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ if (!sta)
+ return;
+
+ timeout = sta->status_stats.last_ack;
+ if (time_before(sta->status_stats.last_ack, sta->rx_stats.last_rx))
+ timeout = sta->rx_stats.last_rx;
+ timeout += IEEE80211_CONNECTION_IDLE_TIME;
+
+ if (time_is_before_jiffies(timeout)) {
+ mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
+ return;
+ }
+
ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
}
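The reworked connection-monitor timer derives its deadline from per-station state — the later of last_ack and last_rx plus the idle interval — and then decides between re-arming the timer and queueing the probe work, instead of probing unconditionally. A sketch of the wraparound-safe deadline arithmetic involved; the constant and helper names here are stand-ins, not the mac80211 symbols:

#include <stdbool.h>

#define IDLE_TIME 30UL	/* ticks; stand-in for IEEE80211_CONNECTION_IDLE_TIME */

/* Returns true if the idle deadline has passed; otherwise *rearm_at is
 * the tick at which the timer should fire next. */
static bool conn_idle_expired(unsigned long now, unsigned long last_ack,
			      unsigned long last_rx, unsigned long *rearm_at)
{
	unsigned long timeout = last_ack;

	if ((long)(last_ack - last_rx) < 0)	/* time_before(last_ack, last_rx) */
		timeout = last_rx;
	timeout += IDLE_TIME;

	if ((long)(timeout - now) > 0) {	/* deadline still ahead */
		*rearm_at = timeout;
		return false;
	}
	return true;
}

The signed-difference comparisons mirror the kernel's time_before()/time_is_before_jiffies() helpers, which stay correct across counter wraparound where a plain "<" would not.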
@@ -4861,6 +4855,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
+ bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ;
struct ieee80211_bss *bss = (void *)cbss->priv;
int ret;
u32 i;
@@ -4879,7 +4874,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
- if (!sband->vht_cap.vht_supported && !is_6ghz) {
+ if (!sband->vht_cap.vht_supported && is_5ghz) {
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 836cde516a18..7f88a1b2215c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -451,7 +451,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
else if (status->bw == RATE_INFO_BW_5)
channel_flags |= IEEE80211_CHAN_QUARTER;
- if (status->band == NL80211_BAND_5GHZ)
+ if (status->band == NL80211_BAND_5GHZ ||
+ status->band == NL80211_BAND_6GHZ)
channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
else if (status->encoding != RX_ENC_LEGACY)
channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
@@ -1811,9 +1812,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_stats.last_rate = sta_stats_encode_rate(status);
}
- if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_sta_rx_notify(rx->sdata, hdr);
-
sta->rx_stats.fragments++;
u64_stats_update_begin(&rx->sta->rx_stats.syncp);
@@ -2899,7 +2897,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
info->control.jiffies = jiffies;
if (is_multicast_ether_addr(fwd_hdr->addr1)) {
@@ -4148,7 +4146,6 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
fastrx.expected_ds_bits = 0;
} else {
- fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
fastrx.expected_ds_bits =
@@ -4378,11 +4375,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
pskb_trim(skb, skb->len - fast_rx->icv_len))
goto drop;
- if (unlikely(fast_rx->sta_notify)) {
- ieee80211_sta_rx_notify(rx->sdata, hdr);
- fast_rx->sta_notify = false;
- }
-
/* statistics part of ieee80211_rx_h_sta_process() */
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d5010116cf4d..91a61b44b4e0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -336,7 +336,6 @@ struct ieee80211_fast_tx {
* @expected_ds_bits: from/to DS bits expected
* @icv_len: length of the MIC if present
* @key: bool indicating encryption is expected (key is set)
- * @sta_notify: notify the MLME code (once)
 * @internal_forward: forward frames internally on AP/VLAN type interfaces
* @uses_rss: copy of USES_RSS hw flag
* @da_offs: offset of the DA in the header (for header conversion)
@@ -352,7 +351,6 @@ struct ieee80211_fast_rx {
__le16 expected_ds_bits;
u8 icv_len;
u8 key:1,
- sta_notify:1,
internal_forward:1,
uses_rss:1;
u8 da_offs, sa_offs;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 0794396a7988..7fe5bececfd9 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -66,8 +66,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
info->control.jiffies = jiffies;
info->control.vif = &sta->sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
- IEEE80211_TX_INTFL_RETRANSMISSION;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ info->flags |= IEEE80211_TX_INTFL_RETRANSMISSION;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
sta->status_stats.filtered++;
@@ -184,18 +184,6 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_mgmt *mgmt = (void *) skb->data;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-
- if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- sta->status_stats.last_ack = jiffies;
- if (txinfo->status.is_valid_ack_signal) {
- sta->status_stats.last_ack_signal =
- (s8)txinfo->status.ack_signal;
- sta->status_stats.ack_signal_filled = true;
- ewma_avg_signal_add(&sta->status_stats.avg_ack_signal,
- -txinfo->status.ack_signal);
- }
- }
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -890,7 +878,8 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
}
static void __ieee80211_tx_status(struct ieee80211_hw *hw,
- struct ieee80211_tx_status *status)
+ struct ieee80211_tx_status *status,
+ int rates_idx, int retry_count)
{
struct sk_buff *skb = status->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -899,17 +888,12 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
struct sta_info *sta;
__le16 fc;
struct ieee80211_supported_band *sband;
- int retry_count;
- int rates_idx;
bool send_to_cooked;
bool acked;
bool noack_success;
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
- u16 tx_time_est;
-
- rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
sband = local->hw.wiphy->bands[info->band];
fc = hdr->frame_control;
@@ -987,24 +971,14 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
ieee80211_handle_filtered_frame(local, sta, skb);
return;
- } else {
+ } else if (ieee80211_is_data_present(fc)) {
if (!acked && !noack_success)
- sta->status_stats.retry_failed++;
- sta->status_stats.retry_count += retry_count;
+ sta->status_stats.msdu_failed[tid]++;
- if (ieee80211_is_data_present(fc)) {
- if (!acked && !noack_success)
- sta->status_stats.msdu_failed[tid]++;
-
- sta->status_stats.msdu_retries[tid] +=
- retry_count;
- }
+ sta->status_stats.msdu_retries[tid] +=
+ retry_count;
}
- rate_control_tx_status(local, sband, status);
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- ieee80211s_update_metric(local, sta, status);
-
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
@@ -1012,37 +986,6 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
acked, info->status.tx_time);
-
- if (info->status.tx_time &&
- wiphy_ext_feature_isset(local->hw.wiphy,
- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
- ieee80211_sta_register_airtime(&sta->sta, tid,
- info->status.tx_time, 0);
-
- if ((tx_time_est = ieee80211_info_get_tx_time_est(info)) > 0) {
- /* Do this here to avoid the expensive lookup of the sta
- * in ieee80211_report_used_skb().
- */
- ieee80211_sta_update_pending_airtime(local, sta,
- skb_get_queue_mapping(skb),
- tx_time_est,
- true);
- ieee80211_info_set_tx_time_est(info, 0);
- }
-
- if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- if (acked) {
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
-
- /* Track when last TDLS packet was ACKed */
- sta->status_stats.last_pkt_time = jiffies;
- } else if (noack_success) {
- /* nothing to do here, do not account as lost */
- } else {
- ieee80211_lost_packet(sta, info);
- }
- }
}
/* SNMP counters
@@ -1101,7 +1044,10 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
* with this test...
*/
if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
- dev_kfree_skb(skb);
+ if (status->free_list)
+ list_add_tail(&skb->list, status->free_list);
+ else
+ dev_kfree_skb(skb);
return;
}
@@ -1126,7 +1072,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (sta)
status.sta = &sta->sta;
- __ieee80211_tx_status(hw, &status);
+ ieee80211_tx_status_ext(hw, &status);
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_tx_status);
@@ -1137,10 +1083,12 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
+ struct sk_buff *skb = status->skb;
struct ieee80211_supported_band *sband;
- struct sta_info *sta;
- int retry_count;
+ struct sta_info *sta = NULL;
+ int rates_idx, retry_count;
bool acked, noack_success;
+ u16 tx_time_est;
if (pubsta) {
sta = container_of(pubsta, struct sta_info, sta);
@@ -1149,13 +1097,22 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sta->tx_stats.last_rate_info = *status->rate;
}
- if (status->skb)
- return __ieee80211_tx_status(hw, status);
+ if (skb && (tx_time_est =
+ ieee80211_info_get_tx_time_est(IEEE80211_SKB_CB(skb))) > 0) {
+ /* Do this here to avoid the expensive lookup of the sta
+ * in ieee80211_report_used_skb().
+ */
+ ieee80211_sta_update_pending_airtime(local, sta,
+ skb_get_queue_mapping(skb),
+ tx_time_est,
+ true);
+ ieee80211_info_set_tx_time_est(IEEE80211_SKB_CB(skb), 0);
+ }
- if (!status->sta)
- return;
+ if (!status->info)
+ goto free;
- ieee80211_tx_get_rates(hw, info, &retry_count);
+ rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
sband = hw->wiphy->bands[info->band];
@@ -1167,20 +1124,30 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
- if (acked) {
- sta->status_stats.last_ack = jiffies;
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
+ if (acked) {
+ sta->status_stats.last_ack = jiffies;
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
- /* Track when last packet was ACKed */
- sta->status_stats.last_pkt_time = jiffies;
- } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
- return;
- } else if (noack_success) {
- /* nothing to do here, do not account as lost */
- } else {
- ieee80211_lost_packet(sta, info);
+ /* Track when last packet was ACKed */
+ sta->status_stats.last_pkt_time = jiffies;
+
+ if (info->status.is_valid_ack_signal) {
+ sta->status_stats.last_ack_signal =
+ (s8)info->status.ack_signal;
+ sta->status_stats.ack_signal_filled = true;
+ ewma_avg_signal_add(&sta->status_stats.avg_ack_signal,
+ -info->status.ack_signal);
+ }
+ } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ return;
+ } else if (noack_success) {
+ /* nothing to do here, do not account as lost */
+ } else {
+ ieee80211_lost_packet(sta, info);
+ }
}
rate_control_tx_status(local, sband, status);
@@ -1188,6 +1155,10 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
ieee80211s_update_metric(local, sta, status);
}
+ if (skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ return __ieee80211_tx_status(hw, status, rates_idx,
+ retry_count);
+
if (acked || noack_success) {
I802_DEBUG_INC(local->dot11TransmittedFrameCount);
if (!pubsta)
@@ -1199,6 +1170,16 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
} else {
I802_DEBUG_INC(local->dot11FailedCount);
}
+
+free:
+ if (!skb)
+ return;
+
+ ieee80211_report_used_skb(local, skb, false);
+ if (status->free_list)
+ list_add_tail(&skb->list, status->free_list);
+ else
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status_ext);
@@ -1225,69 +1206,23 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
- struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_status status = {
+ .skb = skb,
+ .info = IEEE80211_SKB_CB(skb),
+ };
struct sta_info *sta;
- int retry_count;
- int rates_idx;
- bool acked;
sdata = vif_to_sdata(vif);
- acked = info->flags & IEEE80211_TX_STAT_ACK;
- rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
-
rcu_read_lock();
- if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
- goto counters_update;
-
- if (IS_ERR(sta))
- goto counters_update;
-
- if (!acked)
- sta->status_stats.retry_failed++;
-
- if (rates_idx != -1)
- sta->tx_stats.last_rate = info->status.rates[rates_idx];
-
- sta->status_stats.retry_count += retry_count;
-
- if (ieee80211_hw_check(hw, REPORTS_TX_ACK_STATUS)) {
- if (acked && vif->type == NL80211_IFTYPE_STATION)
- ieee80211_sta_reset_conn_monitor(sdata);
-
- sta->status_stats.last_ack = jiffies;
- if (info->flags & IEEE80211_TX_STAT_ACK) {
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
+ if (!ieee80211_lookup_ra_sta(sdata, skb, &sta) && !IS_ERR(sta))
+ status.sta = &sta->sta;
- sta->status_stats.last_pkt_time = jiffies;
- } else {
- ieee80211_lost_packet(sta, info);
- }
- }
+ ieee80211_tx_status_ext(hw, &status);
-counters_update:
rcu_read_unlock();
- ieee80211_led_tx(local);
-
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- !(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED))
- goto skip_stats_update;
-
- I802_DEBUG_INC(local->dot11TransmittedFrameCount);
- if (is_multicast_ether_addr(skb->data))
- I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
- if (retry_count > 0)
- I802_DEBUG_INC(local->dot11RetryCount);
- if (retry_count > 1)
- I802_DEBUG_INC(local->dot11MultipleRetryCount);
-
-skip_stats_update:
- ieee80211_report_used_skb(local, skb, false);
- dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status_8023);
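After this refactor, both ieee80211_tx_status() and ieee80211_tx_status_8023() are thin adapters that package their arguments into struct ieee80211_tx_status and call ieee80211_tx_status_ext(), so rate accounting, ACK tracking, and freeing live in exactly one place. A minimal sketch of that funnel shape:

struct status { void *skb; void *info; void *sta; };

/* the single place where all TX-status accounting happens */
static void tx_status_ext(struct status *st)
{
	(void)st;	/* ...rates, ACK stats, free-list handling... */
}

/* legacy entry points become trivial adapters */
static void tx_status(void *skb, void *info)
{
	struct status st = { .skb = skb, .info = info };

	tx_status_ext(&st);
}

static void tx_status_8023(void *skb, void *info, void *sta)
{
	struct status st = { .skb = skb, .info = info, .sta = sta };

	tx_status_ext(&st);
}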
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 50ab5b9d8eab..89723907a094 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2734,6 +2734,39 @@ TRACE_EVENT(drv_get_ftm_responder_stats,
)
);
+DEFINE_EVENT(local_sdata_addr_evt, drv_update_vif_offload,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
+TRACE_EVENT(drv_sta_set_4addr,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled),
+
+ TP_ARGS(local, sdata, sta, enabled),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(bool, enabled)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->enabled = enabled;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " enabled:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->enabled
+ )
+);
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index d2136007e2eb..adc83d830691 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -531,7 +531,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
spin_unlock(&sta->ps_lock);
@@ -1134,7 +1134,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
tx->sta->sta.addr, tx->sta->sta.aid);
}
info->control.vif = &tx->sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
__skb_queue_tail(&tid_tx->pending, skb);
if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
@@ -1179,7 +1179,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
* we are doing the needed processing, so remove the flag
* now.
*/
- info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
hdr = (struct ieee80211_hdr *) skb->data;
@@ -1258,7 +1258,7 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
if ((!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
@@ -3649,7 +3649,7 @@ begin:
else
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
goto encap_out;
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
@@ -4190,38 +4190,18 @@ static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
struct net_device *dev, struct sta_info *sta,
- struct sk_buff *skb)
+ struct ieee80211_key *key, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ethhdr *ehdr = (struct ethhdr *)skb->data;
struct ieee80211_local *local = sdata->local;
- bool authorized = false;
- bool multicast;
- unsigned char *ra = ehdr->h_dest;
-
- if (IS_ERR(sta) || (sta && !sta->uploaded))
- sta = NULL;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
- ra = sdata->u.mgd.bssid;
-
- if (is_zero_ether_addr(ra))
- goto out_free;
-
- multicast = is_multicast_ether_addr(ra);
-
- if (sta)
- authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ struct tid_ampdu_tx *tid_tx;
+ u8 tid;
- if (!multicast && !authorized &&
- (ehdr->h_proto != sdata->control_port_protocol ||
- !ether_addr_equal(sdata->vif.addr, ehdr->h_source)))
- goto out_free;
-
- if (multicast && sdata->vif.type == NL80211_IFTYPE_AP &&
- !atomic_read(&sdata->u.ap.num_mcast_sta))
- goto out_free;
+ if (local->ops->wake_tx_queue) {
+ u16 queue = __ieee80211_select_queue(sdata, sta, skb);
+ skb_set_queue_mapping(skb, queue);
+ skb_get_hash(skb);
+ }
if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
@@ -4229,36 +4209,42 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
memset(info, 0, sizeof(*info));
- if (unlikely(!multicast && skb->sk &&
- skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
- info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
- &info->flags, NULL);
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (tid_tx) {
+ if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+ /* fall back to non-offload slow path */
+ __ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
+ return;
+ }
- if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
- if (sdata->control_port_no_encrypt)
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ if (tid_tx->timeout)
+ tid_tx->last_tx = jiffies;
}
- if (multicast)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ if (unlikely(skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
+ info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
+ &info->flags, NULL);
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
ieee80211_tx_stats(dev, skb->len);
- if (sta) {
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
- }
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
- info->control.flags |= IEEE80211_TX_CTRL_HW_80211_ENCAP;
+ info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
info->control.vif = &sdata->vif;
+ if (key)
+ info->control.hw_key = &key->conf;
+
ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
return;
@@ -4271,12 +4257,10 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+ struct ieee80211_key *key;
struct sta_info *sta;
-
- if (WARN_ON(!sdata->hw_80211_encap)) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+ bool offload = true;
if (unlikely(skb->len < ETH_HLEN)) {
kfree_skb(skb);
@@ -4285,11 +4269,26 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
rcu_read_lock();
- if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
kfree_skb(skb);
+ goto out;
+ }
+
+ if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ sdata->control_port_protocol == ehdr->h_proto))
+ offload = false;
+ else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
+ (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+ key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+ offload = false;
+
+ if (offload)
+ ieee80211_8023_xmit(sdata, dev, sta, key, skb);
else
- ieee80211_8023_xmit(sdata, dev, sta, skb);
+ ieee80211_subif_start_xmit(skb, dev);
+out:
rcu_read_unlock();
return NETDEV_TX_OK;
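ieee80211_subif_start_xmit_8023() now decides offload eligibility per frame: a missing, not-uploaded, or unauthorized station, a control-port (EAPOL) frame, a key not in hardware, or TKIP all route the frame down the regular slow path instead of dropping it. A condensed sketch of that gatekeeping — field names are illustrative:

#include <stdbool.h>

struct sta_state { bool uploaded, authorized; };
struct key_state { bool in_hw, is_tkip; };

static bool can_offload(const struct sta_state *sta,
			const struct key_state *key,
			bool is_control_port_frame)
{
	if (!sta || !sta->uploaded || !sta->authorized)
		return false;
	if (is_control_port_frame)
		return false;		/* EAPOL etc. need full processing */
	if (key && (!key->in_hw || key->is_tkip))
		return false;		/* software crypto can't be offloaded */
	return true;
}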
@@ -4365,7 +4364,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
sdata = vif_to_sdata(info->control.vif);
- if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+ if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (unlikely(!chanctx_conf)) {
dev_kfree_skb(skb);
@@ -4373,7 +4372,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
}
info->band = chanctx_conf->def.chan->band;
result = ieee80211_tx(sdata, NULL, skb, true);
- } else if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ } else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
dev_kfree_skb(skb);
return true;
@@ -5000,6 +4999,63 @@ out:
}
EXPORT_SYMBOL(ieee80211_proberesp_get);
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = NULL;
+ struct fils_discovery_data *tmpl = NULL;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return NULL;
+
+ rcu_read_lock();
+ tmpl = rcu_dereference(sdata->u.ap.fils_discovery);
+ if (!tmpl) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
+ if (skb) {
+ skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
+ skb_put_data(skb, tmpl->data, tmpl->len);
+ }
+
+ rcu_read_unlock();
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl);
+
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = NULL;
+ struct unsol_bcast_probe_resp_data *tmpl = NULL;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return NULL;
+
+ rcu_read_lock();
+ tmpl = rcu_dereference(sdata->u.ap.unsol_bcast_probe_resp);
+ if (!tmpl) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
+ if (skb) {
+ skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
+ skb_put_data(skb, tmpl->data, tmpl->len);
+ }
+
+ rcu_read_unlock();
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl);
+
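Both template getters follow the same shape: dereference the RCU-published template inside a read-side section, allocate, and copy before unlocking, so a concurrent template swap can never tear the copy. A liburcu-flavoured user-space sketch of the read-copy step (link with -lurcu; reader threads must call rcu_register_thread(); names are placeholders):

#include <urcu.h>	/* liburcu: rcu_read_lock(), rcu_dereference() */
#include <stdlib.h>
#include <string.h>

struct tmpl { size_t len; unsigned char data[]; };

static struct tmpl *published;	/* RCU-protected, swapped by writers */

static unsigned char *copy_template(size_t *out_len)
{
	struct tmpl *t;
	unsigned char *buf = NULL;

	rcu_read_lock();
	t = rcu_dereference(published);
	if (t) {
		buf = malloc(t->len);
		if (buf) {
			memcpy(buf, t->data, t->len);	/* copy while protected */
			*out_len = t->len;
		}
	}
	rcu_read_unlock();
	return buf;	/* NULL if no template published or allocation failed */
}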
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c8504ffc71a1..8d3bfc0fe176 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3353,9 +3353,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
he_chandef.center_freq1 =
ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0,
NL80211_BAND_6GHZ);
- he_chandef.center_freq2 =
- ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
- NL80211_BAND_6GHZ);
+ if (support_80_80 || support_160)
+ he_chandef.center_freq2 =
+ ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1,
+ NL80211_BAND_6GHZ);
}
if (!cfg80211_chandef_valid(&he_chandef)) {
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 9c6045f9c24d..fb0e3a657d2d 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -168,10 +168,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
/* take some capabilities as-is */
cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
vht_cap->cap = cap_info;
- vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
- IEEE80211_VHT_CAP_RXLDPC |
+ vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_VHT_TXOP_PS |
IEEE80211_VHT_CAP_HTC_VHT |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
@@ -180,6 +177,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+ vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK,
+ own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK);
+
/* and some based on our own capabilities */
switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
@@ -315,10 +315,6 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
- /* If HT IE reported 3839 bytes only, stay with that size. */
- if (sta->sta.max_amsdu_len == IEEE80211_MAX_MPDU_LEN_HT_3839)
- return;
-
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index ab52811523e9..c829e4a75325 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -34,11 +34,11 @@ void ieee802154_xmit_worker(struct work_struct *work)
if (res)
goto err_tx;
- ieee802154_xmit_complete(&local->hw, skb, false);
-
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ ieee802154_xmit_complete(&local->hw, skb, false);
+
return;
err_tx:
@@ -78,6 +78,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
/* async is priority, otherwise sync is fallback */
if (local->ops->xmit_async) {
+ unsigned int len = skb->len;
+
ret = drv_xmit_async(local, skb);
if (ret) {
ieee802154_wake_queue(&local->hw);
@@ -85,7 +87,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
}
dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += len;
} else {
local->tx_skb = skb;
queue_work(local->workqueue, &local->tx_work);
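Both mac802154 hunks fix the same hazard: once the skb is handed to ieee802154_xmit_complete() or drv_xmit_async(), it may already have been consumed and freed, so skb->len must be read into a local before the handoff. A distilled sketch of the capture-before-ownership-transfer pattern:

#include <stdlib.h>

struct pkt { unsigned int len; };
struct stats { unsigned long tx_packets, tx_bytes; };

static void xmit_complete(struct pkt *p)
{
	free(p);	/* consumes the packet; caller must not touch it again */
}

static void tx_one(struct pkt *p, struct stats *st)
{
	unsigned int len = p->len;	/* capture before ownership transfer */

	xmit_complete(p);		/* p is dead after this line */

	st->tx_packets++;
	st->tx_bytes += len;		/* the saved copy, never p->len */
}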
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 0a6a15f3456d..84d119436b22 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -22,6 +22,15 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+ SNMP_MIB_ITEM("OFOQueueTail", MPTCP_MIB_OFOQUEUETAIL),
+ SNMP_MIB_ITEM("OFOQueue", MPTCP_MIB_OFOQUEUE),
+ SNMP_MIB_ITEM("OFOMerge", MPTCP_MIB_OFOMERGE),
+ SNMP_MIB_ITEM("NoDSSInWindow", MPTCP_MIB_NODSSWINDOW),
+ SNMP_MIB_ITEM("DuplicateData", MPTCP_MIB_DUPDATA),
+ SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
+ SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
+ SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
+ SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index d7de340fc997..47bcecce1106 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -15,6 +15,15 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
+ MPTCP_MIB_OFOQUEUETAIL, /* Segments inserted into OoO queue tail */
+ MPTCP_MIB_OFOQUEUE, /* Segments inserted into OoO queue */
+ MPTCP_MIB_OFOMERGE, /* Segments merged in OoO queue */
+ MPTCP_MIB_NODSSWINDOW, /* Segments not in MPTCP windows */
+ MPTCP_MIB_DUPDATA, /* Segments discarded due to duplicate DSS */
+ MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */
+ MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */
+ MPTCP_MIB_RMADDR, /* Received RM_ADDR */
+ MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
__MPTCP_MIB_MAX
};
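Each new MIB counter requires two edits kept in lockstep: the enum entry in mib.h and the matching SNMP_MIB_ITEM() name in mib.c. A common way to keep such pairs honest is a designated-initializer name table plus a compile-time size check; a sketch of the idea (not the mptcp code itself):

#include <stdio.h>

enum mib_field {
	MIB_OFOQUEUE,
	MIB_DUPDATA,
	MIB_ADDADDR,
	MIB_MAX
};

static const char *const mib_names[] = {
	[MIB_OFOQUEUE]	= "OFOQueue",
	[MIB_DUPDATA]	= "DuplicateData",
	[MIB_ADDADDR]	= "AddAddr",
};

/* refuses to compile if a counter is added without a name */
_Static_assert(sizeof(mib_names) / sizeof(mib_names[0]) == MIB_MAX,
	       "mib_names out of sync with enum mib_field");

int main(void)
{
	for (int i = 0; i < MIB_MAX; i++)
		printf("%s\n", mib_names[i]);
	return 0;
}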
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 7fa822b55c34..411fd4a41796 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -11,6 +11,7 @@
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
+#include "mib.h"
static bool mptcp_cap_flag_sha256(u8 flags)
{
@@ -242,7 +243,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->add_addr = 1;
mp_opt->port = 0;
mp_opt->addr_id = *ptr++;
- pr_debug("ADD_ADDR: id=%d", mp_opt->addr_id);
+ pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
ptr += 4;
@@ -571,18 +572,19 @@ static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
}
#endif
-static bool mptcp_established_options_addr(struct sock *sk,
- unsigned int *size,
- unsigned int remaining,
- struct mptcp_out_options *opts)
+static bool mptcp_established_options_add_addr(struct sock *sk,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_addr_info saddr;
+ bool echo;
int len;
- if (!mptcp_pm_should_signal(msk) ||
- !(mptcp_pm_addr_signal(msk, remaining, &saddr)))
+ if (!mptcp_pm_should_add_signal(msk) ||
+ !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo)))
return false;
len = mptcp_add_addr_len(saddr.family);
@@ -594,22 +596,51 @@ static bool mptcp_established_options_addr(struct sock *sk,
if (saddr.family == AF_INET) {
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
opts->addr = saddr.addr;
- opts->ahmac = add_addr_generate_hmac(msk->local_key,
- msk->remote_key,
- opts->addr_id,
- &opts->addr);
+ if (!echo) {
+ opts->ahmac = add_addr_generate_hmac(msk->local_key,
+ msk->remote_key,
+ opts->addr_id,
+ &opts->addr);
+ }
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
else if (saddr.family == AF_INET6) {
opts->suboptions |= OPTION_MPTCP_ADD_ADDR6;
opts->addr6 = saddr.addr6;
- opts->ahmac = add_addr6_generate_hmac(msk->local_key,
- msk->remote_key,
- opts->addr_id,
- &opts->addr6);
+ if (!echo) {
+ opts->ahmac = add_addr6_generate_hmac(msk->local_key,
+ msk->remote_key,
+ opts->addr_id,
+ &opts->addr6);
+ }
}
#endif
- pr_debug("addr_id=%d, ahmac=%llu", opts->addr_id, opts->ahmac);
+ pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo);
+
+ return true;
+}
+
+static bool mptcp_established_options_rm_addr(struct sock *sk,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ u8 rm_id;
+
+ if (!mptcp_pm_should_rm_signal(msk) ||
+ !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_id)))
+ return false;
+
+ if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
+ return false;
+
+ *size = TCPOLEN_MPTCP_RM_ADDR_BASE;
+ opts->suboptions |= OPTION_MPTCP_RM_ADDR;
+ opts->rm_id = rm_id;
+
+ pr_debug("rm_id=%d", opts->rm_id);
return true;
}
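mptcp_established_options() hands each suboption writer the remaining TCP option space; a writer claims bytes through *size only after confirming it both has something to signal and fits, and the caller shrinks `remaining` by whatever was claimed before trying the next writer. A sketch of that budgeting contract:

#include <stdbool.h>

#define RM_ADDR_LEN 4U

static bool want_rm_addr(void) { return true; }	/* stand-in for PM state */

/* Writers report their size via *size and never exceed 'remaining'. */
static bool build_rm_addr(unsigned int *size, unsigned int remaining)
{
	if (!want_rm_addr())
		return false;
	if (remaining < RM_ADDR_LEN)
		return false;	/* doesn't fit this segment; retry later */
	*size = RM_ADDR_LEN;
	return true;
}

static unsigned int build_options(unsigned int remaining)
{
	unsigned int total = 0, opt_size;

	if (build_rm_addr(&opt_size, remaining)) {
		total += opt_size;
		remaining -= opt_size;
	}
	/* ...further suboption writers draw on what is left... */
	return total;
}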
@@ -640,7 +671,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
*size += opt_size;
remaining -= opt_size;
- if (mptcp_established_options_addr(sk, &opt_size, remaining, opts)) {
+ if (mptcp_established_options_add_addr(sk, &opt_size, remaining, opts)) {
+ *size += opt_size;
+ remaining -= opt_size;
+ ret = true;
+ } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
*size += opt_size;
remaining -= opt_size;
ret = true;
@@ -824,8 +859,7 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
return hmac == mp_opt->ahmac;
}
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
@@ -854,11 +888,21 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
addr.addr6 = mp_opt.addr6;
}
#endif
- if (!mp_opt.echo)
+ if (!mp_opt.echo) {
mptcp_pm_add_addr_received(msk, &addr);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
+ } else {
+ mptcp_pm_del_add_timer(msk, &addr);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
+ }
mp_opt.add_addr = 0;
}
+ if (mp_opt.rm_addr) {
+ mptcp_pm_rm_addr_received(msk, mp_opt.rm_id);
+ mp_opt.rm_addr = 0;
+ }
+
if (!mp_opt.dss)
return;
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index a8ad20559aaa..7e81f53d1e5d 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -13,23 +13,34 @@
/* path manager command handlers */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
- const struct mptcp_addr_info *addr)
+ const struct mptcp_addr_info *addr,
+ bool echo)
{
pr_debug("msk=%p, local_id=%d", msk, addr->id);
msk->pm.local = *addr;
- WRITE_ONCE(msk->pm.addr_signal, true);
+ WRITE_ONCE(msk->pm.add_addr_echo, echo);
+ WRITE_ONCE(msk->pm.add_addr_signal, true);
return 0;
}
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
- return -ENOTSUPP;
+ pr_debug("msk=%p, local_id=%d", msk, local_id);
+
+ msk->pm.rm_id = local_id;
+ WRITE_ONCE(msk->pm.rm_addr_signal, true);
+ return 0;
}
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
- return -ENOTSUPP;
+ pr_debug("msk=%p, local_id=%d", msk, local_id);
+
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_nl_rm_subflow_received(msk, local_id);
+ spin_unlock_bh(&msk->pm.lock);
+ return 0;
}
/* path manager event handlers */
@@ -46,7 +57,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- int ret;
+ int ret = 0;
pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
pm->subflows_max, READ_ONCE(pm->accept_subflow));
@@ -56,9 +67,11 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
return false;
spin_lock_bh(&pm->lock);
- ret = pm->subflows < pm->subflows_max;
- if (ret && ++pm->subflows == pm->subflows_max)
- WRITE_ONCE(pm->accept_subflow, false);
+ if (READ_ONCE(pm->accept_subflow)) {
+ ret = pm->subflows < pm->subflows_max;
+ if (ret && ++pm->subflows == pm->subflows_max)
+ WRITE_ONCE(pm->accept_subflow, false);
+ }
spin_unlock_bh(&pm->lock);
return ret;
@@ -135,38 +148,70 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
READ_ONCE(pm->accept_addr));
- /* avoid acquiring the lock if there is no room for further addresses */
- if (!READ_ONCE(pm->accept_addr))
- return;
-
spin_lock_bh(&pm->lock);
- /* be sure there is something to signal re-checking under PM lock */
- if (READ_ONCE(pm->accept_addr) &&
- mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
+ if (!READ_ONCE(pm->accept_addr))
+ mptcp_pm_announce_addr(msk, addr, true);
+ else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
pm->remote = *addr;
spin_unlock_bh(&pm->lock);
}
+void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ pr_debug("msk=%p remote_id=%d", msk, rm_id);
+
+ spin_lock_bh(&pm->lock);
+ mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
+ pm->rm_id = rm_id;
+ spin_unlock_bh(&pm->lock);
+}
+
/* path manager helpers */
-bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
- struct mptcp_addr_info *saddr)
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ struct mptcp_addr_info *saddr, bool *echo)
{
int ret = false;
spin_lock_bh(&msk->pm.lock);
/* double check after the lock is acquired */
- if (!mptcp_pm_should_signal(msk))
+ if (!mptcp_pm_should_add_signal(msk))
goto out_unlock;
if (remaining < mptcp_add_addr_len(msk->pm.local.family))
goto out_unlock;
*saddr = msk->pm.local;
- WRITE_ONCE(msk->pm.addr_signal, false);
+ *echo = READ_ONCE(msk->pm.add_addr_echo);
+ WRITE_ONCE(msk->pm.add_addr_signal, false);
+ ret = true;
+
+out_unlock:
+ spin_unlock_bh(&msk->pm.lock);
+ return ret;
+}
+
+bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ u8 *rm_id)
+{
+ int ret = false;
+
+ spin_lock_bh(&msk->pm.lock);
+
+ /* double check after the lock is acquired */
+ if (!mptcp_pm_should_rm_signal(msk))
+ goto out_unlock;
+
+ if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
+ goto out_unlock;
+
+ *rm_id = msk->pm.rm_id;
+ WRITE_ONCE(msk->pm.rm_addr_signal, false);
ret = true;
out_unlock:
@@ -185,13 +230,17 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
msk->pm.add_addr_accepted = 0;
msk->pm.local_addr_used = 0;
msk->pm.subflows = 0;
+ msk->pm.rm_id = 0;
WRITE_ONCE(msk->pm.work_pending, false);
- WRITE_ONCE(msk->pm.addr_signal, false);
+ WRITE_ONCE(msk->pm.add_addr_signal, false);
+ WRITE_ONCE(msk->pm.rm_addr_signal, false);
WRITE_ONCE(msk->pm.accept_addr, false);
WRITE_ONCE(msk->pm.accept_subflow, false);
+ WRITE_ONCE(msk->pm.add_addr_echo, false);
msk->pm.status = 0;
spin_lock_init(&msk->pm.lock);
+ INIT_LIST_HEAD(&msk->pm.anno_list);
mptcp_pm_nl_data_init(msk);
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 2c208d2e65cd..5a0e4d11bcc3 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -15,6 +15,7 @@
#include <uapi/linux/mptcp.h>
#include "protocol.h"
+#include "mib.h"
/* forward declaration */
static struct genl_family mptcp_genl_family;
@@ -23,12 +24,18 @@ static int pm_nl_pernet_id;
struct mptcp_pm_addr_entry {
struct list_head list;
- unsigned int flags;
- int ifindex;
struct mptcp_addr_info addr;
struct rcu_head rcu;
};
+struct mptcp_pm_add_entry {
+ struct list_head list;
+ struct mptcp_addr_info addr;
+ struct timer_list add_timer;
+ struct mptcp_sock *sock;
+ u8 retrans_times;
+};
+
struct pm_nl_pernet {
/* protects pernet updates */
spinlock_t lock;
@@ -42,6 +49,7 @@ struct pm_nl_pernet {
};
#define MPTCP_PM_ADDR_MAX 8
+#define ADD_ADDR_RETRANS_MAX 3
static bool addresses_equal(const struct mptcp_addr_info *a,
struct mptcp_addr_info *b, bool use_port)
@@ -66,6 +74,16 @@ static bool addresses_equal(const struct mptcp_addr_info *a,
return a->port == b->port;
}
+static bool address_zero(const struct mptcp_addr_info *addr)
+{
+ struct mptcp_addr_info zero;
+
+ memset(&zero, 0, sizeof(zero));
+ zero.family = addr->family;
+
+ return addresses_equal(addr, &zero, false);
+}
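
address_zero() treats an all-zero address (with the family copied over so the comparison is apples-to-apples) as unspecified: in mptcp_pm_nl_get_local_id() further down, such a source simply maps to local id 0 — the same id as the initial subflow's own address — instead of allocating a new pernet address entry.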
+
static void local_address(const struct sock_common *skc,
struct mptcp_addr_info *addr)
{
@@ -119,7 +137,7 @@ select_local_address(const struct pm_nl_pernet *pernet,
rcu_read_lock();
spin_lock_bh(&msk->join_list_lock);
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
- if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
+ if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
continue;
/* avoid any address already in use by subflows and
@@ -150,7 +168,7 @@ select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
* can lead to additional addresses not being announced.
*/
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
- if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+ if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
continue;
if (i++ == pos) {
ret = entry;
@@ -169,11 +187,126 @@ static void check_work_pending(struct mptcp_sock *msk)
WRITE_ONCE(msk->pm.work_pending, false);
}
+static struct mptcp_pm_add_entry *
+lookup_anno_list_by_saddr(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_add_entry *entry;
+
+ list_for_each_entry(entry, &msk->pm.anno_list, list) {
+ if (addresses_equal(&entry->addr, addr, false))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void mptcp_pm_add_timer(struct timer_list *timer)
+{
+ struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
+ struct mptcp_sock *msk = entry->sock;
+ struct sock *sk = (struct sock *)msk;
+
+ pr_debug("msk=%p", msk);
+
+ if (!msk)
+ return;
+
+ if (inet_sk_state_load(sk) == TCP_CLOSE)
+ return;
+
+ if (!entry->addr.id)
+ return;
+
+ if (mptcp_pm_should_add_signal(msk)) {
+ sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
+ goto out;
+ }
+
+ spin_lock_bh(&msk->pm.lock);
+
+ if (!mptcp_pm_should_add_signal(msk)) {
+ pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
+ mptcp_pm_announce_addr(msk, &entry->addr, false);
+ entry->retrans_times++;
+ }
+
+ if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
+ sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX);
+
+ spin_unlock_bh(&msk->pm.lock);
+
+out:
+ __sock_put(sk);
+}
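
The timer above gives ADD_ADDR announcements a simple retransmission policy: while a signal is still pending in the option writer the timer backs off to TCP_RTO_MAX / 8; otherwise the address is re-announced (without the echo flag) and the timer re-armed a full TCP_RTO_MAX later, up to ADD_ADDR_RETRANS_MAX attempts, with the peer's echo stopping it early via mptcp_pm_del_add_timer(). A minimal userspace sketch of the retry loop — every name below is a local stand-in, not a kernel API:

/* retry policy sketch: retransmit until the cap is hit */
#include <stdbool.h>
#include <stdio.h>

#define ADD_ADDR_RETRANS_MAX	3

struct add_entry {
	int retrans_times;
};

/* one timer firing; returns true when the timer should be re-armed */
static bool add_timer_fire(struct add_entry *e)
{
	printf("retransmit ADD_ADDR, attempt %d\n", e->retrans_times + 1);
	return ++e->retrans_times < ADD_ADDR_RETRANS_MAX;
}

int main(void)
{
	struct add_entry e = { .retrans_times = 0 };

	while (add_timer_fire(&e))
		;	/* in the kernel: sk_reset_timer(..., jiffies + TCP_RTO_MAX) */
	return 0;
}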
+
+struct mptcp_pm_add_entry *
+mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_add_entry *entry;
+ struct sock *sk = (struct sock *)msk;
+
+ spin_lock_bh(&msk->pm.lock);
+ entry = lookup_anno_list_by_saddr(msk, addr);
+ if (entry)
+ entry->retrans_times = ADD_ADDR_RETRANS_MAX;
+ spin_unlock_bh(&msk->pm.lock);
+
+ if (entry)
+ sk_stop_timer_sync(sk, &entry->add_timer);
+
+ return entry;
+}
+
+static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct mptcp_pm_add_entry *add_entry = NULL;
+ struct sock *sk = (struct sock *)msk;
+
+ if (lookup_anno_list_by_saddr(msk, &entry->addr))
+ return false;
+
+ add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
+ if (!add_entry)
+ return false;
+
+ list_add(&add_entry->list, &msk->pm.anno_list);
+
+ add_entry->addr = entry->addr;
+ add_entry->sock = msk;
+ add_entry->retrans_times = 0;
+
+ timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
+ sk_reset_timer(sk, &add_entry->add_timer, jiffies + TCP_RTO_MAX);
+
+ return true;
+}
+
+void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_add_entry *entry, *tmp;
+ struct sock *sk = (struct sock *)msk;
+ LIST_HEAD(free_list);
+
+ pr_debug("msk=%p", msk);
+
+ spin_lock_bh(&msk->pm.lock);
+ list_splice_init(&msk->pm.anno_list, &free_list);
+ spin_unlock_bh(&msk->pm.lock);
+
+ list_for_each_entry_safe(entry, tmp, &free_list, list) {
+ sk_stop_timer_sync(sk, &entry->add_timer);
+ kfree(entry);
+ }
+}
+
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
+ struct mptcp_addr_info remote = { 0 };
struct sock *sk = (struct sock *)msk;
struct mptcp_pm_addr_entry *local;
- struct mptcp_addr_info remote;
struct pm_nl_pernet *pernet;
pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
@@ -189,8 +322,10 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
msk->pm.add_addr_signaled);
if (local) {
- msk->pm.add_addr_signaled++;
- mptcp_pm_announce_addr(msk, &local->addr);
+ if (mptcp_pm_alloc_anno_list(msk, local)) {
+ msk->pm.add_addr_signaled++;
+ mptcp_pm_announce_addr(msk, &local->addr, false);
+ }
} else {
/* pick failed, avoid further attempts later */
msk->pm.local_addr_used = msk->pm.add_addr_signal_max;
@@ -210,8 +345,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
msk->pm.subflows++;
check_work_pending(msk);
spin_unlock_bh(&msk->pm.lock);
- __mptcp_subflow_connect(sk, local->ifindex,
- &local->addr, &remote);
+ __mptcp_subflow_connect(sk, &local->addr, &remote);
spin_lock_bh(&msk->pm.lock);
return;
}
@@ -257,13 +391,86 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
local.family = remote.family;
spin_unlock_bh(&msk->pm.lock);
- __mptcp_subflow_connect((struct sock *)msk, 0, &local, &remote);
+ __mptcp_subflow_connect((struct sock *)msk, &local, &remote);
spin_lock_bh(&msk->pm.lock);
+
+ mptcp_pm_announce_addr(msk, &remote, true);
+}
+
+void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct sock *sk = (struct sock *)msk;
+
+ pr_debug("address rm_id %d", msk->pm.rm_id);
+
+ if (!msk->pm.rm_id)
+ return;
+
+ if (list_empty(&msk->conn_list))
+ return;
+
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+ long timeout = 0;
+
+ if (msk->pm.rm_id != subflow->remote_id)
+ continue;
+
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, how);
+ __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ spin_lock_bh(&msk->pm.lock);
+
+ msk->pm.add_addr_accepted--;
+ msk->pm.subflows--;
+ WRITE_ONCE(msk->pm.accept_addr, true);
+
+ __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMADDR);
+
+ break;
+ }
+}
+
+void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct sock *sk = (struct sock *)msk;
+
+ pr_debug("subflow rm_id %d", rm_id);
+
+ if (!rm_id)
+ return;
+
+ if (list_empty(&msk->conn_list))
+ return;
+
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+ long timeout = 0;
+
+ if (rm_id != subflow->local_id)
+ continue;
+
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, how);
+ __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ spin_lock_bh(&msk->pm.lock);
+
+ msk->pm.local_addr_used--;
+ msk->pm.subflows--;
+
+ __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
+
+ break;
+ }
}
static bool address_use_port(struct mptcp_pm_addr_entry *entry)
{
- return (entry->flags &
+ return (entry->addr.flags &
(MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
MPTCP_PM_ADDR_FLAG_SIGNAL;
}
@@ -293,9 +500,9 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
goto out;
}
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
pernet->add_addr_signal_max++;
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
pernet->local_addr_max++;
entry->addr.id = pernet->next_id++;
@@ -323,10 +530,13 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
* addr
*/
local_address((struct sock_common *)msk, &msk_local);
- local_address((struct sock_common *)msk, &skc_local);
+ local_address((struct sock_common *)skc, &skc_local);
if (addresses_equal(&msk_local, &skc_local, false))
return 0;
+ if (address_zero(&skc_local))
+ return 0;
+
pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
rcu_read_lock();
@@ -341,12 +551,13 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
return ret;
/* address not found, add to local list */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
- entry->flags = 0;
entry->addr = skc_local;
+ entry->addr.ifindex = 0;
+ entry->addr.flags = 0;
ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
if (ret < 0)
kfree(entry);
@@ -460,14 +671,17 @@ static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
entry->addr.addr.s_addr = nla_get_in_addr(tb[addr_addr]);
skip_family:
- if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX])
- entry->ifindex = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
+ if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
+ u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
+
+ entry->addr.ifindex = val;
+ }
if (tb[MPTCP_PM_ADDR_ATTR_ID])
entry->addr.id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
- entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
+ entry->addr.flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
return 0;
}
@@ -517,6 +731,68 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
return NULL;
}
+static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_add_entry *entry;
+
+ entry = mptcp_pm_del_add_timer(msk, addr);
+ if (entry) {
+ list_del(&entry->list);
+ kfree(entry);
+ return true;
+ }
+
+ return false;
+}
+
+static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr,
+ bool force)
+{
+ bool ret;
+
+ ret = remove_anno_list_by_saddr(msk, addr);
+ if (ret || force) {
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_remove_addr(msk, addr->id);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+ return ret;
+}
+
+static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_sock *msk;
+ long s_slot = 0, s_num = 0;
+
+ pr_debug("remove_id=%d", addr->id);
+
+ while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ struct sock *sk = (struct sock *)msk;
+ bool remove_subflow;
+
+ if (list_empty(&msk->conn_list)) {
+ mptcp_pm_remove_anno_addr(msk, addr, false);
+ goto next;
+ }
+
+ lock_sock(sk);
+ remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ mptcp_pm_remove_anno_addr(msk, addr, remove_subflow);
+ if (remove_subflow)
+ mptcp_pm_remove_subflow(msk, addr->id);
+ release_sock(sk);
+
+next:
+ sock_put(sk);
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
@@ -532,19 +808,21 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
entry = __lookup_addr_by_id(pernet, addr.addr.id);
if (!entry) {
GENL_SET_ERR_MSG(info, "address not found");
- ret = -EINVAL;
- goto out;
+ spin_unlock_bh(&pernet->lock);
+ return -EINVAL;
}
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
pernet->add_addr_signal_max--;
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
pernet->local_addr_max--;
pernet->addrs--;
list_del_rcu(&entry->list);
- kfree_rcu(entry, rcu);
-out:
spin_unlock_bh(&pernet->lock);
+
+ mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
+ kfree_rcu(entry, rcu);
+
return ret;
}
@@ -593,10 +871,10 @@ static int mptcp_nl_fill_addr(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
+ if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->addr.flags))
goto nla_put_failure;
- if (entry->ifindex &&
- nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
+ if (entry->addr.ifindex &&
+ nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->addr.ifindex))
goto nla_put_failure;
if (addr->family == AF_INET &&
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 683196225f91..34c037731f35 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -32,6 +32,8 @@ struct mptcp6_sock {
#endif
struct mptcp_skb_cb {
+ u64 map_seq;
+ u64 end_seq;
u32 offset;
};
@@ -110,64 +112,205 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
return 0;
}
-static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
- struct sk_buff *skb,
- unsigned int offset, size_t copy_len)
+static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ __kfree_skb(skb);
+}
+
+static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *from)
+{
+ bool fragstolen;
+ int delta;
+
+ if (MPTCP_SKB_CB(from)->offset ||
+ !skb_try_coalesce(to, from, &fragstolen, &delta))
+ return false;
+
+ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
+ MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+ to->len, MPTCP_SKB_CB(from)->end_seq);
+ MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+ kfree_skb_partial(from, fragstolen);
+ atomic_add(delta, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, delta);
+ return true;
+}
+
+static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
+ struct sk_buff *from)
+{
+ if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
+ return false;
+
+ return mptcp_try_coalesce((struct sock *)msk, to, from);
+}
+
+/* "inspired" by tcp_data_queue_ofo(), main differences:
+ * - use mptcp seqs
+ * - don't cope with sacks
+ */
+static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
struct sock *sk = (struct sock *)msk;
- struct sk_buff *tail;
+ struct rb_node **p, *parent;
+ u64 seq, end_seq, max_seq;
+ struct sk_buff *skb1;
+ int space;
+
+ seq = MPTCP_SKB_CB(skb)->map_seq;
+ end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ space = tcp_space(sk);
+ max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;
+
+ pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
+ RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ if (after64(seq, max_seq)) {
+ /* out of window */
+ mptcp_drop(sk, skb);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
+ return;
+ }
- __skb_unlink(skb, &ssk->sk_receive_queue);
+ p = &msk->out_of_order_queue.rb_node;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
+ if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
+ rb_link_node(&skb->rbnode, NULL, p);
+ rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
+ msk->ooo_last_skb = skb;
+ goto end;
+ }
- skb_ext_reset(skb);
- skb_orphan(skb);
- msk->ack_seq += copy_len;
+ /* with 2 subflows, adding at the end of the ooo queue is quite likely.
+ * Use of ooo_last_skb avoids the O(log N) rbtree lookup.
+ */
+ if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
+ return;
+ }
- tail = skb_peek_tail(&sk->sk_receive_queue);
- if (offset == 0 && tail) {
- bool fragstolen;
- int delta;
+ /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
+ if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
+ parent = &msk->ooo_last_skb->rbnode;
+ p = &parent->rb_right;
+ goto insert;
+ }
- if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
- kfree_skb_partial(skb, fragstolen);
- atomic_add(delta, &sk->sk_rmem_alloc);
- sk_mem_charge(sk, delta);
+ /* Find place to insert this segment. Handle overlaps on the way. */
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ skb1 = rb_to_skb(parent);
+ if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
+ p = &parent->rb_left;
+ continue;
+ }
+ if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
+ if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
+ /* All the bits are present. Drop. */
+ mptcp_drop(sk, skb);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ return;
+ }
+ if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
+ /* partial overlap:
+ * | skb |
+ * | skb1 |
+ * continue traversing
+ */
+ } else {
+ /* skb's seq == skb1's seq and skb covers skb1.
+ * Replace skb1 with skb.
+ */
+ rb_replace_node(&skb1->rbnode, &skb->rbnode,
+ &msk->out_of_order_queue);
+ mptcp_drop(sk, skb1);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ goto merge_right;
+ }
+ } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
return;
}
+ p = &parent->rb_right;
}
- skb_set_owner_r(skb, sk);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- MPTCP_SKB_CB(skb)->offset = offset;
-}
+insert:
+ /* Insert segment into RB tree. */
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
-static void mptcp_stop_timer(struct sock *sk)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
+merge_right:
+ /* Remove other segments covered by skb. */
+ while ((skb1 = skb_rb_next(skb)) != NULL) {
+ if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
+ break;
+ rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
+ mptcp_drop(sk, skb1);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ }
+ /* If there is no skb after us, we are the last_skb! */
+ if (!skb1)
+ msk->ooo_last_skb = skb;
- sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
- mptcp_sk(sk)->timer_ival = 0;
+end:
+ skb_condense(skb);
+ skb_set_owner_r(skb, sk);
}
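
The out-of-order queue is an rbtree keyed by 64-bit MPTCP data sequence numbers, so every ordering test above goes through wrap-safe serial-number arithmetic rather than a plain `<`. The helpers are the before64()/after64() pair declared in net/mptcp/protocol.h; a self-contained equivalent for reference:

/* wrap-safe 64-bit sequence comparison (mirrors protocol.h) */
#include <stdbool.h>
#include <stdint.h>

static inline bool before64(uint64_t seq1, uint64_t seq2)
{
	/* true when seq1 precedes seq2 modulo 2^64 */
	return (int64_t)(seq1 - seq2) < 0;
}

#define after64(seq2, seq1)	before64(seq1, seq2)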
-/* both sockets must be locked */
-static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
- struct sock *ssk)
+static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ struct sk_buff *skb, unsigned int offset,
+ size_t copy_len)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *tail;
- /* revalidate data sequence number.
- *
- * mptcp_subflow_data_available() is usually called
- * without msk lock. Its unlikely (but possible)
- * that msk->ack_seq has been advanced since the last
- * call found in-sequence data.
+ __skb_unlink(skb, &ssk->sk_receive_queue);
+
+ skb_ext_reset(skb);
+ skb_orphan(skb);
+
+ /* the skb map_seq accounts for the skb offset:
+ * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
+ * value
*/
- if (likely(dsn == msk->ack_seq))
+ MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
+ MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
+ MPTCP_SKB_CB(skb)->offset = offset;
+
+ if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
+ /* in sequence */
+ msk->ack_seq += copy_len;
+ tail = skb_peek_tail(&sk->sk_receive_queue);
+ if (tail && mptcp_try_coalesce(sk, tail, skb))
+ return true;
+
+ skb_set_owner_r(skb, sk);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
return true;
+ } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
+ mptcp_data_queue_ofo(msk, skb);
+ return false;
+ }
- subflow->data_avail = 0;
- return mptcp_subflow_data_available(ssk);
+ /* old data: keep it simple and drop the whole pkt; the sender
+ * will retransmit it if needed.
+ */
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ mptcp_drop(sk, skb);
+ return false;
+}
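
__mptcp_move_skb() now classifies each skb against msk->ack_seq three ways. As a worked example, with ack_seq == 1000: a mapping starting at map_seq 1000 with copy_len 100 is in sequence — it lands on the receive queue (coalescing with the tail when contiguous) and advances ack_seq to 1100; a mapping starting at 1200 sits in the future and goes to the out-of-order rbtree via mptcp_data_queue_ofo(); a mapping starting at 900 is old data, dropped and counted as MPTCP_MIB_DUPDATA, relying on the sender to retransmit whatever is still missing.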
+
+static void mptcp_stop_timer(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ mptcp_sk(sk)->timer_ival = 0;
}
static void mptcp_check_data_fin_ack(struct sock *sk)
@@ -313,11 +456,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
struct tcp_sock *tp;
bool done = false;
- if (!mptcp_subflow_dsn_valid(msk, ssk)) {
- *bytes = 0;
- return false;
- }
-
+ pr_debug("msk=%p ssk=%p", msk, ssk);
tp = tcp_sk(ssk);
do {
u32 map_remaining, offset;
@@ -355,9 +494,9 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
if (tp->urg_data)
done = true;
- __mptcp_move_skb(msk, ssk, skb, offset, len);
+ if (__mptcp_move_skb(msk, ssk, skb, offset, len))
+ moved += len;
seq += len;
- moved += len;
if (WARN_ON_ONCE(map_remaining < len))
break;
@@ -376,20 +515,56 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
} while (more_data_avail);
- *bytes = moved;
-
- /* If the moves have caught up with the DATA_FIN sequence number
- * it's time to ack the DATA_FIN and change socket state, but
- * this is not a good place to change state. Let the workqueue
- * do it.
- */
- if (mptcp_pending_data_fin(sk, NULL) &&
- schedule_work(&msk->work))
- sock_hold(sk);
+ *bytes += moved;
+ if (moved)
+ tcp_cleanup_rbuf(ssk, moved);
return done;
}
+static bool mptcp_ofo_queue(struct mptcp_sock *msk)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *skb, *tail;
+ bool moved = false;
+ struct rb_node *p;
+ u64 end_seq;
+
+ p = rb_first(&msk->out_of_order_queue);
+ pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ while (p) {
+ skb = rb_to_skb(p);
+ if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+ break;
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, &msk->out_of_order_queue);
+
+ if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
+ msk->ack_seq))) {
+ mptcp_drop(sk, skb);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ continue;
+ }
+
+ end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ tail = skb_peek_tail(&sk->sk_receive_queue);
+ if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
+ int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+
+ /* skip overlapping data, if any */
+ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
+ MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+ delta);
+ MPTCP_SKB_CB(skb)->offset += delta;
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+ msk->ack_seq = end_seq;
+ moved = true;
+ }
+ return moved;
+}
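
A drained out-of-order segment can partially overlap data that already reached the receive queue; the delta adjustment above skips the stale prefix rather than delivering it twice. For example (values illustrative): with ack_seq == 1016 and a queued skb covering map_seq 1000..1100, delta == 16, so the CB offset is bumped by 16, the reader only sees bytes 1016..1100, and ack_seq then jumps to the skb's end_seq, 1100.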
+
/* In most cases we will be able to lock the mptcp socket. If it's already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
@@ -405,8 +580,19 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
return false;
/* must re-check after taking the lock */
- if (!READ_ONCE(sk->sk_lock.owned))
+ if (!READ_ONCE(sk->sk_lock.owned)) {
__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ mptcp_ofo_queue(msk);
+
+ /* If the moves have caught up with the DATA_FIN sequence number
+ * it's time to ack the DATA_FIN and change socket state, but
+ * this is not a good place to change state. Let the workqueue
+ * do it.
+ */
+ if (mptcp_pending_data_fin(sk, NULL) &&
+ schedule_work(&msk->work))
+ sock_hold(sk);
+ }
spin_unlock_bh(&sk->sk_lock.slock);
@@ -415,9 +601,17 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
+ bool wake;
- set_bit(MPTCP_DATA_READY, &msk->flags);
+ /* move_skbs_to_msk below can legitimately clear the data_avail flag,
+ * but we will still need it later to properly wake the reader; cache
+ * its value
+ */
+ wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
+ if (wake)
+ set_bit(MPTCP_DATA_READY, &msk->flags);
if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
move_skbs_to_msk(msk, ssk))
@@ -438,7 +632,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
move_skbs_to_msk(msk, ssk);
}
wake:
- sk->sk_data_ready(sk);
+ if (wake)
+ sk->sk_data_ready(sk);
}
static void __mptcp_flush_join_list(struct mptcp_sock *msk)
@@ -472,7 +667,7 @@ void mptcp_data_acked(struct sock *sk)
{
mptcp_reset_timer(sk);
- if ((!sk_stream_is_writeable(sk) ||
+ if ((!test_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags) ||
(inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
schedule_work(&mptcp_sk(sk)->work))
sock_hold(sk);
@@ -567,6 +762,20 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
+static bool mptcp_is_writeable(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ if (!sk_stream_is_writeable((struct sock *)msk))
+ return false;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ if (sk_stream_is_writeable(subflow->tcp_sock))
+ return true;
+ }
+ return false;
+}
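
mptcp_is_writeable() demands space at both levels: the MPTCP socket itself must pass sk_stream_is_writeable() and so must at least one subflow. If the msk has room but every subflow's write queue is full, the function returns false, MPTCP_SEND_SPACE stays clear, and — per the mptcp_poll() change later in this patch, which now keys solely on that flag — poll() will not report EPOLLOUT until a subflow frees up.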
+
static void mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -609,8 +818,15 @@ out:
sk_mem_reclaim_partial(sk);
/* Only wake up writers if a subflow is ready */
- if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
+ if (mptcp_is_writeable(msk)) {
+ set_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags);
+ smp_mb__after_atomic();
+
+ /* set SEND_SPACE before sk_stream_write_space clears
+ * NOSPACE
+ */
sk_stream_write_space(sk);
+ }
}
}
@@ -801,60 +1017,128 @@ out:
return ret;
}
-static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
+static void mptcp_nospace(struct mptcp_sock *msk)
{
+ struct mptcp_subflow_context *subflow;
+
clear_bit(MPTCP_SEND_SPACE, &msk->flags);
smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
- /* enables sk->write_space() callbacks */
- set_bit(SOCK_NOSPACE, &sock->flags);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ struct socket *sock = READ_ONCE(ssk->sk_socket);
+
+ /* enables ssk->write_space() callbacks */
+ if (sock)
+ set_bit(SOCK_NOSPACE, &sock->flags);
+ }
+}
+
+static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+{
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ /* can't send if the JOIN hasn't completed yet (i.e. the subflow is not yet usable for mptcp) */
+ if (subflow->request_join && !subflow->fully_established)
+ return false;
+
+ /* only send if our side has not closed yet */
+ return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
}
-static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+#define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
+ sizeof(struct tcphdr) - \
+ MAX_TCP_OPTION_SPACE - \
+ sizeof(struct ipv6hdr) - \
+ sizeof(struct frag_hdr))
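
MPTCP_SEND_BURST_SIZE caps a burst at the most that still fits one 64 KiB TSO frame after worst-case headers. Assuming the usual sizes (20-byte TCP header, 40 bytes of TCP option space, 40-byte IPv6 header, 8-byte fragment header — none of these constants are spelled out in this patch), that works out to:

	65536 - 20 - 40 - 40 - 8 = 65428 bytes per burst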
+
+struct subflow_send_info {
+ struct sock *ssk;
+ u64 ratio;
+};
+
+static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
+ u32 *sndbuf)
{
+ struct subflow_send_info send_info[2];
struct mptcp_subflow_context *subflow;
- struct sock *backup = NULL;
+ int i, nr_active = 0;
+ struct sock *ssk;
+ u64 ratio;
+ u32 pace;
- sock_owned_by_me((const struct sock *)msk);
+ sock_owned_by_me((struct sock *)msk);
+ *sndbuf = 0;
if (!mptcp_ext_cache_refill(msk))
return NULL;
+ if (__mptcp_check_fallback(msk)) {
+ if (!msk->first)
+ return NULL;
+ *sndbuf = msk->first->sk_sndbuf;
+ return sk_stream_memory_free(msk->first) ? msk->first : NULL;
+ }
+
+ /* re-use the last subflow, if the burst allows it */
+ if (msk->last_snd && msk->snd_burst > 0 &&
+ sk_stream_memory_free(msk->last_snd) &&
+ mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
+ mptcp_for_each_subflow(msk, subflow) {
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
+ }
+ return msk->last_snd;
+ }
+
+ /* pick the subflow with the lowest wmem/wspace ratio */
+ for (i = 0; i < 2; ++i) {
+ send_info[i].ssk = NULL;
+ send_info[i].ratio = -1;
+ }
mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ if (!mptcp_subflow_active(subflow))
+ continue;
- if (!sk_stream_memory_free(ssk)) {
- struct socket *sock = ssk->sk_socket;
+ nr_active += !subflow->backup;
+ *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
+ if (!sk_stream_memory_free(subflow->tcp_sock))
+ continue;
- if (sock)
- mptcp_nospace(msk, sock);
+ pace = READ_ONCE(ssk->sk_pacing_rate);
+ if (!pace)
+ continue;
- return NULL;
+ ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
+ pace);
+ if (ratio < send_info[subflow->backup].ratio) {
+ send_info[subflow->backup].ssk = ssk;
+ send_info[subflow->backup].ratio = ratio;
}
+ }
- if (subflow->backup) {
- if (!backup)
- backup = ssk;
+ pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
+ msk, nr_active, send_info[0].ssk, send_info[0].ratio,
+ send_info[1].ssk, send_info[1].ratio);
- continue;
- }
+ /* pick the best backup if no other subflow is active */
+ if (!nr_active)
+ send_info[0].ssk = send_info[1].ssk;
- return ssk;
+ if (send_info[0].ssk) {
+ msk->last_snd = send_info[0].ssk;
+ msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
+ sk_stream_wspace(msk->last_snd));
+ return msk->last_snd;
}
-
- return backup;
+ return NULL;
}
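
The scheduler above keeps one best candidate per class — index 0 for regular subflows, index 1 for backups — and picks the lowest queued-bytes/pacing-rate ratio, i.e. the subflow expected to drain its write queue soonest; the 32-bit left shift keeps the division in integer arithmetic. A compact userspace sketch of the same selection, with the struct fields as illustrative stand-ins for the ssk state:

/* pick the subflow with the lowest wmem/pace ratio; prefer
 * non-backup subflows, falling back to the best backup
 */
#include <stdint.h>

struct fake_subflow {
	uint64_t wmem_queued;	/* bytes queued for transmit */
	uint64_t pacing_rate;	/* bytes per second */
	int backup;		/* 0 = regular, 1 = backup */
};

static int pick_subflow(const struct fake_subflow *sf, int n)
{
	uint64_t best[2] = { UINT64_MAX, UINT64_MAX };
	int pick[2] = { -1, -1 }, i;

	for (i = 0; i < n; i++) {
		uint64_t ratio;

		if (!sf[i].pacing_rate)
			continue;	/* mirrors the !pace bail-out above */
		ratio = (sf[i].wmem_queued << 32) / sf[i].pacing_rate;
		if (ratio < best[sf[i].backup]) {
			best[sf[i].backup] = ratio;
			pick[sf[i].backup] = i;
		}
	}
	/* best backup only when no regular subflow qualified */
	return pick[0] >= 0 ? pick[0] : pick[1];
}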
-static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
+static void ssk_check_wmem(struct mptcp_sock *msk)
{
- struct socket *sock;
-
- if (likely(sk_stream_is_writeable(ssk)))
- return;
-
- sock = READ_ONCE(ssk->sk_socket);
- if (sock)
- mptcp_nospace(msk, sock);
+ if (unlikely(!mptcp_is_writeable(msk)))
+ mptcp_nospace(msk);
}
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
@@ -864,6 +1148,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct page_frag *pfrag;
size_t copied = 0;
struct sock *ssk;
+ u32 sndbuf;
bool tx_ok;
long timeo;
@@ -890,7 +1175,7 @@ restart:
}
__mptcp_flush_join_list(msk);
- ssk = mptcp_subflow_get_send(msk);
+ ssk = mptcp_subflow_get_send(msk, &sndbuf);
while (!sk_stream_memory_free(sk) ||
!ssk ||
!mptcp_page_frag_refill(ssk, pfrag)) {
@@ -907,19 +1192,25 @@ restart:
mptcp_reset_timer(sk);
}
+ mptcp_nospace(msk);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
goto out;
mptcp_clean_una(sk);
- ssk = mptcp_subflow_get_send(msk);
+ ssk = mptcp_subflow_get_send(msk, &sndbuf);
if (list_empty(&msk->conn_list)) {
ret = -ENOTCONN;
goto out;
}
}
+ /* do auto tuning */
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
+ sndbuf > READ_ONCE(sk->sk_sndbuf))
+ WRITE_ONCE(sk->sk_sndbuf, sndbuf);
+
pr_debug("conn_list->subflow=%p", ssk);
lock_sock(ssk);
@@ -936,6 +1227,10 @@ restart:
break;
}
+ /* burst can go negative; we will try to move to the next subflow
+ * at selection time, if possible.
+ */
+ msk->snd_burst -= ret;
copied += ret;
tx_ok = msg_data_left(msg);
@@ -945,7 +1240,6 @@ restart:
if (!sk_stream_memory_free(ssk) ||
!mptcp_page_frag_refill(ssk, pfrag) ||
!mptcp_ext_cache_refill(msk)) {
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
tcp_push(ssk, msg->msg_flags, mss_now,
tcp_sk(ssk)->nonagle, size_goal);
mptcp_set_timeout(sk, ssk);
@@ -993,9 +1287,9 @@ restart:
mptcp_reset_timer(sk);
}
- ssk_check_wmem(msk, ssk);
release_sock(ssk);
out:
+ ssk_check_wmem(msk);
release_sock(sk);
return copied ? : ret;
}
@@ -1133,10 +1427,14 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
*/
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk;
+ bool slow;
ssk = mptcp_subflow_tcp_sock(subflow);
+ slow = lock_sock_fast(ssk);
WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
tcp_sk(ssk)->window_clamp = window_clamp;
+ tcp_cleanup_rbuf(ssk, 1);
+ unlock_sock_fast(ssk, slow);
}
}
}
@@ -1152,6 +1450,11 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
unsigned int moved = 0;
bool done;
+ /* avoid looping forever below on racing close */
+ if (((struct sock *)msk)->sk_state == TCP_CLOSE)
+ return false;
+
+ __mptcp_flush_join_list(msk);
do {
struct sock *ssk = mptcp_subflow_recv_lookup(msk);
@@ -1163,7 +1466,11 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
release_sock(ssk);
} while (!done);
- return moved > 0;
+ if (mptcp_ofo_queue(msk) || moved > 0) {
+ mptcp_check_data_fin((struct sock *)msk);
+ return true;
+ }
+ return false;
}
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -1259,6 +1566,9 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
set_bit(MPTCP_DATA_READY, &msk->flags);
}
out_err:
+ pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
+ msk, test_bit(MPTCP_DATA_READY, &msk->flags),
+ skb_queue_empty(&sk->sk_receive_queue), copied);
mptcp_rcv_space_adjust(msk, copied);
release_sock(sk);
@@ -1309,9 +1619,15 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
sock_owned_by_me((const struct sock *)msk);
+ if (__mptcp_check_fallback(msk))
+ return msk->first;
+
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ if (!mptcp_subflow_active(subflow))
+ continue;
+
/* still data outstanding at TCP level? Don't retransmit. */
if (!tcp_write_queue_empty(ssk))
return NULL;
@@ -1336,9 +1652,9 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
* so we need to use tcp_close() after detaching them from the mptcp
* parent socket.
*/
-static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
- struct mptcp_subflow_context *subflow,
- long timeout)
+void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ long timeout)
{
struct socket *sock = READ_ONCE(ssk->sk_socket);
@@ -1369,6 +1685,10 @@ static void pm_work(struct mptcp_sock *msk)
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
}
+ if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
+ pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
+ mptcp_pm_nl_rm_addr_received(msk);
+ }
if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
mptcp_pm_nl_fully_established(msk);
@@ -1472,6 +1792,7 @@ static int __mptcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&msk->rtx_queue);
__set_bit(MPTCP_SEND_SPACE, &msk->flags);
INIT_WORK(&msk->work, mptcp_worker);
+ msk->out_of_order_queue = RB_ROOT;
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -1489,23 +1810,23 @@ static int mptcp_init_sock(struct sock *sk)
struct net *net = sock_net(sk);
int ret;
+ ret = __mptcp_init_sock(sk);
+ if (ret)
+ return ret;
+
if (!mptcp_is_enabled(net))
return -ENOPROTOOPT;
if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
return -ENOMEM;
- ret = __mptcp_init_sock(sk);
- if (ret)
- return ret;
-
ret = __mptcp_socket_create(mptcp_sk(sk));
if (ret)
return ret;
sk_sockets_allocated_inc(sk);
sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
- sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
+ sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
return 0;
}
@@ -1529,7 +1850,7 @@ static void mptcp_cancel_work(struct sock *sk)
sock_put(sk);
}
-static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
lock_sock(ssk);
@@ -1807,14 +2128,21 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
return newsk;
}
+void mptcp_destroy_common(struct mptcp_sock *msk)
+{
+ skb_rbtree_purge(&msk->out_of_order_queue);
+ mptcp_token_destroy(msk);
+ mptcp_pm_free_anno_list(msk);
+}
+
static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- mptcp_token_destroy(msk);
if (msk->cached_ext)
__skb_ext_put(msk->cached_ext);
+ mptcp_destroy_common(msk);
sk_sockets_allocated_dec(sk);
}
@@ -2286,13 +2614,13 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
+ pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
if (state == TCP_LISTEN)
return mptcp_check_readable(msk);
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
mask |= mptcp_check_readable(msk);
- if (sk_stream_is_writeable(sk) &&
- test_bit(MPTCP_SEND_SPACE, &msk->flags))
+ if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
mask |= EPOLLOUT | EPOLLWRNORM;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 60b27d44c184..7cfe52aeb2b8 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -140,6 +140,8 @@ struct mptcp_addr_info {
sa_family_t family;
__be16 port;
u8 id;
+ u8 flags;
+ int ifindex;
union {
struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -150,6 +152,7 @@ struct mptcp_addr_info {
enum mptcp_pm_status {
MPTCP_PM_ADD_ADDR_RECEIVED,
+ MPTCP_PM_RM_ADDR_RECEIVED,
MPTCP_PM_ESTABLISHED,
MPTCP_PM_SUBFLOW_ESTABLISHED,
};
@@ -157,14 +160,17 @@ enum mptcp_pm_status {
struct mptcp_pm_data {
struct mptcp_addr_info local;
struct mptcp_addr_info remote;
+ struct list_head anno_list;
spinlock_t lock; /*protects the whole PM data */
- bool addr_signal;
+ bool add_addr_signal;
+ bool rm_addr_signal;
bool server_side;
bool work_pending;
bool accept_addr;
bool accept_subflow;
+ bool add_addr_echo;
u8 add_addr_signaled;
u8 add_addr_accepted;
u8 local_addr_used;
@@ -174,6 +180,7 @@ struct mptcp_pm_data {
u8 local_addr_max;
u8 subflows_max;
u8 status;
+ u8 rm_id;
};
struct mptcp_data_frag {
@@ -194,6 +201,8 @@ struct mptcp_sock {
u64 write_seq;
u64 ack_seq;
u64 rcv_data_fin_seq;
+ struct sock *last_snd;
+ int snd_burst;
atomic64_t snd_una;
unsigned long timer_ival;
u32 token;
@@ -204,6 +213,8 @@ struct mptcp_sock {
bool snd_data_fin_enable;
spinlock_t join_list_lock;
struct work_struct work;
+ struct sk_buff *ooo_last_skb;
+ struct rb_root out_of_order_queue;
struct list_head conn_list;
struct list_head rtx_queue;
struct list_head join_list;
@@ -268,6 +279,12 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
return (struct mptcp_subflow_request_sock *)rsk;
}
+enum mptcp_data_avail {
+ MPTCP_SUBFLOW_NODATA,
+ MPTCP_SUBFLOW_DATA_AVAIL,
+ MPTCP_SUBFLOW_OOO_DATA
+};
+
/* MPTCP subflow context */
struct mptcp_subflow_context {
struct list_head node;/* conn_list of subflows */
@@ -292,10 +309,10 @@ struct mptcp_subflow_context {
map_valid : 1,
mpc_map : 1,
backup : 1,
- data_avail : 1,
rx_eof : 1,
use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
can_ack : 1; /* only after processing the remote a key */
+ enum mptcp_data_avail data_avail;
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
@@ -348,10 +365,13 @@ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
+void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
+void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ long timeout);
/* called with sk socket lock held */
-int __mptcp_subflow_connect(struct sock *sk, int ifindex,
- const struct mptcp_addr_info *loc,
+int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
@@ -388,6 +408,7 @@ bool mptcp_finish_join(struct sock *sk);
void mptcp_data_acked(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq);
+void mptcp_destroy_common(struct mptcp_sock *msk);
void __init mptcp_token_init(void);
static inline void mptcp_token_init_request(struct request_sock *req)
@@ -421,15 +442,26 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
+void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id);
+void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
+struct mptcp_pm_add_entry *
+mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
- const struct mptcp_addr_info *addr);
+ const struct mptcp_addr_info *addr,
+ bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id);
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id);
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id);
+
+static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.add_addr_signal);
+}
-static inline bool mptcp_pm_should_signal(struct mptcp_sock *msk)
+static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
{
- return READ_ONCE(msk->pm.addr_signal);
+ return READ_ONCE(msk->pm.rm_addr_signal);
}
static inline unsigned int mptcp_add_addr_len(int family)
@@ -439,8 +471,10 @@ static inline unsigned int mptcp_add_addr_len(int family)
return TCPOLEN_MPTCP_ADD_ADDR6;
}
-bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
- struct mptcp_addr_info *saddr);
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ struct mptcp_addr_info *saddr, bool *echo);
+bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ u8 *rm_id);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
void __init mptcp_pm_nl_init(void);
@@ -448,6 +482,8 @@ void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_fully_established(struct mptcp_sock *msk);
void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk);
void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk);
+void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk);
+void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
@@ -464,12 +500,12 @@ static inline bool before64(__u64 seq1, __u64 seq2)
void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);
-static inline bool __mptcp_check_fallback(struct mptcp_sock *msk)
+static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}
-static inline bool mptcp_check_fallback(struct sock *sk)
+static inline bool mptcp_check_fallback(const struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index e8cac2655c82..ac2b19993f1a 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -20,6 +20,7 @@
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
+#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
@@ -434,7 +435,7 @@ static void mptcp_sock_destruct(struct sock *sk)
sock_orphan(sk);
}
- mptcp_token_destroy(mptcp_sk(sk));
+ mptcp_destroy_common(mptcp_sk(sk));
inet_sock_destruct(sk);
}
@@ -804,16 +805,25 @@ validate_seq:
return MAPPING_OK;
}
-static int subflow_read_actor(read_descriptor_t *desc,
- struct sk_buff *skb,
- unsigned int offset, size_t len)
+static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ u64 limit)
{
- size_t copy_len = min(desc->count, len);
-
- desc->count -= copy_len;
-
- pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
- return copy_len;
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+ u32 incr;
+
+ incr = limit >= skb->len ? skb->len + fin : limit;
+
+ pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
+ subflow->map_subflow_seq);
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ tcp_sk(ssk)->copied_seq += incr;
+ if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
+ sk_eat_skb(ssk, skb);
+ if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
+ subflow->map_valid = 0;
+ if (incr)
+ tcp_cleanup_rbuf(ssk, incr);
}
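
mptcp_subflow_discard_data() advances the subflow's copied_seq without handing anything to the MPTCP-level reader. `incr` covers the whole skb (plus one for a FIN, keeping the sequence space consistent) when the limit allows, otherwise just `limit` bytes, and the skb is eaten only once copied_seq reaches its end_seq. For example, discarding with limit 300 from an 800-byte skb advances copied_seq by 300 and leaves the skb queued; with limit 1000 the full 800 bytes (+1 with FIN) are consumed and the skb freed.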
static bool subflow_check_data_avail(struct sock *ssk)
@@ -825,13 +835,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
+ if (!skb_peek(&ssk->sk_receive_queue))
+ subflow->data_avail = 0;
if (subflow->data_avail)
return true;
msk = mptcp_sk(subflow->conn);
for (;;) {
- u32 map_remaining;
- size_t delta;
u64 ack_seq;
u64 old_ack;
@@ -849,6 +859,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
subflow->map_data_len = skb->len;
subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
subflow->ssn_offset;
+ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
return true;
}
@@ -876,42 +887,18 @@ static bool subflow_check_data_avail(struct sock *ssk)
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
ack_seq);
- if (ack_seq == old_ack)
+ if (ack_seq == old_ack) {
+ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
break;
+ } else if (after64(ack_seq, old_ack)) {
+ subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
+ break;
+ }
/* only accept in-sequence mapping. Old values are spurious
- * retransmission; we can hit "future" values on active backup
- * subflow switch, we relay on retransmissions to get
- * in-sequence data.
- * Cuncurrent subflows support will require subflow data
- * reordering
+ * retransmissions
*/
- map_remaining = subflow->map_data_len -
- mptcp_subflow_get_map_offset(subflow);
- if (before64(ack_seq, old_ack))
- delta = min_t(size_t, old_ack - ack_seq, map_remaining);
- else
- delta = min_t(size_t, ack_seq - old_ack, map_remaining);
-
- /* discard mapped data */
- pr_debug("discarding %zu bytes, current map len=%d", delta,
- map_remaining);
- if (delta) {
- read_descriptor_t desc = {
- .count = delta,
- };
- int ret;
-
- ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
- if (ret < 0) {
- ssk->sk_err = -ret;
- goto fatal;
- }
- if (ret < delta)
- return false;
- if (delta == map_remaining)
- subflow->map_valid = 0;
- }
+ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
}
return true;
@@ -922,13 +909,13 @@ fatal:
ssk->sk_error_report(ssk);
tcp_set_state(ssk, TCP_CLOSE);
tcp_send_active_reset(ssk, GFP_ATOMIC);
+ subflow->data_avail = 0;
return false;
}
bool mptcp_subflow_data_available(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- struct sk_buff *skb;
/* check if current mapping is still valid */
if (subflow->map_valid &&
@@ -941,15 +928,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
subflow->map_data_len);
}
- if (!subflow_check_data_avail(sk)) {
- subflow->data_avail = 0;
- return false;
- }
-
- skb = skb_peek(&sk->sk_receive_queue);
- subflow->data_avail = skb &&
- before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
- return subflow->data_avail;
+ return subflow_check_data_avail(sk);
}
/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
@@ -996,8 +975,10 @@ static void subflow_write_space(struct sock *sk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct sock *parent = subflow->conn;
- sk_stream_write_space(sk);
- if (sk_stream_is_writeable(sk)) {
+ if (!sk_stream_is_writeable(sk))
+ return;
+
+ if (sk_stream_is_writeable(parent)) {
set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
smp_mb__after_atomic();
/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
@@ -1056,13 +1037,13 @@ static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
#endif
}
-int __mptcp_subflow_connect(struct sock *sk, int ifindex,
- const struct mptcp_addr_info *loc,
+int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
const struct mptcp_addr_info *remote)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_subflow_context *subflow;
struct sockaddr_storage addr;
+ int remote_id = remote->id;
int local_id = loc->id;
struct socket *sf;
struct sock *ssk;
@@ -1101,18 +1082,19 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
if (loc->family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
- ssk->sk_bound_dev_if = ifindex;
+ ssk->sk_bound_dev_if = loc->ifindex;
err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
if (err)
goto failed;
mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
- pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
- local_id);
+ pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
+ remote_token, local_id, remote_id);
subflow->remote_token = remote_token;
subflow->local_id = local_id;
+ subflow->remote_id = remote_id;
subflow->request_join = 1;
- subflow->request_bkup = 1;
+ subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
mptcp_info2sockaddr(remote, &addr);
err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
@@ -1347,6 +1329,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
new_ctx->fully_established = 1;
new_ctx->backup = subflow_req->backup;
new_ctx->local_id = subflow_req->local_id;
+ new_ctx->remote_id = subflow_req->remote_id;
new_ctx->token = subflow_req->token;
new_ctx->thmac = subflow_req->thmac;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 89d99f6dfd0a..3d0fd33be018 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -851,7 +851,6 @@ static int ctnetlink_done(struct netlink_callback *cb)
}
struct ctnetlink_filter {
- u_int32_t cta_flags;
u8 family;
u_int32_t orig_flags;
@@ -906,10 +905,6 @@ static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
struct nf_conntrack_zone *zone,
u_int32_t flags);
-/* applied on filters */
-#define CTA_FILTER_F_CTA_MARK (1 << 0)
-#define CTA_FILTER_F_CTA_MARK_MASK (1 << 1)
-
static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
@@ -930,14 +925,10 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
#ifdef CONFIG_NF_CONNTRACK_MARK
if (cda[CTA_MARK]) {
filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
- filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK);
-
- if (cda[CTA_MARK_MASK]) {
+ if (cda[CTA_MARK_MASK])
filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
- filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK_MASK);
- } else {
+ else
filter->mark.mask = 0xffffffff;
- }
} else if (cda[CTA_MARK_MASK]) {
err = -EINVAL;
goto err_filter;
@@ -1117,11 +1108,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
- if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK_MASK)) &&
- (ct->mark & filter->mark.mask) != filter->mark.val)
- goto ignore_entry;
- else if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK)) &&
- ct->mark != filter->mark.val)
+ if ((ct->mark & filter->mark.mask) != filter->mark.val)
goto ignore_entry;
#endif
@@ -1404,7 +1391,8 @@ ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
if (err < 0)
return err;
-
+ if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
+ return -EOPNOTSUPP;
tuple->src.l3num = l3num;
if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 95f79980348c..47e9319d2cf3 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -565,6 +565,7 @@ static int nf_ct_netns_inet_get(struct net *net)
int err;
err = nf_ct_netns_do_get(net, NFPROTO_IPV4);
+#if IS_ENABLED(CONFIG_IPV6)
if (err < 0)
goto err1;
err = nf_ct_netns_do_get(net, NFPROTO_IPV6);
@@ -575,6 +576,7 @@ static int nf_ct_netns_inet_get(struct net *net)
err2:
nf_ct_netns_put(net, NFPROTO_IPV4);
err1:
+#endif
return err;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 84c0c1aaae99..97fb6f776114 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -691,6 +691,18 @@ nla_put_failure:
return -1;
}
+struct nftnl_skb_parms {
+ bool report;
+};
+#define NFT_CB(skb) (*(struct nftnl_skb_parms*)&((skb)->cb))
+
+static void nft_notify_enqueue(struct sk_buff *skb, bool report,
+ struct list_head *notify_list)
+{
+ NFT_CB(skb).report = report;
+ list_add_tail(&skb->list, notify_list);
+}
+
static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
{
struct sk_buff *skb;
@@ -722,8 +734,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event)
goto err;
}
- nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
- ctx->report, GFP_KERNEL);
+ nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -1488,8 +1499,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
goto err;
}
- nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
- ctx->report, GFP_KERNEL);
+ nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -2827,8 +2837,7 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx,
goto err;
}
- nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
- ctx->report, GFP_KERNEL);
+ nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -3857,8 +3866,7 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx,
goto err;
}
- nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report,
- gfp_flags);
+ nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list);
return;
err:
nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -4979,8 +4987,7 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx,
goto err;
}
- nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report,
- GFP_KERNEL);
+ nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list);
return;
err:
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -6314,7 +6321,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
goto err;
}
- nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
+ nft_notify_enqueue(skb, report, &net->nft.notify_list);
return;
err:
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -7124,8 +7131,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
goto err;
}
- nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES,
- ctx->report, GFP_KERNEL);
+ nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list);
return;
err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
@@ -7734,6 +7740,41 @@ static void nf_tables_commit_release(struct net *net)
mutex_unlock(&net->nft.commit_mutex);
}
+static void nft_commit_notify(struct net *net, u32 portid)
+{
+ struct sk_buff *batch_skb = NULL, *nskb, *skb;
+ unsigned char *data;
+ int len;
+
+ list_for_each_entry_safe(skb, nskb, &net->nft.notify_list, list) {
+ if (!batch_skb) {
+new_batch:
+ batch_skb = skb;
+ len = NLMSG_GOODSIZE - skb->len;
+ list_del(&skb->list);
+ continue;
+ }
+ len -= skb->len;
+ if (len > 0 && NFT_CB(skb).report == NFT_CB(batch_skb).report) {
+ data = skb_put(batch_skb, skb->len);
+ memcpy(data, skb->data, skb->len);
+ list_del(&skb->list);
+ kfree_skb(skb);
+ continue;
+ }
+ nfnetlink_send(batch_skb, net, portid, NFNLGRP_NFTABLES,
+ NFT_CB(batch_skb).report, GFP_KERNEL);
+ goto new_batch;
+ }
+
+ if (batch_skb) {
+ nfnetlink_send(batch_skb, net, portid, NFNLGRP_NFTABLES,
+ NFT_CB(batch_skb).report, GFP_KERNEL);
+ }
+
+ WARN_ON_ONCE(!list_empty(&net->nft.notify_list));
+}
+
static int nf_tables_commit(struct net *net, struct sk_buff *skb)
{
struct nft_trans *trans, *next;
@@ -7936,6 +7977,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
}
}
+ nft_commit_notify(net, NETLINK_CB(skb).portid);
nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
nf_tables_commit_release(net);
@@ -8760,6 +8802,7 @@ static int __net_init nf_tables_init_net(struct net *net)
INIT_LIST_HEAD(&net->nft.tables);
INIT_LIST_HEAD(&net->nft.commit_list);
INIT_LIST_HEAD(&net->nft.module_list);
+ INIT_LIST_HEAD(&net->nft.notify_list);
mutex_init(&net->nft.commit_mutex);
net->nft.base_seq = 1;
net->nft.validate_state = NFT_VALIDATE_SKIP;
@@ -8776,6 +8819,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
mutex_unlock(&net->nft.commit_mutex);
WARN_ON_ONCE(!list_empty(&net->nft.tables));
WARN_ON_ONCE(!list_empty(&net->nft.module_list));
+ WARN_ON_ONCE(!list_empty(&net->nft.notify_list));
}
static struct pernet_operations nf_tables_net_ops = {
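The hunks above replace each immediate nfnetlink_send() with an enqueue onto the per-netns net->nft.notify_list; nft_commit_notify() then flushes that list once per transaction, gluing consecutive messages into one skb while the total stays under NLMSG_GOODSIZE and the report flags match. A minimal sketch of the two building blocks (all names except the NFT_CB cb-area trick are illustrative):

#include <linux/list.h>
#include <linux/skbuff.h>

/* Per-skb state is stashed in the 48-byte skb->cb scratch area, the
 * same trick NFT_CB() plays above. */
struct demo_skb_parms {
	bool report;
};
#define DEMO_CB(skb) (*(struct demo_skb_parms *)&((skb)->cb))

static void demo_enqueue(struct sk_buff *skb, bool report,
			 struct list_head *pending)
{
	DEMO_CB(skb).report = report;
	list_add_tail(&skb->list, pending); /* sent later, at commit time */
}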
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 7bc6537f3ccb..b37bd02448d8 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -147,11 +147,11 @@ nft_meta_get_eval_skugid(enum nft_meta_keys key,
switch (key) {
case NFT_META_SKUID:
- *dest = from_kuid_munged(&init_user_ns,
+ *dest = from_kuid_munged(sock_net(sk)->user_ns,
sock->file->f_cred->fsuid);
break;
case NFT_META_SKGID:
- *dest = from_kgid_munged(&init_user_ns,
+ *dest = from_kgid_munged(sock_net(sk)->user_ns,
sock->file->f_cred->fsgid);
break;
default:
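For context, the nft_meta fix swaps the hardcoded &init_user_ns for the user namespace owning the socket's netns, so containers see uids/gids mapped into their own view. A hedged one-liner of the pattern (the helper name is illustrative):

#include <net/sock.h>
#include <linux/uidgid.h>

/* Illustrative: report a credential uid as seen from the socket's own
 * network namespace rather than the initial one. */
static u32 demo_sock_uid(const struct sock *sk, kuid_t kuid)
{
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}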
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f9efd2c1cb50..4ec4c0f72f61 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1853,7 +1853,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct scm_cookie scm;
u32 netlink_skb_flags = 0;
- if (msg->msg_flags&MSG_OOB)
+ if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
err = scm_send(sock, msg, &scm, true);
@@ -1916,7 +1916,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
refcount_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
- err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
+ err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);
out:
scm_destroy(&scm);
@@ -1929,12 +1929,12 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int noblock = flags&MSG_DONTWAIT;
+ int noblock = flags & MSG_DONTWAIT;
size_t copied;
struct sk_buff *skb, *data_skb;
int err, ret;
- if (flags&MSG_OOB)
+ if (flags & MSG_OOB)
return -EOPNOTSUPP;
copied = 0;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index a3f1204f1ed2..e86b9601f5b1 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1901,8 +1901,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
lockdep_ovsl_is_held())
kfree_rcu(ct_limit, rcu);
}
- kfree(ovs_net->ct_limit_info->limits);
- kfree(ovs_net->ct_limit_info);
+ kfree(info->limits);
+ kfree(info);
}
static struct sk_buff *
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index af6c93ed9fa0..cefbd50c1090 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,52 +93,56 @@
/*
Assumptions:
- - if device has no dev->hard_header routine, it adds and removes ll header
- inside itself. In this case ll header is invisible outside of device,
- but higher levels still should reserve dev->hard_header_len.
- Some devices are enough clever to reallocate skb, when header
- will not fit to reserved space (tunnel), another ones are silly
- (PPP).
+ - If the device has no dev->header_ops, there is no LL header visible
+ above the device. In this case, its hard_header_len should be 0.
+ The device may prepend its own header internally. In this case, its
+ needed_headroom should be set to the space needed for it to add its
+ internal header.
+ For example, a WiFi driver pretending to be an Ethernet driver should
+ set its hard_header_len to be the Ethernet header length, and set its
+ needed_headroom to be (the real WiFi header length - the fake Ethernet
+ header length).
- packet socket receives packets with pulled ll header,
so that SOCK_RAW should push it back.
On receive:
-----------
-Incoming, dev->hard_header!=NULL
+Incoming, dev->header_ops != NULL
mac_header -> ll header
data -> data
-Outgoing, dev->hard_header!=NULL
+Outgoing, dev->header_ops != NULL
mac_header -> ll header
data -> ll header
-Incoming, dev->hard_header==NULL
- mac_header -> UNKNOWN position. It is very likely, that it points to ll
- header. PPP makes it, that is wrong, because introduce
- assymetry between rx and tx paths.
+Incoming, dev->header_ops == NULL
+ mac_header -> data
+ However drivers often make it point to the ll header.
+ This is incorrect because the ll header should be invisible to us.
data -> data
-Outgoing, dev->hard_header==NULL
- mac_header -> data. ll header is still not built!
+Outgoing, dev->header_ops == NULL
+ mac_header -> data. ll header is invisible to us.
data -> data
Resume
- If dev->hard_header==NULL we are unlikely to restore sensible ll header.
+ If dev->header_ops == NULL we are unable to restore the ll header,
+ because it is invisible to us.
On transmit:
------------
-dev->hard_header != NULL
+dev->header_ops != NULL
mac_header -> ll header
data -> ll header
-dev->hard_header == NULL (ll header is added by device, we cannot control it)
+dev->header_ops == NULL (ll header is invisible to us)
mac_header -> data
data -> data
- We should set nh.raw on output to correct posistion,
+ We should set network_header on output to the correct position,
packet classifier depends on it.
*/
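The rewritten comment implies a concrete contract for drivers that emulate one link layer on top of another. A hedged sketch of the WiFi-pretending-to-be-Ethernet case it describes (WIFI_HDR_LEN is a made-up constant standing in for the real 802.11 header size):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define WIFI_HDR_LEN 30 /* hypothetical real link-layer header length */

static void demo_wifi_as_ether_setup(struct net_device *dev)
{
	ether_setup(dev); /* fake Ethernet: hard_header_len = ETH_HLEN */
	/* Room the driver needs to prepend its real header internally,
	 * per the rule stated in the comment above. */
	dev->needed_headroom = WIFI_HDR_LEN - ETH_HLEN;
}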
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 90c558f89d46..957aa9263ba4 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -332,8 +332,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
{
struct qrtr_hdr_v1 *hdr;
size_t len = skb->len;
- int rc = -ENODEV;
- int confirm_rx;
+ int rc, confirm_rx;
confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
if (confirm_rx < 0) {
@@ -357,15 +356,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = !!confirm_rx;
- skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
-
- mutex_lock(&node->ep_lock);
- if (node->ep)
- rc = node->ep->xmit(node->ep, skb);
- else
- kfree_skb(skb);
- mutex_unlock(&node->ep_lock);
+ rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+ if (!rc) {
+ mutex_lock(&node->ep_lock);
+ rc = -ENODEV;
+ if (node->ep)
+ rc = node->ep->xmit(node->ep, skb);
+ else
+ kfree_skb(skb);
+ mutex_unlock(&node->ep_lock);
+ }
/* Need to ensure that a subsequent message carries the otherwise lost
* confirm_rx flag if we dropped this one */
if (rc && confirm_rx)
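The key point of the qrtr rework: skb_put_padto() frees the skb when it cannot extend it, so a caller that ignores its return value may hand a freed buffer to ->xmit(). The corrected must-check pattern, in isolation:

/* Sketch: on failure skb_put_padto() has already freed the skb, so
 * the caller must bail out without touching it again. */
rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
if (rc)
	return rc; /* skb is gone */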
diff --git a/net/rds/cong.c b/net/rds/cong.c
index ccdff09a79c8..8b689ebbd5b5 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -236,7 +236,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
* tcp_setsockopt and/or tcp_sendmsg will deadlock
* when it tries to get the sock_lock())
* 2. Interrupts are masked so that we can mark the
- * the port congested from both send and recv paths.
+ * port congested from both send and recv paths.
* (See comment around declaration of rdc_cong_lock).
* An attempt to get the sock_lock() here will
* therefore trigger warnings.
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index c3319ff3ee11..06603dd1c8aa 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -711,7 +711,7 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
* original size. The only way to tell the difference is by looking at
* the contents, which are initialized to zero.
* If the protocol version fields aren't set, this is a connection attempt
- * from an older version. This could could be 3.0 or 2.0 - we can't tell.
+ * from an older version. This could be 3.0 or 2.0 - we can't tell.
* We really should have changed this for OFED 1.3 :-(
*/
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index ccdd304eae0a..1d0afb1dd77b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -269,7 +269,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
} else {
nents = ret;
- sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
+ sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto out;
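The kcalloc()-to-kmalloc_array() switch keeps the overflow-checked multiplication but drops the zeroing; that is safe only if every entry is written before it is read, which rds presumably guarantees by initializing the table right after allocation. A sketch of the pattern:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch: the cheaper non-zeroing allocation is fine when
 * sg_init_table() initializes every entry before any use. */
static struct scatterlist *demo_alloc_sg(unsigned int nents)
{
	struct scatterlist *sg;

	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
	if (sg)
		sg_init_table(sg, nents);
	return sg;
}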
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index cd5a80b34738..19f714386654 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -395,7 +395,7 @@ struct rxrpc_bundle {
unsigned int debug_id;
bool try_upgrade; /* True if the bundle is attempting upgrade */
bool alloc_conn; /* True if someone's getting a conn */
- unsigned short alloc_error; /* Error from last conn allocation */
+ short alloc_error; /* Error from last conn allocation */
spinlock_t channel_lock;
struct rb_node local_node; /* Node in local->client_conns */
struct list_head waiting_calls; /* Calls waiting for channels */
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 0e4e1879c24d..78c845a4f1ad 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -433,7 +433,6 @@ static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
if (!rxrpc_may_reuse_conn(old)) {
if (old)
trace_rxrpc_client(old, -1, rxrpc_client_replace);
- candidate->bundle = rxrpc_get_bundle(bundle);
candidate->bundle_shift = shift;
bundle->conns[i] = candidate;
for (j = 0; j < RXRPC_MAXCALLS; j++)
@@ -724,8 +723,9 @@ granted_channel:
/* Paired with the write barrier in rxrpc_activate_one_channel(). */
smp_rmb();
-out:
+out_put_bundle:
rxrpc_put_bundle(bundle);
+out:
_leave(" = %d", ret);
return ret;
@@ -742,7 +742,7 @@ wait_failed:
trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
rxrpc_disconnect_client_call(bundle, call);
- goto out;
+ goto out_put_bundle;
}
/*
@@ -1111,6 +1111,7 @@ void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
conn = list_entry(graveyard.next,
struct rxrpc_connection, cache_link);
list_del_init(&conn->cache_link);
+ rxrpc_unbundle_conn(conn);
rxrpc_put_connection(conn);
}
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index c1fcd85719d6..5c568757643b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a)
kfree_rcu(p, rcu);
}
+static int load_metalist(struct nlattr **tb, bool rtnl_held)
+{
+ int i;
+
+ for (i = 1; i < max_metacnt; i++) {
+ if (tb[i]) {
+ void *val = nla_data(tb[i]);
+ int len = nla_len(tb[i]);
+ int rc;
+
+ rc = load_metaops_and_vet(i, val, len, rtnl_held);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
bool exists, bool rtnl_held)
{
@@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
val = nla_data(tb[i]);
len = nla_len(tb[i]);
- rc = load_metaops_and_vet(i, val, len, rtnl_held);
- if (rc != 0)
- return rc;
-
rc = add_metainfo(ife, i, val, len, exists);
if (rc)
return rc;
@@ -509,6 +524,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!p)
return -ENOMEM;
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+ tb[TCA_IFE_METALST], NULL,
+ NULL);
+ if (err) {
+ kfree(p);
+ return err;
+ }
+ err = load_metalist(tb2, rtnl_held);
+ if (err) {
+ kfree(p);
+ return err;
+ }
+ }
+
index = parm->index;
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0) {
@@ -570,15 +600,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
}
if (tb[TCA_IFE_METALST]) {
- err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
- tb[TCA_IFE_METALST], NULL,
- NULL);
- if (err)
- goto metadata_parse_err;
err = populate_metalist(ife, tb2, exists, rtnl_held);
if (err)
goto metadata_parse_err;
-
} else {
/* if no passed metadata allow list or passed allow-all
* then here we process by adding as many supported metadatum
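My reading of the act_ife reshuffle above: load_metaops_and_vet() may pull in modules and fail, so it now runs (via the new load_metalist()) before tcf_idr_check_alloc() reserves the action index, and populate_metalist() only attaches ops that are already loaded. Schematically:

/* New call order in tcf_ife_init() (sketch):
 *
 *   1. nla_parse_nested_deprecated(tb2, ...)  parse TCA_IFE_METALST
 *   2. load_metalist(tb2, rtnl_held)          may load modules; fail early
 *   3. tcf_idr_check_alloc(...)               only now reserve the index
 *   4. populate_metalist(...)                 ops already present; attach
 */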
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 536c4bc31be6..37f1e10f35e0 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -156,6 +156,7 @@ tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len,
struct vxlan_metadata *md = dst;
md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]);
+ md->gbp &= VXLAN_GBP_MASK;
}
return sizeof(struct vxlan_metadata);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index a4f7ef1de7e7..fed18fd2c50b 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1175,8 +1175,10 @@ static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
return -EINVAL;
}
- if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
+ if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
+ md->gbp &= VXLAN_GBP_MASK;
+ }
return sizeof(*md);
}
@@ -1221,6 +1223,7 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
}
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
+ memset(&md->u, 0x00, sizeof(md->u));
md->u.index = nla_get_be32(nla);
}
} else if (md->version == 2) {
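The tunnel_key and flower hunks apply the same defensive rule for attacker-controllable netlink input: mask the value down to the bits the protocol defines, and zero union storage before writing one member so stale bytes cannot leak into matching. In isolation:

#include <net/vxlan.h>
#include <net/netlink.h>

/* Sketch: the sanitizing move both GBP hunks above share. */
static u32 demo_sanitize_gbp(const struct nlattr *attr)
{
	/* Only the VXLAN_GBP_MASK bits are meaningful on the wire. */
	return nla_get_u32(attr) & VXLAN_GBP_MASK;
}

/* For the erspan union: memset(&md->u, 0, sizeof(md->u)) before
 * writing md->u.index, so the unused member holds no stale bytes. */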
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 265a61d011df..54c417244642 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1131,24 +1131,10 @@ EXPORT_SYMBOL(dev_activate);
static void qdisc_deactivate(struct Qdisc *qdisc)
{
- bool nolock = qdisc->flags & TCQ_F_NOLOCK;
-
if (qdisc->flags & TCQ_F_BUILTIN)
return;
- if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state))
- return;
-
- if (nolock)
- spin_lock_bh(&qdisc->seqlock);
- spin_lock_bh(qdisc_lock(qdisc));
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
-
- qdisc_reset(qdisc);
-
- spin_unlock_bh(qdisc_lock(qdisc));
- if (nolock)
- spin_unlock_bh(&qdisc->seqlock);
}
static void dev_deactivate_queue(struct net_device *dev,
@@ -1165,6 +1151,30 @@ static void dev_deactivate_queue(struct net_device *dev,
}
}
+static void dev_reset_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ struct Qdisc *qdisc;
+ bool nolock;
+
+ qdisc = dev_queue->qdisc_sleeping;
+ if (!qdisc)
+ return;
+
+ nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+ if (nolock)
+ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+
+ qdisc_reset(qdisc);
+
+ spin_unlock_bh(qdisc_lock(qdisc));
+ if (nolock)
+ spin_unlock_bh(&qdisc->seqlock);
+}
+
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
@@ -1213,12 +1223,20 @@ void dev_deactivate_many(struct list_head *head)
dev_watchdog_down(dev);
}
- /* Wait for outstanding qdisc-less dev_queue_xmit calls.
+ /* Wait for outstanding qdisc-less dev_queue_xmit calls or
+ * outstanding qdisc enqueuing calls.
* This is avoided if all devices are in dismantle phase :
* Caller will call synchronize_net() for us
*/
synchronize_net();
+ list_for_each_entry(dev, head, close_list) {
+ netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
+
+ if (dev_ingress_queue(dev))
+ dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
+ }
+
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list) {
while (some_qdisc_is_busy(dev)) {
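The sch_generic split enforces a strict teardown order, which is the actual bug fix: the DEACTIVATED bit must be visible to all concurrent enqueuers before any queue is reset, otherwise a racing enqueue can repopulate a freshly reset qdisc. A sketch of the ordering dev_deactivate_many() now follows:

/*
 *   1. dev_deactivate_queue():  set __QDISC_STATE_DEACTIVATED
 *                               (no locks, no reset anymore)
 *   2. synchronize_net():       wait out in-flight qdisc-less xmit
 *                               and qdisc enqueue calls
 *   3. dev_reset_queue():       take seqlock/qdisc lock, qdisc_reset()
 *
 * Resetting inside step 1, as the old code did, could race with an
 * enqueue that had tested the DEACTIVATED bit before it was set.
 */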
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index fe53c1e38c7d..b0ad7687ee2c 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
[TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
};
-static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
+static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb,
+ struct sched_entry *entry,
struct netlink_ext_ack *extack)
{
+ int min_duration = length_to_duration(q, ETH_ZLEN);
u32 interval = 0;
if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
@@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
interval = nla_get_u32(
tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);
- if (interval == 0) {
+ /* The interval should allow at least the minimum ethernet
+ * frame to go out.
+ */
+ if (interval < min_duration) {
NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
return -EINVAL;
}
@@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
return 0;
}
-static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
- int index, struct netlink_ext_ack *extack)
+static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n,
+ struct sched_entry *entry, int index,
+ struct netlink_ext_ack *extack)
{
struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };
int err;
@@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
entry->index = index;
- return fill_sched_entry(tb, entry, extack);
+ return fill_sched_entry(q, tb, entry, extack);
}
-static int parse_sched_list(struct nlattr *list,
+static int parse_sched_list(struct taprio_sched *q, struct nlattr *list,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
{
@@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list,
return -ENOMEM;
}
- err = parse_sched_entry(n, entry, i, extack);
+ err = parse_sched_entry(q, n, entry, i, extack);
if (err < 0) {
kfree(entry);
return err;
@@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list,
return i;
}
-static int parse_taprio_schedule(struct nlattr **tb,
+static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
struct sched_gate_list *new,
struct netlink_ext_ack *extack)
{
@@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb,
new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);
if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
- err = parse_sched_list(
- tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);
+ err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST],
+ new, extack);
if (err < 0)
return err;
@@ -1473,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
goto free_sched;
}
- err = parse_taprio_schedule(tb, new_admin, extack);
+ err = parse_taprio_schedule(q, tb, new_admin, extack);
if (err < 0)
goto free_sched;
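A worked example for the new taprio check (a sketch; it assumes length_to_duration() scales the byte count by the link's picoseconds-per-byte and returns nanoseconds, as elsewhere in this file):

/* 1 Gbit/s      -> 8000 ps per byte
 * ETH_ZLEN      -> 60 bytes (minimum frame, FCS excluded)
 * min_duration   = 60 * 8000 / 1000 = 480 ns
 *
 * So at 1 Gbit/s a schedule entry with interval < 480 ns is now
 * rejected, where previously only interval == 0 was.
 */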
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 836615f71a7d..53d0a4161df3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -9220,13 +9220,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
static inline void sctp_copy_descendant(struct sock *sk_to,
const struct sock *sk_from)
{
- int ancestor_size = sizeof(struct inet_sock) +
- sizeof(struct sctp_sock) -
- offsetof(struct sctp_sock, pd_lobby);
-
- if (sk_from->sk_family == PF_INET6)
- ancestor_size += sizeof(struct ipv6_pinfo);
+ size_t ancestor_size = sizeof(struct inet_sock);
+ ancestor_size += sk_from->sk_prot->obj_size;
+ ancestor_size -= offsetof(struct sctp_sock, pd_lobby);
__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
}
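The sctp fix derives the copy length from the socket's own sk_prot->obj_size instead of hardcoding the IPv6 add-on. A hedged note on why that works (my understanding of the layout: sctp6_sock embeds ipv6_pinfo right after sctp_sock):

/* Span copied by __inet_sk_copy_descendant() after the fix:
 *
 *   sizeof(struct inet_sock)
 *     + sk_from->sk_prot->obj_size   (sctp_sock or sctp6_sock)
 *     - offsetof(struct sctp_sock, pd_lobby)
 *
 * obj_size already accounts for the ipv6_pinfo tail on PF_INET6, so
 * the old "+ sizeof(struct ipv6_pinfo)" special case disappears.
 */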
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index f5bececfedaa..ed8f97166be9 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1371,7 +1371,6 @@ static void smc_listen_work(struct work_struct *work)
}
/* finish worker */
- kfree(buf);
if (!ism_supported) {
rc = smc_listen_rdma_finish(new_smc, &cclc,
ini.first_contact_local);
@@ -1381,12 +1380,13 @@ static void smc_listen_work(struct work_struct *work)
}
smc_conn_save_peer_info(new_smc, &cclc);
smc_listen_out_connected(new_smc);
- return;
+ goto out_free;
out_unlock:
mutex_unlock(&smc_server_lgr_pending);
out_decl:
smc_listen_decline(new_smc, rc, ini.first_contact_local);
+out_free:
kfree(buf);
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 10d05a6d34fc..0f9ffba07d26 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -208,11 +208,12 @@ again:
break;
case SMC_LISTEN:
sk->sk_state = SMC_CLOSED;
- smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
- smc->clcsock->sk->sk_user_data = NULL;
sk->sk_state_change(sk); /* wake up accept */
- if (smc->clcsock && smc->clcsock->sk)
+ if (smc->clcsock && smc->clcsock->sk) {
+ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+ smc->clcsock->sk->sk_user_data = NULL;
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+ }
smc_close_cleanup_listen(sk);
release_sock(sk);
flush_work(&smc->tcp_listen_work);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c27123e6ba80..4a67685c83eb 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -982,8 +982,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
goto out_fail;
- dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
- req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
+ dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid,
+ req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p);
if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
sap, sizeof(address)) == 0)
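%*pE is the printk extension for a length-bounded, escaped byte buffer; it matters here because the decoded universal address is not guaranteed to be NUL-terminated, so %s could run past the end. Minimal usage:

/* Print exactly `len` bytes, escaping non-printables, instead of
 * trusting the buffer to be a C string. */
pr_debug("uaddr: %*pE\n", len, (char *)p);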
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3f86d039875c..ad6e2e4994ce 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -933,6 +933,8 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req)
rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
+
+ frwr_reset(req);
}
/* ASSUMPTION: the rb_allreqs list is stable for the duration,
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 7c523dc81575..40c44101fe8e 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -36,22 +36,28 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/rng.h>
#include "crypto.h"
+#include "msg.h"
+#include "bcast.h"
-#define TIPC_TX_PROBE_LIM msecs_to_jiffies(1000) /* > 1s */
-#define TIPC_TX_LASTING_LIM msecs_to_jiffies(120000) /* 2 mins */
+#define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */
+#define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */
#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */
-#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(180000) /* 3 mins */
+#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */
+
#define TIPC_MAX_TFMS_DEF 10
#define TIPC_MAX_TFMS_LIM 1000
+#define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */
+
/**
* TIPC Key ids
*/
enum {
- KEY_UNUSED = 0,
- KEY_MIN,
- KEY_1 = KEY_MIN,
+ KEY_MASTER = 0,
+ KEY_MIN = KEY_MASTER,
+ KEY_1 = 1,
KEY_2,
KEY_3,
KEY_MAX = KEY_3,
@@ -81,6 +87,8 @@ static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
/* Max TFMs number per key */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
+/* Key exchange switch, default: on */
+int sysctl_tipc_key_exchange_enabled __read_mostly = 1;
/**
* struct tipc_key - TIPC keys' status indicator
@@ -132,6 +140,8 @@ struct tipc_tfm {
* @mode: crypto mode is applied to the key
* @hint[]: a hint for user key
* @rcu: struct rcu_head
+ * @key: the aead key
+ * @gen: the key's generation
* @seqno: the key seqno (cluster scope)
* @refcnt: the key reference counter
*/
@@ -144,8 +154,10 @@ struct tipc_aead {
u32 salt;
u8 authsize;
u8 mode;
- char hint[TIPC_AEAD_HINT_LEN + 1];
+ char hint[2 * TIPC_AEAD_HINT_LEN + 1];
struct rcu_head rcu;
+ struct tipc_aead_key *key;
+ u16 gen;
atomic64_t seqno ____cacheline_aligned;
refcount_t refcnt ____cacheline_aligned;
@@ -165,26 +177,56 @@ struct tipc_crypto_stats {
* @node: TIPC node (RX)
* @aead: array of pointers to AEAD keys for encryption/decryption
* @peer_rx_active: replicated peer RX active key index
+ * @key_gen: TX/RX key generation
* @key: the key states
- * @working: the crypto is working or not
+ * @skey_mode: session key's mode
+ * @skey: received session key
+ * @wq: common workqueue on TX crypto
+ * @work: delayed work sched for TX/RX
+ * @key_distr: key distributing state
+ * @rekeying_intv: rekeying interval (in minutes)
* @stats: the crypto statistics
+ * @name: the crypto name
* @sndnxt: the per-peer sndnxt (TX)
* @timer1: general timer 1 (jiffies)
- * @timer2: general timer 1 (jiffies)
+ * @timer2: general timer 2 (jiffies)
+ * @working: the crypto is working or not
+ * @key_master: flag indicates if master key exists
+ * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.)
+ * @nokey: no key indication
* @lock: tipc_key lock
*/
struct tipc_crypto {
struct net *net;
struct tipc_node *node;
- struct tipc_aead __rcu *aead[KEY_MAX + 1]; /* key[0] is UNUSED */
+ struct tipc_aead __rcu *aead[KEY_MAX + 1];
atomic_t peer_rx_active;
+ u16 key_gen;
struct tipc_key key;
- u8 working:1;
+ u8 skey_mode;
+ struct tipc_aead_key *skey;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+#define KEY_DISTR_SCHED 1
+#define KEY_DISTR_COMPL 2
+ atomic_t key_distr;
+ u32 rekeying_intv;
+
struct tipc_crypto_stats __percpu *stats;
+ char name[48];
atomic64_t sndnxt ____cacheline_aligned;
unsigned long timer1;
unsigned long timer2;
+ union {
+ struct {
+ u8 working:1;
+ u8 key_master:1;
+ u8 legacy_user:1;
+ u8 nokey: 1;
+ };
+ u8 flags;
+ };
spinlock_t lock; /* crypto lock */
} ____cacheline_aligned;
@@ -234,23 +276,35 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
u8 new_active,
u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
- struct tipc_aead *aead, u8 pos);
+ struct tipc_aead *aead, u8 pos,
+ bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
struct tipc_crypto *rx,
- struct sk_buff *skb);
-static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
- struct tipc_msg *hdr);
+ struct sk_buff *skb,
+ u8 tx_key);
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
struct tipc_bearer *b,
struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
-#ifdef TIPC_CRYPTO_DEBUG
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
char *buf);
-#endif
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode);
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
+static void tipc_crypto_work_tx(struct work_struct *work);
+static void tipc_crypto_work_rx(struct work_struct *work);
+static int tipc_aead_key_generate(struct tipc_aead_key *skey);
+
+#define is_tx(crypto) (!(crypto)->node)
+#define is_rx(crypto) (!is_tx(crypto))
#define key_next(cur) ((cur) % KEY_MAX + 1)
@@ -271,30 +325,55 @@ do { \
/**
 * tipc_aead_key_validate - Validate an AEAD user key
*/
-int tipc_aead_key_validate(struct tipc_aead_key *ukey)
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
int keylen;
/* Check if algorithm exists */
if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
- pr_info("Not found cipher: \"%s\"!\n", ukey->alg_name);
+ GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)");
return -ENODEV;
}
/* Currently, we only support the "gcm(aes)" cipher algorithm */
- if (strcmp(ukey->alg_name, "gcm(aes)"))
+ if (strcmp(ukey->alg_name, "gcm(aes)")) {
+ GENL_SET_ERR_MSG(info, "not supported yet the algorithm");
return -ENOTSUPP;
+ }
/* Check if key size is correct */
keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
- keylen != TIPC_AES_GCM_KEY_SIZE_256))
- return -EINVAL;
+ keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
+ GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
+ return -EKEYREJECTED;
+ }
return 0;
}
+/**
+ * tipc_aead_key_generate - Generate new session key
+ * @skey: input/output key with new content
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_key_generate(struct tipc_aead_key *skey)
+{
+ int rc = 0;
+
+ /* Fill the key's content with a random value via RNG cipher */
+ rc = crypto_get_default_rng();
+ if (likely(!rc)) {
+ rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
+ skey->keylen);
+ crypto_put_default_rng();
+ }
+
+ return rc;
+}
+
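A hedged usage sketch for the generator above: allocate a tipc_aead_key sized for gcm(aes) with a 128-bit key plus salt and fill it with random bytes. This mirrors what the rekeying worker presumably does before distributing a new session key; the helper name is illustrative:

static struct tipc_aead_key *demo_make_session_key(void)
{
	unsigned int keylen = TIPC_AES_GCM_KEY_SIZE_128 +
			      TIPC_AES_GCM_SALT_SIZE;
	struct tipc_aead_key *skey;

	skey = kzalloc(sizeof(*skey) + keylen, GFP_ATOMIC);
	if (!skey)
		return NULL;
	strscpy(skey->alg_name, "gcm(aes)", sizeof(skey->alg_name));
	skey->keylen = keylen;
	if (tipc_aead_key_generate(skey)) { /* fills skey->key via RNG */
		kzfree(skey);
		return NULL;
	}
	return skey;
}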
static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
struct tipc_aead *tmp;
@@ -339,6 +418,7 @@ static void tipc_aead_free(struct rcu_head *rp)
kfree(head);
}
free_percpu(aead->tfm_entry);
+ kzfree(aead->key);
kfree(aead);
}
@@ -501,14 +581,15 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
return err;
}
- /* Copy some chars from the user key as a hint */
- memcpy(tmp->hint, ukey->key, TIPC_AEAD_HINT_LEN);
- tmp->hint[TIPC_AEAD_HINT_LEN] = '\0';
+ /* Form a hex string of some last bytes as the key's hint */
+ bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
+ TIPC_AEAD_HINT_LEN);
/* Initialize the other data */
tmp->mode = mode;
tmp->cloned = NULL;
tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
+ tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
atomic_set(&tmp->users, 0);
atomic64_set(&tmp->seqno, 0);
@@ -663,13 +744,11 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
* but there is no frag_list, it should be still fine!
* Otherwise, we must cow it to be a writable buffer with the tailroom.
*/
-#ifdef TIPC_CRYPTO_DEBUG
SKB_LINEAR_ASSERT(skb);
if (tailen > skb_tailroom(skb)) {
- pr_warn("TX: skb tailroom is not enough: %d, requires: %d\n",
- skb_tailroom(skb), tailen);
+ pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
+ skb_tailroom(skb), tailen);
}
-#endif
if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
nsg = 1;
@@ -940,8 +1019,6 @@ bool tipc_ehdr_validate(struct sk_buff *skb)
return false;
if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
return false;
- if (unlikely(!ehdr->tx_key))
- return false;
return true;
}
@@ -994,6 +1071,8 @@ static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
ehdr->tx_key = tx_key;
ehdr->destined = (__rx) ? 1 : 0;
ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
+ ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
+ ehdr->master_key = aead->crypto->key_master;
ehdr->reserved_1 = 0;
ehdr->reserved_2 = 0;
@@ -1019,23 +1098,16 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
u8 new_active,
u8 new_pending)
{
-#ifdef TIPC_CRYPTO_DEBUG
struct tipc_key old = c->key;
char buf[32];
-#endif
c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
((new_active & KEY_MASK) << (KEY_BITS)) |
((new_pending & KEY_MASK));
-#ifdef TIPC_CRYPTO_DEBUG
- pr_info("%s(%s): key changing %s ::%pS\n",
- (c->node) ? "RX" : "TX",
- (c->node) ? tipc_node_get_id_str(c->node) :
- tipc_own_id_string(c->net),
- tipc_key_change_dump(old, c->key, buf),
- __builtin_return_address(0));
-#endif
+ pr_debug("%s: key changing %s ::%pS\n", c->name,
+ tipc_key_change_dump(old, c->key, buf),
+ __builtin_return_address(0));
}
/**
@@ -1043,6 +1115,7 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
* @c: TIPC crypto to which new key is attached
* @ukey: the user key
* @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
+ * @master_key: specify this is a cluster master key
*
* A new TIPC AEAD key will be allocated and initiated with the specified user
* key, then attached to the TIPC crypto.
@@ -1050,7 +1123,7 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
* Return: new key id in case of success, otherwise: < 0
*/
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
- u8 mode)
+ u8 mode, bool master_key)
{
struct tipc_aead *aead = NULL;
int rc = 0;
@@ -1060,17 +1133,11 @@ int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
/* Attach it to the crypto */
if (likely(!rc)) {
- rc = tipc_crypto_key_attach(c, aead, 0);
+ rc = tipc_crypto_key_attach(c, aead, 0, master_key);
if (rc < 0)
tipc_aead_free(&aead->rcu);
}
- pr_info("%s(%s): key initiating, rc %d!\n",
- (c->node) ? "RX" : "TX",
- (c->node) ? tipc_node_get_id_str(c->node) :
- tipc_own_id_string(c->net),
- rc);
-
return rc;
}
@@ -1079,58 +1146,58 @@ int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
* @c: TIPC crypto to which the new AEAD key is attached
* @aead: the new AEAD key pointer
* @pos: desired slot in the crypto key array, = 0 if any!
+ * @master_key: specify this is a cluster master key
*
* Return: new key id in case of success, otherwise: -EBUSY
*/
static int tipc_crypto_key_attach(struct tipc_crypto *c,
- struct tipc_aead *aead, u8 pos)
+ struct tipc_aead *aead, u8 pos,
+ bool master_key)
{
- u8 new_pending, new_passive, new_key;
struct tipc_key key;
int rc = -EBUSY;
+ u8 new_key;
spin_lock_bh(&c->lock);
key = c->key;
+ if (master_key) {
+ new_key = KEY_MASTER;
+ goto attach;
+ }
if (key.active && key.passive)
goto exit;
- if (key.passive && !tipc_aead_users(c->aead[key.passive]))
- goto exit;
if (key.pending) {
- if (pos)
- goto exit;
if (tipc_aead_users(c->aead[key.pending]) > 0)
goto exit;
+ /* if (pos): ok with replacing, will be aligned when needed */
/* Replace it */
- new_pending = key.pending;
- new_passive = key.passive;
- new_key = new_pending;
+ new_key = key.pending;
} else {
if (pos) {
if (key.active && pos != key_next(key.active)) {
- new_pending = key.pending;
- new_passive = pos;
- new_key = new_passive;
+ key.passive = pos;
+ new_key = pos;
goto attach;
} else if (!key.active && !key.passive) {
- new_pending = pos;
- new_passive = key.passive;
- new_key = new_pending;
+ key.pending = pos;
+ new_key = pos;
goto attach;
}
}
- new_pending = key_next(key.active ?: key.passive);
- new_passive = key.passive;
- new_key = new_pending;
+ key.pending = key_next(key.active ?: key.passive);
+ new_key = key.pending;
}
attach:
aead->crypto = c;
- tipc_crypto_key_set_state(c, new_passive, key.active, new_pending);
+ aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
-
+ if (likely(c->key.keys != key.keys))
+ tipc_crypto_key_set_state(c, key.passive, key.active,
+ key.pending);
c->working = 1;
- c->timer1 = jiffies;
- c->timer2 = jiffies;
+ c->nokey = 0;
+ c->key_master |= master_key;
rc = new_key;
exit:
@@ -1140,14 +1207,33 @@ exit:
void tipc_crypto_key_flush(struct tipc_crypto *c)
{
+ struct tipc_crypto *tx, *rx;
int k;
spin_lock_bh(&c->lock);
- c->working = 0;
+ if (is_rx(c)) {
+ /* Try to cancel pending work */
+ rx = c;
+ tx = tipc_net(rx->net)->crypto_tx;
+ if (cancel_delayed_work(&rx->work)) {
+ kfree(rx->skey);
+ rx->skey = NULL;
+ atomic_xchg(&rx->key_distr, 0);
+ tipc_node_put(rx->node);
+ }
+ /* RX stopping => decrease TX key users if any */
+ k = atomic_xchg(&rx->peer_rx_active, 0);
+ if (k) {
+ tipc_aead_users_dec(tx->aead[k], 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+ }
+ }
+
+ c->flags = 0;
tipc_crypto_key_set_state(c, 0, 0, 0);
for (k = KEY_MIN; k <= KEY_MAX; k++)
tipc_crypto_key_detach(c->aead[k], &c->lock);
- atomic_set(&c->peer_rx_active, 0);
atomic64_set(&c->sndnxt, 0);
spin_unlock_bh(&c->lock);
}
@@ -1206,7 +1292,8 @@ static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
rcu_assign_pointer(rx->aead[new_passive], tmp2);
refcount_set(&tmp1->refcnt, 1);
aligned = true;
- pr_info("RX(%s): key is aligned!\n", tipc_node_get_id_str(rx->node));
+ pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
+ new_pending);
exit:
spin_unlock(&rx->lock);
@@ -1218,6 +1305,7 @@ exit:
* @tx: TX crypto handle
* @rx: RX crypto handle (can be NULL)
* @skb: the message skb which will be decrypted later
+ * @tx_key: peer TX key id
*
* This function looks up the existing TX keys and pick one which is suitable
* for the message decryption, that must be a cluster key and not used before
@@ -1227,7 +1315,8 @@ exit:
*/
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
struct tipc_crypto *rx,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u8 tx_key)
{
struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
struct tipc_aead *aead = NULL;
@@ -1246,6 +1335,10 @@ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
/* Pick one TX key */
spin_lock(&tx->lock);
+ if (tx_key == KEY_MASTER) {
+ aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
+ goto done;
+ }
do {
k = (i == 0) ? key.pending :
((i == 1) ? key.active : key.passive);
@@ -1265,9 +1358,12 @@ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
skb->next = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb->next))
pr_warn("Failed to clone skb for next round if any\n");
- WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
break;
} while (++i < 3);
+
+done:
+ if (likely(aead))
+ WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
spin_unlock(&tx->lock);
return aead;
@@ -1276,53 +1372,73 @@ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
/**
* tipc_crypto_key_synch: Synch own key data according to peer key status
* @rx: RX crypto handle
- * @new_rx_active: latest RX active key from peer
- * @hdr: TIPCv2 message
+ * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
*
* This function updates the peer node related data as the peer RX active key
* has changed, so the number of TX keys' users on this node are increased and
* decreased correspondingly.
*
+ * It also handles the case where the peer has no key: our own master key
+ * (if any) then takes over, i.e. the grace period starts, and the key
+ * distribution process is triggered.
+ *
* The "per-peer" sndnxt is also reset when the peer key has switched.
*/
-static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
- struct tipc_msg *hdr)
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
- struct net *net = rx->net;
- struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
- u8 cur_rx_active;
+ struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 self = tipc_own_addr(rx->net);
+ u8 cur, new;
+ unsigned long delay;
- /* TX might be even not ready yet */
- if (unlikely(!tx->key.active && !tx->key.pending))
- return;
+ /* Update RX 'key_master' flag according to peer, also mark "legacy" if
+ * a peer has no master key.
+ */
+ rx->key_master = ehdr->master_key;
+ if (!rx->key_master)
+ tx->legacy_user = 1;
- cur_rx_active = atomic_read(&rx->peer_rx_active);
- if (likely(cur_rx_active == new_rx_active))
+ /* For later cases, apply only if message is destined to this node */
+ if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
return;
- /* Make sure this message destined for this node */
- if (unlikely(msg_short(hdr) ||
- msg_destnode(hdr) != tipc_own_addr(net)))
- return;
+ /* Case 1: Peer has no keys, let's make master key take over */
+ if (ehdr->rx_nokey) {
+ /* Set or extend grace period */
+ tx->timer2 = jiffies;
+ /* Schedule key distributing for the peer if not yet */
+ if (tx->key.keys &&
+ !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
+ get_random_bytes(&delay, 2);
+ delay %= 5;
+ delay = msecs_to_jiffies(500 * ++delay);
+ if (queue_delayed_work(tx->wq, &rx->work, delay))
+ tipc_node_get(rx->node);
+ }
+ } else {
+ /* Cancel a pending key distributing if any */
+ atomic_xchg(&rx->key_distr, 0);
+ }
- /* Peer RX active key has changed, try to update owns' & TX users */
- if (atomic_cmpxchg(&rx->peer_rx_active,
- cur_rx_active,
- new_rx_active) == cur_rx_active) {
- if (new_rx_active)
- tipc_aead_users_inc(tx->aead[new_rx_active], INT_MAX);
- if (cur_rx_active)
- tipc_aead_users_dec(tx->aead[cur_rx_active], 0);
+ /* Case 2: Peer RX active key has changed, let's update own TX users */
+ cur = atomic_read(&rx->peer_rx_active);
+ new = ehdr->rx_key_active;
+ if (tx->key.keys &&
+ cur != new &&
+ atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
+ if (new)
+ tipc_aead_users_inc(tx->aead[new], INT_MAX);
+ if (cur)
+ tipc_aead_users_dec(tx->aead[cur], 0);
atomic64_set(&rx->sndnxt, 0);
/* Mark the point TX key users changed */
tx->timer1 = jiffies;
-#ifdef TIPC_CRYPTO_DEBUG
- pr_info("TX(%s): key users changed %d-- %d++, peer RX(%s)\n",
- tipc_own_id_string(net), cur_rx_active,
- new_rx_active, tipc_node_get_id_str(rx->node));
-#endif
+ pr_debug("%s: key users changed %d-- %d++, peer %s\n",
+ tx->name, cur, new, rx->name);
}
}
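The randomized delay in Case 1 above staggers key distribution so a cluster of peers coming up together does not flood the network in lockstep. Worked out (a sketch of the arithmetic, not new code):

/* get_random_bytes(&delay, 2);             two random bytes
 * delay %= 5;                              delay in {0..4}
 * delay = msecs_to_jiffies(500 * ++delay);
 *                                          => 500, 1000, ... or 2500 ms
 */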
@@ -1340,7 +1456,7 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
spin_unlock(&tx->lock);
- pr_warn("TX(%s): key is revoked!\n", tipc_own_id_string(net));
+ pr_warn("%s: key is revoked\n", tx->name);
return -EKEYREVOKED;
}
@@ -1357,6 +1473,15 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
if (!c)
return -ENOMEM;
+ /* Allocate workqueue on TX */
+ if (!node) {
+ c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
+ if (!c->wq) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ }
+
/* Allocate statistic structure */
c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
if (!c->stats) {
@@ -1364,53 +1489,52 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
return -ENOMEM;
}
- c->working = 0;
+ c->flags = 0;
c->net = net;
c->node = node;
+ get_random_bytes(&c->key_gen, 2);
tipc_crypto_key_set_state(c, 0, 0, 0);
+ atomic_set(&c->key_distr, 0);
atomic_set(&c->peer_rx_active, 0);
atomic64_set(&c->sndnxt, 0);
c->timer1 = jiffies;
c->timer2 = jiffies;
+ c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
spin_lock_init(&c->lock);
- *crypto = c;
+ scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
+ (is_rx(c)) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net));
+
+ if (is_rx(c))
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
+ else
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);
+ *crypto = c;
return 0;
}
void tipc_crypto_stop(struct tipc_crypto **crypto)
{
- struct tipc_crypto *c, *tx, *rx;
- bool is_rx;
+ struct tipc_crypto *c = *crypto;
u8 k;
- if (!*crypto)
+ if (!c)
return;
- rcu_read_lock();
- /* RX stopping? => decrease TX key users if any */
- is_rx = !!((*crypto)->node);
- if (is_rx) {
- rx = *crypto;
- tx = tipc_net(rx->net)->crypto_tx;
- k = atomic_read(&rx->peer_rx_active);
- if (k) {
- tipc_aead_users_dec(tx->aead[k], 0);
- /* Mark the point TX key users changed */
- tx->timer1 = jiffies;
- }
+ /* Flush any queued works & destroy wq */
+ if (is_tx(c)) {
+ c->rekeying_intv = 0;
+ cancel_delayed_work_sync(&c->work);
+ destroy_workqueue(c->wq);
}
/* Release AEAD keys */
- c = *crypto;
+ rcu_read_lock();
for (k = KEY_MIN; k <= KEY_MAX; k++)
tipc_aead_put(rcu_dereference(c->aead[k]));
rcu_read_unlock();
-
- pr_warn("%s(%s) has been purged, node left!\n",
- (is_rx) ? "RX" : "TX",
- (is_rx) ? tipc_node_get_id_str((*crypto)->node) :
- tipc_own_id_string((*crypto)->net));
+ pr_debug("%s: has been stopped\n", c->name);
/* Free this crypto statistics */
free_percpu(c->stats);
@@ -1424,106 +1548,91 @@ void tipc_crypto_timeout(struct tipc_crypto *rx)
struct tipc_net *tn = tipc_net(rx->net);
struct tipc_crypto *tx = tn->crypto_tx;
struct tipc_key key;
- u8 new_pending, new_passive;
int cmd;
- /* TX key activating:
- * The pending key (users > 0) -> active
- * The active key if any (users == 0) -> free
- */
+ /* TX pending: taking all users & stable -> active */
spin_lock(&tx->lock);
key = tx->key;
if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
goto s1;
if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
goto s1;
- if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_LIM))
+ if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
goto s1;
tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
if (key.active)
tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
- pr_info("TX(%s): key %d is activated!\n", tipc_own_id_string(tx->net),
- key.pending);
+ pr_info("%s: key[%d] is activated\n", tx->name, key.pending);
s1:
spin_unlock(&tx->lock);
- /* RX key activating:
- * The pending key (users > 0) -> active
- * The active key if any -> passive, freed later
- */
+ /* RX pending: having user -> active */
spin_lock(&rx->lock);
key = rx->key;
if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
goto s2;
- new_pending = (key.passive &&
- !tipc_aead_users(rx->aead[key.passive])) ?
- key.passive : 0;
- new_passive = (key.active) ?: ((new_pending) ? 0 : key.passive);
- tipc_crypto_key_set_state(rx, new_passive, key.pending, new_pending);
+ if (key.active)
+ key.passive = key.active;
+ key.active = key.pending;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
- pr_info("RX(%s): key %d is activated!\n",
- tipc_node_get_id_str(rx->node), key.pending);
+ pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
goto s5;
s2:
- /* RX key "faulty" switching:
- * The faulty pending key (users < -30) -> passive
- * The passive key (users = 0) -> pending
- * Note: This only happens after RX deactivated - s3!
- */
- key = rx->key;
- if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -30)
- goto s3;
- if (!key.passive || tipc_aead_users(rx->aead[key.passive]) != 0)
+ /* RX pending: not working -> remove */
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
goto s3;
- new_pending = key.passive;
- new_passive = key.pending;
- tipc_crypto_key_set_state(rx, new_passive, key.active, new_pending);
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
+ tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
+ pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
goto s5;
s3:
- /* RX key deactivating:
- * The passive key if any -> pending
- * The active key -> passive (users = 0) / pending
- * The pending key if any -> passive (users = 0)
- */
- key = rx->key;
+ /* RX active: timed out or no user -> pending */
if (!key.active)
goto s4;
- if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM))
+ if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.active]) > 0)
goto s4;
- new_pending = (key.passive) ?: key.active;
- new_passive = (key.passive) ? key.active : key.pending;
- tipc_aead_users_set(rx->aead[new_pending], 0);
- if (new_passive)
- tipc_aead_users_set(rx->aead[new_passive], 0);
- tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
- pr_info("RX(%s): key %d is deactivated!\n",
- tipc_node_get_id_str(rx->node), key.active);
+ if (key.pending)
+ key.passive = key.active;
+ else
+ key.pending = key.active;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
+ tipc_aead_users_set(rx->aead[key.pending], 0);
+ pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
goto s5;
s4:
- /* RX key passive -> freed: */
- key = rx->key;
- if (!key.passive || !tipc_aead_users(rx->aead[key.passive]))
+ /* RX passive: outdated or not working -> free */
+ if (!key.passive)
goto s5;
- if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM))
+ if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.passive]) > -10)
goto s5;
tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
- pr_info("RX(%s): key %d is freed!\n", tipc_node_get_id_str(rx->node),
- key.passive);
+ pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);
s5:
spin_unlock(&rx->lock);
+ /* Relax the flag here; it will be set again if a legacy peer really is
+ * out there, but for safety only after the grace period has passed!
+ */
+ if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
+ tx->legacy_user = 0;
+
/* Limit max_tfms & do debug commands if needed */
if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
return;
@@ -1533,6 +1642,22 @@ s5:
tipc_crypto_do_cmd(rx->net, cmd);
}
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type)
+{
+ struct sk_buff *skb;
+
+ skb = skb_clone(_skb, GFP_ATOMIC);
+ if (skb) {
+ TIPC_SKB_CB(skb)->xmit_type = type;
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+ b->media->send_msg(net, skb, b, dst);
+ }
+}
+
/**
* tipc_crypto_xmit - Build & encrypt TIPC message for xmit
* @net: struct net
@@ -1542,7 +1667,8 @@ s5:
* @__dnode: destination node for reference if any
*
* First, build an encryption message header on the top of the message, then
- * encrypt the original TIPC message by using the active or pending TX key.
+ * encrypt the original TIPC message by using the pending, master or active
+ * key with this preference order.
* If the encryption is successful, the encrypted skb is returned directly or
* via the callback.
* Otherwise, the skb is freed!
@@ -1562,46 +1688,67 @@ int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
struct tipc_crypto_stats __percpu *stats = tx->stats;
+ struct tipc_msg *hdr = buf_msg(*skb);
struct tipc_key key = tx->key;
struct tipc_aead *aead = NULL;
- struct sk_buff *probe;
+ u32 user = msg_user(hdr);
+ u32 type = msg_type(hdr);
int rc = -ENOKEY;
- u8 tx_key;
+ u8 tx_key = 0;
/* No encryption? */
if (!tx->working)
return 0;
- /* Try with the pending key if available and:
- * 1) This is the only choice (i.e. no active key) or;
- * 2) Peer has switched to this key (unicast only) or;
- * 3) It is time to do a pending key probe;
- */
+ /* Pending key if peer has active on it or probing time */
if (unlikely(key.pending)) {
tx_key = key.pending;
- if (!key.active)
+ if (!tx->key_master && !key.active)
goto encrypt;
if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
goto encrypt;
- if (TIPC_SKB_CB(*skb)->probe)
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
+ pr_debug("%s: probing for key[%d]\n", tx->name,
+ key.pending);
+ goto encrypt;
+ }
+ if (user == LINK_CONFIG || user == LINK_PROTOCOL)
+ tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
+ SKB_PROBING);
+ }
+
+ /* Master key if this is a *vital* message or in grace period */
+ if (tx->key_master) {
+ tx_key = KEY_MASTER;
+ if (!key.active)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
+ pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
+ user, type);
goto encrypt;
- if (!__rx &&
- time_after(jiffies, tx->timer2 + TIPC_TX_PROBE_LIM)) {
- tx->timer2 = jiffies;
- probe = skb_clone(*skb, GFP_ATOMIC);
- if (probe) {
- TIPC_SKB_CB(probe)->probe = 1;
- tipc_crypto_xmit(net, &probe, b, dst, __dnode);
- if (probe)
- b->media->send_msg(net, probe, b, dst);
+ }
+ if (user == LINK_CONFIG ||
+ (user == LINK_PROTOCOL && type == RESET_MSG) ||
+ (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
+ time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
+ if (__rx && __rx->key_master &&
+ !atomic_read(&__rx->peer_rx_active))
+ goto encrypt;
+ if (!__rx) {
+ if (likely(!tx->legacy_user))
+ goto encrypt;
+ tipc_crypto_clone_msg(net, *skb, b, dst,
+ __dnode, SKB_GRACING);
}
}
}
+
/* Else, use the active key if any */
if (likely(key.active)) {
tx_key = key.active;
goto encrypt;
}
+
goto exit;
encrypt:
@@ -1667,30 +1814,21 @@ int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
struct tipc_aead *aead = NULL;
struct tipc_key key;
int rc = -ENOKEY;
- u8 tx_key = 0;
+ u8 tx_key, n;
+
+ tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
/* New peer?
* Let's try with TX key (i.e. cluster mode) & verify the skb first!
*/
- if (unlikely(!rx))
+ if (unlikely(!rx || tx_key == KEY_MASTER))
goto pick_tx;
- /* Pick RX key according to TX key, three cases are possible:
- * 1) The current active key (likely) or;
- * 2) The pending (new or deactivated) key (if any) or;
- * 3) The passive or old active key (i.e. users > 0);
- */
- tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
+ /* Pick RX key according to TX key if any */
key = rx->key;
- if (likely(tx_key == key.active))
+ if (tx_key == key.active || tx_key == key.pending ||
+ tx_key == key.passive)
goto decrypt;
- if (tx_key == key.pending)
- goto decrypt;
- if (tx_key == key.passive) {
- rx->timer2 = jiffies;
- if (tipc_aead_users(rx->aead[key.passive]) > 0)
- goto decrypt;
- }
/* Unknown key, let's try to align RX key(s) */
if (tipc_crypto_key_try_align(rx, tx_key))
@@ -1698,7 +1836,7 @@ int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
pick_tx:
/* No key suitable? Try to pick one from TX... */
- aead = tipc_crypto_key_pick_tx(tx, rx, *skb);
+ aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
if (aead)
goto decrypt;
goto exit;
@@ -1726,8 +1864,19 @@ exit:
if (rc == -ENOKEY) {
kfree_skb(*skb);
*skb = NULL;
- if (rx)
+ if (rx) {
+ /* Mark rx->nokey only if we dont have a
+ * pending received session key, nor a newer
+ * one i.e. in the next slot.
+ */
+ n = key_next(tx_key);
+ rx->nokey = !(rx->skey ||
+ rcu_access_pointer(rx->aead[n]));
+ pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
+ rx->name, rx->nokey,
+ tx_key, rx->key.keys);
tipc_node_put(rx->node);
+ }
this_cpu_inc(stats->stat[STAT_NOKEYS]);
return rc;
} else if (rc == -EBADMSG) {
@@ -1749,21 +1898,17 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
struct tipc_aead *tmp = NULL;
struct tipc_ehdr *ehdr;
struct tipc_node *n;
- u8 rx_key_active;
- bool destined;
/* Is this completed by TX? */
- if (unlikely(!rx->node)) {
+ if (unlikely(is_tx(aead->crypto))) {
rx = skb_cb->tx_clone_ctx.rx;
-#ifdef TIPC_CRYPTO_DEBUG
- pr_info("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
- (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
- (*skb)->next, skb_cb->flags);
- pr_info("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
- skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
- aead->crypto->aead[1], aead->crypto->aead[2],
- aead->crypto->aead[3]);
-#endif
+ pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
+ (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
+ (*skb)->next, skb_cb->flags);
+ pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
+ skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
+ aead->crypto->aead[1], aead->crypto->aead[2],
+ aead->crypto->aead[3]);
if (unlikely(err)) {
if (err == -EBADMSG && (*skb)->next)
tipc_rcv(net, (*skb)->next, b);
@@ -1784,12 +1929,12 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
goto free_skb;
}
- /* Skip cloning this time as we had a RX pending key */
- if (rx->key.pending)
+ /* Ignore cloning if it was TX master key */
+ if (ehdr->tx_key == KEY_MASTER)
goto rcv;
if (tipc_aead_clone(&tmp, aead) < 0)
goto rcv;
- if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key) < 0) {
+ if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
tipc_aead_free(&tmp->rcu);
goto rcv;
}
@@ -1805,14 +1950,18 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
/* Set the RX key's user */
tipc_aead_users_set(aead, 1);
-rcv:
/* Mark this point, RX works */
rx->timer1 = jiffies;
+rcv:
/* Remove ehdr & auth. tag prior to tipc_rcv() */
ehdr = (struct tipc_ehdr *)(*skb)->data;
- destined = ehdr->destined;
- rx_key_active = ehdr->rx_key_active;
+
+ /* Mark this point, RX passive still works */
+ if (rx->key.passive && ehdr->tx_key == rx->key.passive)
+ rx->timer2 = jiffies;
+
+ skb_reset_network_header(*skb);
skb_pull(*skb, tipc_ehdr_size(ehdr));
pskb_trim(*skb, (*skb)->len - aead->authsize);
@@ -1822,9 +1971,8 @@ rcv:
goto free_skb;
}
- /* Update peer RX active key & TX users */
- if (destined)
- tipc_crypto_key_synch(rx, rx_key_active, buf_msg(*skb));
+ /* Ok, everything's fine, try to synch own keys according to peers' */
+ tipc_crypto_key_synch(rx, *skb);
/* Mark skb decrypted */
skb_cb->decrypted = 1;
@@ -1883,7 +2031,7 @@ print_stats:
/* Print crypto statistics */
for (i = 0, j = 0; i < MAX_STATS; i++)
j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
- pr_info("\nCounter %s", buf);
+ pr_info("Counter %s", buf);
memset(buf, '-', 115);
buf[115] = '\0';
@@ -1927,21 +2075,31 @@ static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
char *s;
for (k = KEY_MIN; k <= KEY_MAX; k++) {
- if (k == key.passive)
- s = "PAS";
- else if (k == key.active)
- s = "ACT";
- else if (k == key.pending)
- s = "PEN";
- else
- s = "-";
+ if (k == KEY_MASTER) {
+ if (is_rx(c))
+ continue;
+ if (time_before(jiffies,
+ c->timer2 + TIPC_TX_GRACE_PERIOD))
+ s = "ACT";
+ else
+ s = "PAS";
+ } else {
+ if (k == key.passive)
+ s = "PAS";
+ else if (k == key.active)
+ s = "ACT";
+ else if (k == key.pending)
+ s = "PEN";
+ else
+ s = "-";
+ }
i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
rcu_read_lock();
aead = rcu_dereference(c->aead[k]);
if (aead)
i += scnprintf(buf + i, 200 - i,
- "{\"%s...\", \"%s\"}/%d:%d",
+ "{\"0x...%s\", \"%s\"}/%d:%d",
aead->hint,
(aead->mode == CLUSTER_KEY) ? "c" : "p",
atomic_read(&aead->users),
@@ -1950,14 +2108,13 @@ static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
i += scnprintf(buf + i, 200 - i, "\n");
}
- if (c->node)
+ if (is_rx(c))
i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
atomic_read(&c->peer_rx_active));
return buf;
}
-#ifdef TIPC_CRYPTO_DEBUG
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
char *buf)
{
@@ -1968,7 +2125,7 @@ static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
/* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
again:
i += scnprintf(buf + i, 32 - i, "[");
- for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ for (k = KEY_1; k <= KEY_3; k++) {
if (k == key->passive)
s = "pas";
else if (k == key->active)
@@ -1978,7 +2135,7 @@ again:
else
s = "-";
i += scnprintf(buf + i, 32 - i,
- (k != KEY_MAX) ? "%s " : "%s", s);
+ (k != KEY_3) ? "%s " : "%s", s);
}
if (key != &new) {
i += scnprintf(buf + i, 32 - i, "] -> ");
@@ -1988,4 +2145,320 @@ again:
i += scnprintf(buf + i, 32 - i, "]");
return buf;
}
-#endif
+
+/**
+ * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
+ * @net: the struct net
+ * @skb: the receiving message buffer
+ */
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
+{
+ struct tipc_crypto *rx;
+ struct tipc_msg *hdr;
+
+ if (unlikely(skb_linearize(skb)))
+ goto exit;
+
+ hdr = buf_msg(skb);
+ rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
+ if (unlikely(!rx))
+ goto exit;
+
+ switch (msg_type(hdr)) {
+ case KEY_DISTR_MSG:
+ if (tipc_crypto_key_rcv(rx, hdr))
+ goto exit;
+ break;
+ default:
+ break;
+ }
+
+ tipc_node_put(rx->node);
+
+exit:
+ kfree_skb(skb);
+}
+
+/**
+ * tipc_crypto_key_distr - Distribute a TX key
+ * @tx: the TX crypto
+ * @key: the key's index
+ * @dest: the destination tipc node, = NULL if distributing to all nodes
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest)
+{
+ struct tipc_aead *aead;
+ u32 dnode = tipc_node_get_addr(dest);
+ int rc = -ENOKEY;
+
+ if (!sysctl_tipc_key_exchange_enabled)
+ return 0;
+
+ if (key) {
+ rcu_read_lock();
+ aead = tipc_aead_get(tx->aead[key]);
+ if (likely(aead)) {
+ rc = tipc_crypto_key_xmit(tx->net, aead->key,
+ aead->gen, aead->mode,
+ dnode);
+ tipc_aead_put(aead);
+ }
+ rcu_read_unlock();
+ }
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_xmit - Send a session key
+ * @net: the struct net
+ * @skey: the session key to be sent
+ * @gen: the key's generation
+ * @mode: the key's mode
+ * @dnode: the destination node address, = 0 if broadcasting to all nodes
+ *
+ * The session key 'skey' is packed into a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG'
+ * as its data section, then transmitted via the unicast or broadcast link.
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode)
+{
+ struct sk_buff_head pkts;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ u16 size, cong_link_cnt;
+ u8 *data;
+ int rc;
+
+ size = tipc_aead_key_size(skey);
+ skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = buf_msg(skb);
+ tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
+ INT_H_SIZE, dnode);
+ msg_set_size(hdr, INT_H_SIZE + size);
+ msg_set_key_gen(hdr, gen);
+ msg_set_key_mode(hdr, mode);
+
+ data = msg_data(hdr);
+ *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
+ memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
+ memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
+ skey->keylen);
+
+ __skb_queue_head_init(&pkts);
+ __skb_queue_tail(&pkts, skb);
+ if (dnode)
+ rc = tipc_node_xmit(net, &pkts, dnode, 0);
+ else
+ rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);
+
+ return rc;
+}
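For reference, the data section assembled above has a fixed layout: TIPC_AEAD_ALG_NAME bytes of algorithm name, a 4-byte big-endian key length, then the raw key bytes. Below is a minimal userspace sketch of the same packing, assuming TIPC_AEAD_ALG_NAME is 32 as in crypto.h; pack_skey() is a hypothetical helper, not part of this patch.

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define TIPC_AEAD_ALG_NAME 32	/* assumed, mirroring crypto.h */

/* Pack the algorithm name, the key length (network order) and the key
 * bytes into 'data', following the layout used by tipc_crypto_key_xmit().
 * Returns the number of bytes written.
 */
static size_t pack_skey(uint8_t *data, const char *alg,
			const uint8_t *key, uint32_t keylen)
{
	uint32_t be_len = htonl(keylen);

	memcpy(data, alg, TIPC_AEAD_ALG_NAME);
	memcpy(data + TIPC_AEAD_ALG_NAME, &be_len, sizeof(be_len));
	memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(be_len), key, keylen);
	return TIPC_AEAD_ALG_NAME + sizeof(be_len) + keylen;
}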
+
+/**
+ * tipc_crypto_key_rcv - Receive a session key
+ * @rx: the RX crypto
+ * @hdr: the TIPC v2 message incl. the receiving session key in its data
+ *
+ * This function retrieves the session key from the peer's message, then
+ * schedules an RX work to attach the key to the corresponding RX crypto.
+ *
+ * Return: "true" if the key has been scheduled for attaching, otherwise
+ * "false".
+ */
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+{
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_aead_key *skey = NULL;
+ u16 key_gen = msg_key_gen(hdr);
+ u16 size = msg_data_sz(hdr);
+ u8 *data = msg_data(hdr);
+
+ spin_lock(&rx->lock);
+ if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
+ pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
+ rx->skey, key_gen, rx->key_gen);
+ goto exit;
+ }
+
+ /* Allocate memory for the key */
+ skey = kmalloc(size, GFP_ATOMIC);
+ if (unlikely(!skey)) {
+ pr_err("%s: unable to allocate memory for skey\n", rx->name);
+ goto exit;
+ }
+
+ /* Copy key from msg data */
+ skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+ memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
+ memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
+ skey->keylen);
+
+ /* Sanity check */
+ if (unlikely(size != tipc_aead_key_size(skey))) {
+ kfree(skey);
+ skey = NULL;
+ goto exit;
+ }
+
+ rx->key_gen = key_gen;
+ rx->skey_mode = msg_key_mode(hdr);
+ rx->skey = skey;
+ rx->nokey = 0;
+ mb(); /* for nokey flag */
+
+exit:
+ spin_unlock(&rx->lock);
+
+ /* Schedule attaching the key on this crypto */
+ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
+ return true;
+
+ return false;
+}
+
+/**
+ * tipc_crypto_work_rx - Scheduled RX works handler
+ * @work: the struct RX work
+ *
+ * The function processes the previously scheduled works, i.e. distributing the
+ * TX key or attaching a received session key to the RX crypto.
+ */
+static void tipc_crypto_work_rx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ unsigned long delay = msecs_to_jiffies(5000);
+ bool resched = false;
+ u8 key;
+ int rc;
+
+ /* Case 1: Distribute TX key to peer if scheduled */
+ if (atomic_cmpxchg(&rx->key_distr,
+ KEY_DISTR_SCHED,
+ KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
+ /* Always pick the newest one for distributing */
+ key = tx->key.pending ?: tx->key.active;
+ rc = tipc_crypto_key_distr(tx, key, rx->node);
+ if (unlikely(rc))
+ pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
+ tx->name, key, tipc_node_get_id_str(rx->node),
+ rc);
+
+ /* Re-schedule so key_distr is released on the next pass */
+ resched = true;
+ } else {
+ atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
+ }
+
+ /* Case 2: Attach a pending received session key from peer if any */
+ if (rx->skey) {
+ rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
+ if (unlikely(rc < 0))
+ pr_warn("%s: unable to attach received skey, err %d\n",
+ rx->name, rc);
+ switch (rc) {
+ case -EBUSY:
+ case -ENOMEM:
+ /* Resched the key attaching */
+ resched = true;
+ break;
+ default:
+ synchronize_rcu();
+ kfree(rx->skey);
+ rx->skey = NULL;
+ break;
+ }
+ }
+
+ if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
+ return;
+
+ tipc_node_put(rx->node);
+}
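The key_distr field above acts as a small three-state machine (idle -> scheduled -> completed -> idle), advanced with compare-and-swap so that only one work instance distributes a given key and the state is released one pass later. A rough standalone illustration of the pattern using C11 atomics follows; the state values are illustrative, not the kernel's.

#include <stdatomic.h>

enum { KEY_DISTR_IDLE = 0, KEY_DISTR_SCHED = 1, KEY_DISTR_COMPL = 2 };

/* First pass: claim a scheduled distribution (SCHED -> COMPL).
 * Only the single caller that wins the swap returns nonzero.
 */
static int claim_distr(atomic_int *st)
{
	int expected = KEY_DISTR_SCHED;

	return atomic_compare_exchange_strong(st, &expected, KEY_DISTR_COMPL);
}

/* A later pass: release the completed state (COMPL -> IDLE). */
static void release_distr(atomic_int *st)
{
	int expected = KEY_DISTR_COMPL;

	atomic_compare_exchange_strong(st, &expected, KEY_DISTR_IDLE);
}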
+
+/**
+ * tipc_crypto_rekeying_sched - (Re)schedule rekeying with or without a new interval
+ * @tx: TX crypto
+ * @changed: whether the rekeying needs to be rescheduled with a new interval
+ * @new_intv: the new rekeying interval (when "changed" = true)
+ */
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv)
+{
+ unsigned long delay;
+ bool now = false;
+
+ if (changed) {
+ if (new_intv == TIPC_REKEYING_NOW)
+ now = true;
+ else
+ tx->rekeying_intv = new_intv;
+ cancel_delayed_work_sync(&tx->work);
+ }
+
+ if (tx->rekeying_intv || now) {
+ delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
+ queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
+ }
+}
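Note that rekeying_intv above is kept in minutes and converted to milliseconds before msecs_to_jiffies(). A trivial check of that arithmetic, with an illustrative value:

#include <stdio.h>

int main(void)
{
	unsigned int rekeying_intv = 10;	/* minutes, example value */
	unsigned long delay_ms = rekeying_intv * 60 * 1000;

	/* 10 min -> 600000 ms; the kernel then turns this into jiffies */
	printf("delay = %lu ms\n", delay_ms);
	return 0;
}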
+
+/**
+ * tipc_crypto_work_tx - Scheduled TX works handler
+ * @work: the struct TX work
+ *
+ * The function processes the previously scheduled work, i.e. key rekeying, by
+ * generating a new session key based on the current one, then attaching it to the
+ * TX crypto and finally distributing it to peers. It also re-schedules the
+ * rekeying if needed.
+ */
+static void tipc_crypto_work_tx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_aead_key *skey = NULL;
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead;
+ int rc = -ENOMEM;
+
+ if (unlikely(key.pending))
+ goto resched;
+
+ /* Take current key as a template */
+ rcu_read_lock();
+ aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
+ if (unlikely(!aead)) {
+ rcu_read_unlock();
+ /* At least one key should exist for securing */
+ return;
+ }
+
+ /* Let's duplicate it first */
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
+ rcu_read_unlock();
+
+ /* Now generate a new key, then initiate & distribute it */
+ if (likely(skey)) {
+ rc = tipc_aead_key_generate(skey) ?:
+ tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
+ if (likely(rc > 0))
+ rc = tipc_crypto_key_distr(tx, rc, NULL);
+ kzfree(skey);
+ }
+
+ if (unlikely(rc))
+ pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);
+
+resched:
+ /* Re-schedule rekeying if any */
+ tipc_crypto_rekeying_sched(tx, false, 0);
+}
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
index c3de769f49e8..e71193bd5e36 100644
--- a/net/tipc/crypto.h
+++ b/net/tipc/crypto.h
@@ -67,6 +67,7 @@ enum {
};
extern int sysctl_tipc_max_tfms __read_mostly;
+extern int sysctl_tipc_key_exchange_enabled __read_mostly;
/**
* TIPC encryption message format:
@@ -74,7 +75,7 @@ extern int sysctl_tipc_max_tfms __read_mostly;
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
* 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w0:|Ver=7| User |D|TX |RX |K| Rsvd |
+ * w0:|Ver=7| User |D|TX |RX |K|M|N| Rsvd |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* w1:| Seqno |
* w2:| (8 octets) |
@@ -101,6 +102,9 @@ extern int sysctl_tipc_max_tfms __read_mostly;
* RX : Currently RX active key corresponding to the destination
* node's TX key (when the "D" bit is set)
* K : Keep-alive bit (for RPS, LINK_PROTOCOL/STATE_MSG only)
+ * M : Bit indicating whether the sender has a master key
+ * N : Bit indicating whether the sender has no RX keys corresponding
+ * to the receiver's TX key (when the "D" bit is set)
* Rsvd : Reserved bit field
* Word1-2:
* Seqno : The 64-bit sequence number of the encrypted message, also
@@ -117,7 +121,9 @@ struct tipc_ehdr {
__u8 destined:1,
user:4,
version:3;
- __u8 reserved_1:3,
+ __u8 reserved_1:1,
+ rx_nokey:1,
+ master_key:1,
keepalive:1,
rx_key_active:2,
tx_key:2;
@@ -128,7 +134,9 @@ struct tipc_ehdr {
__u8 tx_key:2,
rx_key_active:2,
keepalive:1,
- reserved_1:3;
+ master_key:1,
+ rx_nokey:1,
+ reserved_1:1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
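Tying the diagram to the bitfields: the "M" and "N" flags live in the second header octet, which on the wire reads MSB-first as TX(2) | RX(2) | K(1) | M(1) | N(1) | Rsvd(1). A small sketch of extracting them from a raw header, assuming that octet order; the helpers are hypothetical, not part of the patch.

#include <stdint.h>

/* Second header octet, per the w0 diagram above (MSB first):
 * TX(2) | RX(2) | K(1) | M(1) | N(1) | Rsvd(1)
 */
static inline int ehdr_master_key(const uint8_t *hdr)
{
	return (hdr[1] >> 2) & 0x1;	/* "M" bit */
}

static inline int ehdr_rx_nokey(const uint8_t *hdr)
{
	return (hdr[1] >> 1) & 0x1;	/* "N" bit */
}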
@@ -158,10 +166,35 @@ int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
struct sk_buff **skb, struct tipc_bearer *b);
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
- u8 mode);
+ u8 mode, bool master_key);
void tipc_crypto_key_flush(struct tipc_crypto *c);
-int tipc_aead_key_validate(struct tipc_aead_key *ukey);
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest);
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb);
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv);
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info);
bool tipc_ehdr_validate(struct sk_buff *skb);
+static inline u32 msg_key_gen(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_key_gen(struct tipc_msg *m, u32 gen)
+{
+ msg_set_bits(m, 4, 16, 0xffff, gen);
+}
+
+static inline u32 msg_key_mode(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xf);
+}
+
+static inline void msg_set_key_mode(struct tipc_msg *m, u32 mode)
+{
+ msg_set_bits(m, 4, 0, 0xf, mode);
+}
+
#endif /* _TIPC_CRYPTO_H */
#endif
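The msg_key_gen()/msg_key_mode() accessors above follow TIPC's usual pattern of reading and writing masked fields of a network-order header word. A self-contained sketch of that pattern, with get_bits()/set_bits() as hypothetical stand-ins for msg_bits()/msg_set_bits() and header words assumed big-endian:

#include <arpa/inet.h>
#include <stdint.h>

/* Read a masked field from header word 'w': shift right by 'pos', then
 * mask. msg_key_gen() corresponds to get_bits(hdr, 4, 16, 0xffff) and
 * msg_key_mode() to get_bits(hdr, 4, 0, 0xf).
 */
static uint32_t get_bits(const uint32_t *hdr, int w, int pos, uint32_t mask)
{
	return (ntohl(hdr[w]) >> pos) & mask;
}

/* Clear the field, OR in the new value, store back in network order. */
static void set_bits(uint32_t *hdr, int w, int pos, uint32_t mask,
		     uint32_t val)
{
	uint32_t word = ntohl(hdr[w]) & ~(mask << pos);

	hdr[w] = htonl(word | ((val & mask) << pos));
}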
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 588c2d2b0c69..b1fcd2ad5ecf 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
return NULL;
}
-static void tipc_group_add_to_tree(struct tipc_group *grp,
- struct tipc_member *m)
+static int tipc_group_add_to_tree(struct tipc_group *grp,
+ struct tipc_member *m)
{
u64 nkey, key = (u64)m->node << 32 | m->port;
struct rb_node **n, *parent = NULL;
@@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp,
else if (key > nkey)
n = &(*n)->rb_right;
else
- return;
+ return -EEXIST;
}
rb_link_node(&m->tree_node, parent, n);
rb_insert_color(&m->tree_node, &grp->members);
+ return 0;
}
static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
@@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
u32 instance, int state)
{
struct tipc_member *m;
+ int ret;
m = kzalloc(sizeof(*m), GFP_ATOMIC);
if (!m)
@@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
m->port = port;
m->instance = instance;
m->bc_acked = grp->bc_snd_nxt - 1;
+ ret = tipc_group_add_to_tree(grp, m);
+ if (ret < 0) {
+ kfree(m);
+ return NULL;
+ }
grp->member_cnt++;
- tipc_group_add_to_tree(grp, m);
tipc_nlist_add(&grp->dests, m->node);
m->state = state;
return m;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a2989f22ebb6..06b880da2a8e 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -527,7 +527,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
* tipc_link_bc_create - create new link to be used for broadcast
* @net: pointer to associated network namespace
* @mtu: mtu to be used initially if no peers
- * @window: send window to be used
+ * @min_win: minimal send window to be used by link
+ * @max_win: maximal send window to be used by link
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
@@ -1250,6 +1251,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
case MSG_FRAGMENTER:
case BCAST_PROTOCOL:
return false;
+#ifdef CONFIG_TIPC_CRYPTO
+ case MSG_CRYPTO:
+ tipc_crypto_msg_rcv(l->net, skb);
+ return true;
+#endif
default:
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 848fae674532..11a429f4c6cd 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+ frag = skb_unshare(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
goto err;
head = *headbuf = frag;
*buf = NULL;
@@ -581,7 +582,7 @@ bundle:
* @pos: position in outer message of msg to be extracted.
* Returns position of next msg
* Consumes outer buffer when last packet extracted
- * Returns true when when there is an extracted buffer, otherwise false
+ * Returns true when there is an extracted buffer, otherwise false
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 1016e96db5c4..5d64596ba987 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -82,6 +82,7 @@ struct plist;
#define NAME_DISTRIBUTOR 11
#define MSG_FRAGMENTER 12
#define LINK_CONFIG 13
+#define MSG_CRYPTO 14
#define SOCK_WAKEUP 14 /* pseudo user */
#define TOP_SRV 15 /* pseudo user */
@@ -127,7 +128,9 @@ struct tipc_skb_cb {
#ifdef CONFIG_TIPC_CRYPTO
u8 encrypted:1;
u8 decrypted:1;
- u8 probe:1;
+#define SKB_PROBING 1
+#define SKB_GRACING 2
+ u8 xmit_type:2;
u8 tx_clone_deferred:1;
#endif
};
@@ -747,6 +750,9 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
#define GRP_RECLAIM_MSG 4
#define GRP_REMIT_MSG 5
+/* Crypto message types */
+#define KEY_DISTR_MSG 0
+
/*
* Word 1
*/
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index c4aee6247d55..c447cb5f879e 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -108,6 +108,8 @@ const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
.len = TIPC_NODEID_LEN},
[TIPC_NLA_NODE_KEY] = { .type = NLA_BINARY,
.len = TIPC_AEAD_KEY_SIZE_MAX},
+ [TIPC_NLA_NODE_KEY_MASTER] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_REKEYING] = { .type = NLA_U32 },
};
/* Properties valid for media, bearer and link */
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4edcee3088da..cf4b239fc569 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -278,6 +278,14 @@ struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
return container_of(pos, struct tipc_node, list)->crypto_rx;
}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+
+ n = tipc_node_find(net, addr);
+ return (n) ? n->crypto_rx : NULL;
+}
#endif
static void tipc_node_free(struct rcu_head *rp)
@@ -303,7 +311,7 @@ void tipc_node_put(struct tipc_node *node)
kref_put(&node->kref, tipc_node_kref_release);
}
-static void tipc_node_get(struct tipc_node *node)
+void tipc_node_get(struct tipc_node *node)
{
kref_get(&node->kref);
}
@@ -584,6 +592,9 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
static void tipc_node_delete_from_list(struct tipc_node *node)
{
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_key_flush(node->crypto_rx);
+#endif
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
tipc_node_put(node);
@@ -2868,15 +2879,27 @@ static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
return 0;
}
+static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
+
+ if (!attr)
+ return -ENODATA;
+
+ *intv = nla_get_u32(attr);
+ return 0;
+}
+
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = tipc_net(net);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
struct tipc_node *n = NULL;
struct tipc_aead_key *ukey;
- struct tipc_crypto *c;
- u8 *id, *own_id;
+ bool rekeying = true, master_key = false;
+ u8 *id, *own_id, mode;
+ u32 intv = 0;
int rc = 0;
if (!info->attrs[TIPC_NLA_NODE])
@@ -2886,52 +2909,66 @@ static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
info->attrs[TIPC_NLA_NODE],
tipc_nl_node_policy, info->extack);
if (rc)
- goto exit;
+ return rc;
own_id = tipc_own_id(net);
if (!own_id) {
- rc = -EPERM;
- goto exit;
+ GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
+ return -EPERM;
}
+ rc = tipc_nl_retrieve_rekeying(attrs, &intv);
+ if (rc == -ENODATA)
+ rekeying = false;
+
rc = tipc_nl_retrieve_key(attrs, &ukey);
- if (rc)
- goto exit;
+ if (rc == -ENODATA && rekeying)
+ goto rekeying;
+ else if (rc)
+ return rc;
- rc = tipc_aead_key_validate(ukey);
+ rc = tipc_aead_key_validate(ukey, info);
if (rc)
- goto exit;
+ return rc;
rc = tipc_nl_retrieve_nodeid(attrs, &id);
switch (rc) {
case -ENODATA:
- /* Cluster key mode */
- rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
+ mode = CLUSTER_KEY;
+ master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
break;
case 0:
- /* Per-node key mode */
- if (!memcmp(id, own_id, NODE_ID_LEN)) {
- c = tn->crypto_tx;
- } else {
+ mode = PER_NODE_KEY;
+ if (memcmp(id, own_id, NODE_ID_LEN)) {
n = tipc_node_find_by_id(net, id) ?:
tipc_node_create(net, 0, id, 0xffffu, 0, true);
- if (unlikely(!n)) {
- rc = -ENOMEM;
- break;
- }
+ if (unlikely(!n))
+ return -ENOMEM;
c = n->crypto_rx;
}
-
- rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
- if (n)
- tipc_node_put(n);
break;
default:
- break;
+ return rc;
}
-exit:
- return (rc < 0) ? rc : 0;
+ /* Initiate the TX/RX key */
+ rc = tipc_crypto_key_init(c, ukey, mode, master_key);
+ if (n)
+ tipc_node_put(n);
+
+ if (unlikely(rc < 0)) {
+ GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
+ return rc;
+ } else if (c == tx) {
+ /* Distribute the TX key, but not the master one */
+ if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
+ GENL_SET_ERR_MSG(info, "failed to replicate new key");
+rekeying:
+ /* Schedule TX rekeying if needed */
+ tipc_crypto_rekeying_sched(tx, rekeying, intv);
+ }
+
+ return 0;
}
int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
@@ -2958,7 +2995,6 @@ static int __tipc_nl_node_flush_key(struct sk_buff *skb,
tipc_crypto_key_flush(n->crypto_rx);
rcu_read_unlock();
- pr_info("All keys are flushed!\n");
return 0;
}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 9f6f13f1604f..154a5bbb0d29 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -79,12 +79,14 @@ bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
u32 tipc_node_get_addr(struct tipc_node *node);
char *tipc_node_get_id_str(struct tipc_node *node);
void tipc_node_put(struct tipc_node *node);
+void tipc_node_get(struct tipc_node *node);
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
u16 capabilities, u32 hash_mixes,
bool preliminary);
#ifdef CONFIG_TIPC_CRYPTO
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n);
struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos);
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr);
#endif
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index dd93e8ecb2f4..e795a8a2955b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -54,7 +54,7 @@
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
#define TIPC_MAX_PORT 0xffffffff
#define TIPC_MIN_PORT 1
-#define TIPC_ACK_RATE 4 /* ACK at 1/4 of of rcv window size */
+#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
enum {
TIPC_LISTEN = TCP_LISTEN,
@@ -2770,10 +2770,7 @@ static int tipc_shutdown(struct socket *sock, int how)
trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- if (tipc_sk_type_connectionless(sk))
- sk->sk_shutdown = SHUTDOWN_MASK;
- else
- sk->sk_shutdown = SEND_SHUTDOWN;
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 97a6264a2993..9fb65c988f7f 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -74,6 +74,15 @@ static struct ctl_table tipc_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
+ {
+ .procname = "key_exchange_enabled",
+ .data = &sysctl_tipc_key_exchange_enabled,
+ .maxlen = sizeof(sysctl_tipc_key_exchange_enabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
#endif
{
.procname = "bc_retruni",
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 1489cfb941d8..5f6f86051c83 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -48,7 +48,6 @@
#define MAX_SEND_MSG_COUNT 25
#define MAX_RECV_MSG_COUNT 25
#define CF_CONNECTED 1
-#define CF_SERVER 2
#define TIPC_SERVER_NAME_LEN 32
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 911d13cd2e67..1d17f4470ee2 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,6 +52,7 @@
#include "bearer.h"
#include "netlink.h"
#include "msg.h"
+#include "udp_media.h"
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 92784e51ee7d..eb82bdc6cf7c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -878,7 +878,6 @@ static int unix_autobind(struct socket *sock)
if (err)
return err;
- err = 0;
if (u->addr)
goto out;
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index faf74850a1b5..27026f587fa6 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -217,6 +217,7 @@ config LIB80211_CRYPT_WEP
config LIB80211_CRYPT_CCMP
tristate
+ select CRYPTO
select CRYPTO_AES
select CRYPTO_CCM
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 6a6f2f214c10..96e24ee4c7e8 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -141,9 +141,62 @@ static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
return true;
}
+static int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width)
+{
+ int mhz;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_1:
+ mhz = 1;
+ break;
+ case NL80211_CHAN_WIDTH_2:
+ mhz = 2;
+ break;
+ case NL80211_CHAN_WIDTH_4:
+ mhz = 4;
+ break;
+ case NL80211_CHAN_WIDTH_8:
+ mhz = 8;
+ break;
+ case NL80211_CHAN_WIDTH_16:
+ mhz = 16;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ mhz = 5;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ mhz = 10;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ mhz = 20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ mhz = 40;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_80:
+ mhz = 80;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ mhz = 160;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+ return mhz;
+}
+
+static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+ return nl80211_chan_width_to_mhz(c->width);
+}
+
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
{
- u32 control_freq;
+ u32 control_freq, oper_freq;
+ int oper_width, control_width;
if (!chandef->chan)
return false;
@@ -155,10 +208,6 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
switch (chandef->width) {
case NL80211_CHAN_WIDTH_1:
- case NL80211_CHAN_WIDTH_2:
- case NL80211_CHAN_WIDTH_4:
- case NL80211_CHAN_WIDTH_8:
- case NL80211_CHAN_WIDTH_16:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20:
@@ -169,6 +218,30 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
if (chandef->center_freq2)
return false;
break;
+ case NL80211_CHAN_WIDTH_2:
+ case NL80211_CHAN_WIDTH_4:
+ case NL80211_CHAN_WIDTH_8:
+ case NL80211_CHAN_WIDTH_16:
+ control_freq = ieee80211_channel_to_khz(chandef->chan);
+ oper_freq = ieee80211_chandef_to_khz(chandef);
+ control_width = nl80211_chan_width_to_mhz(
+ ieee80211_s1g_channel_width(
+ chandef->chan));
+ oper_width = cfg80211_chandef_get_width(chandef);
+
+ if (oper_width < 0 || control_width < 0)
+ return false;
+ if (chandef->center_freq2)
+ return false;
+
+ if (control_freq + MHZ_TO_KHZ(control_width) / 2 >
+ oper_freq + MHZ_TO_KHZ(oper_width) / 2)
+ return false;
+
+ if (control_freq - MHZ_TO_KHZ(control_width) / 2 <
+ oper_freq - MHZ_TO_KHZ(oper_width) / 2)
+ return false;
+ break;
case NL80211_CHAN_WIDTH_40:
if (chandef->center_freq1 != control_freq + 10 &&
chandef->center_freq1 != control_freq - 10)
@@ -264,53 +337,6 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
}
}
-static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
-{
- int width;
-
- switch (c->width) {
- case NL80211_CHAN_WIDTH_1:
- width = 1;
- break;
- case NL80211_CHAN_WIDTH_2:
- width = 2;
- break;
- case NL80211_CHAN_WIDTH_4:
- width = 4;
- break;
- case NL80211_CHAN_WIDTH_8:
- width = 8;
- break;
- case NL80211_CHAN_WIDTH_16:
- width = 16;
- break;
- case NL80211_CHAN_WIDTH_5:
- width = 5;
- break;
- case NL80211_CHAN_WIDTH_10:
- width = 10;
- break;
- case NL80211_CHAN_WIDTH_20:
- case NL80211_CHAN_WIDTH_20_NOHT:
- width = 20;
- break;
- case NL80211_CHAN_WIDTH_40:
- width = 40;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_80:
- width = 80;
- break;
- case NL80211_CHAN_WIDTH_160:
- width = 160;
- break;
- default:
- WARN_ON_ONCE(1);
- return -1;
- }
- return width;
-}
-
const struct cfg80211_chan_def *
cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
const struct cfg80211_chan_def *c2)
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index cc7b9fd5c166..d66a913027e0 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -26,8 +26,6 @@
#include <net/lib80211.h>
-#define DRV_NAME "lib80211"
-
#define DRV_DESCRIPTION "common routines for IEEE802.11 drivers"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 52a35e788547..1a212db7a300 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -209,14 +209,23 @@ static int validate_beacon_head(const struct nlattr *attr,
unsigned int len = nla_len(attr);
const struct element *elem;
const struct ieee80211_mgmt *mgmt = (void *)data;
- unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
+ bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
+ unsigned int fixedlen, hdrlen;
+
+ if (s1g_bcn) {
+ fixedlen = offsetof(struct ieee80211_ext,
+ u.s1g_beacon.variable);
+ hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon);
+ } else {
+ fixedlen = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ hdrlen = offsetof(struct ieee80211_mgmt, u.beacon);
+ }
if (len < fixedlen)
goto err;
- if (ieee80211_hdrlen(mgmt->frame_control) !=
- offsetof(struct ieee80211_mgmt, u.beacon))
+ if (ieee80211_hdrlen(mgmt->frame_control) != hdrlen)
goto err;
data += fixedlen;
@@ -367,6 +376,22 @@ nl80211_tid_config_attr_policy[NL80211_TID_CONFIG_ATTR_MAX + 1] = {
NLA_POLICY_NESTED(nl80211_txattr_policy),
};
+static const struct nla_policy
+nl80211_fils_discovery_policy[NL80211_FILS_DISCOVERY_ATTR_MAX + 1] = {
+ [NL80211_FILS_DISCOVERY_ATTR_INT_MIN] = NLA_POLICY_MAX(NLA_U32, 10000),
+ [NL80211_FILS_DISCOVERY_ATTR_INT_MAX] = NLA_POLICY_MAX(NLA_U32, 10000),
+ NLA_POLICY_RANGE(NLA_BINARY,
+ NL80211_FILS_DISCOVERY_TMPL_MIN_LEN,
+ IEEE80211_MAX_DATA_LEN),
+};
+
+static const struct nla_policy
+nl80211_unsol_bcast_probe_resp_policy[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1] = {
+ [NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] = NLA_POLICY_MAX(NLA_U32, 20),
+ [NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN }
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -675,6 +700,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCAN_FREQ_KHZ] = { .type = NLA_NESTED },
[NL80211_ATTR_HE_6GHZ_CAPABILITY] =
NLA_POLICY_EXACT_LEN(sizeof(struct ieee80211_he_6ghz_capa)),
+ [NL80211_ATTR_FILS_DISCOVERY] =
+ NLA_POLICY_NESTED(nl80211_fils_discovery_policy),
+ [NL80211_ATTR_UNSOL_BCAST_PROBE_RESP] =
+ NLA_POLICY_NESTED(nl80211_unsol_bcast_probe_resp_policy),
};
/* policy for the key attributes */
@@ -1010,6 +1039,21 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_NO_HE) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE))
goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_1MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_1MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_2MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_2MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_4MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_4MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_8MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_8MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_16MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_16MHZ))
+ goto nla_put_failure;
}
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -4850,6 +4894,65 @@ static int nl80211_parse_he_bss_color(struct nlattr *attrs,
return 0;
}
+static int nl80211_parse_fils_discovery(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs,
+ struct cfg80211_ap_settings *params)
+{
+ struct nlattr *tb[NL80211_FILS_DISCOVERY_ATTR_MAX + 1];
+ int ret;
+ struct cfg80211_fils_discovery *fd = &params->fils_discovery;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY))
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, NL80211_FILS_DISCOVERY_ATTR_MAX, attrs,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN] ||
+ !tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX] ||
+ !tb[NL80211_FILS_DISCOVERY_ATTR_TMPL])
+ return -EINVAL;
+
+ fd->tmpl_len = nla_len(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]);
+ fd->tmpl = nla_data(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]);
+ fd->min_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN]);
+ fd->max_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX]);
+
+ return 0;
+}
+
+static int
+nl80211_parse_unsol_bcast_probe_resp(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs,
+ struct cfg80211_ap_settings *params)
+{
+ struct nlattr *tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1];
+ int ret;
+ struct cfg80211_unsol_bcast_probe_resp *presp =
+ &params->unsol_bcast_probe_resp;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP))
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX,
+ attrs, NULL, NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] ||
+ !tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL])
+ return -EINVAL;
+
+ presp->tmpl = nla_data(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]);
+ presp->tmpl_len = nla_len(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]);
+ presp->interval = nla_get_u32(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT]);
+ return 0;
+}
+
static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
const u8 *rates)
{
@@ -5158,6 +5261,22 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) {
+ err = nl80211_parse_fils_discovery(rdev,
+ info->attrs[NL80211_ATTR_FILS_DISCOVERY],
+ &params);
+ if (err)
+ goto out;
+ }
+
+ if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) {
+ err = nl80211_parse_unsol_bcast_probe_resp(
+ rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
+ &params);
+ if (err)
+ return err;
+ }
+
nl80211_calculate_ap_params(&params);
if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 0ab7808fcec8..6043a9d33d61 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1617,9 +1617,11 @@ __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
{
const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
const struct ieee80211_reg_rule *reg_rule = NULL;
+ const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20};
+ int i = ARRAY_SIZE(bws) - 1;
u32 bw;
- for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+ for (bw = MHZ_TO_KHZ(bws[i]); bw >= min_bw; bw = MHZ_TO_KHZ(bws[i--])) {
reg_rule = freq_reg_info_regd(center_freq, regd, bw);
if (!IS_ERR(reg_rule))
return reg_rule;
@@ -1631,7 +1633,9 @@ __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
u32 center_freq)
{
- return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
+ u32 min_bw = center_freq < MHZ_TO_KHZ(1000) ? 1 : 20;
+
+ return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(min_bw));
}
EXPORT_SYMBOL(freq_reg_info);
@@ -1659,6 +1663,7 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
{
const struct ieee80211_freq_range *freq_range = NULL;
u32 max_bandwidth_khz, center_freq_khz, bw_flags = 0;
+ bool is_s1g = chan->band == NL80211_BAND_S1GHZ;
freq_range = &reg_rule->freq_range;
@@ -1678,16 +1683,57 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
MHZ_TO_KHZ(20)))
bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
- if (max_bandwidth_khz < MHZ_TO_KHZ(80))
- bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(160))
- bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ if (is_s1g) {
+ /* S1G is strict about non-overlapping channels. We can
+ * calculate which bandwidth is allowed per channel by finding
+ * the largest bandwidth that cleanly divides the freq_range.
+ */
+ int edge_offset;
+ int ch_bw = max_bandwidth_khz;
+
+ while (ch_bw) {
+ edge_offset = (center_freq_khz - ch_bw / 2) -
+ freq_range->start_freq_khz;
+ if (edge_offset % ch_bw == 0) {
+ switch (KHZ_TO_MHZ(ch_bw)) {
+ case 1:
+ bw_flags |= IEEE80211_CHAN_1MHZ;
+ break;
+ case 2:
+ bw_flags |= IEEE80211_CHAN_2MHZ;
+ break;
+ case 4:
+ bw_flags |= IEEE80211_CHAN_4MHZ;
+ break;
+ case 8:
+ bw_flags |= IEEE80211_CHAN_8MHZ;
+ break;
+ case 16:
+ bw_flags |= IEEE80211_CHAN_16MHZ;
+ break;
+ default:
+ /* If we got here, no bandwidths fit on
+ * this frequency, i.e. band edge.
+ */
+ bw_flags |= IEEE80211_CHAN_DISABLED;
+ break;
+ }
+ break;
+ }
+ ch_bw /= 2;
+ }
+ } else {
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
+ bw_flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
+ bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ }
return bw_flags;
}
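A quick worked example of the clean-division rule above, with hypothetical numbers: a rule starting at 902.000 MHz with a 2 MHz max bandwidth, checked for a channel centred at 903.500 MHz. At 2 MHz the channel edge sits 500 kHz into the range (not divisible), so the loop halves to 1 MHz, where the 1000 kHz offset divides cleanly:

#include <stdio.h>

int main(void)
{
	int start_khz = 902000, center_khz = 903500;	/* hypothetical rule/channel */
	int ch_bw = 2000;				/* max_bandwidth, in kHz */

	while (ch_bw) {
		int edge = (center_khz - ch_bw / 2) - start_khz;

		if (edge % ch_bw == 0) {
			/* 2000 fails (edge 500); 1000 succeeds (edge 1000) */
			printf("channel fits %d kHz\n", ch_bw);
			break;
		}
		ch_bw /= 2;
	}
	return 0;
}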
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 4a9ff9ef513f..f01746894a4e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -95,7 +95,7 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band)
/* see 802.11ax D6.1 27.3.23.2 */
if (chan == 2)
return MHZ_TO_KHZ(5935);
- if (chan <= 253)
+ if (chan <= 233)
return MHZ_TO_KHZ(5950 + chan * 5);
break;
case NL80211_BAND_60GHZ:
@@ -111,6 +111,33 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band)
}
EXPORT_SYMBOL(ieee80211_channel_to_freq_khz);
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan)
+{
+ if (WARN_ON(!chan || chan->band != NL80211_BAND_S1GHZ))
+ return NL80211_CHAN_WIDTH_20_NOHT;
+
+ /* S1G defines a single allowed channel width per channel.
+ * Extract that width here.
+ */
+ if (chan->flags & IEEE80211_CHAN_1MHZ)
+ return NL80211_CHAN_WIDTH_1;
+ else if (chan->flags & IEEE80211_CHAN_2MHZ)
+ return NL80211_CHAN_WIDTH_2;
+ else if (chan->flags & IEEE80211_CHAN_4MHZ)
+ return NL80211_CHAN_WIDTH_4;
+ else if (chan->flags & IEEE80211_CHAN_8MHZ)
+ return NL80211_CHAN_WIDTH_8;
+ else if (chan->flags & IEEE80211_CHAN_16MHZ)
+ return NL80211_CHAN_WIDTH_16;
+
+ pr_err("unknown channel width for channel at %dKHz?\n",
+ ieee80211_channel_to_khz(chan));
+
+ return NL80211_CHAN_WIDTH_1;
+}
+EXPORT_SYMBOL(ieee80211_s1g_channel_width);
+
int ieee80211_freq_khz_to_channel(u32 freq)
{
/* TODO: just handle MHz for now */
@@ -399,6 +426,11 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
{
unsigned int hdrlen = 24;
+ if (ieee80211_is_ext(fc)) {
+ hdrlen = 4;
+ goto out;
+ }
+
if (ieee80211_is_data(fc)) {
if (ieee80211_has_a4(fc))
hdrlen = 30;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a7227b447228..56d052bc65cb 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -140,10 +140,10 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
+ u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
- u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
u64 npgs, addr = mr->addr, size = mr->len;
- unsigned int chunks, chunks_per_page;
+ unsigned int chunks, chunks_rem;
int err;
if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
@@ -172,19 +172,18 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if ((addr + size) < addr)
return -EINVAL;
- npgs = size >> PAGE_SHIFT;
+ npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem);
+ if (npgs_rem)
+ npgs++;
if (npgs > U32_MAX)
return -EINVAL;
- chunks = (unsigned int)div_u64(size, chunk_size);
+ chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
if (chunks == 0)
return -EINVAL;
- if (!unaligned_chunks) {
- chunks_per_page = PAGE_SIZE / chunk_size;
- if (chunks < chunks_per_page || chunks % chunks_per_page)
- return -EINVAL;
- }
+ if (!unaligned_chunks && chunks_rem)
+ return -EINVAL;
if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
return -EINVAL;
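The div_u64_rem() conversion above rounds the page count up instead of truncating, and the chunk remainder now rejects sizes that are not a whole number of chunks unless unaligned chunks were requested. A tiny sketch of the round-up, with plain 64-bit division standing in for div_u64_rem():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* assumed page size, for illustration */

int main(void)
{
	uint64_t size = 10000;			/* example UMEM length */
	uint64_t npgs = size / PAGE_SIZE;	/* quotient, like div_u64_rem() */
	uint64_t rem = size % PAGE_SIZE;	/* remainder output */

	if (rem)
		npgs++;		/* round up: 10000 bytes -> 3 pages */
	printf("npgs=%" PRIu64 "\n", npgs);
	return 0;
}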
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5eb6662f562a..3895697f8540 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -33,12 +33,6 @@
static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
-{
- return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
- (xs->pool->fq || READ_ONCE(xs->fq_tmp));
-}
-
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -717,6 +711,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
dev, qid);
if (err) {
xp_destroy(xs->pool);
+ xs->pool = NULL;
sockfd_put(sock);
goto out_unlock;
}
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
index da1f73e43924..b9e896cee5bb 100644
--- a/net/xdp/xsk.h
+++ b/net/xdp/xsk.h
@@ -39,7 +39,6 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
return (struct xdp_sock *)sk;
}
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 795d7c81c0ca..e63fadd000db 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -287,7 +287,7 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
return NULL;
dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
- if (!dma_map) {
+ if (!dma_map->dma_pages) {
kfree(dma_map);
return NULL;
}
@@ -296,7 +296,7 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
dma_map->dev = dev;
dma_map->dma_need_sync = false;
dma_map->dma_pages_cnt = nr_pages;
- refcount_set(&dma_map->users, 0);
+ refcount_set(&dma_map->users, 1);
list_add(&dma_map->list, &umem->xsk_dma_list);
return dma_map;
}
@@ -369,7 +369,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
pool->dev = dma_map->dev;
pool->dma_pages_cnt = dma_map->dma_pages_cnt;
pool->dma_need_sync = dma_map->dma_need_sync;
- refcount_inc(&dma_map->users);
memcpy(pool->dma_pages, dma_map->dma_pages,
pool->dma_pages_cnt * sizeof(*pool->dma_pages));
@@ -390,6 +389,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
if (err)
return err;
+ refcount_inc(&dma_map->users);
return 0;
}
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 5bd8ea9d206a..c014217f5fa7 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -59,22 +59,20 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
du.num_pages = umem->npgs;
du.chunk_size = umem->chunk_size;
du.headroom = umem->headroom;
- du.ifindex = pool->netdev ? pool->netdev->ifindex : 0;
- du.queue_id = pool->queue_id;
+ du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
+ du.queue_id = pool ? pool->queue_id : 0;
du.flags = 0;
if (umem->zc)
du.flags |= XDP_DU_F_ZEROCOPY;
du.refs = refcount_read(&umem->users);
err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
-
- if (!err && pool->fq)
+ if (!err && pool && pool->fq)
err = xsk_diag_put_ring(pool->fq,
XDP_DIAG_UMEM_FILL_RING, nlskb);
- if (!err && pool->cq) {
- err = xsk_diag_put_ring(pool->cq, XDP_DIAG_UMEM_COMPLETION_RING,
- nlskb);
- }
+ if (!err && pool && pool->cq)
+ err = xsk_diag_put_ring(pool->cq,
+ XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
return err;
}
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 2a4fd6677155..0c5df593bc56 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -185,11 +185,6 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
xs = (struct xdp_sock *)sock->sk;
- if (!xsk_is_setup_for_bpf_map(xs)) {
- sockfd_put(sock);
- return -EOPNOTSUPP;
- }
-
map_entry = &m->xsk_map[i];
node = xsk_map_node_alloc(m, map_entry);
if (IS_ERR(node)) {
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 034800c4d1e6..b2f29bc8dc43 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -50,4 +50,5 @@ xdp_rxq_info
xdp_sample_pkts
xdp_tx_iptunnel
xdpsock
+xsk_fwd
testfile.img
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 4dbee7427d47..7793f6a6ae7e 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -29,8 +29,8 @@ int main(int argc, char **argv)
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
struct bpf_program *prog;
struct bpf_object *obj;
+ const char *section;
char filename[256];
- const char *title;
FILE *f;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
@@ -58,8 +58,8 @@ int main(int argc, char **argv)
bpf_object__for_each_program(prog, obj) {
fd = bpf_program__fd(prog);
- title = bpf_program__title(prog, false);
- if (sscanf(title, "socket/%d", &key) != 1) {
+ section = bpf_program__section_name(prog);
+ if (sscanf(section, "socket/%d", &key) != 1) {
fprintf(stderr, "ERROR: finding prog failed\n");
goto cleanup;
}
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
index 847da9284fa8..f090d0dc60d6 100644
--- a/samples/bpf/spintest_user.c
+++ b/samples/bpf/spintest_user.c
@@ -17,7 +17,7 @@ int main(int ac, char **argv)
long key, next_key, value;
struct bpf_program *prog;
int map_fd, i, j = 0;
- const char *title;
+ const char *section;
struct ksym *sym;
if (setrlimit(RLIMIT_MEMLOCK, &r)) {
@@ -51,8 +51,8 @@ int main(int ac, char **argv)
}
bpf_object__for_each_program(prog, obj) {
- title = bpf_program__title(prog, false);
- if (sscanf(title, "kprobe/%s", symbol) != 1)
+ section = bpf_program__section_name(prog);
+ if (sscanf(section, "kprobe/%s", symbol) != 1)
continue;
/* Attach prog only when symbol exists */
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c
index 8def45c5b697..b0200c8eac09 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -103,10 +103,9 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
return result ? *result : -ENOENT;
}
-SEC("kprobe/" SYSCALL(sys_connect))
+SEC("kprobe/__sys_connect")
int trace_sys_connect(struct pt_regs *ctx)
{
- struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
struct sockaddr_in6 *in6;
u16 test_case, port, dst6[8];
int addrlen, ret, inline_ret, ret_key = 0;
@@ -114,8 +113,8 @@ int trace_sys_connect(struct pt_regs *ctx)
void *outer_map, *inner_map;
bool inline_hash = false;
- in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
- addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
+ in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx);
+ addrlen = (int)PT_REGS_PARM3_CORE(ctx);
if (addrlen != sizeof(*in6))
return 0;
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 98dad57a96c4..c17d3fb5fd64 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -39,8 +39,8 @@ int main(int ac, char **argv)
struct bpf_program *prog;
struct bpf_object *obj;
int key, fd, progs_fd;
+ const char *section;
char filename[256];
- const char *title;
FILE *f;
setrlimit(RLIMIT_MEMLOCK, &r);
@@ -78,9 +78,9 @@ int main(int ac, char **argv)
}
bpf_object__for_each_program(prog, obj) {
- title = bpf_program__title(prog, false);
+ section = bpf_program__section_name(prog);
/* register only syscalls to PROG_ARRAY */
- if (sscanf(title, "kprobe/%d", &key) != 1)
+ if (sscanf(section, "kprobe/%d", &key) != 1)
continue;
fd = bpf_program__fd(prog);
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index 004c0622c913..3dd366e9474d 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -111,7 +111,7 @@ static void print_avail_progs(struct bpf_object *obj)
bpf_object__for_each_program(pos, obj) {
if (bpf_program__is_xdp(pos))
- printf(" %s\n", bpf_program__title(pos, false));
+ printf(" %s\n", bpf_program__section_name(pos));
}
}
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 4cead341ae57..b220173dbe1e 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -78,6 +78,7 @@ static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_extra_stats;
+static bool opt_quiet;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
@@ -718,6 +719,7 @@ static struct option long_options[] = {
{"tx-pkt-size", required_argument, 0, 's'},
{"tx-pkt-pattern", required_argument, 0, 'P'},
{"extra-stats", no_argument, 0, 'x'},
+ {"quiet", no_argument, 0, 'Q'},
{0, 0, 0, 0}
};
@@ -753,6 +755,7 @@ static void usage(const char *prog)
" Min size: %d, Max size %d.\n"
" -P, --tx-pkt-pattern=nPacket fill pattern. Default: 0x%x\n"
" -x, --extra-stats Display extra statistics.\n"
+ " -Q, --quiet Do not display any stats.\n"
"\n";
fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
@@ -768,7 +771,7 @@ static void parse_command_line(int argc, char **argv)
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x",
+ c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:xQ",
long_options, &option_index);
if (c == -1)
break;
@@ -852,6 +855,9 @@ static void parse_command_line(int argc, char **argv)
case 'x':
opt_extra_stats = 1;
break;
+ case 'Q':
+ opt_quiet = 1;
+ break;
default:
usage(basename(argv[0]));
}
@@ -897,6 +903,14 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
if (!xsk->outstanding_tx)
return;
+ /* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
+ * really send the packets. In zero-copy mode we do not have to do this, since Tx
+ * is driven by the NAPI loop. So as an optimization, we do not have to call
+ * sendto() all the time in zero-copy mode for l2fwd.
+ */
+ if (opt_xdp_bind_flags & XDP_COPY)
+ kick_tx(xsk);
+
ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
xsk->outstanding_tx;
@@ -1117,6 +1131,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
+ complete_tx_l2fwd(xsk, fds);
if (xsk_ring_prod__needs_wakeup(&xsk->tx))
kick_tx(xsk);
ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
@@ -1277,9 +1292,11 @@ int main(int argc, char **argv)
setlocale(LC_ALL, "");
- ret = pthread_create(&pt, NULL, poller, NULL);
- if (ret)
- exit_with_error(ret);
+ if (!opt_quiet) {
+ ret = pthread_create(&pt, NULL, poller, NULL);
+ if (ret)
+ exit_with_error(ret);
+ }
prev_time = get_nsecs();
start_time = prev_time;
@@ -1293,7 +1310,8 @@ int main(int argc, char **argv)
benchmark_done = true;
- pthread_join(pt, NULL);
+ if (!opt_quiet)
+ pthread_join(pt, NULL);
xdpsock_cleanup();
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 60d4a79674b6..504d2e431c60 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2639,8 +2639,8 @@ sub process {
# Check if the commit log has what seems like a diff which can confuse patch
if ($in_commit_log && !$commit_log_has_diff &&
- (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
+ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
$line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
$line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
ERROR("DIFF_IN_COMMIT_MSG",
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index d4ca8297364f..8454649b17bd 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -66,23 +66,6 @@ static inline void xfwrite(const void *str, size_t len, size_t count, FILE *out)
fprintf(stderr, "Error in writing or end of file.\n");
}
-/* menu.c */
-void _menu_init(void);
-void menu_warn(struct menu *menu, const char *fmt, ...);
-struct menu *menu_add_menu(void);
-void menu_end_menu(void);
-void menu_add_entry(struct symbol *sym);
-void menu_add_dep(struct expr *dep);
-void menu_add_visibility(struct expr *dep);
-struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
-void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
-void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
-void menu_add_option_modules(void);
-void menu_add_option_defconfig_list(void);
-void menu_add_option_allnoconfig_y(void);
-void menu_finalize(struct menu *parent);
-void menu_set_type(int type);
-
/* util.c */
struct file *file_lookup(const char *name);
void *xmalloc(size_t size);
@@ -109,6 +92,36 @@ void str_append(struct gstr *gs, const char *s);
void str_printf(struct gstr *gs, const char *fmt, ...);
const char *str_get(struct gstr *gs);
+/* menu.c */
+void _menu_init(void);
+void menu_warn(struct menu *menu, const char *fmt, ...);
+struct menu *menu_add_menu(void);
+void menu_end_menu(void);
+void menu_add_entry(struct symbol *sym);
+void menu_add_dep(struct expr *dep);
+void menu_add_visibility(struct expr *dep);
+struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
+void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
+void menu_add_symbol(enum prop_type type, struct symbol *sym, struct expr *dep);
+void menu_add_option_modules(void);
+void menu_add_option_defconfig_list(void);
+void menu_add_option_allnoconfig_y(void);
+void menu_finalize(struct menu *parent);
+void menu_set_type(int type);
+
+extern struct menu rootmenu;
+
+bool menu_is_empty(struct menu *menu);
+bool menu_is_visible(struct menu *menu);
+bool menu_has_prompt(struct menu *menu);
+const char *menu_get_prompt(struct menu *menu);
+struct menu *menu_get_root_menu(struct menu *menu);
+struct menu *menu_get_parent_menu(struct menu *menu);
+bool menu_has_help(struct menu *menu);
+const char *menu_get_help(struct menu *menu);
+struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
+void menu_get_ext_help(struct menu *menu, struct gstr *help);
+
/* symbol.c */
void sym_clear_all_valid(void);
struct symbol *sym_choice_default(struct symbol *sym);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index f9ab98238aef..9e81be33c40f 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -12,20 +12,6 @@ bool conf_get_changed(void);
void conf_set_changed_callback(void (*fn)(void));
void conf_set_message_callback(void (*fn)(const char *s));
-/* menu.c */
-extern struct menu rootmenu;
-
-bool menu_is_empty(struct menu *menu);
-bool menu_is_visible(struct menu *menu);
-bool menu_has_prompt(struct menu *menu);
-const char * menu_get_prompt(struct menu *menu);
-struct menu * menu_get_root_menu(struct menu *menu);
-struct menu * menu_get_parent_menu(struct menu *menu);
-bool menu_has_help(struct menu *menu);
-const char * menu_get_help(struct menu *menu);
-struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
-void menu_get_ext_help(struct menu *menu, struct gstr *help);
-
/* symbol.c */
extern struct symbol * symbol_hash[SYMBOL_HASHSIZE];
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 8638785328a7..8ce624a3b54b 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -1108,6 +1108,11 @@ void ConfigInfoView::menuInfo(void)
if (showDebug())
stream << debug_info(sym);
+ struct gstr help_gstr = str_new();
+
+ menu_get_ext_help(_menu, &help_gstr);
+ stream << print_filter(str_get(&help_gstr));
+ str_free(&help_gstr);
} else if (_menu->prompt) {
stream << "<big><b>";
stream << print_filter(_menu->prompt->text);
@@ -1119,11 +1124,11 @@ void ConfigInfoView::menuInfo(void)
expr_print_help, &stream, E_NONE);
stream << "<br><br>";
}
+
+ stream << "defined at " << _menu->file->name << ":"
+ << _menu->lineno << "<br><br>";
}
}
- if (showDebug())
- stream << "defined at " << _menu->file->name << ":"
- << _menu->lineno << "<br><br>";
setText(info);
}
@@ -1276,7 +1281,7 @@ void ConfigInfoView::clicked(const QUrl &url)
}
free(result);
- delete data;
+ delete[] data;
}
void ConfigInfoView::contextMenuEvent(QContextMenuEvent *event)
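
The str_new()/str_get()/str_free() calls in the qconf hunk above follow the
lifecycle of kconfig's growable string, struct gstr. A minimal userspace
re-sketch of that lifecycle (illustrative only; the real helpers live in
scripts/kconfig/util.c, and error handling is omitted):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct gstr { size_t len; char *s; };

    static struct gstr str_new(void)
    {
        struct gstr gs = { 64, calloc(1, 64) };
        return gs;
    }

    static void str_append(struct gstr *gs, const char *s)
    {
        size_t need = strlen(gs->s) + strlen(s) + 1;

        if (need > gs->len) {           /* grow on demand */
            gs->len = need * 2;
            gs->s = realloc(gs->s, gs->len);
        }
        strcat(gs->s, s);
    }

    static const char *str_get(struct gstr *gs) { return gs->s; }
    static void str_free(struct gstr *gs) { free(gs->s); gs->s = NULL; }

    int main(void)
    {
        struct gstr help = str_new();

        str_append(&help, "CONFIG_FOO help text");
        printf("%s\n", str_get(&help));   /* read, then release */
        str_free(&help);
        return 0;
    }
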
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index e6e2d9e5ff48..dbde59d343b1 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -341,9 +341,9 @@ fi
vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}
# fill in BTF IDs
-if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
-info BTFIDS vmlinux
-${RESOLVE_BTFIDS} vmlinux
+if [ -n "${CONFIG_DEBUG_INFO_BTF}" -a -n "${CONFIG_BPF}" ]; then
+ info BTFIDS vmlinux
+ ${RESOLVE_BTFIDS} vmlinux
fi
if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 32d3f53af10b..850f4ccb6afc 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -26,7 +26,11 @@ else
fi
# ignore userspace tools
-ignore="$ignore ( -path ${tree}tools ) -prune -o"
+if [ -n "$COMPILED_SOURCE" ]; then
+ ignore="$ignore ( -path ./tools ) -prune -o"
+else
+ ignore="$ignore ( -path ${tree}tools ) -prune -o"
+fi
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
@@ -92,7 +96,7 @@ all_sources()
all_compiled_sources()
{
realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) \
- include/generated/autoconf.h $(find -name "*.cmd" -exec \
+ include/generated/autoconf.h $(find $ignore -name "*.cmd" -exec \
grep -Poh '(?(?=^source_.* \K).*|(?=^ \K\S).*(?= \\))' {} \+ |
awk '!a[$0]++') | sort -u
}
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 43ab0ad45c1b..04375df52fc9 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -354,7 +354,8 @@ static bool match_exception_partial(struct list_head *exceptions, short type,
{
struct dev_exception_item *ex;
- list_for_each_entry_rcu(ex, exceptions, list) {
+ list_for_each_entry_rcu(ex, exceptions, list,
+ lockdep_is_held(&devcgroup_mutex)) {
if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
continue;
if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
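
The argument added to list_for_each_entry_rcu() above is its optional lockdep
condition: this walk is legal either inside an RCU read-side section or with
devcgroup_mutex held, and the checker must accept both. A hedged userspace
mock of that either/or rule (hypothetical flags, not the kernel macro):

    #include <assert.h>
    #include <stdio.h>

    static int in_rcu_read_section;  /* stand-in for rcu_read_lock_held() */
    static int devcg_mutex_held;     /* stand-in for lockdep_is_held(&m) */

    static void check_rcu_walk(int extra_cond)
    {
        /* fires unless in an RCU section OR the extra condition holds */
        assert(in_rcu_read_section || extra_cond);
    }

    int main(void)
    {
        devcg_mutex_held = 1;             /* update path: mutex only */
        check_rcu_walk(devcg_mutex_held); /* ok thanks to the condition */

        devcg_mutex_held = 0;
        in_rcu_read_section = 1;          /* reader path: RCU only */
        check_rcu_walk(devcg_mutex_held); /* still ok */

        puts("both walks allowed");
        return 0;
    }
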
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c521a1f17096..85e207173f5d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5993,6 +5993,40 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
}
+
+static void alc294_gx502_toggle_output(struct hda_codec *codec,
+ struct hda_jack_callback *cb)
+{
+	/* The Windows driver sets the codec up in a very different way and
+	 * appears to leave 0x10 = 0x8a20 set. On Linux we need to toggle it.
+	 */
+ if (snd_hda_jack_detect_state(codec, 0x21) == HDA_JACK_PRESENT)
+ alc_write_coef_idx(codec, 0x10, 0x8a20);
+ else
+ alc_write_coef_idx(codec, 0x10, 0x0a20);
+}
+
+static void alc294_fixup_gx502_hp(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ /* Pin 0x21: headphones/headset mic */
+ if (!is_jack_detectable(codec, 0x21))
+ return;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_jack_detect_enable_callback(codec, 0x21,
+ alc294_gx502_toggle_output);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+		/* Make sure we start in a correct state, e.g. if
+		 * headphones were plugged in before the system was powered up
+		 */
+ alc294_gx502_toggle_output(codec, NULL);
+ break;
+ }
+}
+
static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -6173,6 +6207,9 @@ enum {
ALC285_FIXUP_THINKPAD_HEADSET_JACK,
ALC294_FIXUP_ASUS_HPE,
ALC294_FIXUP_ASUS_COEF_1B,
+ ALC294_FIXUP_ASUS_GX502_HP,
+ ALC294_FIXUP_ASUS_GX502_PINS,
+ ALC294_FIXUP_ASUS_GX502_VERBS,
ALC285_FIXUP_HP_GPIO_LED,
ALC285_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_MUTE_LED,
@@ -6191,6 +6228,7 @@ enum {
ALC269_FIXUP_LEMOTE_A1802,
ALC269_FIXUP_LEMOTE_A190X,
ALC256_FIXUP_INTEL_NUC8_RUGGED,
+ ALC255_FIXUP_XIAOMI_HEADSET_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -7338,6 +7376,33 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
+ [ALC294_FIXUP_ASUS_GX502_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11050 }, /* front HP mic */
+ { 0x1a, 0x01a11830 }, /* rear external mic */
+ { 0x21, 0x03211020 }, /* front HP out */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_GX502_VERBS
+ },
+ [ALC294_FIXUP_ASUS_GX502_VERBS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* set 0x15 to HP-OUT ctrl */
+ { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
+ /* unmute the 0x15 amp */
+ { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_GX502_HP
+ },
+ [ALC294_FIXUP_ASUS_GX502_HP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc294_fixup_gx502_hp,
+ },
[ALC294_FIXUP_ASUS_COEF_1B] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -7527,6 +7592,16 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC255_FIXUP_XIAOMI_HEADSET_MIC] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC289_FIXUP_ASUS_GA401
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7711,6 +7786,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -7823,6 +7899,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+ SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
@@ -8000,6 +8077,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
{.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ {.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
{}
};
#define ALC225_STANDARD_PINS \
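
The GX502 fixup chain above reduces to a jack-detect callback that writes one
of two COEF values, plus an INIT-time invocation so the codec starts out
consistent with the jack state. A standalone mock of that control flow (fake
jack state and register; the 0x8a20/0x0a20 values come from the hunk):

    #include <stdio.h>

    static int jack_present;       /* snd_hda_jack_detect_state() stand-in */
    static unsigned int coef_0x10; /* COEF index 0x10 stand-in */

    static void gx502_toggle(void)
    {
        coef_0x10 = jack_present ? 0x8a20 : 0x0a20;
    }

    int main(void)
    {
        gx502_toggle();            /* HDA_FIXUP_ACT_INIT: settle state */
        printf("boot: coef=%#x\n", coef_0x10);

        jack_present = 1;          /* plug event fires the callback */
        gx502_toggle();
        printf("plugged: coef=%#x\n", coef_0x10);
        return 0;
    }
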
diff --git a/sound/soc/codecs/max98373-sdw.c b/sound/soc/codecs/max98373-sdw.c
index 5fe724728e84..e4675cfff7b2 100644
--- a/sound/soc/codecs/max98373-sdw.c
+++ b/sound/soc/codecs/max98373-sdw.c
@@ -838,8 +838,8 @@ static int max98373_sdw_probe(struct sdw_slave *slave,
/* Regmap Initialization */
regmap = devm_regmap_init_sdw(slave, &max98373_sdw_regmap);
- if (!regmap)
- return -EINVAL;
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
return max98373_init(slave, regmap);
}
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 5e445fee4ef5..821e7395f90f 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -306,6 +306,13 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(dai->component);
int ret;
+ /*
+	 * Some sound cards set 0 Hz as a reset value,
+	 * but 0 Hz is impossible to set. Ignore it here
+ */
+ if (freq == 0)
+ return 0;
+
if (freq > PCM3168A_MAX_SYSCLK)
return -EINVAL;
diff --git a/sound/soc/codecs/rt1308-sdw.c b/sound/soc/codecs/rt1308-sdw.c
index b0ba0d2acbdd..56e952a904a3 100644
--- a/sound/soc/codecs/rt1308-sdw.c
+++ b/sound/soc/codecs/rt1308-sdw.c
@@ -684,8 +684,8 @@ static int rt1308_sdw_probe(struct sdw_slave *slave,
/* Regmap Initialization */
regmap = devm_regmap_init_sdw(slave, &rt1308_sdw_regmap);
- if (!regmap)
- return -EINVAL;
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
rt1308_sdw_init(&slave->dev, regmap, slave);
diff --git a/sound/soc/codecs/rt700-sdw.c b/sound/soc/codecs/rt700-sdw.c
index 4d14048d1197..1d24bf040718 100644
--- a/sound/soc/codecs/rt700-sdw.c
+++ b/sound/soc/codecs/rt700-sdw.c
@@ -452,8 +452,8 @@ static int rt700_sdw_probe(struct sdw_slave *slave,
/* Regmap Initialization */
sdw_regmap = devm_regmap_init_sdw(slave, &rt700_sdw_regmap);
- if (!sdw_regmap)
- return -EINVAL;
+ if (IS_ERR(sdw_regmap))
+ return PTR_ERR(sdw_regmap);
regmap = devm_regmap_init(&slave->dev, NULL,
&slave->dev, &rt700_regmap);
diff --git a/sound/soc/codecs/rt711-sdw.c b/sound/soc/codecs/rt711-sdw.c
index 45b928954b58..7efff130a638 100644
--- a/sound/soc/codecs/rt711-sdw.c
+++ b/sound/soc/codecs/rt711-sdw.c
@@ -452,8 +452,8 @@ static int rt711_sdw_probe(struct sdw_slave *slave,
/* Regmap Initialization */
sdw_regmap = devm_regmap_init_sdw(slave, &rt711_sdw_regmap);
- if (!sdw_regmap)
- return -EINVAL;
+ if (IS_ERR(sdw_regmap))
+ return PTR_ERR(sdw_regmap);
regmap = devm_regmap_init(&slave->dev, NULL,
&slave->dev, &rt711_regmap);
diff --git a/sound/soc/codecs/rt715-sdw.c b/sound/soc/codecs/rt715-sdw.c
index d11b23d6b240..68a36739f1b0 100644
--- a/sound/soc/codecs/rt715-sdw.c
+++ b/sound/soc/codecs/rt715-sdw.c
@@ -527,8 +527,8 @@ static int rt715_sdw_probe(struct sdw_slave *slave,
/* Regmap Initialization */
sdw_regmap = devm_regmap_init_sdw(slave, &rt715_sdw_regmap);
- if (!sdw_regmap)
- return -EINVAL;
+ if (IS_ERR(sdw_regmap))
+ return PTR_ERR(sdw_regmap);
regmap = devm_regmap_init(&slave->dev, NULL, &slave->dev,
&rt715_regmap);
diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c
index 5cd50d841177..8efe20605f9b 100644
--- a/sound/soc/codecs/tlv320adcx140.c
+++ b/sound/soc/codecs/tlv320adcx140.c
@@ -842,6 +842,18 @@ static int adcx140_codec_probe(struct snd_soc_component *component)
if (ret)
goto out;
+ if (adcx140->supply_areg == NULL)
+ sleep_cfg_val |= ADCX140_AREG_INTERNAL;
+
+ ret = regmap_write(adcx140->regmap, ADCX140_SLEEP_CFG, sleep_cfg_val);
+ if (ret) {
+ dev_err(adcx140->dev, "setting sleep config failed %d\n", ret);
+ goto out;
+ }
+
+ /* 8.4.3: Wait >= 1ms after entering active mode. */
+ usleep_range(1000, 100000);
+
pdm_count = device_property_count_u32(adcx140->dev,
"ti,pdm-edge-select");
if (pdm_count <= ADCX140_NUM_PDM_EDGES && pdm_count > 0) {
@@ -889,18 +901,6 @@ static int adcx140_codec_probe(struct snd_soc_component *component)
if (ret)
goto out;
- if (adcx140->supply_areg == NULL)
- sleep_cfg_val |= ADCX140_AREG_INTERNAL;
-
- ret = regmap_write(adcx140->regmap, ADCX140_SLEEP_CFG, sleep_cfg_val);
- if (ret) {
- dev_err(adcx140->dev, "setting sleep config failed %d\n", ret);
- goto out;
- }
-
- /* 8.4.3: Wait >= 1ms after entering active mode. */
- usleep_range(1000, 100000);
-
ret = regmap_update_bits(adcx140->regmap, ADCX140_BIAS_CFG,
ADCX140_MIC_BIAS_VAL_MSK |
ADCX140_MIC_BIAS_VREF_MSK, bias_cfg);
@@ -980,6 +980,8 @@ static int adcx140_i2c_probe(struct i2c_client *i2c,
if (!adcx140)
return -ENOMEM;
+ adcx140->dev = &i2c->dev;
+
adcx140->gpio_reset = devm_gpiod_get_optional(adcx140->dev,
"reset", GPIOD_OUT_LOW);
if (IS_ERR(adcx140->gpio_reset))
@@ -1007,7 +1009,7 @@ static int adcx140_i2c_probe(struct i2c_client *i2c,
ret);
return ret;
}
- adcx140->dev = &i2c->dev;
+
i2c_set_clientdata(i2c, adcx140);
return devm_snd_soc_register_component(&i2c->dev,
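
The adcx140 hunks are an initialization-ordering fix: adcx140->dev was
consumed by devm_gpiod_get_optional() before it was assigned, so the early
calls saw a NULL device. A toy reproduction of that bug class (hypothetical
struct, nothing driver-specific):

    #include <stdio.h>

    struct priv { const char *dev; };

    static void use_dev(const struct priv *p)
    {
        /* before the fix this ran with p->dev still unset */
        printf("dev = %s\n", p->dev ? p->dev : "(null)");
    }

    int main(void)
    {
        struct priv p = { 0 };

        p.dev = "i2c-0";   /* assign first, as the patch now does */
        use_dev(&p);
        return 0;
    }
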
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 038be667c1a6..fc9ea198ac79 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3514,6 +3514,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
return -EINVAL;
}
+ pm_runtime_get_sync(component->dev);
+
switch (micbias) {
case 1:
micdet = &wm8994->micdet[0];
@@ -3561,6 +3563,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
snd_soc_dapm_sync(dapm);
+ pm_runtime_put(component->dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(wm8994_mic_detect);
@@ -3932,6 +3936,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
return -EINVAL;
}
+ pm_runtime_get_sync(component->dev);
+
if (jack) {
snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
snd_soc_dapm_sync(dapm);
@@ -4000,6 +4006,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
snd_soc_dapm_sync(dapm);
}
+ pm_runtime_put(component->dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(wm8958_mic_detect);
@@ -4193,11 +4201,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
wm8994->hubs.dcs_readback_mode = 2;
break;
}
+ wm8994->hubs.micd_scthr = true;
break;
case WM8958:
wm8994->hubs.dcs_readback_mode = 1;
wm8994->hubs.hp_startup_mode = 1;
+ wm8994->hubs.micd_scthr = true;
switch (control->revision) {
case 0:
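
The wm8994 hunks bracket mic-detect configuration with pm_runtime_get_sync()
and pm_runtime_put() so the register writes land on a powered device. A toy
refcount model of that pairing (mocked API, not the runtime-PM core):

    #include <assert.h>
    #include <stdio.h>

    static int usage_count;

    static void pm_get_mock(void) { usage_count++; }
    static void pm_put_mock(void) { assert(usage_count-- > 0); }
    static int  may_suspend(void) { return usage_count == 0; }

    static void mic_detect_config(void)
    {
        pm_get_mock();   /* registers must stay powered from here... */
        /* ... program MICBIAS / jack-detect registers ... */
        pm_put_mock();   /* ...until configuration is complete */
    }

    int main(void)
    {
        mic_detect_config();
        printf("may suspend: %d\n", may_suspend());
        return 0;
    }
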
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 891effe220fe..0c881846f485 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -1223,6 +1223,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+ if (!hubs->micd_scthr)
+ return 0;
+
snd_soc_component_update_bits(component, WM8993_MICBIAS,
WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
WM8993_MICB1_LVL | WM8993_MICB2_LVL,
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
index 4b8e5f0d6e32..988b29e63060 100644
--- a/sound/soc/codecs/wm_hubs.h
+++ b/sound/soc/codecs/wm_hubs.h
@@ -27,6 +27,7 @@ struct wm_hubs_data {
int hp_startup_mode;
int series_startup;
int no_series_update;
+ bool micd_scthr;
bool no_cache_dac_hp_direct;
struct list_head dcs_cache;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index b1cac7abdc0a..fba2c795ce0d 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -333,6 +333,17 @@ static int sst_media_open(struct snd_pcm_substream *substream,
if (ret_val < 0)
goto out_power_up;
+ /*
+	 * Make sure the period is a multiple of 1 ms to match the
+	 * firmware design. Apply the same rule to the buffer size so
+	 * that ALSA can always find a valid period size regardless of
+	 * the buffer size requested by user space.
+ */
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 48);
+ snd_pcm_hw_constraint_step(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 48);
+
/* Make sure, that the period size is always even */
snd_pcm_hw_constraint_step(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_PERIODS, 2);
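
Why a step of 48 frames pins periods to whole milliseconds: the rates this
firmware handles are multiples of 1000 Hz, and 48 is a common multiple of
their frames-per-millisecond counts. A quick arithmetic check (the rate set
here is assumed for illustration):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int rates[] = { 8000, 16000, 48000 };

        for (unsigned int i = 0; i < 3; i++) {
            unsigned int frames_per_ms = rates[i] / 1000;

            /* 48 % 8 == 48 % 16 == 48 % 48 == 0, so stepping period
             * and buffer sizes by 48 keeps both on 1 ms boundaries */
            printf("%u Hz -> %u frames/ms, 48 %% %u = %u\n",
                   rates[i], frames_per_ms, frames_per_ms,
                   48 % frames_per_ms);
        }
        return 0;
    }
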
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 479992f4e97a..fc202747ba83 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -591,6 +591,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+	{ /* MPMAN Converter 9, similar hw to the I.T.Works TW891 2-in-1 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
/* MPMAN MPWIN895CL */
.matches = {
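
Quirk entries like the MPMAN Converter 9 one above are resolved by a
first-match walk over the DMI table. A self-contained sketch of that lookup
(the flag values are invented stand-ins for the BYT_RT5640_* bits):

    #include <stdio.h>
    #include <string.h>

    struct quirk { const char *vendor, *product; unsigned long flags; };

    static const struct quirk table[] = {
        { "MPMAN", "Converter9", 0x01 | 0x02 }, /* mono spk, SSP0 AIF1 */
        { "MPMAN", "MPWIN895CL", 0x01 },
        { NULL, NULL, 0 }
    };

    static unsigned long lookup(const char *vendor, const char *product)
    {
        for (const struct quirk *q = table; q->vendor; q++)
            if (!strcmp(q->vendor, vendor) &&
                !strcmp(q->product, product))
                return q->flags;        /* first match wins */
        return 0;                       /* fall back to defaults */
    }

    int main(void)
    {
        printf("flags=%#lx\n", lookup("MPMAN", "Converter9"));
        return 0;
    }
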
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index ca4900036ead..bc50eda297ab 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -181,7 +181,7 @@ static void skl_set_hda_codec_autosuspend_delay(struct snd_soc_card *card)
struct snd_soc_dai *dai;
for_each_card_rtds(card, rtd) {
- if (!strstr(rtd->dai_link->codecs->name, "ehdaudio"))
+ if (!strstr(rtd->dai_link->codecs->name, "ehdaudio0D0"))
continue;
dai = asoc_rtd_to_codec(rtd, 0);
hda_pvt = snd_soc_component_get_drvdata(dai->component);
diff --git a/sound/soc/intel/boards/sof_maxim_common.c b/sound/soc/intel/boards/sof_maxim_common.c
index 1a6961592029..b6e63ea13d64 100644
--- a/sound/soc/intel/boards/sof_maxim_common.c
+++ b/sound/soc/intel/boards/sof_maxim_common.c
@@ -66,6 +66,10 @@ int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
int j;
int ret = 0;
+	/* set the speaker pin on playback streams only */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ return 0;
+
for_each_rtd_codec_dais(rtd, j, codec_dai) {
struct snd_soc_component *component = codec_dai->component;
struct snd_soc_dapm_context *dapm =
@@ -86,9 +90,6 @@ int max98373_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- /* Make sure no streams are active before disable pin */
- if (snd_soc_dai_active(codec_dai) != 1)
- break;
ret = snd_soc_dapm_disable_pin(dapm, pin_name);
if (!ret)
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/intel/haswell/sst-haswell-dsp.c b/sound/soc/intel/haswell/sst-haswell-dsp.c
index de80e19454c1..88c3f63bded9 100644
--- a/sound/soc/intel/haswell/sst-haswell-dsp.c
+++ b/sound/soc/intel/haswell/sst-haswell-dsp.c
@@ -243,92 +243,45 @@ static irqreturn_t hsw_irq(int irq, void *context)
return ret;
}
-#define CSR_DEFAULT_VALUE 0x8480040E
-#define ISC_DEFAULT_VALUE 0x0
-#define ISD_DEFAULT_VALUE 0x0
-#define IMC_DEFAULT_VALUE 0x7FFF0003
-#define IMD_DEFAULT_VALUE 0x7FFF0003
-#define IPCC_DEFAULT_VALUE 0x0
-#define IPCD_DEFAULT_VALUE 0x0
-#define CLKCTL_DEFAULT_VALUE 0x7FF
-#define CSR2_DEFAULT_VALUE 0x0
-#define LTR_CTRL_DEFAULT_VALUE 0x0
-#define HMD_CTRL_DEFAULT_VALUE 0x0
-
-static void hsw_set_shim_defaults(struct sst_dsp *sst)
-{
- sst_dsp_shim_write_unlocked(sst, SST_CSR, CSR_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_ISRX, ISC_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_ISRD, ISD_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_IMRX, IMC_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_IMRD, IMD_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_IPCX, IPCC_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_IPCD, IPCD_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_CLKCTL, CLKCTL_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_CSR2, CSR2_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_LTRC, LTR_CTRL_DEFAULT_VALUE);
- sst_dsp_shim_write_unlocked(sst, SST_HMDC, HMD_CTRL_DEFAULT_VALUE);
-}
-
-/* all clock-gating minus DCLCGE and DTCGE */
-#define SST_VDRTCL2_CG_OTHER 0xB7D
-
static void hsw_set_dsp_D3(struct sst_dsp *sst)
{
+ u32 val;
u32 reg;
- /* disable clock core gating */
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg &= ~(SST_VDRTCL2_DCLCGE);
+ reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* stall, reset and set 24MHz XOSC */
- sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
- SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST,
- SST_CSR_24MHZ_LPCS | SST_CSR_STALL | SST_CSR_RST);
-
- /* DRAM power gating all */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
- reg |= SST_VDRTCL0_ISRAMPGE_MASK |
- SST_VDRTCL0_DSRAMPGE_MASK;
- reg &= ~(SST_VDRTCL0_D3SRAMPGD);
- reg |= SST_VDRTCL0_D3PGD;
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
- udelay(50);
+ /* enable power gating and switch off DRAM & IRAM blocks */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ val |= SST_VDRTCL0_DSRAMPGE_MASK |
+ SST_VDRTCL0_ISRAMPGE_MASK;
+ val &= ~(SST_VDRTCL0_D3PGD | SST_VDRTCL0_D3SRAMPGD);
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL0);
- /* PLL shutdown enable */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_APLLSE_MASK;
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ /* switch off audio PLL */
+ val = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
+ val |= SST_VDRTCL2_APLLSE_MASK;
+ writel(val, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* disable MCLK */
+	/* disable MCLK (clkctl.smos = 0) */
sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
- SST_CLKCTL_MASK, 0);
-
- /* switch clock gating */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_CG_OTHER;
- reg &= ~(SST_VDRTCL2_DTCGE);
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* enable DTCGE separatelly */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_DTCGE;
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ SST_CLKCTL_MASK, 0);
- /* set shim defaults */
- hsw_set_shim_defaults(sst);
-
- /* set D3 */
- reg = readl(sst->addr.pci_cfg + SST_PMCS);
- reg |= SST_PMCS_PS_MASK;
- writel(reg, sst->addr.pci_cfg + SST_PMCS);
+ /* Set D3 state, delay 50 us */
+ val = readl(sst->addr.pci_cfg + SST_PMCS);
+ val |= SST_PMCS_PS_MASK;
+ writel(val, sst->addr.pci_cfg + SST_PMCS);
udelay(50);
- /* enable clock core gating */
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_DCLCGE;
+ reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+
udelay(50);
+
}
static void hsw_reset(struct sst_dsp *sst)
@@ -346,62 +299,75 @@ static void hsw_reset(struct sst_dsp *sst)
SST_CSR_RST | SST_CSR_STALL, SST_CSR_STALL);
}
-/* recommended CSR state for power-up */
-#define SST_CSR_D0_MASK (0x18A09C0C | SST_CSR_DCS_MASK)
-
static int hsw_set_dsp_D0(struct sst_dsp *sst)
{
- u32 reg;
+ int tries = 10;
+ u32 reg, fw_dump_bit;
- /* disable clock core gating */
+ /* Disable core clock gating (VDRTCTL2.DCLCGE = 0) */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg &= ~(SST_VDRTCL2_DCLCGE);
+ reg &= ~(SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE);
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* switch clock gating */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_CG_OTHER;
- reg &= ~(SST_VDRTCL2_DTCGE);
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
+ /* Disable D3PG (VDRTCTL0.D3PGD = 1) */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ reg |= SST_VDRTCL0_D3PGD;
+ writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
- /* set D0 */
+ /* Set D0 state */
reg = readl(sst->addr.pci_cfg + SST_PMCS);
- reg &= ~(SST_PMCS_PS_MASK);
+ reg &= ~SST_PMCS_PS_MASK;
writel(reg, sst->addr.pci_cfg + SST_PMCS);
- /* DRAM power gating none */
- reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
- reg &= ~(SST_VDRTCL0_ISRAMPGE_MASK |
- SST_VDRTCL0_DSRAMPGE_MASK);
- reg |= SST_VDRTCL0_D3SRAMPGD;
- reg |= SST_VDRTCL0_D3PGD;
- writel(reg, sst->addr.pci_cfg + SST_VDRTCTL0);
- mdelay(10);
+ /* check that ADSP shim is enabled */
+	/* poll until the DSP reports the D0 power state */
+ reg = readl(sst->addr.pci_cfg + SST_PMCS) & SST_PMCS_PS_MASK;
+ if (reg == 0)
+ goto finish;
+
+ msleep(1);
+ }
+
+ return -ENODEV;
- /* set shim defaults */
- hsw_set_shim_defaults(sst);
+finish:
+ /* select SSP1 19.2MHz base clock, SSP clock 0, turn off Low Power Clock */
+ sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
+ SST_CSR_S1IOCS | SST_CSR_SBCS1 | SST_CSR_LPCS, 0x0);
+
+	/* stall DSP core, set clk to 192/96 MHz */
+ sst_dsp_shim_update_bits_unlocked(sst,
+ SST_CSR, SST_CSR_STALL | SST_CSR_DCS_MASK,
+ SST_CSR_STALL | SST_CSR_DCS(4));
- /* restore MCLK */
+ /* Set 24MHz MCLK, prevent local clock gating, enable SSP0 clock */
sst_dsp_shim_update_bits_unlocked(sst, SST_CLKCTL,
- SST_CLKCTL_MASK, SST_CLKCTL_MASK);
+ SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0,
+ SST_CLKCTL_MASK | SST_CLKCTL_DCPLCG | SST_CLKCTL_SCOE0);
- /* PLL shutdown disable */
+ /* Stall and reset core, set CSR */
+ hsw_reset(sst);
+
+ /* Enable core clock gating (VDRTCTL2.DCLCGE = 1), delay 50 us */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg &= ~(SST_VDRTCL2_APLLSE_MASK);
+ reg |= SST_VDRTCL2_DCLCGE | SST_VDRTCL2_DTCGE;
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- sst_dsp_shim_update_bits_unlocked(sst, SST_CSR,
- SST_CSR_D0_MASK, SST_CSR_SBCS0 | SST_CSR_SBCS1 |
- SST_CSR_STALL | SST_CSR_DCS(4));
udelay(50);
- /* enable clock core gating */
+ /* switch on audio PLL */
reg = readl(sst->addr.pci_cfg + SST_VDRTCTL2);
- reg |= SST_VDRTCL2_DCLCGE;
+ reg &= ~SST_VDRTCL2_APLLSE_MASK;
writel(reg, sst->addr.pci_cfg + SST_VDRTCTL2);
- /* clear reset */
- sst_dsp_shim_update_bits_unlocked(sst, SST_CSR, SST_CSR_RST, 0);
+	/* Set default power-gating control: enable power gating for all
+	 * blocks, i.e. they cannot be accessed; enable each block before accessing it. */
+ reg = readl(sst->addr.pci_cfg + SST_VDRTCTL0);
+ reg |= SST_VDRTCL0_DSRAMPGE_MASK | SST_VDRTCL0_ISRAMPGE_MASK;
+ /* for D0, always enable the block(DSRAM[0]) used for FW dump */
+ fw_dump_bit = 1 << SST_VDRTCL0_DSRAMPGE_SHIFT;
+ writel(reg & ~fw_dump_bit, sst->addr.pci_cfg + SST_VDRTCTL0);
+
/* disable DMA finish function for SSP0 & SSP1 */
sst_dsp_shim_update_bits_unlocked(sst, SST_CSR2, SST_CSR2_SDFD_SSP1,
@@ -418,6 +384,12 @@ static int hsw_set_dsp_D0(struct sst_dsp *sst)
sst_dsp_shim_update_bits(sst, SST_IMRD, (SST_IMRD_DONE | SST_IMRD_BUSY |
SST_IMRD_SSP0 | SST_IMRD_DMAC), 0x0);
+ /* clear IPC registers */
+ sst_dsp_shim_write(sst, SST_IPCX, 0x0);
+ sst_dsp_shim_write(sst, SST_IPCD, 0x0);
+ sst_dsp_shim_write(sst, 0x80, 0x6);
+ sst_dsp_shim_write(sst, 0xe0, 0x300a);
+
return 0;
}
@@ -443,6 +415,11 @@ static void hsw_sleep(struct sst_dsp *sst)
{
dev_dbg(sst->dev, "HSW_PM dsp runtime suspend\n");
+ /* put DSP into reset and stall */
+ sst_dsp_shim_update_bits(sst, SST_CSR,
+ SST_CSR_24MHZ_LPCS | SST_CSR_RST | SST_CSR_STALL,
+ SST_CSR_RST | SST_CSR_STALL | SST_CSR_24MHZ_LPCS);
+
hsw_set_dsp_D3(sst);
dev_dbg(sst->dev, "HSW_PM dsp runtime suspend exit\n");
}
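
The reworked D0 path above replaces a fixed mdelay() with a bounded poll: read
the PMCS power-state bits up to ten times, sleeping between reads, and give up
with -ENODEV if they never clear. The generic shape of that pattern as a
runnable userspace sketch (fake register that settles after a few reads):

    #include <stdio.h>
    #include <errno.h>
    #include <unistd.h>

    static unsigned int fake_pmcs = 0x3;   /* PS bits start set */

    static unsigned int read_pmcs(void)
    {
        if (fake_pmcs)
            fake_pmcs--;                   /* hardware settles over time */
        return fake_pmcs;
    }

    static int wait_for_d0(void)
    {
        int tries = 10;

        while (tries--) {
            if (read_pmcs() == 0)
                return 0;                  /* reached D0 */
            usleep(1000);                  /* kernel uses msleep(1) */
        }
        return -ENODEV;                    /* device never came up */
    }

    int main(void)
    {
        printf("wait_for_d0() = %d\n", wait_for_d0());
        return 0;
    }
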
diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
index e711abcf8c12..d6adf7edea41 100644
--- a/sound/soc/meson/axg-toddr.c
+++ b/sound/soc/meson/axg-toddr.c
@@ -18,6 +18,7 @@
#define CTRL0_TODDR_SEL_RESAMPLE BIT(30)
#define CTRL0_TODDR_EXT_SIGNED BIT(29)
#define CTRL0_TODDR_PP_MODE BIT(28)
+#define CTRL0_TODDR_SYNC_CH BIT(27)
#define CTRL0_TODDR_TYPE_MASK GENMASK(15, 13)
#define CTRL0_TODDR_TYPE(x) ((x) << 13)
#define CTRL0_TODDR_MSB_POS_MASK GENMASK(12, 8)
@@ -189,10 +190,31 @@ static const struct axg_fifo_match_data axg_toddr_match_data = {
.dai_drv = &axg_toddr_dai_drv
};
+static int g12a_toddr_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = axg_toddr_dai_startup(substream, dai);
+ if (ret)
+ return ret;
+
+ /*
+	 * Make sure the first channel ends up at the beginning of the output.
+ * As weird as it looks, without this the first channel may be misplaced
+ * in memory, with a random shift of 2 channels.
+ */
+ regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SYNC_CH,
+ CTRL0_TODDR_SYNC_CH);
+
+ return 0;
+}
+
static const struct snd_soc_dai_ops g12a_toddr_ops = {
.prepare = g12a_toddr_dai_prepare,
.hw_params = axg_toddr_dai_hw_params,
- .startup = axg_toddr_dai_startup,
+ .startup = g12a_toddr_dai_startup,
.shutdown = axg_toddr_dai_shutdown,
};
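
Mechanically, the new g12a startup hook performs a read-modify-write that sets
bit 27 of CTRL0 so the FIFO re-syncs channel 0 to the start of the buffer. A
plain-C model of regmap_update_bits() semantics using the values from this
hunk:

    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define CTRL0_TODDR_SYNC_CH BIT(27)

    static unsigned int ctrl0;             /* fake CTRL0 register */

    static void update_bits(unsigned int *reg, unsigned int mask,
                            unsigned int val)
    {
        *reg = (*reg & ~mask) | (val & mask);   /* read-modify-write */
    }

    int main(void)
    {
        update_bits(&ctrl0, CTRL0_TODDR_SYNC_CH, CTRL0_TODDR_SYNC_CH);
        printf("CTRL0 = %#010x\n", ctrl0);      /* 0x08000000 */
        return 0;
    }
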
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 083413abc2f6..575e2aefefe3 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -143,6 +143,7 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
card = &data->card;
card->dev = dev;
+ card->owner = THIS_MODULE;
card->dapm_widgets = apq8016_sbc_dapm_widgets;
card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets);
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index 253549600c5a..1a69baefc5ce 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -114,6 +114,7 @@ static int apq8096_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = dev;
+ card->owner = THIS_MODULE;
dev_set_drvdata(dev, card);
ret = qcom_snd_parse_of(card);
if (ret)
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index 5194d90ddb96..fd69cf8b1f23 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -52,8 +52,10 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
for_each_child_of_node(dev->of_node, np) {
dlc = devm_kzalloc(dev, 2 * sizeof(*dlc), GFP_KERNEL);
- if (!dlc)
- return -ENOMEM;
+ if (!dlc) {
+ ret = -ENOMEM;
+ goto err;
+ }
link->cpus = &dlc[0];
link->platforms = &dlc[1];
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 0d10fba53945..ab1bf23c21a6 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -555,6 +555,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
card->dapm_widgets = sdm845_snd_widgets;
card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets);
card->dev = dev;
+ card->owner = THIS_MODULE;
dev_set_drvdata(dev, card);
ret = qcom_snd_parse_of(card);
if (ret)
diff --git a/sound/soc/qcom/storm.c b/sound/soc/qcom/storm.c
index c0c388d4db82..80c9cf2f254a 100644
--- a/sound/soc/qcom/storm.c
+++ b/sound/soc/qcom/storm.c
@@ -96,6 +96,7 @@ static int storm_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = &pdev->dev;
+ card->owner = THIS_MODULE;
ret = snd_soc_of_parse_card_name(card, "qcom,model");
if (ret) {
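
The repeated one-liner in these qcom board drivers matters because the ALSA
core uses card->owner to pin the driver module while the card is open; without
it the module can be unloaded mid-use. A toy refcount model of that protection
(mocked try_module_get()/module_put()):

    #include <stdio.h>

    struct module { int refs; };

    static void get_module(struct module *m) { if (m) m->refs++; }
    static void put_module(struct module *m) { if (m) m->refs--; }
    static int  can_unload(const struct module *m)
    {
        /* with no owner recorded, nothing protects the code */
        return !m || m->refs == 0;
    }

    int main(void)
    {
        struct module this_module = { 0 };
        struct module *owner = &this_module; /* card->owner = THIS_MODULE */

        get_module(owner);                   /* userspace opens the card */
        printf("unload while open: %d\n", can_unload(owner));
        put_module(owner);                   /* card closed */
        printf("unload after close: %d\n", can_unload(owner));
        return 0;
    }
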
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 663e3839f251..054437660678 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -834,6 +834,19 @@ struct snd_soc_dai *snd_soc_find_dai(
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai);
+struct snd_soc_dai *snd_soc_find_dai_with_mutex(
+ const struct snd_soc_dai_link_component *dlc)
+{
+ struct snd_soc_dai *dai;
+
+ mutex_lock(&client_mutex);
+ dai = snd_soc_find_dai(dlc);
+ mutex_unlock(&client_mutex);
+
+ return dai;
+}
+EXPORT_SYMBOL_GPL(snd_soc_find_dai_with_mutex);
+
static int soc_dai_link_sanity_check(struct snd_soc_card *card,
struct snd_soc_dai_link *link)
{
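
snd_soc_find_dai_with_mutex() is the classic lock-taking wrapper: the bare
lookup assumes client_mutex is already held, and the wrapper makes it safe for
callers outside the core. A minimal userspace analogue using pthreads (the
lookup body is a hypothetical stand-in):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t client_mutex = PTHREAD_MUTEX_INITIALIZER;

    static const char *find_dai_locked(const char *name)
    {
        /* walks a list that client_mutex protects (pretend) */
        return name;
    }

    static const char *find_dai_with_mutex(const char *name)
    {
        const char *dai;

        pthread_mutex_lock(&client_mutex);
        dai = find_dai_locked(name);
        pthread_mutex_unlock(&client_mutex);
        return dai;
    }

    int main(void)
    {
        printf("found: %s\n", find_dai_with_mutex("cpu-dai.0"));
        return 0;
    }
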
diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c
index 91a2551e4cef..0dbd312aad08 100644
--- a/sound/soc/soc-dai.c
+++ b/sound/soc/soc-dai.c
@@ -412,14 +412,14 @@ void snd_soc_dai_link_set_capabilities(struct snd_soc_dai_link *dai_link)
supported_codec = false;
for_each_link_cpus(dai_link, i, cpu) {
- dai = snd_soc_find_dai(cpu);
+ dai = snd_soc_find_dai_with_mutex(cpu);
if (dai && snd_soc_dai_stream_valid(dai, direction)) {
supported_cpu = true;
break;
}
}
for_each_link_codecs(dai_link, i, codec) {
- dai = snd_soc_find_dai(codec);
+ dai = snd_soc_find_dai_with_mutex(codec);
if (dai && snd_soc_dai_stream_valid(dai, direction)) {
supported_codec = true;
break;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 00ac1cbf6f88..4c9d4cd8cf0b 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -812,7 +812,7 @@ dynamic:
return 0;
config_err:
- for_each_rtd_dais(rtd, i, dai)
+ for_each_rtd_dais_rollback(rtd, i, dai)
snd_soc_dai_shutdown(dai, substream);
snd_soc_link_shutdown(substream);
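
The soc-pcm fix swaps in the rollback iterator: when startup fails at index i,
only dais [0, i) were actually started, so only those may be shut down. A
standalone sketch of that unwind idiom:

    #include <stdio.h>

    #define NDAIS 4

    static int  startup(int i)  { return i == 2 ? -1 : 0; } /* #2 fails */
    static void shutdown(int i) { printf("shutdown dai %d\n", i); }

    int main(void)
    {
        int i;

        for (i = 0; i < NDAIS; i++)
            if (startup(i))
                goto rollback;
        return 0;

    rollback:
        while (--i >= 0)    /* walk backwards from the failure point */
            shutdown(i);    /* prints 1, then 0; never 2 or 3 */
        return 1;
    }
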
diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
index 5c47de96c529..57feb473a579 100644
--- a/sound/soc/ti/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
@@ -446,12 +446,12 @@ static const struct snd_soc_dai_ops ams_delta_dai_ops = {
/* Will be used if the codec ever has its own digital_mute function */
static int ams_delta_startup(struct snd_pcm_substream *substream)
{
- return ams_delta_digital_mute(NULL, 0, substream->stream);
+ return ams_delta_mute(NULL, 0, substream->stream);
}
static void ams_delta_shutdown(struct snd_pcm_substream *substream)
{
- ams_delta_digital_mute(NULL, 1, substream->stream);
+ ams_delta_mute(NULL, 1, substream->stream);
}
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 0a6d09a3e91f..39bb322707b4 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -38,7 +38,7 @@ FEATURE_TESTS = libbfd disassembler-four-args
FEATURE_DISPLAY = libbfd disassembler-four-args
check_feat := 1
-NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
check_feat := 0
@@ -89,7 +89,7 @@ $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
$(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c
$(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c
-clean: bpftool_clean runqslower_clean
+clean: bpftool_clean runqslower_clean resolve_btfids_clean
$(call QUIET_CLEAN, bpf-progs)
$(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \
$(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.*
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 815ac9804aee..f33cb02de95c 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -19,7 +19,7 @@ man8dir = $(mandir)/man8
# Load targets for building eBPF helpers man page.
include ../../Makefile.helpers
-MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
+MAN8_RST = $(wildcard bpftool*.rst)
_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
@@ -28,12 +28,23 @@ man: man8 helpers
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+RST2MAN_OPTS += --verbose
+
+list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST))))
+see_also = $(subst " ",, \
+ "\n" \
+ "SEE ALSO\n" \
+ "========\n" \
+	 "\t**bpf**\\ (2),\n" \
+ "\t**bpf-helpers**\\ (7)" \
+ $(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \
+ "\n")
$(OUTPUT)%.8: %.rst
ifndef RST2MAN_DEP
$(error "rst2man not found, but required to generate man pages")
endif
- $(QUIET_GEN)rst2man $< > $@
+ $(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@
clean: helpers-clean
$(call QUIET_CLEAN, Documentation)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index 896f4c6c2870..ff4d327a582e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -71,26 +71,12 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
**# bpftool btf dump id 1226**
+
::
[1] PTR '(anon)' type_id=2
@@ -104,6 +90,7 @@ EXAMPLES
This gives an example of default output for all supported BTF kinds.
**$ cat prog.c**
+
::
struct fwd_struct;
@@ -144,6 +131,7 @@ This gives an example of default output for all supported BTF kinds.
}
**$ bpftool btf dump file prog.o**
+
::
[1] PTR '(anon)' type_id=2
@@ -229,20 +217,3 @@ All the standard ways to specify map or program are supported:
**# bpftool btf dump prog tag b88e0a09b1d9759d**
**# bpftool btf dump prog pinned /sys/fs/bpf/prog_name**
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index a226aee3574f..790944c35602 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -116,26 +116,11 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
Show file names of pinned programs.
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
EXAMPLES
========
|
@@ -158,19 +143,3 @@ EXAMPLES
::
ID AttachType AttachFlags Name
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 8609f06e71de..dd3771bdbc57 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -71,35 +71,4 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
+ .. include:: common_options.rst
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index df85dbd962c0..84cf0639696f 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -126,26 +126,12 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON,
- this option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
**$ cat example.c**
+
::
#include <stdbool.h>
@@ -187,6 +173,7 @@ This is example BPF application with two BPF programs and a mix of BPF maps
and global variables.
**$ bpftool gen skeleton example.o**
+
::
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
@@ -241,6 +228,7 @@ and global variables.
#endif /* __EXAMPLE_SKEL_H__ */
**$ cat example_user.c**
+
::
#include "example.skel.h"
@@ -283,6 +271,7 @@ and global variables.
}
**# ./example_user**
+
::
my_map name: my_map
@@ -290,19 +279,3 @@ and global variables.
my_static_var: 7
This is a stripped-out version of skeleton generated for above example code.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
index 070ffacb42b5..51f49bead619 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-iter.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
@@ -51,16 +51,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -77,19 +68,3 @@ EXAMPLES
Create a file-based bpf iterator from bpf_iter_hashmap.o and map with
id 20, and pin it to /sys/fs/bpf/my_hashmap
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst
index 4a52e7a93339..5f7db2a837cc 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-link.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst
@@ -21,7 +21,7 @@ LINK COMMANDS
| **bpftool** **link { show | list }** [*LINK*]
| **bpftool** **link pin** *LINK* *FILE*
-| **bpftool** **link detach *LINK*
+| **bpftool** **link detach** *LINK*
| **bpftool** **link help**
|
| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* }
@@ -62,18 +62,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
When showing BPF links, show file names of pinned
@@ -83,10 +72,6 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf.
-
EXAMPLES
========
**# bpftool link show**
@@ -121,20 +106,3 @@ EXAMPLES
::
-rw------- 1 root root 0 Apr 23 21:39 link
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 083db6c2fc67..dade10cdf295 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -23,7 +23,8 @@ MAP COMMANDS
| **bpftool** **map** { **show** | **list** } [*MAP*]
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
-| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \
+| [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* [**key** *DATA*]
@@ -67,7 +68,7 @@ DESCRIPTION
maps. On such kernels bpftool will automatically emit this
information as well.
- **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+ **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**dev** *NAME*]
Create a new map with given parameters and pin it to *bpffs*
as *FILE*.
@@ -75,6 +76,11 @@ DESCRIPTION
desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h
UAPI header for existing flags).
+ To create maps of type array-of-maps or hash-of-maps, the
+ **inner_map** keyword must be used to pass an inner map. The
+ kernel needs it to collect metadata related to the inner maps
+ that the new map will work with.
+
Keyword **dev** expects a network interface name, and is used
to request hardware offload for the map.
@@ -155,18 +161,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
Show file names of pinned maps.
@@ -175,13 +170,10 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
EXAMPLES
========
**# bpftool map show**
+
::
10: hash name some_map flags 0x0
@@ -203,6 +195,7 @@ The following three commands are equivalent:
**# bpftool map dump id 10**
+
::
key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
@@ -210,6 +203,7 @@ The following three commands are equivalent:
Found 2 elements
**# bpftool map getnext id 10 key 0 1 2 3**
+
::
key:
@@ -276,19 +270,3 @@ would be lost as soon as bpftool exits).
key: 00 00 00 00 value: 22 02 00 00
Found 1 element
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index aa7450736179..d8165d530937 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -75,22 +75,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -187,20 +172,3 @@ EXAMPLES
::
xdp:
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index 9c592b7c6775..e958ce91de72 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -40,22 +40,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -78,20 +63,3 @@ EXAMPLES
{"pid":21765,"fd":5,"prog_id":7,"fd_type":"kretprobe","func":"__x64_sys_nanosleep","offset":0}, \
{"pid":21767,"fd":5,"prog_id":8,"fd_type":"tracepoint","tracepoint":"sys_enter_nanosleep"}, \
{"pid":21800,"fd":5,"prog_id":9,"fd_type":"uprobe","filename":"/home/yhs/a.out","offset":1159}]
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 82e356b664e8..358c7309d419 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -210,18 +210,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
When showing BPF programs, show file names of pinned
@@ -234,11 +223,6 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
-
EXAMPLES
========
**# bpftool prog show**
@@ -342,19 +326,3 @@ EXAMPLES
40176203 cycles (83.05%)
42518139 instructions # 1.06 insns per cycle (83.39%)
123 llc_misses # 2.89 LLC misses per million insns (83.15%)
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
index d93cd1cb8b0f..506e70ee78e9 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -60,23 +60,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -98,20 +82,3 @@ EXAMPLES
::
Registered tcp_congestion_ops cubic id 110
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 420d4d5df8b6..e7d949334961 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -46,18 +46,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-m, --mapcompat
Allow loading maps with unknown map definitions.
@@ -65,24 +54,3 @@ OPTIONS
-n, --nomount
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst
new file mode 100644
index 000000000000..05d06c74dcaa
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/common_options.rst
@@ -0,0 +1,22 @@
+-h, --help
+ Print short help message (similar to **bpftool help**).
+
+-V, --version
+ Print version number (similar to **bpftool version**), and optional
+ features that were included when bpftool was compiled. Optional
+ features include linking against libbfd to provide the disassembler
+ for JIT-ted programs (**bpftool prog dump jited**) and usage of BPF
+ skeletons (some features like **bpftool prog profile** or showing
+ pids associated to BPF objects may rely on it).
+	PIDs associated with BPF objects may rely on them).
+-j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+-p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+-d, --debug
+ Print all logs available, even debug-level information. This includes
+ logs from libbpf as well as from the verifier, when attempting to
+ load programs.
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 7b68e3c0a5fb..3f1da30c4da6 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -709,9 +709,26 @@ _bpftool()
"$cur" ) )
return 0
;;
- key|value|flags|name|entries)
+ key|value|flags|entries)
return 0
;;
+ inner_map)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ case $pprev in
+ inner_map)
+ _bpftool_get_map_names
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+ ;;
*)
_bpftool_once_attr 'type'
_bpftool_once_attr 'key'
@@ -719,6 +736,9 @@ _bpftool()
_bpftool_once_attr 'entries'
_bpftool_once_attr 'name'
_bpftool_once_attr 'flags'
+ if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
+ _bpftool_once_attr 'inner_map'
+ fi
_bpftool_once_attr 'dev'
return 0
;;
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index 86501cd3c763..7fea83bedf48 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -119,6 +119,12 @@ void jsonw_pretty(json_writer_t *self, bool on)
self->pretty = on;
}
+void jsonw_reset(json_writer_t *self)
+{
+ assert(self->depth == 0);
+ self->sep = '\0';
+}
+
/* Basic blocks */
static void jsonw_begin(json_writer_t *self, int c)
{
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index 35cf1f00f96c..8ace65cdb92f 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -27,6 +27,9 @@ void jsonw_destroy(json_writer_t **self_p);
/* Cause output to have pretty whitespace */
void jsonw_pretty(json_writer_t *self, bool on);
+/* Reset separator to create new JSON */
+void jsonw_reset(json_writer_t *self);
+
/* Add property name */
void jsonw_name(json_writer_t *self, const char *name);
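
jsonw_reset() can be this small because the writer's only cross-document state
at depth 0 is the pending separator. A minimal model of that design (a
stand-alone writer, not the bpftool code):

    #include <assert.h>
    #include <stdio.h>

    struct writer { int depth; char sep; };

    static void emit(struct writer *w, const char *tok)
    {
        if (w->sep)
            putchar(w->sep);
        fputs(tok, stdout);
        w->sep = ',';           /* next token needs a separator */
    }

    static void reset(struct writer *w)
    {
        assert(w->depth == 0);  /* legal only between whole documents */
        w->sep = '\0';
    }

    int main(void)
    {
        struct writer w = { 0, '\0' };

        emit(&w, "1");
        emit(&w, "2");          /* prints 1,2 */
        putchar('\n');
        reset(&w);              /* forget the pending ',' */
        emit(&w, "3");          /* new document starts clean */
        putchar('\n');
        return 0;
    }
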
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 4a191fcbeb82..682daaa49e6a 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -70,13 +70,42 @@ static int do_help(int argc, char **argv)
static int do_version(int argc, char **argv)
{
+#ifdef HAVE_LIBBFD_SUPPORT
+ const bool has_libbfd = true;
+#else
+ const bool has_libbfd = false;
+#endif
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+ const bool has_skeletons = false;
+#else
+ const bool has_skeletons = true;
+#endif
+
if (json_output) {
- jsonw_start_object(json_wtr);
+ jsonw_start_object(json_wtr); /* root object */
+
jsonw_name(json_wtr, "version");
jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
- jsonw_end_object(json_wtr);
+
+ jsonw_name(json_wtr, "features");
+ jsonw_start_object(json_wtr); /* features */
+ jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
+ jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
+ jsonw_end_object(json_wtr); /* features */
+
+ jsonw_end_object(json_wtr); /* root object */
} else {
+ unsigned int nb_features = 0;
+
printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
+ printf("features:");
+ if (has_libbfd) {
+ printf(" libbfd");
+ nb_features++;
+ }
+ if (has_skeletons)
+ printf("%s skeletons", nb_features++ ? "," : "");
+ printf("\n");
}
return 0;
}
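For illustration, on a build with both optional features available, the two branches above produce output along these lines (version string hypothetical):

    $ bpftool version
    bpftool v5.9.0
    features: libbfd, skeletons

    $ bpftool --json version
    {"version":"5.9.0","features":{"libbfd":true,"skeletons":true}}

The nb_features counter only decides whether a feature name in the plain-text list needs a leading comma.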
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index bc0071228f88..a7efbd84fbcc 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -213,8 +213,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
jsonw_end_object(json_wtr);
}
-static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
- const char *error_msg)
+static void
+print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
+ const char *error_msg)
{
int msg_size = strlen(error_msg);
bool single_line, break_names;
@@ -232,6 +233,40 @@ static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
printf("\n");
}
+static void
+print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
+{
+ /* For prog_array maps or maps of maps, failure to look up the value
+ * means there is no entry for that key. Do not print an error message
+ * in that case.
+ */
+ if ((map_is_map_of_maps(map_info->type) ||
+ map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT)
+ return;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr); /* entry */
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, map_info->key_size);
+ jsonw_name(json_wtr, "value");
+ jsonw_start_object(json_wtr); /* error */
+ jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
+ jsonw_end_object(json_wtr); /* error */
+ jsonw_end_object(json_wtr); /* entry */
+ } else {
+ const char *msg = NULL;
+
+ if (lookup_errno == ENOENT)
+ msg = "<no entry>";
+ else if (lookup_errno == ENOSPC &&
+ map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ msg = "<cannot read>";
+
+ print_entry_error_msg(map_info, key,
+ msg ? : strerror(lookup_errno));
+ }
+}
+
static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
unsigned char *value)
{
@@ -713,56 +748,23 @@ static int dump_map_elem(int fd, void *key, void *value,
struct bpf_map_info *map_info, struct btf *btf,
json_writer_t *btf_wtr)
{
- int num_elems = 0;
- int lookup_errno;
-
- if (!bpf_map_lookup_elem(fd, key, value)) {
- if (json_output) {
- print_entry_json(map_info, key, value, btf);
- } else {
- if (btf) {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, map_info, key, value);
- } else {
- print_entry_plain(map_info, key, value);
- }
- num_elems++;
- }
- return num_elems;
+ if (bpf_map_lookup_elem(fd, key, value)) {
+ print_entry_error(map_info, key, errno);
+ return -1;
}
- /* lookup error handling */
- lookup_errno = errno;
-
- if (map_is_map_of_maps(map_info->type) ||
- map_is_map_of_progs(map_info->type))
- return 0;
-
if (json_output) {
- jsonw_start_object(json_wtr);
- jsonw_name(json_wtr, "key");
- print_hex_data_json(key, map_info->key_size);
- jsonw_name(json_wtr, "value");
- jsonw_start_object(json_wtr);
- jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
- jsonw_end_object(json_wtr);
- jsonw_end_object(json_wtr);
- } else {
- const char *msg = NULL;
-
- if (lookup_errno == ENOENT)
- msg = "<no entry>";
- else if (lookup_errno == ENOSPC &&
- map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
- msg = "<cannot read>";
+ print_entry_json(map_info, key, value, btf);
+ } else if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
- print_entry_error(map_info, key,
- msg ? : strerror(lookup_errno));
+ do_dump_btf(&d, map_info, key, value);
+ } else {
+ print_entry_plain(map_info, key, value);
}
return 0;
@@ -873,7 +875,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
err = 0;
break;
}
- num_elems += dump_map_elem(fd, key, value, info, btf, wtr);
+ if (!dump_map_elem(fd, key, value, info, btf, wtr))
+ num_elems++;
prev_key = key;
}
@@ -1247,7 +1250,7 @@ static int do_create(int argc, char **argv)
{
struct bpf_create_map_attr attr = { NULL, };
const char *pinfile;
- int err, fd;
+ int err = -1, fd;
if (!REQ_ARGS(7))
return -1;
@@ -1262,13 +1265,13 @@ static int do_create(int argc, char **argv)
if (attr.map_type) {
p_err("map type already specified");
- return -1;
+ goto exit;
}
attr.map_type = map_type_from_str(*argv);
if ((int)attr.map_type < 0) {
p_err("unrecognized map type: %s", *argv);
- return -1;
+ goto exit;
}
NEXT_ARG();
} else if (is_prefix(*argv, "name")) {
@@ -1277,43 +1280,56 @@ static int do_create(int argc, char **argv)
} else if (is_prefix(*argv, "key")) {
if (parse_u32_arg(&argc, &argv, &attr.key_size,
"key size"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "value")) {
if (parse_u32_arg(&argc, &argv, &attr.value_size,
"value size"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "entries")) {
if (parse_u32_arg(&argc, &argv, &attr.max_entries,
"max entries"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "flags")) {
if (parse_u32_arg(&argc, &argv, &attr.map_flags,
"flags"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "dev")) {
NEXT_ARG();
if (attr.map_ifindex) {
p_err("offload device already specified");
- return -1;
+ goto exit;
}
attr.map_ifindex = if_nametoindex(*argv);
if (!attr.map_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
- return -1;
+ goto exit;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "inner_map")) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int inner_map_fd;
+
+ NEXT_ARG();
+ if (!REQ_ARGS(2))
+ usage();
+ inner_map_fd = map_parse_fd_and_info(&argc, &argv,
+ &info, &len);
+ if (inner_map_fd < 0)
+ return -1;
+ attr.inner_map_fd = inner_map_fd;
} else {
p_err("unknown arg %s", *argv);
- return -1;
+ goto exit;
}
}
if (!attr.name) {
p_err("map name not specified");
- return -1;
+ goto exit;
}
set_max_rlimit();
@@ -1321,17 +1337,22 @@ static int do_create(int argc, char **argv)
fd = bpf_create_map_xattr(&attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
- return -1;
+ goto exit;
}
err = do_pin_fd(fd, pinfile);
close(fd);
if (err)
- return err;
+ goto exit;
if (json_output)
jsonw_null(json_wtr);
- return 0;
+
+exit:
+ if (attr.inner_map_fd > 0)
+ close(attr.inner_map_fd);
+
+ return err;
}
static int do_pop_dequeue(int argc, char **argv)
@@ -1417,7 +1438,7 @@ static int do_help(int argc, char **argv)
"Usage: %1$s %2$s { show | list } [MAP]\n"
" %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
- " [dev NAME]\n"
+ " [inner_map MAP] [dev NAME]\n"
" %1$s %2$s dump MAP\n"
" %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
" %1$s %2$s lookup MAP [key DATA]\n"
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index d393eb8263a6..d942c1e3372c 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -29,6 +29,9 @@
#include "main.h"
#include "xlated_dumper.h"
+#define BPF_METADATA_PREFIX "bpf_metadata_"
+#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
+
const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
@@ -151,6 +154,198 @@ static void show_prog_maps(int fd, __u32 num_maps)
}
}
+static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
+{
+ struct bpf_prog_info prog_info;
+ __u32 prog_info_len;
+ __u32 map_info_len;
+ void *value = NULL;
+ __u32 *map_ids;
+ int nr_maps;
+ int key = 0;
+ int map_fd;
+ int ret;
+ __u32 i;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info_len = sizeof(prog_info);
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ return NULL;
+
+ if (!prog_info.nr_map_ids)
+ return NULL;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
+ if (!map_ids)
+ return NULL;
+
+ nr_maps = prog_info.nr_map_ids;
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.nr_map_ids = nr_maps;
+ prog_info.map_ids = ptr_to_u64(map_ids);
+ prog_info_len = sizeof(prog_info);
+
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ goto free_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (map_fd < 0)
+ goto free_map_ids;
+
+ memset(map_info, 0, sizeof(*map_info));
+ map_info_len = sizeof(*map_info);
+ ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
+ if (ret < 0) {
+ close(map_fd);
+ goto free_map_ids;
+ }
+
+ if (map_info->type != BPF_MAP_TYPE_ARRAY ||
+ map_info->key_size != sizeof(int) ||
+ map_info->max_entries != 1 ||
+ !map_info->btf_value_type_id ||
+ !strstr(map_info->name, ".rodata")) {
+ close(map_fd);
+ continue;
+ }
+
+ value = malloc(map_info->value_size);
+ if (!value) {
+ close(map_fd);
+ goto free_map_ids;
+ }
+
+ if (bpf_map_lookup_elem(map_fd, &key, value)) {
+ close(map_fd);
+ free(value);
+ value = NULL;
+ goto free_map_ids;
+ }
+
+ close(map_fd);
+ break;
+ }
+
+free_map_ids:
+ free(map_ids);
+ return value;
+}
+
+static bool has_metadata_prefix(const char *s)
+{
+ return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
+}
+
+static void show_prog_metadata(int fd, __u32 num_maps)
+{
+ const struct btf_type *t_datasec, *t_var;
+ struct bpf_map_info map_info;
+ struct btf_var_secinfo *vsi;
+ bool printed_header = false;
+ struct btf *btf = NULL;
+ unsigned int i, vlen;
+ void *value = NULL;
+ const char *name;
+ int err;
+
+ if (!num_maps)
+ return;
+
+ memset(&map_info, 0, sizeof(map_info));
+ value = find_metadata(fd, &map_info);
+ if (!value)
+ return;
+
+ err = btf__get_from_id(map_info.btf_id, &btf);
+ if (err || !btf)
+ goto out_free;
+
+ t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
+ if (!btf_is_datasec(t_datasec))
+ goto out_free;
+
+ vlen = btf_vlen(t_datasec);
+ vsi = btf_var_secinfos(t_datasec);
+
+ /* We don't proceed to check the kinds of the elements of the DATASEC.
+ * The verifier enforces them to be BTF_KIND_VAR.
+ */
+
+ if (json_output) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (!has_metadata_prefix(name))
+ continue;
+
+ if (!printed_header) {
+ jsonw_name(json_wtr, "metadata");
+ jsonw_start_object(json_wtr);
+ printed_header = true;
+ }
+
+ jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_end_object(json_wtr);
+ } else {
+ json_writer_t *btf_wtr = jsonw_new(stdout);
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ if (!btf_wtr) {
+ p_err("jsonw alloc failed");
+ goto out_free;
+ }
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (!has_metadata_prefix(name))
+ continue;
+
+ if (!printed_header) {
+ printf("\tmetadata:");
+ printed_header = true;
+ }
+
+ printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
+
+ jsonw_reset(btf_wtr);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_destroy(&btf_wtr);
+ }
+
+out_free:
+ btf__free(btf);
+ free(value);
+}
+
static void print_prog_header_json(struct bpf_prog_info *info)
{
jsonw_uint_field(json_wtr, "id", info->id);
@@ -228,6 +423,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
emit_obj_refs_json(&refs_table, info->id, json_wtr);
+ show_prog_metadata(fd, info->nr_map_ids);
+
jsonw_end_object(json_wtr);
}
@@ -297,6 +494,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
printf("\n");
+
+ show_prog_metadata(fd, info->nr_map_ids);
}
static int show_prog(int fd)
@@ -1304,7 +1503,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
enum bpf_prog_type prog_type = common_prog_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
- const char *sec_name = bpf_program__title(pos, false);
+ const char *sec_name = bpf_program__section_name(pos);
err = get_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
@@ -1398,7 +1597,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
if (err) {
p_err("failed to pin program %s",
- bpf_program__title(prog, false));
+ bpf_program__section_name(prog));
goto err_close_obj;
}
} else {
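On the BPF program side, the metadata that find_metadata()/show_prog_metadata() surface is just read-only global data carrying the bpf_metadata_ name prefix; libbpf turns the object's .rodata section into the single-entry array map that find_metadata() looks for, with the BTF DATASEC describing each variable. A minimal sketch of a program carrying one such entry (variable name and value hypothetical):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* lands in .rodata; the bpf_metadata_ prefix is what bpftool keys on */
    volatile const char bpf_metadata_version[] SEC(".rodata") = "1.0";

    SEC("socket")
    int prog(void *ctx)
    {
    	return 0;
    }

    char _license[] SEC("license") = "GPL";

With such an object loaded, bpftool prog show renders the entry under a "metadata:" line, stripped of its prefix; the exact value formatting follows bpftool's BTF dumper.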
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index a88cd4426398..66cb92136de4 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
include ../../scripts/Makefile.include
+include ../../scripts/Makefile.arch
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -29,6 +30,7 @@ endif
AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
+ARCH = $(HOSTARCH)
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@@ -80,6 +82,7 @@ libbpf-clean:
clean: libsubcmd-clean libbpf-clean fixdep-clean
$(call msg,CLEAN,$(BINARY))
$(Q)$(RM) -f $(BINARY); \
+ $(RM) -rf $(if $(OUTPUT),$(OUTPUT),.)/feature; \
find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
tags:
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 210b086188a3..57890b357f85 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -76,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl)
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry
@@ -140,6 +147,7 @@ extern struct btf_id_set name;
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
#define BTF_SET_END(name)
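The new macro just folds the existing two-macro pattern into one line; hypothetically, resolving a single kernel type goes from:

    BTF_ID_LIST(btf_task_struct_ids)
    BTF_ID(struct, task_struct)

to the equivalent:

    BTF_ID_LIST_SINGLE(btf_task_struct_ids, struct, task_struct)

with the no-BTF fallback expanding to a one-element u32 array, as the second hunk shows.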
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 8dda13880957..a22812561064 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -124,6 +124,7 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
@@ -658,6 +659,12 @@ union bpf_attr {
__u32 flags;
} iter_create;
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -1447,8 +1454,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 0, if current task belongs to the cgroup2.
+ * * 1, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -3349,38 +3356,38 @@ union bpf_attr {
* Description
* Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *task*, which is a valid
- * pointer to struct task_struct. To store the stacktrace, the
- * bpf program provides *buf* with a nonnegative *size*.
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
*
* The last argument, *flags*, holds the number of stack frames to
* skip (from 0 to 255), masked with
@@ -3410,12 +3417,12 @@ union bpf_attr {
* long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
* Description
* Load header option. Support reading a particular TCP header
- * option for bpf program (BPF_PROG_TYPE_SOCK_OPS).
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
*
* If *flags* is 0, it will search the option from the
- * sock_ops->skb_data. The comment in "struct bpf_sock_ops"
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
* has details on what skb_data contains under different
- * sock_ops->op.
+ * *skops*\ **->op**.
*
* The first byte of the *searchby_res* specifies the
* kind that it wants to search.
@@ -3435,7 +3442,7 @@ union bpf_attr {
* [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
*
* To search for the standard window scale option (3),
- * the searchby_res should be [ 3, 0, 0, .... 0 ].
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
* Note, kind-length must be 0 for regular option.
*
* Searching for No-Op (0) and End-of-Option-List (1) are
@@ -3445,27 +3452,30 @@ union bpf_attr {
* of a header option.
*
* Supported flags:
+ *
* * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
* saved_syn packet or the just-received syn packet.
*
* Return
- * >0 when found, the header option is copied to *searchby_res*.
- * The return value is the total length copied.
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
*
- * **-EINVAL** If param is invalid
+ * **-EINVAL** if a parameter is invalid.
*
- * **-ENOMSG** The option is not found
+ * **-ENOMSG** if the option is not found.
*
- * **-ENOENT** No syn packet available when
- * **BPF_LOAD_HDR_OPT_TCP_SYN** is used
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
*
- * **-ENOSPC** Not enough space. Only *len* number of
- * bytes are copied.
+ * **-ENOSPC** if there is not enough space. Only *len* number of
+ * bytes are copied.
*
- * **-EFAULT** Cannot parse the header options in the packet
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
*
- * **-EPERM** This helper cannot be used under the
- * current sock_ops->op.
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
*
* long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
* Description
@@ -3483,44 +3493,44 @@ union bpf_attr {
* by searching the same option in the outgoing skb.
*
* This helper can only be called during
- * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
*
* Return
* 0 on success, or negative error in case of failure:
*
- * **-EINVAL** If param is invalid
+ * **-EINVAL** if a parameter is invalid.
*
- * **-ENOSPC** Not enough space in the header.
- * Nothing has been written
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
*
- * **-EEXIST** The option has already existed
+ * **-EEXIST** if the option already exists.
*
- * **-EFAULT** Cannot parse the existing header options
+ * **-EFAULT** on failure to parse the existing header options.
*
- * **-EPERM** This helper cannot be used under the
- * current sock_ops->op.
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
*
* long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
* Description
* Reserve *len* bytes for the bpf header option. The
- * space will be used by bpf_store_hdr_opt() later in
- * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
*
- * If bpf_reserve_hdr_opt() is called multiple times,
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
* the total number of bytes will be reserved.
*
* This helper can only be called during
- * BPF_SOCK_OPS_HDR_OPT_LEN_CB.
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
*
* Return
* 0 on success, or negative error in case of failure:
*
- * **-EINVAL** if param is invalid
+ * **-EINVAL** if a parameter is invalid.
*
- * **-ENOSPC** Not enough space in the header.
+ * **-ENOSPC** if there is not enough space in the header.
*
- * **-EPERM** This helper cannot be used under the
- * current sock_ops->op.
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
*
* void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
* Description
@@ -3560,9 +3570,9 @@ union bpf_attr {
*
* long bpf_d_path(struct path *path, char *buf, u32 sz)
* Description
- * Return full path for given 'struct path' object, which
- * needs to be the kernel BTF 'path' object. The path is
- * returned in the provided buffer 'buf' of size 'sz' and
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
* is zero terminated.
*
* Return
@@ -3573,7 +3583,7 @@ union bpf_attr {
* long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
* Description
* Read *size* bytes from user space address *user_ptr* and store
- * the data in *dst*. This is a wrapper of copy_from_user().
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
* Return
* 0 on success, or a negative error in case of failure.
*/
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 3d0d8231dc19..7d6687618d80 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -135,7 +135,7 @@ struct in_addr {
* this socket to prevent accepting spoofed ones.
*/
#define IP_PMTUDISC_INTERFACE 4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
 * fragmented if they exceed the interface MTU
*/
#define IP_PMTUDISC_OMIT 5
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index f6d86033c4fa..7d8eced6f459 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -790,9 +790,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
@@ -1035,6 +1036,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_LAST_CPU 184
#define KVM_CAP_SMALLER_MAXPHYADDR 185
#define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 0f257139b003..7703f0118385 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
s->nr_files);
}
-static int gettid(void)
+static int lk_gettid(void)
{
return syscall(__NR_gettid);
}
@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
struct io_sq_ring *ring = &s->sq_ring;
int ret, prepped;
- printf("submitter=%d\n", gettid());
+ printf("submitter=%d\n", lk_gettid());
srand48_r(pthread_self(), &s->rand);
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index adbe994610f2..f43249696d9f 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -62,7 +62,7 @@ FEATURE_USER = .libbpf
FEATURE_TESTS = libelf zlib bpf
FEATURE_DISPLAY = libelf zlib bpf
-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
check_feat := 1
@@ -147,6 +147,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
sort -u | wc -l)
VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
@@ -214,6 +215,7 @@ check_abi: $(OUTPUT)libbpf.so
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
diff -u $(OUTPUT)libbpf_global_syms.tmp \
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 82b983ff6569..2baa1308737c 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -872,3 +872,19 @@ int bpf_enable_stats(enum bpf_stats_type type)
return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
}
+
+int bpf_prog_bind_map(int prog_fd, int map_fd,
+ const struct bpf_prog_bind_opts *opts)
+{
+ union bpf_attr attr;
+
+ if (!OPTS_VALID(opts, bpf_prog_bind_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_bind_map.prog_fd = prog_fd;
+ attr.prog_bind_map.map_fd = map_fd;
+ attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
+
+ return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
+}
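A sketch of calling the new wrapper; prog_fd and map_fd are assumed to come from earlier bpf_load_program()/bpf_create_map() calls, and since no flags are defined for BPF_PROG_BIND_MAP yet, opts may also simply be NULL (the feature probe added to libbpf.c below passes NULL):

    #include <bpf/bpf.h>

    /* hypothetical helper; prog_fd/map_fd obtained elsewhere */
    static int bind_rodata(int prog_fd, int map_fd)
    {
    	DECLARE_LIBBPF_OPTS(bpf_prog_bind_opts, opts,
    		.flags = 0,
    	);

    	/* older kernels reject the unknown command, errno == EINVAL */
    	return bpf_prog_bind_map(prog_fd, map_fd, &opts);
    }

Binding keeps the map referenced by the program even when user space holds no other FD to it, which is what makes the .rodata-backed metadata above usable after the loader exits.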
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 015d13f25fcc..8c1ac4b42f90 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -243,6 +243,14 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
+struct bpf_prog_bind_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+};
+#define bpf_prog_bind_opts__last_field flags
+
+LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
+ const struct bpf_prog_bind_opts *opts);
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 91f0ad0e0325..2a55320d87d0 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -57,14 +57,16 @@ LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
__u32 *size);
-LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *cnt);
-LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
+LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt);
+LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b688aadf09c5..32dc444224d8 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -73,8 +73,6 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
-static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
- int idx);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
@@ -176,6 +174,8 @@ enum kern_feature_id {
FEAT_EXP_ATTACH_TYPE,
/* bpf_probe_read_{kernel,user}[_str] helpers */
FEAT_PROBE_READ_KERN,
+ /* BPF_PROG_BIND_MAP is supported */
+ FEAT_PROG_BIND_MAP,
__FEAT_CNT,
};
@@ -193,6 +193,7 @@ struct reloc_desc {
int insn_idx;
int map_idx;
int sym_off;
+ bool processed;
};
struct bpf_sec_def;
@@ -217,20 +218,45 @@ struct bpf_sec_def {
* linux/filter.h.
*/
struct bpf_program {
- /* Index in elf obj file, for relocation use. */
- int idx;
- char *name;
- int prog_ifindex;
- char *section_name;
const struct bpf_sec_def *sec_def;
- /* section_name with / replaced by _; makes recursive pinning
+ char *sec_name;
+ size_t sec_idx;
+ /* this program's instruction offset (in number of instructions)
+ * within its containing ELF section
+ */
+ size_t sec_insn_off;
+ /* number of original instructions in ELF section belonging to this
+ * program, not taking into account subprogram instructions possibly
+ * appended later during relocation
+ */
+ size_t sec_insn_cnt;
+ /* Offset (in number of instructions) of the start of instructions
+ * belonging to this BPF program within its containing main BPF
+ * program. For the entry-point (main) BPF program, this is always
+ * zero. For a sub-program, this gets reset before each main BPF
+ * program is processed and relocated, and is used to determine
+ * whether the sub-program was already appended to the main program,
+ * and if yes, at which instruction offset.
+ */
+ size_t sub_insn_off;
+
+ char *name;
+ /* sec_name with / replaced by _; makes recursive pinning
* in bpf_object__pin_programs easier
*/
char *pin_name;
+
+ /* instructions that belong to BPF program; insns[0] is located at
+ * sec_insn_off instruction within its ELF section in ELF file, so
+ * when mapping ELF file instruction index to the local instruction,
+ * one needs to subtract sec_insn_off; and vice versa.
+ */
struct bpf_insn *insns;
- size_t insns_cnt, main_prog_cnt;
- enum bpf_prog_type type;
- bool load;
+ /* actual number of instructions in this BPF program's image; for
+ * entry-point BPF programs this includes the size of main program
+ * itself plus all the used sub-programs, appended at the end
+ */
+ size_t insns_cnt;
struct reloc_desc *reloc_desc;
int nr_reloc;
@@ -246,7 +272,10 @@ struct bpf_program {
void *priv;
bpf_program_clear_priv_t clear_priv;
+ bool load;
+ enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
+ int prog_ifindex;
__u32 attach_btf_id;
__u32 attach_prog_fd;
void *func_info;
@@ -382,9 +411,10 @@ struct bpf_object {
struct extern_desc *externs;
int nr_extern;
int kconfig_map_idx;
+ int rodata_map_idx;
bool loaded;
- bool has_pseudo_calls;
+ bool has_subcalls;
/*
* Information when doing elf related work. Only valid if fd
@@ -446,6 +476,8 @@ static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
+static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
+ size_t off, __u32 sym_type, GElf_Sym *sym);
void bpf_program__unload(struct bpf_program *prog)
{
@@ -486,152 +518,160 @@ static void bpf_program__exit(struct bpf_program *prog)
bpf_program__unload(prog);
zfree(&prog->name);
- zfree(&prog->section_name);
+ zfree(&prog->sec_name);
zfree(&prog->pin_name);
zfree(&prog->insns);
zfree(&prog->reloc_desc);
prog->nr_reloc = 0;
prog->insns_cnt = 0;
- prog->idx = -1;
+ prog->sec_idx = -1;
}
static char *__bpf_program__pin_name(struct bpf_program *prog)
{
char *name, *p;
- name = p = strdup(prog->section_name);
+ name = p = strdup(prog->sec_name);
while ((p = strchr(p, '/')))
*p = '_';
return name;
}
+static bool insn_is_subprog_call(const struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_CALL &&
+ BPF_SRC(insn->code) == BPF_K &&
+ insn->src_reg == BPF_PSEUDO_CALL &&
+ insn->dst_reg == 0 &&
+ insn->off == 0;
+}
+
static int
-bpf_program__init(void *data, size_t size, const char *section_name, int idx,
- struct bpf_program *prog)
+bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
+ const char *name, size_t sec_idx, const char *sec_name,
+ size_t sec_off, void *insn_data, size_t insn_data_sz)
{
- const size_t bpf_insn_sz = sizeof(struct bpf_insn);
+ int i;
- if (size == 0 || size % bpf_insn_sz) {
- pr_warn("corrupted section '%s', size: %zu\n",
- section_name, size);
+ if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
+ pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
+ sec_name, name, sec_off, insn_data_sz);
return -EINVAL;
}
memset(prog, 0, sizeof(*prog));
+ prog->obj = obj;
+
+ prog->sec_idx = sec_idx;
+ prog->sec_insn_off = sec_off / BPF_INSN_SZ;
+ prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
+ /* insns_cnt can later be increased by appending used subprograms */
+ prog->insns_cnt = prog->sec_insn_cnt;
+
+ prog->type = BPF_PROG_TYPE_UNSPEC;
+ prog->load = true;
- prog->section_name = strdup(section_name);
- if (!prog->section_name) {
- pr_warn("failed to alloc name for prog under section(%d) %s\n",
- idx, section_name);
+ prog->instances.fds = NULL;
+ prog->instances.nr = -1;
+
+ prog->sec_name = strdup(sec_name);
+ if (!prog->sec_name)
+ goto errout;
+
+ prog->name = strdup(name);
+ if (!prog->name)
goto errout;
- }
prog->pin_name = __bpf_program__pin_name(prog);
- if (!prog->pin_name) {
- pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
- idx, section_name);
+ if (!prog->pin_name)
goto errout;
- }
- prog->insns = malloc(size);
- if (!prog->insns) {
- pr_warn("failed to alloc insns for prog under section %s\n",
- section_name);
+ prog->insns = malloc(insn_data_sz);
+ if (!prog->insns)
goto errout;
+ memcpy(prog->insns, insn_data, insn_data_sz);
+
+ for (i = 0; i < prog->insns_cnt; i++) {
+ if (insn_is_subprog_call(&prog->insns[i])) {
+ obj->has_subcalls = true;
+ break;
+ }
}
- prog->insns_cnt = size / bpf_insn_sz;
- memcpy(prog->insns, data, size);
- prog->idx = idx;
- prog->instances.fds = NULL;
- prog->instances.nr = -1;
- prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->load = true;
return 0;
errout:
+ pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
bpf_program__exit(prog);
return -ENOMEM;
}
static int
-bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
- const char *section_name, int idx)
+bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
+ const char *sec_name, int sec_idx)
{
- struct bpf_program prog, *progs;
+ struct bpf_program *prog, *progs;
+ void *data = sec_data->d_buf;
+ size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
int nr_progs, err;
-
- err = bpf_program__init(data, size, section_name, idx, &prog);
- if (err)
- return err;
+ const char *name;
+ GElf_Sym sym;
progs = obj->programs;
nr_progs = obj->nr_programs;
+ sec_off = 0;
- progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
- if (!progs) {
- /*
- * In this case the original obj->programs
- * is still valid, so don't need special treat for
- * bpf_close_object().
- */
- pr_warn("failed to alloc a new program under section '%s'\n",
- section_name);
- bpf_program__exit(&prog);
- return -ENOMEM;
- }
-
- pr_debug("elf: found program '%s'\n", prog.section_name);
- obj->programs = progs;
- obj->nr_programs = nr_progs + 1;
- prog.obj = obj;
- progs[nr_progs] = prog;
- return 0;
-}
-
-static int
-bpf_object__init_prog_names(struct bpf_object *obj)
-{
- Elf_Data *symbols = obj->efile.symbols;
- struct bpf_program *prog;
- size_t pi, si;
+ while (sec_off < sec_sz) {
+ if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
+ pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- for (pi = 0; pi < obj->nr_programs; pi++) {
- const char *name = NULL;
+ prog_sz = sym.st_size;
- prog = &obj->programs[pi];
+ name = elf_sym_str(obj, sym.st_name);
+ if (!name) {
+ pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; si++) {
- GElf_Sym sym;
+ if (sec_off + prog_sz > sec_sz) {
+ pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- if (!gelf_getsym(symbols, si, &sym))
- continue;
- if (sym.st_shndx != prog->idx)
- continue;
- if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
- continue;
+ pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
+ sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
- name = elf_sym_str(obj, sym.st_name);
- if (!name) {
- pr_warn("prog '%s': failed to get symbol name\n",
- prog->section_name);
- return -LIBBPF_ERRNO__LIBELF;
- }
+ progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
+ if (!progs) {
+ /*
+ * In this case the original obj->programs
+ * is still valid, so it needs no special treatment
+ * in bpf_close_object().
+ */
+ pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
+ sec_name, name);
+ return -ENOMEM;
}
+ obj->programs = progs;
- if (!name && prog->idx == obj->efile.text_shndx)
- name = ".text";
+ prog = &progs[nr_progs];
- if (!name) {
- pr_warn("prog '%s': failed to find program symbol\n",
- prog->section_name);
- return -EINVAL;
- }
+ err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
+ sec_off, data + sec_off, prog_sz);
+ if (err)
+ return err;
- prog->name = strdup(name);
- if (!prog->name)
- return -ENOMEM;
+ nr_progs++;
+ obj->nr_programs = nr_progs;
+
+ sec_off += prog_sz;
}
return 0;
@@ -1033,6 +1073,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.bss_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
+ obj->rodata_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
@@ -1391,6 +1432,8 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj)
obj->efile.rodata->d_size);
if (err)
return err;
+
+ obj->rodata_map_idx = obj->nr_maps - 1;
}
if (obj->efile.bss_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
@@ -2675,6 +2718,26 @@ static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
return data;
}
+static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
+ size_t off, __u32 sym_type, GElf_Sym *sym)
+{
+ Elf_Data *symbols = obj->efile.symbols;
+ size_t n = symbols->d_size / sizeof(GElf_Sym);
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (!gelf_getsym(symbols, i, sym))
+ continue;
+ if (sym->st_shndx != sec_idx || sym->st_value != off)
+ continue;
+ if (GELF_ST_TYPE(sym->st_info) != sym_type)
+ continue;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
static bool is_sec_name_dwarf(const char *name)
{
/* approximation, but the actual list is too long */
@@ -2715,19 +2778,55 @@ static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
return false;
}
+static int cmp_progs(const void *_a, const void *_b)
+{
+ const struct bpf_program *a = _a;
+ const struct bpf_program *b = _b;
+
+ if (a->sec_idx != b->sec_idx)
+ return a->sec_idx < b->sec_idx ? -1 : 1;
+
+ /* sec_insn_off can't be the same within the section */
+ return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
+}
+
static int bpf_object__elf_collect(struct bpf_object *obj)
{
Elf *elf = obj->efile.elf;
Elf_Data *btf_ext_data = NULL;
Elf_Data *btf_data = NULL;
- Elf_Scn *scn = NULL;
int idx = 0, err = 0;
+ const char *name;
+ Elf_Data *data;
+ Elf_Scn *scn;
+ GElf_Shdr sh;
+ /* a bunch of ELF parsing functionality depends on processing symbols,
+ * so do the first pass and find the symbol table
+ */
+ scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- const char *name;
- GElf_Shdr sh;
- Elf_Data *data;
+ if (elf_sec_hdr(obj, scn, &sh))
+ return -LIBBPF_ERRNO__FORMAT;
+
+ if (sh.sh_type == SHT_SYMTAB) {
+ if (obj->efile.symbols) {
+ pr_warn("elf: multiple symbol tables in %s\n", obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ data = elf_sec_data(obj, scn);
+ if (!data)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ obj->efile.symbols = data;
+ obj->efile.symbols_shndx = elf_ndxscn(scn);
+ obj->efile.strtabidx = sh.sh_link;
+ }
+ }
+
+ scn = NULL;
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
idx++;
if (elf_sec_hdr(obj, scn, &sh))
@@ -2766,20 +2865,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = data;
} else if (sh.sh_type == SHT_SYMTAB) {
- if (obj->efile.symbols) {
- pr_warn("elf: multiple symbol tables in %s\n", obj->path);
- return -LIBBPF_ERRNO__FORMAT;
- }
- obj->efile.symbols = data;
- obj->efile.symbols_shndx = idx;
- obj->efile.strtabidx = sh.sh_link;
+ /* already processed during the first pass above */
} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
if (sh.sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
- err = bpf_object__add_program(obj, data->d_buf,
- data->d_size,
- name, idx);
+ err = bpf_object__add_programs(obj, data, name, idx);
if (err)
return err;
} else if (strcmp(name, DATA_SEC) == 0) {
@@ -2833,6 +2924,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
+
+ /* sort BPF programs by section index and in-section instruction offset
+ * for faster search */
+ qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
+
return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
@@ -3157,20 +3253,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return 0;
}
-static struct bpf_program *
-bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
-{
- struct bpf_program *prog;
- size_t i;
-
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
- if (prog->idx == idx)
- return prog;
- }
- return NULL;
-}
-
struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
const char *title)
@@ -3178,12 +3260,18 @@ bpf_object__find_program_by_title(const struct bpf_object *obj,
struct bpf_program *pos;
bpf_object__for_each_program(pos, obj) {
- if (pos->section_name && !strcmp(pos->section_name, title))
+ if (pos->sec_name && !strcmp(pos->sec_name, title))
return pos;
}
return NULL;
}
+static bool prog_is_subprog(const struct bpf_object *obj,
+ const struct bpf_program *prog)
+{
+ return prog->sec_idx == obj->efile.text_shndx && obj->has_subcalls;
+}
+
struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name)
@@ -3191,6 +3279,8 @@ bpf_object__find_program_by_name(const struct bpf_object *obj,
struct bpf_program *prog;
bpf_object__for_each_program(prog, obj) {
+ if (prog_is_subprog(obj, prog))
+ continue;
if (!strcmp(prog->name, name))
return prog;
}
@@ -3240,6 +3330,8 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
const char *sym_sec_name;
struct bpf_map *map;
+ reloc_desc->processed = false;
+
/* sub-program call relocation */
if (insn->code == (BPF_JMP | BPF_CALL)) {
if (insn->src_reg != BPF_PSEUDO_CALL) {
@@ -3261,7 +3353,6 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
reloc_desc->type = RELO_CALL;
reloc_desc->insn_idx = insn_idx;
reloc_desc->sym_off = sym->st_value;
- obj->has_pseudo_calls = true;
return 0;
}
@@ -3361,14 +3452,50 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
return 0;
}
+static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
+{
+ return insn_idx >= prog->sec_insn_off &&
+ insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
+}
+
+static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
+ size_t sec_idx, size_t insn_idx)
+{
+ int l = 0, r = obj->nr_programs - 1, m;
+ struct bpf_program *prog;
+
+ while (l < r) {
+ m = l + (r - l + 1) / 2;
+ prog = &obj->programs[m];
+
+ if (prog->sec_idx < sec_idx ||
+ (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
+ l = m;
+ else
+ r = m - 1;
+ }
+ /* matching program could be at index l, but it still might be the
+ * wrong one, so we need to double-check the conditions one last time
+ */
+ prog = &obj->programs[l];
+ if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
+ return prog;
+ return NULL;
+}
+
static int
-bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
- Elf_Data *data, struct bpf_object *obj)
+bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
{
Elf_Data *symbols = obj->efile.symbols;
const char *relo_sec_name, *sec_name;
size_t sec_idx = shdr->sh_info;
+ struct bpf_program *prog;
+ struct reloc_desc *relos;
int err, i, nrels;
+ const char *sym_name;
+ __u32 insn_idx;
+ GElf_Sym sym;
+ GElf_Rel rel;
relo_sec_name = elf_sec_str(obj, shdr->sh_name);
sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
@@ -3379,19 +3506,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
relo_sec_name, sec_idx, sec_name);
nrels = shdr->sh_size / shdr->sh_entsize;
- prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
- if (!prog->reloc_desc) {
- pr_warn("failed to alloc memory in relocation\n");
- return -ENOMEM;
- }
- prog->nr_reloc = nrels;
-
for (i = 0; i < nrels; i++) {
- const char *sym_name;
- __u32 insn_idx;
- GElf_Sym sym;
- GElf_Rel rel;
-
if (!gelf_getrel(data, i, &rel)) {
pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
return -LIBBPF_ERRNO__FORMAT;
@@ -3408,15 +3523,42 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
}
insn_idx = rel.r_offset / BPF_INSN_SZ;
- sym_name = elf_sym_str(obj, sym.st_name) ?: "<?>";
+ /* relocations against static functions are recorded as
+ * relocations against the section that contains the function;
+ * in such a case, the symbol will be STT_SECTION and sym.st_name
+ * will point to an empty string (0), so fetch the section name
+ * instead
+ */
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
+ sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
+ else
+ sym_name = elf_sym_str(obj, sym.st_name);
+ sym_name = sym_name ?: "<?";
pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
relo_sec_name, i, insn_idx, sym_name);
- err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
+ prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
+ if (!prog) {
+ pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
+ relo_sec_name, i, sec_name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ relos = libbpf_reallocarray(prog->reloc_desc,
+ prog->nr_reloc + 1, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ prog->reloc_desc = relos;
+
+ /* adjust insn_idx to local BPF program frame of reference */
+ insn_idx -= prog->sec_insn_off;
+ err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
insn_idx, sym_name, &sym, &rel);
if (err)
return err;
+
+ prog->nr_reloc++;
}
return 0;
}
@@ -3758,6 +3900,52 @@ static int probe_kern_probe_read_kernel(void)
return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}
+static int probe_prog_bind_map(void)
+{
+ struct bpf_load_program_attr prg_attr;
+ struct bpf_create_map_attr map_attr;
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int ret, map, prog;
+
+ memset(&map_attr, 0, sizeof(map_attr));
+ map_attr.map_type = BPF_MAP_TYPE_ARRAY;
+ map_attr.key_size = sizeof(int);
+ map_attr.value_size = 32;
+ map_attr.max_entries = 1;
+
+ map = bpf_create_map_xattr(&map_attr);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ memset(&prg_attr, 0, sizeof(prg_attr));
+ prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ prg_attr.insns = insns;
+ prg_attr.insns_cnt = ARRAY_SIZE(insns);
+ prg_attr.license = "GPL";
+
+ prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
+ if (prog < 0) {
+ close(map);
+ return 0;
+ }
+
+ ret = bpf_prog_bind_map(prog, map, NULL);
+
+ close(map);
+ close(prog);
+
+ return ret >= 0;
+}
+
enum kern_feature_result {
FEAT_UNKNOWN = 0,
FEAT_SUPPORTED = 1,
@@ -3798,6 +3986,9 @@ static struct kern_feature_desc {
},
[FEAT_PROBE_READ_KERN] = {
"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
+ },
+ [FEAT_PROG_BIND_MAP] = {
+ "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
}
};
@@ -4089,75 +4280,6 @@ err_out:
return err;
}
-static int
-check_btf_ext_reloc_err(struct bpf_program *prog, int err,
- void *btf_prog_info, const char *info_name)
-{
- if (err != -ENOENT) {
- pr_warn("Error in loading %s for sec %s.\n",
- info_name, prog->section_name);
- return err;
- }
-
- /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
-
- if (btf_prog_info) {
- /*
- * Some info has already been found but has problem
- * in the last btf_ext reloc. Must have to error out.
- */
- pr_warn("Error in relocating %s for sec %s.\n",
- info_name, prog->section_name);
- return err;
- }
-
- /* Have problem loading the very first info. Ignore the rest. */
- pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
- info_name, prog->section_name, info_name);
- return 0;
-}
-
-static int
-bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
- const char *section_name, __u32 insn_offset)
-{
- int err;
-
- if (!insn_offset || prog->func_info) {
- /*
- * !insn_offset => main program
- *
- * For sub prog, the main program's func_info has to
- * be loaded first (i.e. prog->func_info != NULL)
- */
- err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
- section_name, insn_offset,
- &prog->func_info,
- &prog->func_info_cnt);
- if (err)
- return check_btf_ext_reloc_err(prog, err,
- prog->func_info,
- "bpf_func_info");
-
- prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
- }
-
- if (!insn_offset || prog->line_info) {
- err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
- section_name, insn_offset,
- &prog->line_info,
- &prog->line_info_cnt);
- if (err)
- return check_btf_ext_reloc_err(prog, err,
- prog->line_info,
- "bpf_line_info");
-
- prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
- }
-
- return 0;
-}
-
#define BPF_CORE_SPEC_MAX_LEN 64
/* represents BPF CO-RE field or array element accessor */
@@ -4927,8 +5049,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
*val = sz;
} else {
pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
+ prog->name, relo->kind, relo->insn_off / 8);
return -EINVAL;
}
if (validate)
@@ -4950,8 +5071,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
if (byte_sz >= 8) {
/* bitfield can't be read with 64-bit read */
pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
+ prog->name, relo->kind, relo->insn_off / 8);
return -E2BIG;
}
byte_sz *= 2;
@@ -5116,8 +5236,8 @@ static int bpf_core_calc_relo(const struct bpf_program *prog,
} else if (err == -EOPNOTSUPP) {
/* EOPNOTSUPP means unknown/unsupported relocation */
pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
- bpf_program__title(prog, false), relo_idx,
- core_relo_kind_str(relo->kind), relo->kind, relo->insn_off / 8);
+ prog->name, relo_idx, core_relo_kind_str(relo->kind),
+ relo->kind, relo->insn_off / 8);
}
return err;
@@ -5131,7 +5251,7 @@ static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
int insn_idx, struct bpf_insn *insn)
{
pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
- bpf_program__title(prog, false), relo_idx, insn_idx);
+ prog->name, relo_idx, insn_idx);
insn->code = BPF_JMP | BPF_CALL;
insn->dst_reg = 0;
insn->src_reg = 0;
@@ -5175,6 +5295,11 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
if (relo->insn_off % BPF_INSN_SZ)
return -EINVAL;
insn_idx = relo->insn_off / BPF_INSN_SZ;
+ /* adjust insn_idx from section frame of reference to the local
+ * program's frame of reference; (sub-)program code is not yet
+ * relocated, so it's enough to just subtract in-section offset
+ */
+ insn_idx = insn_idx - prog->sec_insn_off;
insn = &prog->insns[insn_idx];
class = BPF_CLASS(insn->code);
@@ -5198,14 +5323,14 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
return -EINVAL;
if (res->validate && insn->imm != orig_val) {
pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
+ prog->name, relo_idx,
insn_idx, insn->imm, orig_val, new_val);
return -EINVAL;
}
orig_val = insn->imm;
insn->imm = new_val;
pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
+ prog->name, relo_idx, insn_idx,
orig_val, new_val);
break;
case BPF_LDX:
@@ -5213,21 +5338,18 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
case BPF_STX:
if (res->validate && insn->off != orig_val) {
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->off, orig_val, new_val);
+ prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
return -EINVAL;
}
if (new_val > SHRT_MAX) {
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, new_val);
+ prog->name, relo_idx, insn_idx, new_val);
return -ERANGE;
}
orig_val = insn->off;
insn->off = new_val;
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
- orig_val, new_val);
+ prog->name, relo_idx, insn_idx, orig_val, new_val);
break;
case BPF_LD: {
__u64 imm;
@@ -5238,14 +5360,14 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
insn[1].code != 0 || insn[1].dst_reg != 0 ||
insn[1].src_reg != 0 || insn[1].off != 0) {
pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
- bpf_program__title(prog, false), relo_idx, insn_idx);
+ prog->name, relo_idx, insn_idx);
return -EINVAL;
}
imm = insn[0].imm + ((__u64)insn[1].imm << 32);
if (res->validate && imm != orig_val) {
pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
+ prog->name, relo_idx,
insn_idx, (unsigned long long)imm,
orig_val, new_val);
return -EINVAL;
@@ -5254,15 +5376,14 @@ static int bpf_core_patch_insn(struct bpf_program *prog,
insn[0].imm = new_val;
insn[1].imm = 0; /* currently only 32-bit values are supported */
pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
+ prog->name, relo_idx, insn_idx,
(unsigned long long)imm, new_val);
break;
}
default:
pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->code, insn->src_reg, insn->dst_reg,
- insn->off, insn->imm);
+ prog->name, relo_idx, insn_idx, insn->code,
+ insn->src_reg, insn->dst_reg, insn->off, insn->imm);
return -EINVAL;
}
@@ -5392,7 +5513,6 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
const struct btf *targ_btf,
struct hashmap *cand_cache)
{
- const char *prog_name = bpf_program__title(prog, false);
struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
const void *type_key = u32_as_hash_key(relo->type_id);
struct bpf_core_relo_res cand_res, targ_res;
@@ -5419,13 +5539,13 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
if (err) {
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
- prog_name, relo_idx, local_id, btf_kind_str(local_type),
+ prog->name, relo_idx, local_id, btf_kind_str(local_type),
str_is_empty(local_name) ? "<anon>" : local_name,
spec_str, err);
return -EINVAL;
}
- pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
+ pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
relo_idx, core_relo_kind_str(relo->kind), relo->kind);
bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
libbpf_print(LIBBPF_DEBUG, "\n");
@@ -5442,7 +5562,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
/* libbpf doesn't support candidate search for anonymous types */
if (str_is_empty(spec_str)) {
pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
- prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
+ prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
return -EOPNOTSUPP;
}
@@ -5450,7 +5570,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
if (IS_ERR(cand_ids)) {
pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
- prog_name, relo_idx, local_id, btf_kind_str(local_type),
+ prog->name, relo_idx, local_id, btf_kind_str(local_type),
local_name, PTR_ERR(cand_ids));
return PTR_ERR(cand_ids);
}
@@ -5466,13 +5586,13 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec);
if (err < 0) {
pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
- prog_name, relo_idx, i);
+ prog->name, relo_idx, i);
bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
libbpf_print(LIBBPF_WARN, ": %d\n", err);
return err;
}
- pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
+ pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
relo_idx, err == 0 ? "non-matching" : "matching", i);
bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
libbpf_print(LIBBPF_DEBUG, "\n");
@@ -5492,7 +5612,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
* should all resolve to the same bit offset
*/
pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
- prog_name, relo_idx, cand_spec.bit_offset,
+ prog->name, relo_idx, cand_spec.bit_offset,
targ_spec.bit_offset);
return -EINVAL;
} else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
@@ -5501,7 +5621,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
* proceed due to ambiguity
*/
pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
- prog_name, relo_idx,
+ prog->name, relo_idx,
cand_res.poison ? "failure" : "success", cand_res.new_val,
targ_res.poison ? "failure" : "success", targ_res.new_val);
return -EINVAL;
@@ -5534,7 +5654,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
*/
if (j == 0) {
pr_debug("prog '%s': relo #%d: no matching targets found\n",
- prog_name, relo_idx);
+ prog->name, relo_idx);
/* calculate single target relo result explicitly */
err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
@@ -5547,7 +5667,7 @@ patch_insn:
err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
- prog_name, relo_idx, relo->insn_off, err);
+ prog->name, relo_idx, relo->insn_off, err);
return -EINVAL;
}
@@ -5565,7 +5685,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
struct bpf_program *prog;
struct btf *targ_btf;
const char *sec_name;
- int i, err = 0;
+ int i, err = 0, insn_idx, sec_idx;
if (obj->btf_ext->core_relo_info.len == 0)
return 0;
@@ -5592,24 +5712,37 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
err = -EINVAL;
goto out;
}
+ /* bpf_object's ELF is gone by now so it's not easy to find
+ * section index by section name, but we can find *any*
+		 * bpf_program within the desired section and use its
+ * prog->sec_idx to do a proper search by section index and
+ * instruction offset
+ */
prog = NULL;
for (i = 0; i < obj->nr_programs; i++) {
- if (!strcmp(obj->programs[i].section_name, sec_name)) {
- prog = &obj->programs[i];
+ prog = &obj->programs[i];
+ if (strcmp(prog->sec_name, sec_name) == 0)
break;
- }
}
if (!prog) {
- pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
- sec_name);
- err = -EINVAL;
- goto out;
+ pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
+ return -ENOENT;
}
+ sec_idx = prog->sec_idx;
pr_debug("sec '%s': found %d CO-RE relocations\n",
sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
+ insn_idx = rec->insn_off / BPF_INSN_SZ;
+ prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
+ if (!prog) {
+ pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
+ sec_name, insn_idx, i);
+ err = -EINVAL;
+ goto out;
+ }
+
err = bpf_core_apply_relo(prog, rec, i, obj->btf,
targ_btf, cand_cache);
if (err) {
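find_prog_by_sec_insn() itself is outside this hunk; a rough standalone sketch
of what such a lookup has to do, using a cut-down stand-in struct rather than
libbpf's real struct bpf_program:

    #include <stddef.h>

    struct sample_prog {
            int sec_idx;         /* ELF section the program lives in */
            size_t sec_insn_off; /* first insn of the program within that section */
            size_t sec_insn_cnt; /* number of insns in the program */
    };

    static struct sample_prog *
    find_by_sec_insn(struct sample_prog *progs, size_t nr, int sec_idx, size_t insn_idx)
    {
            size_t i;

            for (i = 0; i < nr; i++) {
                    struct sample_prog *p = &progs[i];

                    if (p->sec_idx != sec_idx)
                            continue;
                    /* the program contains insn_idx if it falls in [off, off + cnt) */
                    if (insn_idx >= p->sec_insn_off &&
                        insn_idx < p->sec_insn_off + p->sec_insn_cnt)
                            return p;
            }
            return NULL;
    }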
@@ -5633,89 +5766,32 @@ out:
return err;
}
+/* Relocate data references within program code:
+ * - map references;
+ * - global variable references;
+ * - extern references.
+ */
static int
-bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
- struct reloc_desc *relo)
-{
- struct bpf_insn *insn, *new_insn;
- struct bpf_program *text;
- size_t new_cnt;
- int err;
-
- if (prog->idx != obj->efile.text_shndx && prog->main_prog_cnt == 0) {
- text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
- if (!text) {
- pr_warn("no .text section found yet relo into text exist\n");
- return -LIBBPF_ERRNO__RELOC;
- }
- new_cnt = prog->insns_cnt + text->insns_cnt;
- new_insn = libbpf_reallocarray(prog->insns, new_cnt, sizeof(*insn));
- if (!new_insn) {
- pr_warn("oom in prog realloc\n");
- return -ENOMEM;
- }
- prog->insns = new_insn;
-
- if (obj->btf_ext) {
- err = bpf_program_reloc_btf_ext(prog, obj,
- text->section_name,
- prog->insns_cnt);
- if (err)
- return err;
- }
-
- memcpy(new_insn + prog->insns_cnt, text->insns,
- text->insns_cnt * sizeof(*insn));
- prog->main_prog_cnt = prog->insns_cnt;
- prog->insns_cnt = new_cnt;
- pr_debug("added %zd insn from %s to prog %s\n",
- text->insns_cnt, text->section_name,
- prog->section_name);
- }
-
- insn = &prog->insns[relo->insn_idx];
- insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
- return 0;
-}
-
-static int
-bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
+bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
- int i, err;
-
- if (!prog)
- return 0;
-
- if (obj->btf_ext) {
- err = bpf_program_reloc_btf_ext(prog, obj,
- prog->section_name, 0);
- if (err)
- return err;
- }
-
- if (!prog->reloc_desc)
- return 0;
+ int i;
for (i = 0; i < prog->nr_reloc; i++) {
struct reloc_desc *relo = &prog->reloc_desc[i];
struct bpf_insn *insn = &prog->insns[relo->insn_idx];
struct extern_desc *ext;
- if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
- pr_warn("relocation out of range: '%s'\n",
- prog->section_name);
- return -LIBBPF_ERRNO__RELOC;
- }
-
switch (relo->type) {
case RELO_LD64:
insn[0].src_reg = BPF_PSEUDO_MAP_FD;
insn[0].imm = obj->maps[relo->map_idx].fd;
+ relo->processed = true;
break;
case RELO_DATA:
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[1].imm = insn[0].imm + relo->sym_off;
insn[0].imm = obj->maps[relo->map_idx].fd;
+ relo->processed = true;
break;
case RELO_EXTERN:
ext = &obj->externs[relo->sym_off];
@@ -5727,11 +5803,10 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
insn[0].imm = (__u32)ext->ksym.addr;
insn[1].imm = ext->ksym.addr >> 32;
}
+ relo->processed = true;
break;
case RELO_CALL:
- err = bpf_program__reloc_text(prog, obj, relo);
- if (err)
- return err;
+			/* will be handled as a follow-up pass */
break;
default:
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
@@ -5740,8 +5815,378 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
}
}
- zfree(&prog->reloc_desc);
- prog->nr_reloc = 0;
+ return 0;
+}
+
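A sketch of what the RELO_DATA case above does to a two-slot ldimm64
instruction pair; struct bpf_insn and BPF_PSEUDO_MAP_VALUE come from the real
linux/bpf.h UAPI header, while the helper name and the fd/offset values are
invented for illustration:

    #include <linux/bpf.h>

    static void patch_data_relo(struct bpf_insn *insn, int map_fd, __u32 sym_off)
    {
            /* ldimm64 occupies two insn slots; mark it as a map-value load */
            insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
            /* upper half carries the variable's offset inside the map value */
            insn[1].imm = insn[0].imm + sym_off;
            /* lower half carries the map fd for the kernel to resolve */
            insn[0].imm = map_fd;
    }

    int main(void)
    {
            struct bpf_insn insn[2] = {};

            patch_data_relo(insn, 5 /* fake map fd */, 16 /* offset in value */);
            return insn[0].imm == 5 && insn[1].imm == 16 ? 0 : 1;
    }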
+static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
+ const struct bpf_program *prog,
+ const struct btf_ext_info *ext_info,
+ void **prog_info, __u32 *prog_rec_cnt,
+ __u32 *prog_rec_sz)
+{
+ void *copy_start = NULL, *copy_end = NULL;
+ void *rec, *rec_end, *new_prog_info;
+ const struct btf_ext_info_sec *sec;
+ size_t old_sz, new_sz;
+ const char *sec_name;
+ int i, off_adj;
+
+ for_each_btf_ext_sec(ext_info, sec) {
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (!sec_name)
+ return -EINVAL;
+ if (strcmp(sec_name, prog->sec_name) != 0)
+ continue;
+
+ for_each_btf_ext_rec(ext_info, sec, i, rec) {
+ __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
+
+ if (insn_off < prog->sec_insn_off)
+ continue;
+ if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
+ break;
+
+ if (!copy_start)
+ copy_start = rec;
+ copy_end = rec + ext_info->rec_size;
+ }
+
+ if (!copy_start)
+ return -ENOENT;
+
+ /* append func/line info of a given (sub-)program to the main
+ * program func/line info
+ */
+ old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
+ new_sz = old_sz + (copy_end - copy_start);
+ new_prog_info = realloc(*prog_info, new_sz);
+ if (!new_prog_info)
+ return -ENOMEM;
+ *prog_info = new_prog_info;
+ *prog_rec_cnt = new_sz / ext_info->rec_size;
+ memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
+
+ /* Kernel instruction offsets are in units of 8-byte
+ * instructions, while .BTF.ext instruction offsets generated
+ * by Clang are in units of bytes. So convert Clang offsets
+		 * into kernel offsets and adjust each offset according to the
+		 * program's relocated position.
+ */
+ off_adj = prog->sub_insn_off - prog->sec_insn_off;
+ rec = new_prog_info + old_sz;
+ rec_end = new_prog_info + new_sz;
+ for (; rec < rec_end; rec += ext_info->rec_size) {
+ __u32 *insn_off = rec;
+
+ *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
+ }
+ *prog_rec_sz = ext_info->rec_size;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
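A cut-down sketch of the append-and-fixup pattern adjust_prog_btf_ext_info()
uses: fixed-size records are appended with realloc()+memcpy(), then the first
__u32 of each appended record (the insn offset) is rewritten in place. All
names and sizes here are illustrative:

    #include <stdlib.h>
    #include <string.h>

    static int append_and_fixup(void **buf, size_t *cnt, size_t rec_sz,
                                const void *src, size_t src_cnt,
                                unsigned int off_adj)
    {
            size_t old_sz = *cnt * rec_sz, add_sz = src_cnt * rec_sz, i;
            char *p = realloc(*buf, old_sz + add_sz);

            if (!p)
                    return -1;
            memcpy(p + old_sz, src, add_sz); /* append new records at the end */
            for (i = 0; i < src_cnt; i++) {
                    /* first field of each record is the insn offset to adjust */
                    unsigned int *insn_off =
                            (unsigned int *)(p + old_sz + i * rec_sz);

                    *insn_off += off_adj;
            }
            *buf = p;
            *cnt += src_cnt;
            return 0;
    }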
+static int
+reloc_prog_func_and_line_info(const struct bpf_object *obj,
+ struct bpf_program *main_prog,
+ const struct bpf_program *prog)
+{
+ int err;
+
+ /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
+	 * support func/line info
+ */
+ if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
+ return 0;
+
+ /* only attempt func info relocation if main program's func_info
+ * relocation was successful
+ */
+ if (main_prog != prog && !main_prog->func_info)
+ goto line_info;
+
+ err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
+ &main_prog->func_info,
+ &main_prog->func_info_cnt,
+ &main_prog->func_info_rec_size);
+ if (err) {
+ if (err != -ENOENT) {
+ pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
+ prog->name, err);
+ return err;
+ }
+ if (main_prog->func_info) {
+ /*
+			 * Some info has already been found, but there was a
+			 * problem with the last btf_ext reloc; error out.
+ */
+ pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
+ return err;
+ }
+		/* We had a problem loading the very first info; ignore the rest. */
+ pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
+ prog->name);
+ }
+
+line_info:
+ /* don't relocate line info if main program's relocation failed */
+ if (main_prog != prog && !main_prog->line_info)
+ return 0;
+
+ err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
+ &main_prog->line_info,
+ &main_prog->line_info_cnt,
+ &main_prog->line_info_rec_size);
+ if (err) {
+ if (err != -ENOENT) {
+ pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
+ prog->name, err);
+ return err;
+ }
+ if (main_prog->line_info) {
+ /*
+			 * Some info has already been found, but there was a
+			 * problem with the last btf_ext reloc; error out.
+ */
+ pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
+ return err;
+ }
+		/* We had a problem loading the very first info; ignore the rest. */
+ pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
+ prog->name);
+ }
+ return 0;
+}
+
+static int cmp_relo_by_insn_idx(const void *key, const void *elem)
+{
+ size_t insn_idx = *(const size_t *)key;
+ const struct reloc_desc *relo = elem;
+
+ if (insn_idx == relo->insn_idx)
+ return 0;
+ return insn_idx < relo->insn_idx ? -1 : 1;
+}
+
+static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
+{
+ return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
+ sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
+}
+
+static int
+bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
+ struct bpf_program *prog)
+{
+ size_t sub_insn_idx, insn_idx, new_cnt;
+ struct bpf_program *subprog;
+ struct bpf_insn *insns, *insn;
+ struct reloc_desc *relo;
+ int err;
+
+ err = reloc_prog_func_and_line_info(obj, main_prog, prog);
+ if (err)
+ return err;
+
+ for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
+ insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
+ if (!insn_is_subprog_call(insn))
+ continue;
+
+ relo = find_prog_insn_relo(prog, insn_idx);
+ if (relo && relo->type != RELO_CALL) {
+ pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
+ prog->name, insn_idx, relo->type);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (relo) {
+			/* sub-program instruction index is a combination of the
+			 * offset of the symbol pointed to by the relocation and
+			 * the call instruction's imm field; for global functions,
+			 * the call always has imm = -1, but for static functions
+			 * the relocation is against STT_SECTION and insn->imm
+			 * points to the start of the static function
+ */
+ sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
+ } else {
+			/* if the subprogram call is to a static function within
+			 * the same ELF section, there won't be any relocation
+			 * emitted, but it also means no additional offset is
+			 * necessary, as insn->imm is relative to the
+			 * instruction's original position within the section
+ */
+ sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
+ }
+
+ /* we enforce that sub-programs should be in .text section */
+ subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
+ if (!subprog) {
+ pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
+ prog->name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+		/* if this is the first call instruction calling into this
+		 * subprogram (meaning this subprog hasn't been processed
+		 * yet) within the context of the current main program:
+		 *   - append it at the end of the main program's instruction block;
+		 *   - process it recursively, while the current program is put on hold;
+		 *   - if that subprogram calls some other not-yet-processed
+		 *   subprogram, the same thing will happen recursively until
+		 *   there are no more unprocessed subprograms left to append
+		 *   and relocate.
+ */
+ if (subprog->sub_insn_off == 0) {
+ subprog->sub_insn_off = main_prog->insns_cnt;
+
+ new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
+ insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
+ return -ENOMEM;
+ }
+ main_prog->insns = insns;
+ main_prog->insns_cnt = new_cnt;
+
+ memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
+ subprog->insns_cnt * sizeof(*insns));
+
+ pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
+ main_prog->name, subprog->insns_cnt, subprog->name);
+
+ err = bpf_object__reloc_code(obj, main_prog, subprog);
+ if (err)
+ return err;
+ }
+
+ /* main_prog->insns memory could have been re-allocated, so
+ * calculate pointer again
+ */
+ insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
+ /* calculate correct instruction position within current main
+ * prog; each main prog can have a different set of
+ * subprograms appended (potentially in different order as
+ * well), so position of any subprog can be different for
+ * different main programs */
+ insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
+
+ if (relo)
+ relo->processed = true;
+
+ pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
+ prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
+ }
+
+ return 0;
+}
+
+/*
+ * Relocate sub-program calls.
+ *
+ * The algorithm operates as follows. Each entry-point BPF program (referred
+ * to as a main prog) is processed separately. Each subprog (a non-entry
+ * function that can be called from either entry progs or other subprogs) gets
+ * its sub_insn_off reset to zero. This serves as an indicator that this
+ * subprogram hasn't yet been appended and relocated within the current main
+ * prog. Once it's relocated, sub_insn_off will point at the position within
+ * the current main prog where the given subprog was appended. This will
+ * further be used to relocate all the call instructions jumping into this
+ * subprog.
+ *
+ * We start with the main program and process all call instructions. If the
+ * call is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
+ * is zero), the subprog's instructions are appended at the end of the main
+ * program's instruction array. Then the main program is "put on hold" while we
+ * recursively process the newly appended subprogram. If that subprogram calls
+ * into another subprogram that hasn't been appended, the new subprogram is
+ * again appended to the *main* prog's instructions (a subprog's own
+ * instructions are always left untouched, as they need to stay unmodified for
+ * subsequent main progs, and subprog instructions are only ever loaded as part
+ * of a main prog) and the process continues recursively. Once all the subprogs
+ * called from a main prog or any of its subprogs are appended (and relocated),
+ * all their positions within the finalized instruction array are known, so
+ * it's easy to rewrite call instructions with correct relative offsets to the
+ * desired target subprog.
+ *
+ * It's important to realize that some subprogs might not be called from a
+ * given main prog or any of its called/used subprogs. Those will keep their
+ * sub_insn_off as zero at all times, won't be appended to the current main
+ * prog, and won't be relocated within its context. They might still be used
+ * by other main progs later.
+ *
+ * Visually this process can be shown as follows. Suppose we have two main
+ * programs, mainA and mainB, and the BPF object contains three subprogs: subA,
+ * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
+ * subC both call subB:
+ *
+ * +--------+ +-------+
+ * | v v |
+ * +--+---+ +--+-+-+ +---+--+
+ * | subA | | subB | | subC |
+ * +--+---+ +------+ +---+--+
+ * ^ ^
+ * | |
+ * +---+-------+ +------+----+
+ * | mainA | | mainB |
+ * +-----------+ +-----------+
+ *
+ * We'll start relocating mainA, find subA, append it, and start processing
+ * subA recursively:
+ *
+ * +-----------+------+
+ * | mainA | subA |
+ * +-----------+------+
+ *
+ * At this point we notice that subB is used from subA, so we append it and
+ * relocate (there are no further subcalls from subB):
+ *
+ * +-----------+------+------+
+ * | mainA | subA | subB |
+ * +-----------+------+------+
+ *
+ * At this point, we relocate subA's calls, then go one level up and finish
+ * with relocating mainA's calls. mainA is done.
+ *
+ * For mainB the process is similar but results in a different order. We start
+ * with mainB and skip subA and subB, as mainB never calls them (at least
+ * directly), but we see that subC is needed, so we append it and start
+ * processing it:
+ *
+ * +-----------+------+
+ * | mainB | subC |
+ * +-----------+------+
+ *
+ * Now we see that subC needs subB, so we append and relocate it as well:
+ *
+ * +-----------+------+------+
+ * | mainB | subC | subB |
+ * +-----------+------+------+
+ *
+ * At this point we unwind recursion, relocate calls in subC, then in mainB.
+ */
+static int
+bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
+{
+ struct bpf_program *subprog;
+ int i, j, err;
+
+ /* mark all subprogs as not relocated (yet) within the context of
+ * current main program
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ subprog = &obj->programs[i];
+ if (!prog_is_subprog(obj, subprog))
+ continue;
+
+ subprog->sub_insn_off = 0;
+ for (j = 0; j < subprog->nr_reloc; j++)
+ if (subprog->reloc_desc[j].type == RELO_CALL)
+ subprog->reloc_desc[j].processed = false;
+ }
+
+ err = bpf_object__reloc_code(obj, prog, prog);
+ if (err)
+ return err;
+
return 0;
}
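A worked standalone example of the call-target arithmetic used in
bpf_object__reloc_code() above; the offsets are invented, the formula mirrors
the insn->imm assignment:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
            size_t sub_insn_off = 50; /* subprog was appended at insn #50 of the main prog */
            size_t prog_sub_off = 0;  /* current prog (the main one) starts at insn #0 */
            size_t insn_idx = 7;      /* the call insn sits at insn #7 */
            int imm;

            /* the kernel interprets call imm as "insns to skip past the call insn" */
            imm = (int)(sub_insn_off - (prog_sub_off + insn_idx) - 1);
            assert(imm == 42); /* jump 42 insns forward to reach the subprog */
            return 0;
    }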
@@ -5760,37 +6205,45 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- /* ensure .text is relocated first, as it's going to be copied as-is
- * later for sub-program calls
+ /* relocate data references first for all programs and sub-programs,
+ * as they don't change relative to code locations, so subsequent
+ * subprogram processing won't need to re-calculate any of them
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (prog->idx != obj->efile.text_shndx)
- continue;
-
- err = bpf_program__relocate(prog, obj);
+ err = bpf_object__relocate_data(obj, prog);
if (err) {
pr_warn("prog '%s': failed to relocate data references: %d\n",
prog->name, err);
return err;
}
- break;
}
- /* now relocate everything but .text, which by now is relocated
- * properly, so we can copy raw sub-program instructions as is safely
+ /* now relocate subprogram calls and append used subprograms to main
+ * programs; each copy of subprogram code needs to be relocated
+ * differently for each main program, because its code location might
+ * have changed
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (prog->idx == obj->efile.text_shndx)
+ /* sub-program's sub-calls are relocated within the context of
+ * its main program only
+ */
+ if (prog_is_subprog(obj, prog))
continue;
- err = bpf_program__relocate(prog, obj);
+ err = bpf_object__relocate_calls(obj, prog);
if (err) {
pr_warn("prog '%s': failed to relocate calls: %d\n",
prog->name, err);
return err;
}
}
+ /* free up relocation descriptors */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ zfree(&prog->reloc_desc);
+ prog->nr_reloc = 0;
+ }
return 0;
}
@@ -5804,8 +6257,8 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
int i, j, nrels, new_sz;
const struct btf_var_secinfo *vi = NULL;
const struct btf_type *sec, *var, *def;
+ struct bpf_map *map = NULL, *targ_map;
const struct btf_member *member;
- struct bpf_map *map, *targ_map;
const char *name, *mname;
Elf_Data *symbols;
unsigned int moff;
@@ -5910,41 +6363,53 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return 0;
}
-static int bpf_object__collect_reloc(struct bpf_object *obj)
+static int cmp_relocs(const void *_a, const void *_b)
{
- int i, err;
+ const struct reloc_desc *a = _a;
+ const struct reloc_desc *b = _b;
- if (!obj_elf_valid(obj)) {
- pr_warn("Internal error: elf object is closed\n");
- return -LIBBPF_ERRNO__INTERNAL;
- }
+ if (a->insn_idx != b->insn_idx)
+ return a->insn_idx < b->insn_idx ? -1 : 1;
+
+ /* no two relocations should have the same insn_idx, but ... */
+ if (a->type != b->type)
+ return a->type < b->type ? -1 : 1;
+
+ return 0;
+}
+
+static int bpf_object__collect_relos(struct bpf_object *obj)
+{
+ int i, err;
for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
Elf_Data *data = obj->efile.reloc_sects[i].data;
int idx = shdr->sh_info;
- struct bpf_program *prog;
if (shdr->sh_type != SHT_REL) {
pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
- if (idx == obj->efile.st_ops_shndx) {
+ if (idx == obj->efile.st_ops_shndx)
err = bpf_object__collect_st_ops_relos(obj, shdr, data);
- } else if (idx == obj->efile.btf_maps_shndx) {
+ else if (idx == obj->efile.btf_maps_shndx)
err = bpf_object__collect_map_relos(obj, shdr, data);
- } else {
- prog = bpf_object__find_prog_by_idx(obj, idx);
- if (!prog) {
- pr_warn("relocation failed: no prog in section(%d)\n", idx);
- return -LIBBPF_ERRNO__RELOC;
- }
- err = bpf_program__collect_reloc(prog, shdr, data, obj);
- }
+ else
+ err = bpf_object__collect_prog_relos(obj, shdr, data);
if (err)
return err;
}
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ struct bpf_program *p = &obj->programs[i];
+
+ if (!p->nr_reloc)
+ continue;
+
+ qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
+ }
return 0;
}
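A standalone illustration of why collect_relos() sorts relocations by
insn_idx: the earlier find_prog_insn_relo() can then use bsearch() over the
sorted array. The struct is a cut-down stand-in for libbpf's reloc_desc:

    #include <assert.h>
    #include <stdlib.h>

    struct demo_relo { size_t insn_idx; int type; };

    static int cmp_relos(const void *_a, const void *_b)
    {
            const struct demo_relo *a = _a, *b = _b;

            if (a->insn_idx != b->insn_idx)
                    return a->insn_idx < b->insn_idx ? -1 : 1;
            return 0;
    }

    static int cmp_key(const void *key, const void *elem)
    {
            size_t insn_idx = *(const size_t *)key;
            const struct demo_relo *relo = elem;

            if (insn_idx == relo->insn_idx)
                    return 0;
            return insn_idx < relo->insn_idx ? -1 : 1;
    }

    int main(void)
    {
            struct demo_relo relos[] = { {9, 1}, {3, 0}, {17, 2} };
            size_t key = 9;
            struct demo_relo *found;

            qsort(relos, 3, sizeof(relos[0]), cmp_relos);
            found = bsearch(&key, relos, 3, sizeof(relos[0]), cmp_key);
            assert(found && found->type == 1);
            return 0;
    }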
@@ -6058,6 +6523,20 @@ retry_load:
if (ret >= 0) {
if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
+
+ if (prog->obj->rodata_map_idx >= 0 &&
+ kernel_supports(FEAT_PROG_BIND_MAP)) {
+ struct bpf_map *rodata_map =
+ &prog->obj->maps[prog->obj->rodata_map_idx];
+
+ if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warn("prog '%s': failed to bind .rodata map: %s\n",
+ prog->name, cp);
+				/* Don't fail hard if we can't bind the rodata map. */
+ }
+ }
+
*pfd = ret;
ret = 0;
goto out;
@@ -6110,8 +6589,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
int err = 0, fd, i, btf_id;
if (prog->obj->loaded) {
- pr_warn("prog '%s'('%s'): can't load after object was loaded\n",
- prog->name, prog->section_name);
+ pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
return -EINVAL;
}
@@ -6127,7 +6605,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
pr_warn("Internal error: can't load program '%s'\n",
- prog->section_name);
+ prog->name);
return -LIBBPF_ERRNO__INTERNAL;
}
@@ -6142,8 +6620,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if (!prog->preprocessor) {
if (prog->instances.nr != 1) {
- pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
- prog->section_name, prog->instances.nr);
+ pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
+ prog->name, prog->instances.nr);
}
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
@@ -6161,13 +6639,13 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
prog->insns_cnt, &result);
if (err) {
pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
- i, prog->section_name);
+ i, prog->name);
goto out;
}
if (!result.new_insn_ptr || !result.new_insn_cnt) {
pr_debug("Skip loading the %dth instance of program '%s'\n",
- i, prog->section_name);
+ i, prog->name);
prog->instances.fds[i] = -1;
if (result.pfd)
*result.pfd = -1;
@@ -6178,7 +6656,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
result.new_insn_cnt, license, kern_ver, &fd);
if (err) {
pr_warn("Loading the %dth instance of program '%s' failed\n",
- i, prog->section_name);
+ i, prog->name);
goto out;
}
@@ -6188,18 +6666,12 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
}
out:
if (err)
- pr_warn("failed to load program '%s'\n", prog->section_name);
+ pr_warn("failed to load program '%s'\n", prog->name);
zfree(&prog->insns);
prog->insns_cnt = 0;
return err;
}
-static bool bpf_program__is_function_storage(const struct bpf_program *prog,
- const struct bpf_object *obj)
-{
- return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
-}
-
static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
@@ -6216,7 +6688,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (bpf_program__is_function_storage(prog, obj))
+ if (prog_is_subprog(obj, prog))
continue;
if (!prog->load) {
pr_debug("prog '%s': skipped loading\n", prog->name);
@@ -6280,14 +6752,13 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
err = err ? : bpf_object__collect_externs(obj);
err = err ? : bpf_object__finalize_btf(obj);
err = err ? : bpf_object__init_maps(obj, opts);
- err = err ? : bpf_object__init_prog_names(obj);
- err = err ? : bpf_object__collect_reloc(obj);
+ err = err ? : bpf_object__collect_relos(obj);
if (err)
goto out;
bpf_object__elf_finish(obj);
bpf_object__for_each_program(prog, obj) {
- prog->sec_def = find_sec_def(prog->section_name);
+ prog->sec_def = find_sec_def(prog->sec_name);
if (!prog->sec_def)
/* couldn't guess, but user might manually specify */
continue;
@@ -6668,7 +7139,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
if (instance < 0 || instance >= prog->instances.nr) {
pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
+ instance, prog->name, prog->instances.nr);
return -EINVAL;
}
@@ -6699,7 +7170,7 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
if (instance < 0 || instance >= prog->instances.nr) {
pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
+ instance, prog->name, prog->instances.nr);
return -EINVAL;
}
@@ -6729,8 +7200,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
}
if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n",
- prog->section_name);
+ pr_warn("no instances of prog %s to pin\n", prog->name);
return -EINVAL;
}
@@ -6792,8 +7262,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
}
if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n",
- prog->section_name);
+ pr_warn("no instances of prog %s to pin\n", prog->name);
return -EINVAL;
}
@@ -7285,7 +7754,7 @@ bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
do {
prog = __bpf_program__iter(prog, obj, true);
- } while (prog && bpf_program__is_function_storage(prog, obj));
+ } while (prog && prog_is_subprog(obj, prog));
return prog;
}
@@ -7297,7 +7766,7 @@ bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
do {
prog = __bpf_program__iter(prog, obj, false);
- } while (prog && bpf_program__is_function_storage(prog, obj));
+ } while (prog && prog_is_subprog(obj, prog));
return prog;
}
@@ -7328,11 +7797,16 @@ const char *bpf_program__name(const struct bpf_program *prog)
return prog->name;
}
+const char *bpf_program__section_name(const struct bpf_program *prog)
+{
+ return prog->sec_name;
+}
+
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
const char *title;
- title = prog->section_name;
+ title = prog->sec_name;
if (needs_copy) {
title = strdup(title);
if (!title) {
@@ -7405,14 +7879,14 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n)
if (n >= prog->instances.nr || n < 0) {
pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
- n, prog->section_name, prog->instances.nr);
+ n, prog->name, prog->instances.nr);
return -EINVAL;
}
fd = prog->instances.fds[n];
if (fd < 0) {
pr_warn("%dth instance of program '%s' is invalid\n",
- n, prog->section_name);
+ n, prog->name);
return -ENOENT;
}
@@ -7772,7 +8246,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
const struct btf *btf;
struct bpf_map *map;
Elf_Data *symbols;
- unsigned int moff;
+ unsigned int moff, insn_idx;
const char *name;
__u32 member_idx;
GElf_Sym sym;
@@ -7817,6 +8291,12 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
map->name, (size_t)rel.r_offset, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
+ if (sym.st_value % BPF_INSN_SZ) {
+ pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
+ map->name, (unsigned long long)sym.st_value);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ insn_idx = sym.st_value / BPF_INSN_SZ;
member = find_member_by_offset(st_ops->type, moff * 8);
if (!member) {
@@ -7833,7 +8313,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
return -EINVAL;
}
- prog = bpf_object__find_prog_by_idx(obj, shdr_idx);
+ prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
if (!prog) {
pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
map->name, shdr_idx, name);
@@ -7843,7 +8323,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
const struct bpf_sec_def *sec_def;
- sec_def = find_sec_def(prog->section_name);
+ sec_def = find_sec_def(prog->sec_name);
if (sec_def &&
sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
/* for pr_warn */
@@ -7866,7 +8346,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
invalid_prog:
pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
- map->name, prog->name, prog->section_name, prog->type,
+ map->name, prog->name, prog->sec_name, prog->type,
prog->attach_btf_id, prog->expected_attach_type, name);
return -EINVAL;
}
@@ -7970,7 +8450,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
{
enum bpf_attach_type attach_type = prog->expected_attach_type;
__u32 attach_prog_fd = prog->attach_prog_fd;
- const char *name = prog->section_name;
+ const char *name = prog->sec_name;
int i, err;
if (!name)
@@ -8497,14 +8977,14 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
int prog_fd, err;
if (pfd < 0) {
- pr_warn("program '%s': invalid perf event FD %d\n",
- bpf_program__title(prog, false), pfd);
+ pr_warn("prog '%s': invalid perf event FD %d\n",
+ prog->name, pfd);
return ERR_PTR(-EINVAL);
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8517,20 +8997,18 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
err = -errno;
free(link);
- pr_warn("program '%s': failed to attach to pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
+ prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
if (err == -EPROTO)
- pr_warn("program '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
- bpf_program__title(prog, false), pfd);
+ pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
+ prog->name, pfd);
return ERR_PTR(err);
}
if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
free(link);
- pr_warn("program '%s': failed to enable pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to enable pfd %d: %s\n",
+ prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
return link;
@@ -8652,9 +9130,8 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
0 /* offset */, -1 /* pid */);
if (pfd < 0) {
- pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
+ pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
@@ -8662,9 +9139,8 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to %s '%s': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
+ pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
@@ -8677,7 +9153,7 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
const char *func_name;
bool retprobe;
- func_name = bpf_program__title(prog, false) + sec->len;
+ func_name = prog->sec_name + sec->len;
retprobe = strcmp(sec->sec, "kretprobe/") == 0;
return bpf_program__attach_kprobe(prog, retprobe, func_name);
@@ -8695,9 +9171,8 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
pfd = perf_event_open_probe(true /* uprobe */, retprobe,
binary_path, func_offset, pid);
if (pfd < 0) {
- pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
+ pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
+ prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
@@ -8706,9 +9181,8 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
+ pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
+ prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
@@ -8776,9 +9250,8 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
pfd = perf_event_open_tracepoint(tp_category, tp_name);
if (pfd < 0) {
- pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
+ pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
+ prog->name, tp_category, tp_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
@@ -8786,9 +9259,8 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
+ pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
+ prog->name, tp_category, tp_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
@@ -8801,7 +9273,7 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
char *sec_name, *tp_cat, *tp_name;
struct bpf_link *link;
- sec_name = strdup(bpf_program__title(prog, false));
+ sec_name = strdup(prog->sec_name);
if (!sec_name)
return ERR_PTR(-ENOMEM);
@@ -8830,8 +9302,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8844,9 +9315,8 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
- bpf_program__title(prog, false), tp_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
+ prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link->fd = pfd;
@@ -8856,7 +9326,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
- const char *tp_name = bpf_program__title(prog, false) + sec->len;
+ const char *tp_name = prog->sec_name + sec->len;
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
@@ -8870,8 +9340,7 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8884,9 +9353,8 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach: %s\n",
- bpf_program__title(prog, false),
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach: %s\n",
+ prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link->fd = pfd;
@@ -8932,8 +9400,7 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8947,8 +9414,8 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
if (link_fd < 0) {
link_fd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to %s: %s\n",
- bpf_program__title(prog, false), target_name,
+ pr_warn("prog '%s': failed to attach to %s: %s\n",
+ prog->name, target_name,
libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
return ERR_PTR(link_fd);
}
@@ -8992,8 +9459,7 @@ bpf_program__attach_iter(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -9007,9 +9473,8 @@ bpf_program__attach_iter(struct bpf_program *prog,
if (link_fd < 0) {
link_fd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to iterator: %s\n",
- bpf_program__title(prog, false),
- libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach to iterator: %s\n",
+ prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
return ERR_PTR(link_fd);
}
link->fd = link_fd;
@@ -9020,7 +9485,7 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
const struct bpf_sec_def *sec_def;
- sec_def = find_sec_def(bpf_program__title(prog, false));
+ sec_def = find_sec_def(prog->sec_name);
if (!sec_def || !sec_def->attach_fn)
return ERR_PTR(-ESRCH);
@@ -10090,12 +10555,11 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
const struct bpf_sec_def *sec_def;
- const char *sec_name = bpf_program__title(prog, false);
if (!prog->load)
continue;
- sec_def = find_sec_def(sec_name);
+ sec_def = find_sec_def(prog->sec_name);
if (!sec_def || !sec_def->attach_fn)
continue;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 308e0ded8f14..a750f67a23f6 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -198,8 +198,9 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
-LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
- bool needs_copy);
+LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
+LIBBPF_API LIBBPF_DEPRECATED("BPF program title is a confusing term; please use bpf_program__section_name() instead")
+const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 3fedcdc4ae2f..5f054dadf082 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -302,6 +302,8 @@ LIBBPF_0.1.0 {
LIBBPF_0.2.0 {
global:
+ bpf_prog_bind_map;
+ bpf_program__section_name;
perf_buffer__buffer_cnt;
perf_buffer__buffer_fd;
perf_buffer__epoll_fd;
diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
index a23ae1ac27eb..947d8bd8a7bb 100644
--- a/tools/lib/bpf/libbpf_common.h
+++ b/tools/lib/bpf/libbpf_common.h
@@ -15,6 +15,8 @@
#define LIBBPF_API __attribute__((visibility("default")))
#endif
+#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))
+
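A hypothetical use of the new macro, showing the GCC/Clang deprecation
attribute it expands to; old_api() is made up for illustration:

    #define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))

    LIBBPF_DEPRECATED("use new_api() instead")
    int old_api(void);

    /* any caller of old_api() now gets a compile-time warning with that message */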
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with uninitialized declaration, followed by memset to zero,
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 49c324594792..30b4ca5d2ac7 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -20,6 +20,7 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e034a8f24f46..90a66891441a 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -619,7 +619,7 @@ static int add_jump_destinations(struct objtool_file *file)
if (!is_static_jump(insn))
continue;
- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
+ if (insn->offset == FAKE_JUMP_OFFSET)
continue;
reloc = find_reloc_by_dest_range(file->elf, insn->sec,
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 71d830d7b923..cecce93ccc63 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -66,11 +66,10 @@ static void fdpair(int fds[2])
/* Block until we're ready to go */
static void ready(int ready_out, int wakefd)
{
- char dummy;
struct pollfd pollfd = { .fd = wakefd, .events = POLLIN };
/* Tell them we're ready. */
- if (write(ready_out, &dummy, 1) != 1)
+ if (write(ready_out, "R", 1) != 1)
err(EXIT_FAILURE, "CLIENT: ready write");
/* Wait for "GO" signal */
@@ -85,6 +84,7 @@ static void *sender(struct sender_context *ctx)
unsigned int i, j;
ready(ctx->ready_out, ctx->wakefd);
+ memset(data, 'S', sizeof(data));
/* Now pump to every receiver. */
for (i = 0; i < nr_loops; i++) {
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/core.json b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
index 7e1aa8273935..653b11b23399 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
@@ -61,7 +61,7 @@
{
"EventName": "ex_ret_brn_ind_misp",
"EventCode": "0xca",
- "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted."
},
{
"EventName": "ex_ret_mmx_fp_instr.sse_instr",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/core.json b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
index de89e5a44ff1..4b75183da94a 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
@@ -125,6 +125,6 @@
{
"EventName": "ex_ret_fus_brnch_inst",
"EventCode": "0x1d0",
- "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8.",
+ "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8."
}
]
diff --git a/tools/perf/tests/attr/README b/tools/perf/tests/attr/README
index 6cd408108595..a36f49fb4dbe 100644
--- a/tools/perf/tests/attr/README
+++ b/tools/perf/tests/attr/README
@@ -49,6 +49,7 @@ Following tests are defined (with perf commands):
perf record --call-graph fp kill (test-record-graph-fp)
perf record --group -e cycles,instructions kill (test-record-group)
perf record -e '{cycles,instructions}' kill (test-record-group1)
+ perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
perf record -D kill (test-record-no-delay)
perf record -i kill (test-record-no-inherit)
perf record -n kill (test-record-no-samples)
diff --git a/tools/perf/tests/attr/test-record-group2 b/tools/perf/tests/attr/test-record-group2
new file mode 100644
index 000000000000..6b9f8d182ce1
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-group2
@@ -0,0 +1,29 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles/period=1234000/,instructions/period=6789000/}:S' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+config=0|1
+sample_period=1234000
+sample_type=87
+read_format=12
+inherit=0
+freq=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+config=0|1
+sample_period=6789000
+sample_type=87
+read_format=12
+disabled=0
+inherit=0
+mmap=0
+comm=0
+freq=0
+enable_on_exec=0
+task=0
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index da8ec1e8e064..cc9fbcedb364 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -45,10 +45,13 @@ volatile long the_var;
#if defined (__x86_64__)
extern void __test_function(volatile long *ptr);
asm (
+ ".pushsection .text;"
".globl __test_function\n"
+ ".type __test_function, @function;"
"__test_function:\n"
"incq (%rdi)\n"
- "ret\n");
+ "ret\n"
+ ".popsection\n");
#else
static void __test_function(volatile long *ptr)
{
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 23db8acc492d..cd7331aac3bd 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -153,8 +153,10 @@ static int __compute_metric(const char *name, struct value *vals,
return -ENOMEM;
cpus = perf_cpu_map__new("0");
- if (!cpus)
+ if (!cpus) {
+ evlist__delete(evlist);
return -ENOMEM;
+ }
perf_evlist__set_maps(&evlist->core, cpus, NULL);
@@ -163,10 +165,11 @@ static int __compute_metric(const char *name, struct value *vals,
false, false,
&metric_events);
if (err)
- return err;
+ goto out;
- if (perf_evlist__alloc_stats(evlist, false))
- return -1;
+ err = perf_evlist__alloc_stats(evlist, false);
+ if (err)
+ goto out;
/* Load the runtime stats with given numbers for events. */
runtime_stat__init(&st);
@@ -178,13 +181,14 @@ static int __compute_metric(const char *name, struct value *vals,
if (name2 && ratio2)
*ratio2 = compute_single(&metric_events, evlist, &st, name2);
+out:
 /* ... cleanup. */
metricgroup__rblist_exit(&metric_events);
runtime_stat__exit(&st);
perf_evlist__free_stats(evlist);
perf_cpu_map__put(cpus);
evlist__delete(evlist);
- return 0;
+ return err;
}
static int compute_metric(const char *name, struct value *vals, double *ratio)
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index eb19f9a0bc15..d3517a74d95e 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -274,6 +274,7 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
int res = 0;
bool use_uncore_table;
struct pmu_events_map *map = __test_pmu_get_events_map();
+ struct perf_pmu_alias *a, *tmp;
if (!map)
return -1;
@@ -347,6 +348,10 @@ static int __test__pmu_event_aliases(char *pmu_name, int *count)
pmu_name, alias->name);
}
+ list_for_each_entry_safe(a, tmp, &aliases, list) {
+ list_del(&a->list);
+ perf_pmu_free_alias(a);
+ }
free(pmu);
return res;
}
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
index 5c11fe2b3040..714e6830a758 100644
--- a/tools/perf/tests/pmu.c
+++ b/tools/perf/tests/pmu.c
@@ -173,6 +173,7 @@ int test__pmu(struct test *test __maybe_unused, int subtest __maybe_unused)
ret = 0;
} while (0);
+ perf_pmu__del_formats(&formats);
test_format_dir_put(format);
return ret;
}
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2feb751516ab..0374adcb223c 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -328,12 +328,6 @@ config_bpf_program(struct bpf_program *prog)
probe_conf.no_inlines = false;
probe_conf.force_add = false;
- config_str = bpf_program__title(prog, false);
- if (IS_ERR(config_str)) {
- pr_debug("bpf: unable to get title for program\n");
- return PTR_ERR(config_str);
- }
-
priv = calloc(sizeof(*priv), 1);
if (!priv) {
pr_debug("bpf: failed to alloc priv\n");
@@ -341,6 +335,7 @@ config_bpf_program(struct bpf_program *prog)
}
pev = &priv->pev;
+ config_str = bpf_program__section_name(prog);
pr_debug("bpf: config program '%s'\n", config_str);
err = parse_prog_config(config_str, &main_str, &is_tp, pev);
if (err)
@@ -454,10 +449,7 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
if (err) {
const char *title;
- title = bpf_program__title(prog, false);
- if (!title)
- title = "[unknown]";
-
+ title = bpf_program__section_name(prog);
pr_debug("Failed to generate prologue for program %s\n",
title);
return err;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e3fa3bf7498a..c0768c61eb43 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -946,6 +946,10 @@ int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
perf_evlist__set_maps(&evlist->core, cpus, threads);
+ /* as evlist now has references, put count here */
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+
return 0;
out_delete_threads:
@@ -1273,11 +1277,12 @@ static int perf_evlist__create_syswide_maps(struct evlist *evlist)
goto out_put;
perf_evlist__set_maps(&evlist->core, cpus, threads);
-out:
- return err;
+
+ perf_thread_map__put(threads);
out_put:
perf_cpu_map__put(cpus);
- goto out;
+out:
+ return err;
}
int evlist__open(struct evlist *evlist)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index fd865002cbbd..459b51e90063 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -976,16 +976,20 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
* We default some events to have a default interval. But keep
* it a weak assumption overridable by the user.
*/
- if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
- opts->user_interval != ULLONG_MAX)) {
+ if (!attr->sample_period) {
if (opts->freq) {
- evsel__set_sample_bit(evsel, PERIOD);
attr->freq = 1;
attr->sample_freq = opts->freq;
} else {
attr->sample_period = opts->default_interval;
}
}
+ /*
+ * If attr->freq was set (here or earlier), ask for period
+ * to be sampled.
+ */
+ if (attr->freq)
+ evsel__set_sample_bit(evsel, PERIOD);
if (opts->no_samples)
attr->sample_freq = 0;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 8831b964288f..ab5030fcfed4 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -85,6 +85,7 @@ static void metric_event_delete(struct rblist *rblist __maybe_unused,
list_for_each_entry_safe(expr, tmp, &me->head, nd) {
free(expr->metric_refs);
+ free(expr->metric_events);
free(expr);
}
@@ -316,6 +317,7 @@ static int metricgroup__setup_events(struct list_head *groups,
if (!metric_refs) {
ret = -ENOMEM;
free(metric_events);
+ free(expr);
break;
}
@@ -530,6 +532,9 @@ void metricgroup__print(bool metrics, bool metricgroups, char *filter,
continue;
strlist__add(me->metrics, s);
}
+
+ if (!raw)
+ free(s);
}
free(omg);
}
@@ -667,7 +672,6 @@ static int __add_metric(struct list_head *metric_list,
m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
INIT_LIST_HEAD(&m->metric_refs);
m->metric_refs_cnt = 0;
- *mp = m;
parent = expr_ids__alloc(ids);
if (!parent) {
@@ -680,6 +684,7 @@ static int __add_metric(struct list_head *metric_list,
free(m);
return -ENOMEM;
}
+ *mp = m;
} else {
/*
* We got here for the referenced metric, via the
@@ -714,8 +719,11 @@ static int __add_metric(struct list_head *metric_list,
* all the metric's IDs and add it to the parent context.
*/
if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
- expr__ctx_clear(&m->pctx);
- free(m);
+ if (m->metric_refs_cnt == 0) {
+ expr__ctx_clear(&m->pctx);
+ free(m);
+ *mp = NULL;
+ }
return -EINVAL;
}
@@ -934,7 +942,7 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
if (ret)
- return ret;
+ goto out;
/*
* Process any possible referenced metrics
@@ -943,12 +951,14 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
ret = resolve_metric(metric_no_group,
&list, map, &ids);
if (ret)
- return ret;
+ goto out;
}
/* End of pmu events. */
- if (!has_match)
- return -EINVAL;
+ if (!has_match) {
+ ret = -EINVAL;
+ goto out;
+ }
list_for_each_entry(m, &list, nd) {
if (events->len > 0)
@@ -963,9 +973,14 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
}
}
+out:
+	/*
+	 * Splice onto metric_list so the entries can be released
+	 * even when this function fails.
+	 */
list_splice(&list, metric_list);
expr_ids__exit(&ids);
- return 0;
+ return ret;
}
static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
@@ -1040,7 +1055,7 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
ret = metricgroup__add_metric_list(str, metric_no_group,
&extra_events, &metric_list, map);
if (ret)
- return ret;
+ goto out;
pr_debug("adding %s\n", extra_events.buf);
bzero(&parse_error, sizeof(parse_error));
ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
@@ -1048,11 +1063,11 @@ static int parse_groups(struct evlist *perf_evlist, const char *str,
parse_events_print_error(&parse_error, extra_events.buf);
goto out;
}
- strbuf_release(&extra_events);
ret = metricgroup__setup_events(&metric_list, metric_no_merge,
perf_evlist, metric_events);
out:
metricgroup__free_metrics(&metric_list);
+ strbuf_release(&extra_events);
return ret;
}
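The metricgroup.c changes converge on a single-exit cleanup pattern: every failure path jumps to one out: label, and the teardown there must be safe to run however far the function progressed, which is also why strbuf_release() moved below the label. A hedged sketch of the shape (the step helpers are illustrative; strbuf and LIST_HEAD are the real tools/perf facilities):

int build_metrics(void)
{
	struct strbuf events;
	LIST_HEAD(metric_list);
	int ret;

	strbuf_init(&events, 0);
	ret = collect_metrics(&metric_list);   /* illustrative step */
	if (ret)
		goto out;                      /* no early return: cleanup still runs */

	ret = setup_events(&events, &metric_list);
out:
	free_metrics(&metric_list);            /* must cope with a partial list */
	strbuf_release(&events);               /* safe even if the buffer never grew */
	return ret;
}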
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c4d2394e2b2d..667cbca1547a 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -411,7 +411,7 @@ static int add_event_tool(struct list_head *list, int *idx,
return -ENOMEM;
evsel->tool_event = tool_event;
if (tool_event == PERF_TOOL_DURATION_TIME)
- evsel->unit = strdup("ns");
+ evsel->unit = "ns";
return 0;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index f1688e1f6ed7..d41caeb35cf6 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -274,7 +274,7 @@ static void perf_pmu_update_alias(struct perf_pmu_alias *old,
}
/* Delete an alias entry. */
-static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
{
zfree(&newalias->name);
zfree(&newalias->desc);
@@ -1354,6 +1354,17 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
set_bit(b, bits);
}
+void perf_pmu__del_formats(struct list_head *formats)
+{
+ struct perf_pmu_format *fmt, *tmp;
+
+ list_for_each_entry_safe(fmt, tmp, formats, list) {
+ list_del(&fmt->list);
+ free(fmt->name);
+ free(fmt);
+ }
+}
+
static int sub_non_neg(int a, int b)
{
if (b > a)
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 44ccbdbb1c37..a64e9c9ce731 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -94,6 +94,7 @@ int perf_pmu__new_format(struct list_head *list, char *name,
int config, unsigned long *bits);
void perf_pmu__set_format(unsigned long *bits, long from, long to);
int perf_pmu__format_parse(char *dir, struct list_head *head);
+void perf_pmu__del_formats(struct list_head *formats);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
@@ -113,6 +114,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
+void perf_pmu_free_alias(struct perf_pmu_alias *alias);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
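perf_pmu__del_formats() above uses the standard safe-teardown idiom: list_for_each_entry_safe() caches the next node in a scratch cursor before the loop body runs, so freeing the current entry cannot corrupt the traversal. The same shape, reduced to a generic item type:

struct item {
	char *name;
	struct list_head list;
};

static void free_items(struct list_head *head)
{
	struct item *it, *tmp;                 /* tmp caches the next node */

	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);           /* unlink first... */
		free(it->name);                /* ...then free the payload */
		free(it);
	}
}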
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index a4cc11592f6b..ea9aa1d7cf50 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -2,6 +2,7 @@
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
+#include "evsel_config.h"
#include "parse-events.h"
#include <errno.h>
#include <limits.h>
@@ -33,11 +34,24 @@ static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evl
return leader;
}
+static u64 evsel__config_term_mask(struct evsel *evsel)
+{
+ struct evsel_config_term *term;
+ struct list_head *config_terms = &evsel->config_terms;
+ u64 term_types = 0;
+
+ list_for_each_entry(term, config_terms, list) {
+ term_types |= 1 << term->type;
+ }
+ return term_types;
+}
+
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
struct perf_event_attr *attr = &evsel->core.attr;
struct evsel *leader = evsel->leader;
struct evsel *read_sampler;
+ u64 term_types, freq_mask;
if (!leader->sample_read)
return;
@@ -47,16 +61,20 @@ static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *ev
if (evsel == read_sampler)
return;
+ term_types = evsel__config_term_mask(evsel);
/*
- * Disable sampling for all group members other than the leader in
- * case the leader 'leads' the sampling, except when the leader is an
- * AUX area event, in which case the 2nd event in the group is the one
- * that 'leads' the sampling.
+ * Disable sampling for all group members except those with explicit
+ * config terms or the leader. In the case of an AUX area event, the 2nd
+ * event in the group is the one that 'leads' the sampling.
*/
- attr->freq = 0;
- attr->sample_freq = 0;
- attr->sample_period = 0;
- attr->write_backward = 0;
+ freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
+ if ((term_types & freq_mask) == 0) {
+ attr->freq = 0;
+ attr->sample_freq = 0;
+ attr->sample_period = 0;
+ }
+ if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
+ attr->write_backward = 0;
/*
* We don't get a sample for slave events, we make them when delivering
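evsel__config_term_mask() folds the event's list of config terms into a bitmask once, so the leader-sampling code can test for explicit freq/period/overwrite terms in O(1). One hedge worth noting: the hunk shifts a plain int (1 << term->type), which assumes every EVSEL__CONFIG_TERM_* value stays below 31; a defensive variant would widen the shift:

static u64 config_term_mask(struct evsel *evsel)
{
	struct evsel_config_term *term;
	u64 mask = 0;

	list_for_each_entry(term, &evsel->config_terms, list)
		mask |= 1ULL << term->type;    /* 1ULL keeps types up to 63 representable */
	return mask;
}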
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index e1ba6c1b916a..924b54d15d54 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -517,7 +517,7 @@ static void print_l1_dcache_misses(struct perf_stat_config *config,
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
}
static void print_l1_icache_misses(struct perf_stat_config *config,
@@ -538,7 +538,7 @@ static void print_l1_icache_misses(struct perf_stat_config *config,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
}
static void print_dtlb_cache_misses(struct perf_stat_config *config,
@@ -558,7 +558,7 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
}
static void print_itlb_cache_misses(struct perf_stat_config *config,
@@ -578,7 +578,7 @@ static void print_itlb_cache_misses(struct perf_stat_config *config,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
}
static void print_ll_cache_misses(struct perf_stat_config *config,
@@ -598,7 +598,7 @@ static void print_ll_cache_misses(struct perf_stat_config *config,
ratio = avg / total * 100.0;
color = get_ratio_color(GRC_CACHE_MISSES, ratio);
- out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
+ out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
}
/*
@@ -853,14 +853,16 @@ static void generic_metric(struct perf_stat_config *config,
double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
{
struct expr_parse_ctx pctx;
- double ratio;
+ double ratio = 0.0;
if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
- return 0.;
+ goto out;
if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
- return 0.;
+ ratio = 0.0;
+out:
+ expr__ctx_clear(&pctx);
return ratio;
}
@@ -918,7 +920,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(config, ctxp, NULL, NULL, "of all L1-dcache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
} else if (
evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_L1I |
@@ -928,7 +930,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
print_l1_icache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(config, ctxp, NULL, NULL, "of all L1-icache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
} else if (
evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
@@ -938,7 +940,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(config, ctxp, NULL, NULL, "of all dTLB cache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
} else if (
evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
@@ -948,7 +950,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(config, ctxp, NULL, NULL, "of all iTLB cache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
} else if (
evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
evsel->core.attr.config == ( PERF_COUNT_HW_CACHE_LL |
@@ -958,7 +960,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
print_ll_cache_misses(config, cpu, evsel, avg, out, st);
else
- print_metric(config, ctxp, NULL, NULL, "of all LL-cache hits", 0);
+ print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
} else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 9a0946ddb705..e8fed558b8b8 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -15,7 +15,6 @@ test_sock
test_sock_addr
test_sock_fields
urandom_read
-test_btf
test_sockmap
test_lirc_mode2_user
get_cgroup_id_user
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 65d3d9aaeb31..59a5fa5fe837 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -33,7 +33,7 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_verifier_log test_dev_cgroup test_tcpbpf_user \
- test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
+ test_sock test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
test_progs-no_alu32 \
@@ -68,7 +68,8 @@ TEST_PROGS := test_kmod.sh \
test_tc_edt.sh \
test_xdping.sh \
test_bpftool_build.sh \
- test_bpftool.sh
+ test_bpftool.sh \
+	test_bpftool_metadata.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
@@ -176,6 +177,11 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
OUTPUT=$(BUILD_DIR)/bpftool/ \
prefix= DESTDIR=$(SCRATCH_DIR)/ install
+ $(Q)mkdir -p $(BUILD_DIR)/bpftool/Documentation
+ $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
+ -C $(BPFTOOLDIR)/Documentation \
+ OUTPUT=$(BUILD_DIR)/bpftool/Documentation/ \
+ prefix= DESTDIR=$(SCRATCH_DIR)/ install
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index daeaeb518894..7290401ec172 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -23,7 +23,13 @@ static inline int bpf_flow_load(struct bpf_object **obj,
if (ret)
return ret;
- main_prog = bpf_object__find_program_by_title(*obj, section_name);
+ main_prog = NULL;
+ bpf_object__for_each_program(prog, *obj) {
+ if (strcmp(section_name, bpf_program__section_name(prog)) == 0) {
+ main_prog = prog;
+ break;
+ }
+ }
if (!main_prog)
return -1;
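This hunk open-codes the lookup that bpf_object__find_program_by_title() used to perform, since that helper (like bpf_program__title()) was on its way out of libbpf. If the pattern recurs, it can be factored into a small helper; find_prog_by_sec() below is our own name, not a libbpf API:

#include <string.h>
#include <bpf/libbpf.h>

static struct bpf_program *find_prog_by_sec(struct bpf_object *obj,
					    const char *sec)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		if (strcmp(bpf_program__section_name(prog), sec) == 0)
			return prog;
	}
	return NULL;
}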
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 7375d9a6d242..fe1a83b9875c 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -132,17 +132,38 @@ static void test_task_stack(void)
bpf_iter_task_stack__destroy(skel);
}
+static void *do_nothing(void *arg)
+{
+ pthread_exit(arg);
+}
+
static void test_task_file(void)
{
struct bpf_iter_task_file *skel;
+ pthread_t thread_id;
+ void *ret;
skel = bpf_iter_task_file__open_and_load();
if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
"skeleton open_and_load failed\n"))
return;
+ skel->bss->tgid = getpid();
+
+ if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
+ "pthread_create", "pthread_create failed\n"))
+ goto done;
+
do_dummy_read(skel->progs.dump_task_file);
+ if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
+ "pthread_join", "pthread_join failed\n"))
+ goto done;
+
+ CHECK(skel->bss->count != 0, "check_count",
+ "invalid non pthread file visit count %d\n", skel->bss->count);
+
+done:
bpf_iter_task_file__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e9f2f12ba06b..e698ee6bb6c2 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -49,6 +49,7 @@ void test_bpf_verif_scale(void)
{ "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },
{ "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+ { "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
/* full unroll by llvm */
{ "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
@@ -86,6 +87,9 @@ void test_bpf_verif_scale(void)
{ "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
{ "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+ /* non-inlined subprogs */
+ { "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+
{ "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
{ "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index c75fc6447186..93162484c2ca 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -24,40 +24,17 @@
#include "bpf_rlimit.h"
#include "bpf_util.h"
-#include "test_btf.h"
+#include "../test_btf.h"
+#include "test_progs.h"
#define MAX_INSNS 512
#define MAX_SUBPROGS 16
-static uint32_t pass_cnt;
-static uint32_t error_cnt;
-static uint32_t skip_cnt;
+static int duration = 0;
+static bool always_log;
-#define CHECK(condition, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \
- fprintf(stderr, format); \
- } \
- __ret; \
-})
-
-static int count_result(int err)
-{
- if (err)
- error_cnt++;
- else
- pass_cnt++;
-
- fprintf(stderr, "\n");
- return err;
-}
-
-static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
- const char *format, va_list args)
-{
- return vfprintf(stderr, format, args);
-}
+#undef CHECK
+#define CHECK(condition, format...) _CHECK(condition, "check", duration, format)
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
@@ -69,21 +46,6 @@ static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
-static struct args {
- unsigned int raw_test_num;
- unsigned int file_test_num;
- unsigned int get_info_test_num;
- unsigned int info_raw_test_num;
- unsigned int dedup_test_num;
- bool raw_test;
- bool file_test;
- bool get_info_test;
- bool pprint_test;
- bool always_log;
- bool info_raw_test;
- bool dedup_test;
-} args;
-
static char btf_log_buf[BTF_LOG_BUF_SIZE];
static struct btf_header hdr_tmpl = {
@@ -3664,7 +3626,7 @@ done:
return raw_btf;
}
-static int do_test_raw(unsigned int test_num)
+static void do_test_raw(unsigned int test_num)
{
struct btf_raw_test *test = &raw_tests[test_num - 1];
struct bpf_create_map_attr create_attr = {};
@@ -3674,15 +3636,16 @@ static int do_test_raw(unsigned int test_num)
void *raw_btf;
int err;
- fprintf(stderr, "BTF raw test[%u] (%s): ", test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
+
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);
-
if (!raw_btf)
- return -1;
+ return;
hdr = raw_btf;
@@ -3694,7 +3657,7 @@ static int do_test_raw(unsigned int test_num)
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
free(raw_btf);
err = ((btf_fd == -1) != test->btf_load_err);
@@ -3725,32 +3688,12 @@ static int do_test_raw(unsigned int test_num)
map_fd, test->map_create_err);
done:
- if (!err)
- fprintf(stderr, "OK");
-
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
-
if (btf_fd != -1)
close(btf_fd);
if (map_fd != -1)
close(map_fd);
-
- return err;
-}
-
-static int test_raw(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.raw_test_num)
- return count_result(do_test_raw(args.raw_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
- err |= count_result(do_test_raw(i));
-
- return err;
}
struct btf_get_info_test {
@@ -3814,11 +3757,6 @@ const struct btf_get_info_test get_info_tests[] = {
},
};
-static inline __u64 ptr_to_u64(const void *ptr)
-{
- return (__u64)(unsigned long)ptr;
-}
-
static int test_big_btf_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
@@ -3851,7 +3789,7 @@ static int test_big_btf_info(unsigned int test_num)
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
err = -1;
goto done;
@@ -3892,7 +3830,7 @@ static int test_big_btf_info(unsigned int test_num)
fprintf(stderr, "OK");
done:
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
@@ -3939,7 +3877,7 @@ static int test_btf_id(unsigned int test_num)
btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4024,7 +3962,7 @@ static int test_btf_id(unsigned int test_num)
fprintf(stderr, "OK");
done:
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
@@ -4039,7 +3977,7 @@ done:
return err;
}
-static int do_test_get_info(unsigned int test_num)
+static void do_test_get_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
unsigned int raw_btf_size, user_btf_size, expected_nbytes;
@@ -4048,11 +3986,14 @@ static int do_test_get_info(unsigned int test_num)
int btf_fd = -1, err, ret;
uint32_t info_len;
- fprintf(stderr, "BTF GET_INFO test[%u] (%s): ",
- test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
- if (test->special_test)
- return test->special_test(test_num);
+ if (test->special_test) {
+ err = test->special_test(test_num);
+ if (CHECK(err, "failed: %d\n", err))
+ return;
+ }
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
@@ -4061,7 +4002,7 @@ static int do_test_get_info(unsigned int test_num)
&raw_btf_size, NULL);
if (!raw_btf)
- return -1;
+ return;
*btf_log_buf = '\0';
@@ -4073,7 +4014,7 @@ static int do_test_get_info(unsigned int test_num)
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4114,7 +4055,7 @@ static int do_test_get_info(unsigned int test_num)
fprintf(stderr, "OK");
done:
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
@@ -4122,22 +4063,6 @@ done:
if (btf_fd != -1)
close(btf_fd);
-
- return err;
-}
-
-static int test_get_info(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.get_info_test_num)
- return count_result(do_test_get_info(args.get_info_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
- err |= count_result(do_test_get_info(i));
-
- return err;
}
struct btf_file_test {
@@ -4151,7 +4076,7 @@ static struct btf_file_test file_tests[] = {
{ .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
};
-static int do_test_file(unsigned int test_num)
+static void do_test_file(unsigned int test_num)
{
const struct btf_file_test *test = &file_tests[test_num - 1];
const char *expected_fnames[] = {"_dummy_tracepoint",
@@ -4169,17 +4094,17 @@ static int do_test_file(unsigned int test_num)
struct bpf_map *map;
int i, err, prog_fd;
- fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num,
- test->file);
+ if (!test__start_subtest(test->file))
+ return;
btf = btf__parse_elf(test->file, &btf_ext);
if (IS_ERR(btf)) {
if (PTR_ERR(btf) == -ENOENT) {
- fprintf(stderr, "SKIP. No ELF %s found", BTF_ELF_SEC);
- skip_cnt++;
- return 0;
+ printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC);
+ test__skip();
+ return;
}
- return PTR_ERR(btf);
+ return;
}
btf__free(btf);
@@ -4188,7 +4113,7 @@ static int do_test_file(unsigned int test_num)
obj = bpf_object__open(test->file);
if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj)))
- return PTR_ERR(obj);
+ return;
prog = bpf_program__next(NULL, obj);
if (CHECK(!prog, "Cannot find bpf_prog")) {
@@ -4310,21 +4235,6 @@ skip:
done:
free(func_info);
bpf_object__close(obj);
- return err;
-}
-
-static int test_file(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.file_test_num)
- return count_result(do_test_file(args.file_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
- err |= count_result(do_test_file(i));
-
- return err;
}
const char *pprint_enum_str[] = {
@@ -4428,7 +4338,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
- .max_entries = 128 * 1024,
+ .max_entries = 128,
},
{
@@ -4493,7 +4403,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
- .max_entries = 128 * 1024,
+ .max_entries = 128,
},
{
@@ -4564,7 +4474,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
- .max_entries = 128 * 1024,
+ .max_entries = 128,
},
#ifdef __SIZEOF_INT128__
@@ -4591,7 +4501,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv_int128),
.key_type_id = 1,
.value_type_id = 4,
- .max_entries = 128 * 1024,
+ .max_entries = 128,
.mapv_kind = PPRINT_MAPV_KIND_INT128,
},
#endif
@@ -4790,7 +4700,7 @@ static int check_line(const char *expected_line, int nexpected_line,
}
-static int do_test_pprint(int test_num)
+static void do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
@@ -4809,18 +4719,20 @@ static int do_test_pprint(int test_num)
uint8_t *raw_btf;
ssize_t nread;
- fprintf(stderr, "%s(#%d)......", test->descr, test_num);
+ if (!test__start_subtest(test->descr))
+ return;
+
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
- return -1;
+ return;
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
free(raw_btf);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
@@ -4971,7 +4883,7 @@ done:
free(mapv);
if (!err)
fprintf(stderr, "OK");
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd != -1)
close(btf_fd);
@@ -4981,14 +4893,11 @@ done:
fclose(pin_file);
unlink(pin_path);
free(line);
-
- return err;
}
-static int test_pprint(void)
+static void test_pprint(void)
{
unsigned int i;
- int err = 0;
/* test various maps with the first test template */
for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {
@@ -4999,7 +4908,7 @@ static int test_pprint(void)
pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map;
pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map;
- err |= count_result(do_test_pprint(0));
+ do_test_pprint(0);
}
/* test rest test templates with the first map */
@@ -5010,10 +4919,8 @@ static int test_pprint(void)
pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map;
pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map;
pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map;
- err |= count_result(do_test_pprint(i));
+ do_test_pprint(i);
}
-
- return err;
}
#define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \
@@ -6178,7 +6085,7 @@ done:
return err;
}
-static int do_test_info_raw(unsigned int test_num)
+static void do_test_info_raw(unsigned int test_num)
{
const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
unsigned int raw_btf_size, linfo_str_off, linfo_size;
@@ -6187,18 +6094,19 @@ static int do_test_info_raw(unsigned int test_num)
const char *ret_next_str;
union bpf_attr attr = {};
- fprintf(stderr, "BTF prog info raw test[%u] (%s): ", test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
+
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, &ret_next_str);
-
if (!raw_btf)
- return -1;
+ return;
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
free(raw_btf);
if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
@@ -6206,7 +6114,7 @@ static int do_test_info_raw(unsigned int test_num)
goto done;
}
- if (*btf_log_buf && args.always_log)
+ if (*btf_log_buf && always_log)
fprintf(stderr, "\n%s", btf_log_buf);
*btf_log_buf = '\0';
@@ -6261,10 +6169,7 @@ static int do_test_info_raw(unsigned int test_num)
goto done;
done:
- if (!err)
- fprintf(stderr, "OK");
-
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd != -1)
@@ -6274,22 +6179,6 @@ done:
if (!IS_ERR(patched_linfo))
free(patched_linfo);
-
- return err;
-}
-
-static int test_info_raw(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.info_raw_test_num)
- return count_result(do_test_info_raw(args.info_raw_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
- err |= count_result(do_test_info_raw(i));
-
- return err;
}
struct btf_raw_data {
@@ -6754,7 +6643,7 @@ static void dump_btf_strings(const char *strs, __u32 len)
}
}
-static int do_test_dedup(unsigned int test_num)
+static void do_test_dedup(unsigned int test_num)
{
const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
@@ -6769,13 +6658,15 @@ static int do_test_dedup(unsigned int test_num)
void *raw_btf;
int err = 0, i;
- fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
test->input.str_sec, test->input.str_sec_size,
&raw_btf_size, &ret_test_next_str);
if (!raw_btf)
- return -1;
+ return;
+
test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
@@ -6789,7 +6680,7 @@ static int do_test_dedup(unsigned int test_num)
test->expect.str_sec_size,
&raw_btf_size, &ret_expect_next_str);
if (!raw_btf)
- return -1;
+ return;
expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
@@ -6894,174 +6785,27 @@ static int do_test_dedup(unsigned int test_num)
}
done:
- if (!err)
- fprintf(stderr, "OK");
if (!IS_ERR(test_btf))
btf__free(test_btf);
if (!IS_ERR(expect_btf))
btf__free(expect_btf);
-
- return err;
}
-static int test_dedup(void)
+void test_btf(void)
{
- unsigned int i;
- int err = 0;
+ int i;
- if (args.dedup_test_num)
- return count_result(do_test_dedup(args.dedup_test_num));
+ always_log = env.verbosity > VERBOSE_NONE;
+ for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
+ do_test_raw(i);
+ for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
+ do_test_get_info(i);
+ for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
+ do_test_file(i);
+ for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
+ do_test_info_raw(i);
for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
- err |= count_result(do_test_dedup(i));
-
- return err;
-}
-
-static void usage(const char *cmd)
-{
- fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
- "\t[-g btf_get_info_test_num (1 - %zu)] |\n"
- "\t[-f btf_file_test_num (1 - %zu)] |\n"
- "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
- "\t[-p (pretty print test)] |\n"
- "\t[-d btf_dedup_test_num (1 - %zu)]]\n",
- cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
- ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
- ARRAY_SIZE(dedup_tests));
-}
-
-static int parse_args(int argc, char **argv)
-{
- const char *optstr = "hlpk:f:r:g:d:";
- int opt;
-
- while ((opt = getopt(argc, argv, optstr)) != -1) {
- switch (opt) {
- case 'l':
- args.always_log = true;
- break;
- case 'f':
- args.file_test_num = atoi(optarg);
- args.file_test = true;
- break;
- case 'r':
- args.raw_test_num = atoi(optarg);
- args.raw_test = true;
- break;
- case 'g':
- args.get_info_test_num = atoi(optarg);
- args.get_info_test = true;
- break;
- case 'p':
- args.pprint_test = true;
- break;
- case 'k':
- args.info_raw_test_num = atoi(optarg);
- args.info_raw_test = true;
- break;
- case 'd':
- args.dedup_test_num = atoi(optarg);
- args.dedup_test = true;
- break;
- case 'h':
- usage(argv[0]);
- exit(0);
- default:
- usage(argv[0]);
- return -1;
- }
- }
-
- if (args.raw_test_num &&
- (args.raw_test_num < 1 ||
- args.raw_test_num > ARRAY_SIZE(raw_tests))) {
- fprintf(stderr, "BTF raw test number must be [1 - %zu]\n",
- ARRAY_SIZE(raw_tests));
- return -1;
- }
-
- if (args.file_test_num &&
- (args.file_test_num < 1 ||
- args.file_test_num > ARRAY_SIZE(file_tests))) {
- fprintf(stderr, "BTF file test number must be [1 - %zu]\n",
- ARRAY_SIZE(file_tests));
- return -1;
- }
-
- if (args.get_info_test_num &&
- (args.get_info_test_num < 1 ||
- args.get_info_test_num > ARRAY_SIZE(get_info_tests))) {
- fprintf(stderr, "BTF get info test number must be [1 - %zu]\n",
- ARRAY_SIZE(get_info_tests));
- return -1;
- }
-
- if (args.info_raw_test_num &&
- (args.info_raw_test_num < 1 ||
- args.info_raw_test_num > ARRAY_SIZE(info_raw_tests))) {
- fprintf(stderr, "BTF prog info raw test number must be [1 - %zu]\n",
- ARRAY_SIZE(info_raw_tests));
- return -1;
- }
-
- if (args.dedup_test_num &&
- (args.dedup_test_num < 1 ||
- args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
- fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
- ARRAY_SIZE(dedup_tests));
- return -1;
- }
-
- return 0;
-}
-
-static void print_summary(void)
-{
- fprintf(stderr, "PASS:%u SKIP:%u FAIL:%u\n",
- pass_cnt - skip_cnt, skip_cnt, error_cnt);
-}
-
-int main(int argc, char **argv)
-{
- int err = 0;
-
- err = parse_args(argc, argv);
- if (err)
- return err;
-
- if (args.always_log)
- libbpf_set_print(__base_pr);
-
- if (args.raw_test)
- err |= test_raw();
-
- if (args.get_info_test)
- err |= test_get_info();
-
- if (args.file_test)
- err |= test_file();
-
- if (args.pprint_test)
- err |= test_pprint();
-
- if (args.info_raw_test)
- err |= test_info_raw();
-
- if (args.dedup_test)
- err |= test_dedup();
-
- if (args.raw_test || args.get_info_test || args.file_test ||
- args.pprint_test || args.info_raw_test || args.dedup_test)
- goto done;
-
- err |= test_raw();
- err |= test_get_info();
- err |= test_file();
- err |= test_info_raw();
- err |= test_dedup();
-
-done:
- print_summary();
- return err;
+ do_test_dedup(i);
+ test_pprint();
}
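The test_btf.c move replaces the standalone binary's private machinery — its own CHECK macro, pass/error/skip counters, getopt parsing and main() — with the test_progs framework: test__start_subtest() handles both subtest selection (-t filters) and result accounting, test__skip() replaces skip_cnt, and failures are recorded through the shared _CHECK macro, which is why the do_test_*() helpers can now return void. The resulting shape, reduced to a sketch (run_case_body(), case_name() and num_cases() are illustrative):

static int duration;                           /* consumed by the CHECK macro */

static void run_case(unsigned int i)
{
	if (!test__start_subtest(case_name(i)))  /* skipped or filtered out */
		return;
	CHECK(run_case_body(i), "case", "case %u failed\n", i);
}

void test_example(void)
{
	unsigned int i;

	for (i = 1; i <= num_cases(); i++)
		run_case(i);
}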
diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
index f259085cca6a..9781d85cb223 100644
--- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
@@ -12,10 +12,13 @@
#include "progs/test_cls_redirect.h"
#include "test_cls_redirect.skel.h"
+#include "test_cls_redirect_subprogs.skel.h"
#define ENCAP_IP INADDR_LOOPBACK
#define ENCAP_PORT (1234)
+static int duration = 0;
+
struct addr_port {
in_port_t port;
union {
@@ -361,30 +364,18 @@ static void close_fds(int *fds, int n)
close(fds[i]);
}
-void test_cls_redirect(void)
+static void test_cls_redirect_common(struct bpf_program *prog)
{
- struct test_cls_redirect *skel = NULL;
struct bpf_prog_test_run_attr tattr = {};
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
struct sockaddr *addr;
socklen_t slen;
int i, j, err;
-
int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
- skel = test_cls_redirect__open();
- if (CHECK_FAIL(!skel))
- return;
-
- skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
- skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
-
- if (CHECK_FAIL(test_cls_redirect__load(skel)))
- goto cleanup;
-
addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);
@@ -402,7 +393,7 @@ void test_cls_redirect(void)
goto cleanup;
}
- tattr.prog_fd = bpf_program__fd(skel->progs.cls_redirect);
+ tattr.prog_fd = bpf_program__fd(prog);
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i];
@@ -450,7 +441,58 @@ void test_cls_redirect(void)
}
cleanup:
- test_cls_redirect__destroy(skel);
close_fds((int *)servers, sizeof(servers) / sizeof(servers[0][0]));
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
}
+
+static void test_cls_redirect_inlined(void)
+{
+ struct test_cls_redirect *skel;
+ int err;
+
+ skel = test_cls_redirect__open();
+ if (CHECK(!skel, "skel_open", "failed\n"))
+ return;
+
+ skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
+ skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
+
+ err = test_cls_redirect__load(skel);
+ if (CHECK(err, "skel_load", "failed: %d\n", err))
+ goto cleanup;
+
+ test_cls_redirect_common(skel->progs.cls_redirect);
+
+cleanup:
+ test_cls_redirect__destroy(skel);
+}
+
+static void test_cls_redirect_subprogs(void)
+{
+ struct test_cls_redirect_subprogs *skel;
+ int err;
+
+ skel = test_cls_redirect_subprogs__open();
+ if (CHECK(!skel, "skel_open", "failed\n"))
+ return;
+
+ skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
+ skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
+
+ err = test_cls_redirect_subprogs__load(skel);
+ if (CHECK(err, "skel_load", "failed: %d\n", err))
+ goto cleanup;
+
+ test_cls_redirect_common(skel->progs.cls_redirect);
+
+cleanup:
+ test_cls_redirect_subprogs__destroy(skel);
+}
+
+void test_cls_redirect(void)
+{
+ if (test__start_subtest("cls_redirect_inlined"))
+ test_cls_redirect_inlined();
+ if (test__start_subtest("cls_redirect_subprogs"))
+ test_cls_redirect_subprogs();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c
index fc12e0d445ff..0a577a248d34 100644
--- a/tools/testing/selftests/bpf/prog_tests/d_path.c
+++ b/tools/testing/selftests/bpf/prog_tests/d_path.c
@@ -120,6 +120,16 @@ void test_d_path(void)
if (err < 0)
goto cleanup;
+ if (CHECK(!bss->called_stat,
+ "stat",
+ "trampoline for security_inode_getattr was not called\n"))
+ goto cleanup;
+
+ if (CHECK(!bss->called_close,
+ "close",
+ "trampoline for filp_close was not called\n"))
+ goto cleanup;
+
for (int i = 0; i < MAX_FILES; i++) {
CHECK(strncmp(src.paths[i], bss->paths_stat[i], MAX_PATH_LEN),
"check",
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index a550dab9ba7a..eda682727787 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -208,11 +208,18 @@ static void test_func_map_prog_compatibility(void)
void test_fexit_bpf2bpf(void)
{
- test_target_no_callees();
- test_target_yes_callees();
- test_func_replace();
- test_func_replace_verify();
- test_func_sockmap_update();
- test_func_replace_return_code();
- test_func_map_prog_compatibility();
+ if (test__start_subtest("target_no_callees"))
+ test_target_no_callees();
+ if (test__start_subtest("target_yes_callees"))
+ test_target_yes_callees();
+ if (test__start_subtest("func_replace"))
+ test_func_replace();
+ if (test__start_subtest("func_replace_verify"))
+ test_func_replace_verify();
+ if (test__start_subtest("func_sockmap_update"))
+ test_func_sockmap_update();
+ if (test__start_subtest("func_replace_return_code"))
+ test_func_replace_return_code();
+ if (test__start_subtest("func_map_prog_compatibility"))
+ test_func_map_prog_compatibility();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
index 3bdaa5a40744..ee46b11f1f9a 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
@@ -12,7 +12,8 @@ void test_global_data_init(void)
size_t sz;
obj = bpf_object__open_file(file, NULL);
- if (CHECK_FAIL(!obj))
+ err = libbpf_get_error(obj);
+ if (CHECK_FAIL(err))
return;
map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c
index e3d6777226a8..b771804b2342 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms.c
@@ -32,6 +32,7 @@ out:
void test_ksyms(void)
{
+ __u64 per_cpu_start_addr = kallsyms_find("__per_cpu_start");
__u64 link_fops_addr = kallsyms_find("bpf_link_fops");
const char *btf_path = "/sys/kernel/btf/vmlinux";
struct test_ksyms *skel;
@@ -63,8 +64,9 @@ void test_ksyms(void)
"got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
CHECK(data->out__btf_size != btf_size, "btf_size",
"got %llu, exp %llu\n", data->out__btf_size, btf_size);
- CHECK(data->out__per_cpu_start != 0, "__per_cpu_start",
- "got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0);
+ CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start",
+ "got %llu, exp %llu\n", data->out__per_cpu_start,
+ per_cpu_start_addr);
cleanup:
test_ksyms__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
index c2d373e294bb..8073105548ff 100644
--- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -80,9 +80,8 @@ out:
void test_l4lb_all(void)
{
- const char *file1 = "./test_l4lb.o";
- const char *file2 = "./test_l4lb_noinline.o";
-
- test_l4lb(file1);
- test_l4lb(file2);
+ if (test__start_subtest("l4lb_inline"))
+ test_l4lb("test_l4lb.o");
+ if (test__start_subtest("l4lb_noinline"))
+ test_l4lb("test_l4lb_noinline.o");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/metadata.c b/tools/testing/selftests/bpf/prog_tests/metadata.c
new file mode 100644
index 000000000000..2c53eade88e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/metadata.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include <network_helpers.h>
+
+#include "metadata_unused.skel.h"
+#include "metadata_used.skel.h"
+
+static int duration;
+
+static int prog_holds_map(int prog_fd, int map_fd)
+{
+ struct bpf_prog_info prog_info = {};
+ struct bpf_prog_info map_info = {};
+ __u32 prog_info_len;
+ __u32 map_info_len;
+ __u32 *map_ids;
+ int nr_maps;
+ int ret;
+ int i;
+
+ map_info_len = sizeof(map_info);
+ ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
+ if (ret)
+ return -errno;
+
+ prog_info_len = sizeof(prog_info);
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ return -errno;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
+ if (!map_ids)
+ return -ENOMEM;
+
+ nr_maps = prog_info.nr_map_ids;
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.nr_map_ids = nr_maps;
+ prog_info.map_ids = ptr_to_u64(map_ids);
+ prog_info_len = sizeof(prog_info);
+
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret) {
+ ret = -errno;
+ goto free_map_ids;
+ }
+
+ ret = -ENOENT;
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ if (map_ids[i] == map_info.id) {
+ ret = 0;
+ break;
+ }
+ }
+
+free_map_ids:
+ free(map_ids);
+ return ret;
+}
+
+static void test_metadata_unused(void)
+{
+ struct metadata_unused *obj;
+ int err;
+
+ obj = metadata_unused__open_and_load();
+ if (CHECK(!obj, "skel-load", "errno %d", errno))
+ return;
+
+ err = prog_holds_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata));
+ if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
+ return;
+
+ /* Assert that we can access the metadata in skel and the values are
+ * what we expect.
+ */
+ if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "foo",
+ sizeof(obj->rodata->bpf_metadata_a)),
+ "bpf_metadata_a", "expected \"foo\", value differ"))
+ goto close_bpf_object;
+ if (CHECK(obj->rodata->bpf_metadata_b != 1, "bpf_metadata_b",
+ "expected 1, got %d", obj->rodata->bpf_metadata_b))
+ goto close_bpf_object;
+
+ /* Assert that binding metadata map to prog again succeeds. */
+ err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata), NULL);
+ CHECK(err, "rebind_map", "errno %d, expected 0", errno);
+
+close_bpf_object:
+ metadata_unused__destroy(obj);
+}
+
+static void test_metadata_used(void)
+{
+ struct metadata_used *obj;
+ int err;
+
+ obj = metadata_used__open_and_load();
+ if (CHECK(!obj, "skel-load", "errno %d", errno))
+ return;
+
+ err = prog_holds_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata));
+ if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
+ return;
+
+ /* Assert that we can access the metadata in skel and the values are
+ * what we expect.
+ */
+ if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "bar",
+ sizeof(obj->rodata->bpf_metadata_a)),
+ "metadata_a", "expected \"bar\", value differ"))
+ goto close_bpf_object;
+ if (CHECK(obj->rodata->bpf_metadata_b != 2, "metadata_b",
+ "expected 2, got %d", obj->rodata->bpf_metadata_b))
+ goto close_bpf_object;
+
+ /* Assert that binding metadata map to prog again succeeds. */
+ err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata), NULL);
+ CHECK(err, "rebind_map", "errno %d, expected 0", errno);
+
+close_bpf_object:
+ metadata_used__destroy(obj);
+}
+
+void test_metadata(void)
+{
+ if (test__start_subtest("unused"))
+ test_metadata_unused();
+
+ if (test__start_subtest("used"))
+ test_metadata_used();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index fc0d7f4f02cf..ac1ee10cffd8 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -27,7 +27,7 @@ void test_reference_tracking(void)
const char *title;
/* Ignore .text sections */
- title = bpf_program__title(prog, false);
+ title = bpf_program__section_name(prog);
if (strstr(title, ".text") != NULL)
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index a49a26f95a8b..3a469099f30d 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -265,7 +265,7 @@ void test_sk_assign(void)
TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
};
- int server = -1;
+ __s64 server = -1;
int server_map;
int self_net;
int i;
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 0b79d78b98db..4b7a527e7e82 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -6,6 +6,9 @@
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
+#include "bpf_iter_sockmap.skel.h"
+
+#include "progs/bpf_iter_sockmap.h"
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
@@ -171,6 +174,88 @@ static void test_sockmap_invalid_update(void)
test_sockmap_invalid_update__destroy(skel);
}
+static void test_sockmap_iter(enum bpf_map_type map_type)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ int err, len, src_fd, iter_fd, duration = 0;
+ union bpf_iter_link_info linfo = {0};
+ __s64 sock_fd[SOCKMAP_MAX_ENTRIES];
+ __u32 i, num_sockets, max_elems;
+ struct bpf_iter_sockmap *skel;
+ struct bpf_link *link;
+ struct bpf_map *src;
+ char buf[64];
+
+ skel = bpf_iter_sockmap__open_and_load();
+ if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n"))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sock_fd); i++)
+ sock_fd[i] = -1;
+
+ /* Make sure we have at least one "empty" entry to test iteration of
+ * an empty slot.
+ */
+ num_sockets = ARRAY_SIZE(sock_fd) - 1;
+
+ if (map_type == BPF_MAP_TYPE_SOCKMAP) {
+ src = skel->maps.sockmap;
+ max_elems = bpf_map__max_entries(src);
+ } else {
+ src = skel->maps.sockhash;
+ max_elems = num_sockets;
+ }
+
+ src_fd = bpf_map__fd(src);
+
+ for (i = 0; i < num_sockets; i++) {
+ sock_fd[i] = connected_socket_v4();
+ if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n"))
+ goto out;
+
+ err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
+ if (CHECK(err, "map_update", "failed: %s\n", strerror(errno)))
+ goto out;
+ }
+
+ linfo.map.map_fd = src_fd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.count_elems, &opts);
+ if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ goto out;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ goto free_link;
+
+	/* drain the iterator so the program visits every element */
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
+ goto close_iter;
+
+ /* test results */
+ if (CHECK(skel->bss->elems != max_elems, "elems", "got %u expected %u\n",
+ skel->bss->elems, max_elems))
+ goto close_iter;
+
+ if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
+ skel->bss->socks, num_sockets))
+ goto close_iter;
+
+close_iter:
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+out:
+ for (i = 0; i < num_sockets; i++) {
+ if (sock_fd[i] >= 0)
+ close(sock_fd[i]);
+ }
+ bpf_iter_sockmap__destroy(skel);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -187,4 +272,8 @@ void test_sockmap_basic(void)
test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
if (test__start_subtest("sockmap update in unsafe context"))
test_sockmap_invalid_update();
+ if (test__start_subtest("sockmap iter"))
+ test_sockmap_iter(BPF_MAP_TYPE_SOCKMAP);
+ if (test__start_subtest("sockhash iter"))
+ test_sockmap_iter(BPF_MAP_TYPE_SOCKHASH);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index 5f54c6aec7f0..b25c9c45c148 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -45,9 +45,9 @@ static int getsetsockopt(void)
goto err;
}
- if (*(int *)big_buf != 0x08) {
+ if (*big_buf != 0x08) {
log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
- *(int *)big_buf);
+ (int)*big_buf);
goto err;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c
new file mode 100644
index 000000000000..a00abf58c037
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+#include <time.h>
+#include "test_subprogs.skel.h"
+
+static int duration;
+
+void test_subprogs(void)
+{
+ struct test_subprogs *skel;
+ int err;
+
+ skel = test_subprogs__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ err = test_subprogs__attach(skel);
+ if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
+ goto cleanup;
+
+ usleep(1);
+
+ CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12);
+ CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17);
+ CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19);
+ CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36);
+
+cleanup:
+ test_subprogs__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index bb8fe646dd9f..ee27d68d2a1c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <network_helpers.h>
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -472,6 +473,329 @@ out:
bpf_object__close(obj);
}
+/* test_tailcall_bpf2bpf_1 checks that tailcalls work correctly in
+ * combination with BPF subprograms
+ */
+static void test_tailcall_bpf2bpf_1(void)
+{
+ int err, map_fd, prog_fd, main_fd, i;
+ struct bpf_map *prog_array;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ /* nop -> jmp */
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ 0, &retval, &duration);
+ CHECK(err || retval != 1, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+
+ /* jmp -> nop, call subprog that will do tailcall */
+ i = 1;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ 0, &retval, &duration);
+ CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+	/* make sure that the subprog can access ctx and that the entry
+	 * prog that called this subprog can properly return
+	 */
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ 0, &retval, &duration);
+ CHECK(err || retval != sizeof(pkt_v4) * 2,
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_2 checks that the tail call limit counter matches
+ * expectations when the tailcall is preceded by a bpf2bpf call.
+ */
+static void test_tailcall_bpf2bpf_2(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char buff[128] = {};
+
+ err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+		goto out;
+
+	data_fd = bpf_map__fd(data_map);
+	if (CHECK_FAIL(data_fd < 0))
+		goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val);
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
+ * 256 bytes) can be used within bpf subprograms that contain tailcalls
+ */
+static void test_tailcall_bpf2bpf_3(void)
+{
+ int err, map_fd, prog_fd, main_fd, i;
+ struct bpf_map *prog_array;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 3,
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 1;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4),
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 2,
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly preserved
+ * across tailcalls combined with bpf2bpf calls. To make sure the tailcall
+ * counter behaves correctly, the bpf program goes through the following flow:
+ *
+ * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
+ * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
+ * subprog2 [here bump global counter] --------^
+ *
+ * We go through the first two tailcalls and start counting from subprog2,
+ * where the loop begins. At the end of the test, the global counter must equal
+ * 31: the tailcall counter includes the first two tailcalls, whereas the
+ * global counter is incremented only in the loop shown above.
+ */
+static void test_tailcall_bpf2bpf_4(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val);
+
+out:
+ bpf_object__close(obj);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -484,4 +808,12 @@ void test_tailcalls(void)
test_tailcall_4();
if (test__start_subtest("tailcall_5"))
test_tailcall_5();
+ if (test__start_subtest("tailcall_bpf2bpf_1"))
+ test_tailcall_bpf2bpf_1();
+ if (test__start_subtest("tailcall_bpf2bpf_2"))
+ test_tailcall_bpf2bpf_2();
+ if (test__start_subtest("tailcall_bpf2bpf_3"))
+ test_tailcall_bpf2bpf_3();
+ if (test__start_subtest("tailcall_bpf2bpf_4"))
+ test_tailcall_bpf2bpf_4();
}
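
The four subtests above share one harness pattern; the following is a condensed sketch of it (error handling elided, every name taken from the tests above), not an additional test:

static void tailcall_pattern_sketch(void)
{
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	int err, main_fd, map_fd, prog_fd, i = 0;

	/* load the object and locate the entry program */
	err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (err)
		return;
	prog = bpf_object__find_program_by_title(obj, "classifier");
	main_fd = bpf_program__fd(prog);

	/* plug classifier/0 into the prog array used by bpf_tail_call() */
	map_fd = bpf_map__fd(bpf_object__find_map_by_name(obj, "jmp_table"));
	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	prog_fd = bpf_program__fd(prog);
	bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);

	/* run the entry program once and assert on retval */
	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	bpf_object__close(obj);
}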
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
index f284f72158ef..a1f06424cf83 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
+#include "test_xdp_noinline.skel.h"
void test_xdp_noinline(void)
{
- const char *file = "./test_xdp_noinline.o";
unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct test_xdp_noinline *skel;
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
@@ -25,58 +26,42 @@ void test_xdp_noinline(void)
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
__u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
+ int err, i;
__u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
char buf[128];
u32 *magic = (u32 *)buf;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (CHECK_FAIL(err))
+ skel = test_xdp_noinline__open_and_load();
+ if (CHECK(!skel, "skel_open_and_load", "failed\n"))
return;
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
+ bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
+ bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
+ bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v4),
+ NUM_ITER, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
CHECK(err || retval != 1 || size != 54 ||
*magic != MAGIC_VAL, "ipv4",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v6),
+ NUM_ITER, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
CHECK(err || retval != 1 || size != 74 ||
*magic != MAGIC_VAL, "ipv6",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
for (i = 0; i < nr_cpus; i++) {
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
- if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 ||
- pkts != NUM_ITER * 2)) {
- printf("test_xdp_noinline:FAIL:stats %lld %lld\n",
- bytes, pkts);
- }
-out:
- bpf_object__close(obj);
+ CHECK(bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2,
+ "stats", "bytes %lld pkts %lld\n",
+ (unsigned long long)bytes, (unsigned long long)pkts);
+ test_xdp_noinline__destroy(skel);
}
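
The conversion above is the canonical skeleton migration: string-based lookups are replaced by accessors generated into test_xdp_noinline.skel.h. A minimal sketch of the lifecycle, assuming only the generated names visible in this diff:

static void skel_lifecycle_sketch(void)
{
	struct test_xdp_noinline *skel;
	int vip_fd, prog_fd;

	/* open + load in one step; NULL signals failure */
	skel = test_xdp_noinline__open_and_load();
	if (!skel)
		return;

	/* maps and programs are typed struct members, no name lookups */
	vip_fd = bpf_map__fd(skel->maps.vip_map);
	prog_fd = bpf_program__fd(skel->progs.balancer_ingress_v4);

	/* ... drive the test via bpf_prog_test_run(prog_fd, ...) ... */

	test_xdp_noinline__destroy(skel);
}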
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
index c196280df90d..df682af75510 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter.h
@@ -13,6 +13,7 @@
#define udp6_sock udp6_sock___not_used
#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
+#define bpf_iter__sockmap bpf_iter__sockmap___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
@@ -26,6 +27,7 @@
#undef udp6_sock
#undef bpf_iter__bpf_map_elem
#undef bpf_iter__bpf_sk_storage_map
+#undef bpf_iter__sockmap
struct bpf_iter_meta {
struct seq_file *seq;
@@ -96,3 +98,10 @@ struct bpf_iter__bpf_sk_storage_map {
struct sock *sk;
void *value;
};
+
+struct bpf_iter__sockmap {
+ struct bpf_iter_meta *meta;
+ struct bpf_map *map;
+ void *key;
+ struct sock *sk;
+};
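
The define/undef pairs above follow the rename-guard idiom this header uses for every iterator context: vmlinux.h may already carry an older, incompatible definition of the struct, so it is aliased away before the include and re-declared locally afterwards. The idiom in isolation:

/* hide whatever vmlinux.h defines under an unused alias ... */
#define bpf_iter__sockmap bpf_iter__sockmap___not_used
#include "vmlinux.h"
#undef bpf_iter__sockmap

/* ... then declare the layout the selftests actually rely on */
struct bpf_iter__sockmap {
	struct bpf_iter_meta *meta;
	struct bpf_map *map;
	void *key;
	struct sock *sk;
};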
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 07ddbfdbcab7..6dfce3fd68bc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -47,7 +47,10 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
__u32 seq_num = ctx->meta->seq_num;
struct bpf_map *map = ctx->map;
struct key_t *key = ctx->key;
+ struct key_t tmp_key;
__u64 *val = ctx->value;
+ __u64 tmp_val = 0;
+ int ret;
if (in_test_mode) {
/* test mode is used by selftests to
@@ -61,6 +64,18 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
if (key == (void *)0 || val == (void *)0)
return 0;
+ /* Update the value and then delete the <key, value> pair. This
+ * should not impact the existing 'val', which is still
+ * accessible under RCU.
+ */
+ __builtin_memcpy(&tmp_key, key, sizeof(struct key_t));
+ ret = bpf_map_update_elem(&hashmap1, &tmp_key, &tmp_val, 0);
+ if (ret)
+ return 0;
+ ret = bpf_map_delete_elem(&hashmap1, &tmp_key);
+ if (ret)
+ return 0;
+
key_sum_a += key->a;
key_sum_b += key->b;
key_sum_c += key->c;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
new file mode 100644
index 000000000000..0e27f73dd803
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Cloudflare */
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
+#include "bpf_iter_sockmap.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, SOCKMAP_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, __u64);
+} sockmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, SOCKMAP_MAX_ENTRIES);
+ __type(key, __u32);
+ __type(value, __u64);
+} sockhash SEC(".maps");
+
+__u32 elems = 0;
+__u32 socks = 0;
+
+SEC("iter/sockmap")
+int count_elems(struct bpf_iter__sockmap *ctx)
+{
+ struct sock *sk = ctx->sk;
+ __u32 tmp, *key = ctx->key;
+ int ret;
+
+ if (key)
+ elems++;
+
+ if (sk)
+ socks++;
+
+ return 0;
+}
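
count_elems() above only bumps two globals; the user-space side that drives it lives in this series' prog_tests and is not part of this hunk. A hedged sketch of that flow, assuming the contemporary libbpf iterator API (bpf_program__attach_iter(), bpf_iter_create()) and with error handling simplified:

static void drain_sockmap_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_link *link;
	char buf[64];
	int iter_fd;

	/* pin the iterator to one specific sockmap */
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!link)
		return;

	/* each read() walks map entries; count_elems() bumps elems/socks */
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	while (read(iter_fd, buf, sizeof(buf)) > 0)
		;

	close(iter_fd);
	bpf_link__destroy(link);
}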
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.h b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.h
new file mode 100644
index 000000000000..35a675d13c0f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define SOCKMAP_MAX_ENTRIES (64)
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index 8b787baa2654..b2f7c7c5f952 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -6,6 +6,9 @@
char _license[] SEC("license") = "GPL";
+int count = 0;
+int tgid = 0;
+
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
@@ -17,8 +20,13 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
if (task == (void *)0 || file == (void *)0)
return 0;
- if (ctx->meta->seq_num == 0)
+ if (ctx->meta->seq_num == 0) {
+ count = 0;
BPF_SEQ_PRINTF(seq, " tgid gid fd file\n");
+ }
+
+ if (tgid == task->tgid && task->tgid != task->pid)
+ count++;
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
(long)file->f_op);
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index 982a2d8aa844..c325405751e2 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -82,6 +82,14 @@ static inline int check_default(struct bpf_map *indirect,
return 1;
}
+static __noinline int
+check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
+{
+ VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
+ MAX_ENTRIES));
+ return 1;
+}
+
typedef struct {
int counter;
} atomic_t;
@@ -107,7 +115,7 @@ static inline int check_hash(void)
struct bpf_map *map = (struct bpf_map *)&m_hash;
int i;
- VERIFY(check_default(&hash->map, map));
+ VERIFY(check_default_noinline(&hash->map, map));
VERIFY(hash->n_buckets == MAX_ENTRIES);
VERIFY(hash->elem_size == 64);
diff --git a/tools/testing/selftests/bpf/progs/metadata_unused.c b/tools/testing/selftests/bpf/progs/metadata_unused.c
new file mode 100644
index 000000000000..672a0d19f8d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/metadata_unused.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+volatile const char bpf_metadata_a[] SEC(".rodata") = "foo";
+volatile const int bpf_metadata_b SEC(".rodata") = 1;
+
+SEC("cgroup_skb/egress")
+int prog(struct xdp_md *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/metadata_used.c b/tools/testing/selftests/bpf/progs/metadata_used.c
new file mode 100644
index 000000000000..b7198e65383d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/metadata_used.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+volatile const char bpf_metadata_a[] SEC(".rodata") = "bar";
+volatile const int bpf_metadata_b SEC(".rodata") = 2;
+
+SEC("cgroup_skb/egress")
+int prog(struct xdp_md *ctx)
+{
+ return bpf_metadata_b ? 1 : 0;
+}
+
+char _license[] SEC("license") = "GPL";
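
metadata_unused.c and metadata_used.c encode the convention under test: a .rodata variable named with a bpf_metadata_ prefix is surfaced by bpftool as program metadata with the prefix stripped (the shell test later in this patch greps for a = "foo" and b = 1); the two objects differ only in whether the program actually reads one of the variables. Declaring such a field looks like this (name and value here are illustrative):

/* shown by bpftool as:  build = "2020-09" */
volatile const char bpf_metadata_build[] SEC(".rodata") = "2020-09";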
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index cc615b82b56e..2fb7adafb6b6 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -67,7 +67,12 @@ typedef struct {
void* co_name; // PyCodeObject.co_name
} FrameData;
-static __always_inline void *get_thread_state(void *tls_base, PidData *pidData)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void *get_thread_state(void *tls_base, PidData *pidData)
{
void* thread_state;
int key;
@@ -155,7 +160,9 @@ struct {
} stackmap SEC(".maps");
#ifdef GLOBAL_FUNC
-__attribute__((noinline))
+__noinline
+#elif defined(SUBPROGS)
+static __noinline
#else
static __always_inline
#endif
diff --git a/tools/testing/selftests/bpf/progs/pyperf_subprogs.c b/tools/testing/selftests/bpf/progs/pyperf_subprogs.c
new file mode 100644
index 000000000000..60e27a7f0cca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf_subprogs.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define STACK_MAX_LEN 50
+#define SUBPROGS
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index ad61b722a9de..7de534f38c3f 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -266,8 +266,12 @@ struct tls_index {
uint64_t offset;
};
-static __always_inline void *calc_location(struct strobe_value_loc *loc,
- void *tls_base)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void *calc_location(struct strobe_value_loc *loc, void *tls_base)
{
/*
* tls_mode value is:
@@ -327,10 +331,15 @@ static __always_inline void *calc_location(struct strobe_value_loc *loc,
: NULL;
}
-static __always_inline void read_int_var(struct strobemeta_cfg *cfg,
- size_t idx, void *tls_base,
- struct strobe_value_generic *value,
- struct strobemeta_payload *data)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void read_int_var(struct strobemeta_cfg *cfg,
+ size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data)
{
void *location = calc_location(&cfg->int_locs[idx], tls_base);
if (!location)
@@ -440,8 +449,13 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
* read_strobe_meta returns NULL if no metadata was read; otherwise returns a
* pointer to *right after* the payload ends
*/
-static __always_inline void *read_strobe_meta(struct task_struct *task,
- struct strobemeta_payload *data)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void *read_strobe_meta(struct task_struct *task,
+ struct strobemeta_payload *data)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct strobe_value_generic value = {0};
diff --git a/tools/testing/selftests/bpf/progs/strobemeta_subprogs.c b/tools/testing/selftests/bpf/progs/strobemeta_subprogs.c
new file mode 100644
index 000000000000..b6c01f8fc559
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta_subprogs.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+// Copyright (c) 2019 Facebook
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 13
+#define STROBE_MAX_MAP_ENTRIES 20
+#define NO_UNROLL
+#define SUBPROGS
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
new file mode 100644
index 000000000000..b5d9c8e778ae
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+#define TAIL_FUNC(x) \
+ SEC("classifier/" #x) \
+ int bpf_func_##x(struct __sk_buff *skb) \
+ { \
+ return x; \
+ }
+TAIL_FUNC(0)
+TAIL_FUNC(1)
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 0);
+
+ return skb->len * 2;
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 1);
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
new file mode 100644
index 000000000000..a004ab28ce28
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ if (load_byte(skb, 0))
+ bpf_tail_call(skb, &jmp_table, 1);
+ else
+ bpf_tail_call(skb, &jmp_table, 0);
+ return 1;
+}
+
+static volatile int count;
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ count++;
+ return subprog_tail(skb);
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 0);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
new file mode 100644
index 000000000000..96dbef2b6b7c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+__noinline
+int subprog_tail2(struct __sk_buff *skb)
+{
+ volatile char arr[64] = {};
+
+ if (load_word(skb, 0) || load_half(skb, 0))
+ bpf_tail_call(skb, &jmp_table, 10);
+ else
+ bpf_tail_call(skb, &jmp_table, 1);
+
+ return skb->len;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ volatile char arr[64] = {};
+
+ bpf_tail_call(skb, &jmp_table, 0);
+
+ return skb->len * 2;
+}
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ volatile char arr[128] = {};
+
+ return subprog_tail2(skb);
+}
+
+SEC("classifier/1")
+int bpf_func_1(struct __sk_buff *skb)
+{
+ volatile char arr[128] = {};
+
+ return skb->len * 3;
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ volatile char arr[128] = {};
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
new file mode 100644
index 000000000000..98b40a95bc67
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static volatile int count;
+
+__noinline
+int subprog_tail_2(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 2);
+ return skb->len * 3;
+}
+
+__noinline
+int subprog_tail_1(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 1);
+ return skb->len * 2;
+}
+
+__noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call(skb, &jmp_table, 0);
+ return skb->len;
+}
+
+SEC("classifier/1")
+int bpf_func_1(struct __sk_buff *skb)
+{
+ return subprog_tail_2(skb);
+}
+
+SEC("classifier/2")
+int bpf_func_2(struct __sk_buff *skb)
+{
+ count++;
+ return subprog_tail_2(skb);
+}
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ return subprog_tail_1(skb);
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index f0b72e86bee5..c9f8464996ea 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -22,6 +22,12 @@
#include "test_cls_redirect.h"
+#ifdef SUBPROGS
+#define INLINING __noinline
+#else
+#define INLINING __always_inline
+#endif
+
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
@@ -125,7 +131,7 @@ typedef struct buf {
uint8_t *const tail;
} buf_t;
-static size_t buf_off(const buf_t *buf)
+static __always_inline size_t buf_off(const buf_t *buf)
{
/* Clang seems to optimize constructs like
* a - b + c
@@ -145,7 +151,7 @@ static size_t buf_off(const buf_t *buf)
return off;
}
-static bool buf_copy(buf_t *buf, void *dst, size_t len)
+static __always_inline bool buf_copy(buf_t *buf, void *dst, size_t len)
{
if (bpf_skb_load_bytes(buf->skb, buf_off(buf), dst, len)) {
return false;
@@ -155,7 +161,7 @@ static bool buf_copy(buf_t *buf, void *dst, size_t len)
return true;
}
-static bool buf_skip(buf_t *buf, const size_t len)
+static __always_inline bool buf_skip(buf_t *buf, const size_t len)
{
/* Check whether off + len is valid in the non-linear part. */
if (buf_off(buf) + len > buf->skb->len) {
@@ -173,7 +179,7 @@ static bool buf_skip(buf_t *buf, const size_t len)
* If scratch is not NULL, the function will attempt to load non-linear
* data via bpf_skb_load_bytes. On success, scratch is returned.
*/
-static void *buf_assign(buf_t *buf, const size_t len, void *scratch)
+static __always_inline void *buf_assign(buf_t *buf, const size_t len, void *scratch)
{
if (buf->head + len > buf->tail) {
if (scratch == NULL) {
@@ -188,7 +194,7 @@ static void *buf_assign(buf_t *buf, const size_t len, void *scratch)
return ptr;
}
-static bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
+static INLINING bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
{
if (ipv4->ihl <= 5) {
return true;
@@ -197,13 +203,13 @@ static bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
return buf_skip(buf, (ipv4->ihl - 5) * 4);
}
-static bool ipv4_is_fragment(const struct iphdr *ip)
+static INLINING bool ipv4_is_fragment(const struct iphdr *ip)
{
uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK);
return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0;
}
-static struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
+static __always_inline struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
{
struct iphdr *ipv4 = buf_assign(pkt, sizeof(*ipv4), scratch);
if (ipv4 == NULL) {
@@ -222,7 +228,7 @@ static struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
}
/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */
-static bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
+static INLINING bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
{
if (!buf_copy(pkt, ports, sizeof(*ports))) {
return false;
@@ -237,7 +243,7 @@ static bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
return true;
}
-static uint16_t pkt_checksum_fold(uint32_t csum)
+static INLINING uint16_t pkt_checksum_fold(uint32_t csum)
{
/* The highest reasonable value for an IPv4 header
* checksum requires two folds, so we just do that always.
@@ -247,7 +253,7 @@ static uint16_t pkt_checksum_fold(uint32_t csum)
return (uint16_t)~csum;
}
-static void pkt_ipv4_checksum(struct iphdr *iph)
+static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
{
iph->check = 0;
@@ -268,10 +274,11 @@ static void pkt_ipv4_checksum(struct iphdr *iph)
iph->check = pkt_checksum_fold(acc);
}
-static bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
- const struct ipv6hdr *ipv6,
- uint8_t *upper_proto,
- bool *is_fragment)
+static INLINING
+bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
+ const struct ipv6hdr *ipv6,
+ uint8_t *upper_proto,
+ bool *is_fragment)
{
/* We understand five extension headers.
* https://tools.ietf.org/html/rfc8200#section-4.1 states that all
@@ -336,7 +343,7 @@ static bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
* scratch is allocated on the stack. However, this usage should be safe since
* it's the caller's stack after all.
*/
-static inline __attribute__((__always_inline__)) struct ipv6hdr *
+static __always_inline struct ipv6hdr *
pkt_parse_ipv6(buf_t *pkt, struct ipv6hdr *scratch, uint8_t *proto,
bool *is_fragment)
{
@@ -354,20 +361,20 @@ pkt_parse_ipv6(buf_t *pkt, struct ipv6hdr *scratch, uint8_t *proto,
/* Global metrics, per CPU
*/
-struct bpf_map_def metrics_map SEC("maps") = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(unsigned int),
- .value_size = sizeof(metrics_t),
- .max_entries = 1,
-};
-
-static metrics_t *get_global_metrics(void)
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, unsigned int);
+ __type(value, metrics_t);
+} metrics_map SEC(".maps");
+
+static INLINING metrics_t *get_global_metrics(void)
{
uint64_t key = 0;
return bpf_map_lookup_elem(&metrics_map, &key);
}
-static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
+static INLINING ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
{
const int payload_off =
sizeof(*encap) +
@@ -388,8 +395,8 @@ static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
}
-static ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
- struct in_addr *next_hop, metrics_t *metrics)
+static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
+ struct in_addr *next_hop, metrics_t *metrics)
{
metrics->forwarded_packets_total_gre++;
@@ -509,8 +516,8 @@ static ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
return bpf_redirect(skb->ifindex, 0);
}
-static ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
- struct in_addr *next_hop, metrics_t *metrics)
+static INLINING ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
+ struct in_addr *next_hop, metrics_t *metrics)
{
/* swap L2 addresses */
/* This assumes that packets are received from a router.
@@ -546,7 +553,7 @@ static ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
return bpf_redirect(skb->ifindex, 0);
}
-static ret_t skip_next_hops(buf_t *pkt, int n)
+static INLINING ret_t skip_next_hops(buf_t *pkt, int n)
{
switch (n) {
case 1:
@@ -566,8 +573,8 @@ static ret_t skip_next_hops(buf_t *pkt, int n)
* pkt is positioned just after the variable length GLB header
* iff the call is successful.
*/
-static ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
- struct in_addr *next_hop)
+static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
+ struct in_addr *next_hop)
{
if (encap->unigue.next_hop > encap->unigue.hop_count) {
return TC_ACT_SHOT;
@@ -601,8 +608,8 @@ static ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
-static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
- uint64_t iphlen, uint16_t sport, uint16_t dport)
+static INLINING uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
+ uint64_t iphlen, uint16_t sport, uint16_t dport)
{
switch (iphlen) {
case sizeof(struct iphdr): {
@@ -630,9 +637,9 @@ static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
}
}
-static verdict_t classify_tcp(struct __sk_buff *skb,
- struct bpf_sock_tuple *tuple, uint64_t tuplen,
- void *iph, struct tcphdr *tcp)
+static INLINING verdict_t classify_tcp(struct __sk_buff *skb,
+ struct bpf_sock_tuple *tuple, uint64_t tuplen,
+ void *iph, struct tcphdr *tcp)
{
struct bpf_sock *sk =
bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
@@ -663,8 +670,8 @@ static verdict_t classify_tcp(struct __sk_buff *skb,
return UNKNOWN;
}
-static verdict_t classify_udp(struct __sk_buff *skb,
- struct bpf_sock_tuple *tuple, uint64_t tuplen)
+static INLINING verdict_t classify_udp(struct __sk_buff *skb,
+ struct bpf_sock_tuple *tuple, uint64_t tuplen)
{
struct bpf_sock *sk =
bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
@@ -681,9 +688,9 @@ static verdict_t classify_udp(struct __sk_buff *skb,
return UNKNOWN;
}
-static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
- struct bpf_sock_tuple *tuple, uint64_t tuplen,
- metrics_t *metrics)
+static INLINING verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
+ struct bpf_sock_tuple *tuple, uint64_t tuplen,
+ metrics_t *metrics)
{
switch (proto) {
case IPPROTO_TCP:
@@ -698,7 +705,7 @@ static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
}
}
-static verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
{
struct icmphdr icmp;
if (!buf_copy(pkt, &icmp, sizeof(icmp))) {
@@ -745,7 +752,7 @@ static verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
sizeof(tuple.ipv4), metrics);
}
-static verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
{
struct icmp6hdr icmp6;
if (!buf_copy(pkt, &icmp6, sizeof(icmp6))) {
@@ -797,8 +804,8 @@ static verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
metrics);
}
-static verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
- metrics_t *metrics)
+static INLINING verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
+ metrics_t *metrics)
{
metrics->l4_protocol_packets_total_tcp++;
@@ -819,8 +826,8 @@ static verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
return classify_tcp(pkt->skb, &tuple, tuplen, iph, tcp);
}
-static verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
- metrics_t *metrics)
+static INLINING verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
+ metrics_t *metrics)
{
metrics->l4_protocol_packets_total_udp++;
@@ -837,7 +844,7 @@ static verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
return classify_udp(pkt->skb, &tuple, tuplen);
}
-static verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
{
metrics->l3_protocol_packets_total_ipv4++;
@@ -874,7 +881,7 @@ static verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
}
}
-static verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
{
metrics->l3_protocol_packets_total_ipv6++;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c
new file mode 100644
index 000000000000..eed26b70e3a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c
@@ -0,0 +1,2 @@
+#define SUBPROGS
+#include "test_cls_redirect.c"
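
These two lines are the whole trick: test_cls_redirect.c picks its inlining attribute from the SUBPROGS macro (see the INLINING definition earlier in this diff), so compiling the same body twice yields one fully-flattened object and one that exercises real bpf2bpf subprograms. The generic shape, with hypothetical file names:

/* shared_body.c -- hypothetical shared source */
#ifdef SUBPROGS
#define INLINING __noinline       /* real bpf2bpf subprogram calls */
#else
#define INLINING __always_inline  /* everything flattened into the caller */
#endif

static INLINING int helper(int x)
{
	return x + 1;
}

/* shared_body_subprogs.c -- the entire second object */
#define SUBPROGS
#include "shared_body.c"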
diff --git a/tools/testing/selftests/bpf/progs/test_d_path.c b/tools/testing/selftests/bpf/progs/test_d_path.c
index 61f007855649..84e1f883f97b 100644
--- a/tools/testing/selftests/bpf/progs/test_d_path.c
+++ b/tools/testing/selftests/bpf/progs/test_d_path.c
@@ -15,7 +15,10 @@ char paths_close[MAX_FILES][MAX_PATH_LEN] = {};
int rets_stat[MAX_FILES] = {};
int rets_close[MAX_FILES] = {};
-SEC("fentry/vfs_getattr")
+int called_stat = 0;
+int called_close = 0;
+
+SEC("fentry/security_inode_getattr")
int BPF_PROG(prog_stat, struct path *path, struct kstat *stat,
__u32 request_mask, unsigned int query_flags)
{
@@ -23,6 +26,8 @@ int BPF_PROG(prog_stat, struct path *path, struct kstat *stat,
__u32 cnt = cnt_stat;
int ret;
+ called_stat = 1;
+
if (pid != my_pid)
return 0;
@@ -42,6 +47,8 @@ int BPF_PROG(prog_close, struct file *file, void *id)
__u32 cnt = cnt_close;
int ret;
+ called_close = 1;
+
if (pid != my_pid)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index 28351936a438..b9e2753f4f91 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -17,9 +17,7 @@
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
-static __u32 rol32(__u32 word, unsigned int shift)
+static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
@@ -52,7 +50,7 @@ static __u32 rol32(__u32 word, unsigned int shift)
typedef unsigned int u32;
-static u32 jhash(const void *key, u32 length, u32 initval)
+static __noinline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
@@ -88,7 +86,7 @@ static u32 jhash(const void *key, u32 length, u32 initval)
return c;
}
-static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
@@ -97,7 +95,7 @@ static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
return c;
}
-static u32 jhash_2words(u32 a, u32 b, u32 initval)
+static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
@@ -200,8 +198,7 @@ struct {
__type(value, struct ctl_value);
} ctl_array SEC(".maps");
-static __u32 get_packet_hash(struct packet_description *pckt,
- bool ipv6)
+static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6)
{
if (ipv6)
return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
@@ -210,10 +207,10 @@ static __u32 get_packet_hash(struct packet_description *pckt,
return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
}
-static bool get_packet_dst(struct real_definition **real,
- struct packet_description *pckt,
- struct vip_meta *vip_info,
- bool is_ipv6)
+static __noinline bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6)
{
__u32 hash = get_packet_hash(pckt, is_ipv6);
__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
@@ -233,8 +230,8 @@ static bool get_packet_dst(struct real_definition **real,
return true;
}
-static int parse_icmpv6(void *data, void *data_end, __u64 off,
- struct packet_description *pckt)
+static __noinline int parse_icmpv6(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
@@ -255,8 +252,8 @@ static int parse_icmpv6(void *data, void *data_end, __u64 off,
return TC_ACT_UNSPEC;
}
-static int parse_icmp(void *data, void *data_end, __u64 off,
- struct packet_description *pckt)
+static __noinline int parse_icmp(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
@@ -280,8 +277,8 @@ static int parse_icmp(void *data, void *data_end, __u64 off,
return TC_ACT_UNSPEC;
}
-static bool parse_udp(void *data, __u64 off, void *data_end,
- struct packet_description *pckt)
+static __noinline bool parse_udp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
{
struct udphdr *udp;
udp = data + off;
@@ -299,8 +296,8 @@ static bool parse_udp(void *data, __u64 off, void *data_end,
return true;
}
-static bool parse_tcp(void *data, __u64 off, void *data_end,
- struct packet_description *pckt)
+static __noinline bool parse_tcp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
{
struct tcphdr *tcp;
@@ -321,8 +318,8 @@ static bool parse_tcp(void *data, __u64 off, void *data_end,
return true;
}
-static int process_packet(void *data, __u64 off, void *data_end,
- bool is_ipv6, struct __sk_buff *skb)
+static __noinline int process_packet(void *data, __u64 off, void *data_end,
+ bool is_ipv6, struct __sk_buff *skb)
{
void *pkt_start = (void *)(long)skb->data;
struct packet_description pckt = {};
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
new file mode 100644
index 000000000000..d3c5673c0218
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -0,0 +1,103 @@
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+const char LICENSE[] SEC("license") = "GPL";
+
+__noinline int sub1(int x)
+{
+ return x + 1;
+}
+
+static __noinline int sub5(int v);
+
+__noinline int sub2(int y)
+{
+ return sub5(y + 2);
+}
+
+static __noinline int sub3(int z)
+{
+ return z + 3 + sub1(4);
+}
+
+static __noinline int sub4(int w)
+{
+ return w + sub3(5) + sub1(6);
+}
+
+/* sub5() is an identity function, just to test weirder function layouts and
+ * call patterns
+ */
+static __noinline int sub5(int v)
+{
+ return sub1(v) - 1; /* compensates sub1()'s + 1 */
+}
+
+/* unfortunately the verifier rejects `struct task_struct *t` as an unknown
+ * pointer type, so we need to accept the pointer as an integer and then
+ * cast it inside the function
+ */
+__noinline int get_task_tgid(uintptr_t t)
+{
+ /* this ensures that CO-RE relocs work in multi-subprogs .text */
+ return BPF_CORE_READ((struct task_struct *)(void *)t, tgid);
+}
+
+int res1 = 0;
+int res2 = 0;
+int res3 = 0;
+int res4 = 0;
+
+SEC("raw_tp/sys_enter")
+int prog1(void *ctx)
+{
+ /* perform some CO-RE relocations to ensure they work with multi-prog
+ * sections correctly
+ */
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res1 = sub1(1) + sub3(2); /* (1 + 1) + (2 + 3 + (4 + 1)) = 12 */
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int prog2(void *ctx)
+{
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res2 = sub2(3) + sub3(4); /* (3 + 2) + (4 + 3 + (4 + 1)) = 17 */
+ return 0;
+}
+
+/* prog3 has the same section name as prog1 */
+SEC("raw_tp/sys_enter")
+int prog3(void *ctx)
+{
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res3 = sub3(5) + 6; /* (5 + 3 + (4 + 1)) + 6 = 19 */
+ return 0;
+}
+
+/* prog4 has the same section name as prog2 */
+SEC("raw_tp/sys_exit")
+int prog4(void *ctx)
+{
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res4 = sub4(7) + sub1(8); /* (7 + (5 + 3 + (4 + 1)) + (6 + 1)) + (8 + 1) = 36 */
+ return 0;
+}
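
The expected values in the res* comments above can be checked by hand; expanding the deepest one, res4, from the definitions earlier in the file:

/* sub1(x) = x + 1
 * sub3(z) = z + 3 + sub1(4)        =>  sub3(5) = 5 + 3 + 5  = 13
 * sub4(w) = w + sub3(5) + sub1(6)  =>  sub4(7) = 7 + 13 + 7 = 27
 * res4    = sub4(7) + sub1(8)      =   27 + 9  = 36
 */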
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 458b0d69133e..553a282d816a 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -18,11 +18,11 @@
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
unsigned char i;
- char name[64];
+ char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index b2e6f9b0894d..2b64bc563a12 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -18,11 +18,11 @@
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
{
- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
unsigned char i;
- char name[64];
+ char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index 50525235380e..5489823c83fc 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -19,11 +19,11 @@
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
+const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
- char tcp_mem_name[] = "net/ipv4/tcp_mem";
unsigned char i;
- char name[64];
+ char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
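
All three sysctl programs above receive the same two-part change: the sysctl name moves from a function-local array, which has to be materialized on the stack at every invocation, to a single global const in .rodata, and the scratch buffer is sized from the string instead of a hard-coded 64. The shape of the change, condensed:

/* before: rebuilt on the stack at every invocation */
static __always_inline int is_tcp_mem_before(struct bpf_sysctl *ctx)
{
	volatile char tcp_mem_name[] = "net/ipv4/tcp_mem";
	char name[64];

	/* ... bpf_sysctl_get_name() and comparison loop ... */
	return 0;
}

/* after: one read-only copy, exactly-sized scratch buffer */
const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem_after(struct bpf_sysctl *ctx)
{
	char name[sizeof(tcp_mem_name)];

	/* ... */
	return 0;
}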
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 8beecec166d9..3a67921f62b5 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -16,7 +16,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-static __u32 rol32(__u32 word, unsigned int shift)
+static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
@@ -49,7 +49,7 @@ static __u32 rol32(__u32 word, unsigned int shift)
typedef unsigned int u32;
-static __attribute__ ((noinline))
+static __noinline
u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
@@ -86,7 +86,7 @@ u32 jhash(const void *key, u32 length, u32 initval)
return c;
}
-__attribute__ ((noinline))
+__noinline
u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
@@ -96,7 +96,7 @@ u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
return c;
}
-__attribute__ ((noinline))
+__noinline
u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
@@ -213,7 +213,7 @@ struct eth_hdr {
unsigned short eth_proto;
};
-static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
+static __noinline __u64 calc_offset(bool is_ipv6, bool is_icmp)
{
__u64 off = sizeof(struct eth_hdr);
if (is_ipv6) {
@@ -797,8 +797,8 @@ out:
return XDP_DROP;
}
-__attribute__ ((section("xdp-test"), used))
-int balancer_ingress(struct xdp_md *ctx)
+SEC("xdp-test-v4")
+int balancer_ingress_v4(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
@@ -812,11 +812,27 @@ int balancer_ingress(struct xdp_md *ctx)
eth_proto = bpf_ntohs(eth->eth_proto);
if (eth_proto == ETH_P_IP)
return process_packet(data, nh_off, data_end, 0, ctx);
- else if (eth_proto == ETH_P_IPV6)
+ else
+ return XDP_DROP;
+}
+
+SEC("xdp-test-v6")
+int balancer_ingress_v6(struct xdp_md *ctx)
+{
+ void *data = (void *)(long)ctx->data;
+ void *data_end = (void *)(long)ctx->data_end;
+ struct eth_hdr *eth = data;
+ __u32 eth_proto;
+ __u32 nh_off;
+
+ nh_off = sizeof(struct eth_hdr);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+ eth_proto = bpf_ntohs(eth->eth_proto);
+ if (eth_proto == ETH_P_IPV6)
return process_packet(data, nh_off, data_end, 1, ctx);
else
return XDP_DROP;
}
-char _license[] __attribute__ ((section("license"), used)) = "GPL";
-int _version __attribute__ ((section("version"), used)) = 1;
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index ac349a5cea7e..2db3c60e1e61 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -85,6 +85,23 @@ make_with_tmpdir() {
echo
}
+make_doc_and_clean() {
+ echo -e "\$PWD: $PWD"
+ echo -e "command: make -s $* doc >/dev/null"
+ RST2MAN_OPTS="--exit-status=1" make $J -s $* doc
+ if [ $? -ne 0 ] ; then
+ ERROR=1
+ printf "FAILURE: Errors or warnings when building documentation\n"
+ fi
+ (
+ if [ $# -ge 1 ] ; then
+ cd ${@: -1}
+ fi
+ make -s doc-clean
+ )
+ echo
+}
+
echo "Trying to build bpftool"
echo -e "... through kbuild\n"
@@ -145,3 +162,7 @@ make_and_clean
make_with_tmpdir OUTPUT
make_with_tmpdir O
+
+echo -e "Checking documentation build\n"
+# From tools/bpf/bpftool
+make_doc_and_clean
diff --git a/tools/testing/selftests/bpf/test_bpftool_metadata.sh b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
new file mode 100755
index 000000000000..1bf81b49457a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
@@ -0,0 +1,82 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+TESTNAME=bpftool_metadata
+BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
+BPF_DIR=$BPF_FS/test_$TESTNAME
+
+_cleanup()
+{
+ set +e
+ rm -rf $BPF_DIR 2> /dev/null
+}
+
+cleanup_skip()
+{
+ echo "selftests: $TESTNAME [SKIP]"
+ _cleanup
+
+ exit $ksft_skip
+}
+
+cleanup()
+{
+ if [ "$?" = 0 ]; then
+ echo "selftests: $TESTNAME [PASS]"
+ else
+ echo "selftests: $TESTNAME [FAILED]"
+ fi
+ _cleanup
+}
+
+if [ $(id -u) -ne 0 ]; then
+ echo "selftests: $TESTNAME [SKIP] Need root privileges"
+ exit $ksft_skip
+fi
+
+if [ -z "$BPF_FS" ]; then
+ echo "selftests: $TESTNAME [SKIP] Could not run test without bpffs mounted"
+ exit $ksft_skip
+fi
+
+if ! bpftool version > /dev/null 2>&1; then
+ echo "selftests: $TESTNAME [SKIP] Could not run test without bpftool"
+ exit $ksft_skip
+fi
+
+set -e
+
+trap cleanup_skip EXIT
+
+mkdir $BPF_DIR
+
+trap cleanup EXIT
+
+bpftool prog load metadata_unused.o $BPF_DIR/unused
+
+METADATA_PLAIN="$(bpftool prog)"
+echo "$METADATA_PLAIN" | grep 'a = "foo"' > /dev/null
+echo "$METADATA_PLAIN" | grep 'b = 1' > /dev/null
+
+bpftool prog --json | grep '"metadata":{"a":"foo","b":1}' > /dev/null
+
+bpftool map | grep 'metadata.rodata' > /dev/null
+
+rm $BPF_DIR/unused
+
+bpftool prog load metadata_used.o $BPF_DIR/used
+
+METADATA_PLAIN="$(bpftool prog)"
+echo "$METADATA_PLAIN" | grep 'a = "bar"' > /dev/null
+echo "$METADATA_PLAIN" | grep 'b = 2' > /dev/null
+
+bpftool prog --json | grep '"metadata":{"a":"bar","b":2}' > /dev/null
+
+bpftool map | grep 'metadata.rodata' > /dev/null
+
+rm $BPF_DIR/used
+
+exit 0
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index 154a8fd2a48d..ca7ca87e91aa 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -151,7 +151,7 @@ static int run_test(int cgfd)
}
bpf_object__for_each_program(prog, pobj) {
- prog_name = bpf_program__title(prog, /*needs_copy*/ false);
+ prog_name = bpf_program__section_name(prog);
if (libbpf_attach_type_by_name(prog_name, &attach_type))
goto err;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 94258c6b5235..c4f5d909e58a 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -647,13 +647,14 @@
.result = REJECT,
},
{
- "calls: ld_abs with changing ctx data in callee",
+ "calls: subprog call with ld_abs in main prog",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
@@ -666,8 +667,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
- .result = REJECT,
+ .result = ACCEPT,
},
{
"calls: two calls with bad fallthrough",
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c
index b52209db8250..637f9293bda8 100644
--- a/tools/testing/selftests/bpf/verifier/map_ptr.c
+++ b/tools/testing/selftests/bpf/verifier/map_ptr.c
@@ -60,3 +60,35 @@
.result = ACCEPT,
.retval = 1,
},
+{
+ "bpf_map_ptr: r = 0, map_ptr = map_ptr + r",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 4 },
+ .result = ACCEPT,
+},
+{
+ "bpf_map_ptr: r = 0, r = r + map_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 4 },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh
new file mode 100755
index 000000000000..dd6ad6501c9c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+NSIM_ID=$((RANDOM % 1024))
+NSIM_DEV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_ID
+NSIM_DEV_DFS=/sys/kernel/debug/netdevsim/netdevsim$NSIM_ID
+NSIM_NETDEV=
+num_passes=0
+num_errors=0
+
+function cleanup_nsim {
+ if [ -e $NSIM_DEV_SYS ]; then
+ echo $NSIM_ID > /sys/bus/netdevsim/del_device
+ fi
+}
+
+function cleanup {
+ cleanup_nsim
+}
+
+trap cleanup EXIT
+
+function get_netdev_name {
+ local -n old=$1
+
+ new=$(ls /sys/class/net)
+
+ for netdev in $new; do
+ for check in $old; do
+ [ $netdev == $check ] && break
+ done
+
+ if [ $netdev != $check ]; then
+ echo $netdev
+ break
+ fi
+ done
+}
+
+function check {
+ local code=$1
+ local str=$2
+ local exp_str=$3
+
+ if [ $code -ne 0 ]; then
+ ((num_errors++))
+ return
+ fi
+
+ if [ "$str" != "$exp_str" ]; then
+ echo -e "Expected: '$exp_str', got '$str'"
+ ((num_errors++))
+ return
+ fi
+
+ ((num_passes++))
+}
+
+# Bail if ethtool is too old
+if ! ethtool -h | grep include-stat > /dev/null 2>&1; then
+ echo "SKIP: No --include-statistics support in ethtool"
+ exit 4
+fi
+
+# Make a netdevsim
+old_netdevs=$(ls /sys/class/net)
+
+modprobe netdevsim
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+
+NSIM_NETDEV=`get_netdev_name old_netdevs`
+
+set -o pipefail
+
+echo n > $NSIM_DEV_DFS/ethtool/pause/report_stats_tx
+echo n > $NSIM_DEV_DFS/ethtool/pause/report_stats_rx
+
+s=$(ethtool --json -a $NSIM_NETDEV | jq '.[].statistics')
+check $? "$s" "null"
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics')
+check $? "$s" "{}"
+
+echo y > $NSIM_DEV_DFS/ethtool/pause/report_stats_tx
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics | length')
+check $? "$s" "1"
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics.tx_pause_frames')
+check $? "$s" "2"
+
+echo y > $NSIM_DEV_DFS/ethtool/pause/report_stats_rx
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics | length')
+check $? "$s" "2"
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics.rx_pause_frames')
+check $? "$s" "1"
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics.tx_pause_frames')
+check $? "$s" "2"
+
+if [ $num_errors -eq 0 ]; then
+ echo "PASSED all $((num_passes)) checks"
+ exit 0
+else
+ echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
+ exit 1
+fi
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index b74884d52913..eb693a3b7b4a 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -411,9 +411,16 @@ ipv6_fdb_grp_fcnal()
run_cmd "$IP -6 ro add 2001:db8:101::1/128 nhid 103"
log_test $? 2 "Route add with fdb nexthop group"
+ run_cmd "$IP nexthop del id 61"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 0 "Fdb entry after deleting a single nexthop"
+
run_cmd "$IP nexthop del id 102"
log_test $? 0 "Fdb nexthop delete"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 254 "Fdb entry after deleting a nexthop group"
+
$IP link del dev vx10
}
@@ -484,9 +491,16 @@ ipv4_fdb_grp_fcnal()
run_cmd "$IP ro add 172.16.0.0/22 nhid 103"
log_test $? 2 "Route add with fdb nexthop group"
+ run_cmd "$IP nexthop del id 12"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 0 "Fdb entry after deleting a single nexthop"
+
run_cmd "$IP nexthop del id 102"
log_test $? 0 "Fdb nexthop delete"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 254 "Fdb entry after deleting a nexthop group"
+
$IP link del dev vx10
}
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index aa254aefc2c3..00bb158b4a5d 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -5,7 +5,8 @@ KSFT_KHDR_INSTALL := 1
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh
+TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+ simult_flows.sh
TEST_GEN_FILES = mptcp_connect pm_nl_ctl
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index a54966531a64..77bb62feb872 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -54,6 +54,7 @@ static int pf = AF_INET;
static int cfg_sndbuf;
static int cfg_rcvbuf;
static bool cfg_join;
+static bool cfg_remove;
static int cfg_wait;
static void die_usage(void)
@@ -271,6 +272,9 @@ static size_t do_rnd_write(const int fd, char *buf, const size_t len)
if (cfg_join && first && do_w > 100)
do_w = 100;
+ if (cfg_remove && do_w > 50)
+ do_w = 50;
+
bw = write(fd, buf, do_w);
if (bw < 0)
perror("write");
@@ -281,6 +285,9 @@ static size_t do_rnd_write(const int fd, char *buf, const size_t len)
first = false;
}
+ if (cfg_remove)
+ usleep(200000);
+
return bw;
}
@@ -428,7 +435,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
}
/* leave some time for late join/announce */
- if (cfg_join)
+ if (cfg_join || cfg_remove)
usleep(cfg_wait);
close(peerfd);
@@ -686,7 +693,7 @@ static void maybe_close(int fd)
{
unsigned int r = rand();
- if (!cfg_join && (r & 1))
+ if (!(cfg_join || cfg_remove) && (r & 1))
close(fd);
}
@@ -822,13 +829,18 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:w:")) != -1) {
+ while ((c = getopt(argc, argv, "6jrlp:s:hut:m:S:R:w:")) != -1) {
switch (c) {
case 'j':
cfg_join = true;
cfg_mode = CFG_MODE_POLL;
cfg_wait = 400000;
break;
+ case 'r':
+ cfg_remove = true;
+ cfg_mode = CFG_MODE_POLL;
+ cfg_wait = 400000;
+ break;
case 'l':
listen_mode = true;
break;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index e4df9ba64824..2cfd87d94db8 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -443,9 +443,9 @@ do_transfer()
duration=$(printf "(duration %05sms)" $duration)
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2
- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
cat "$capout"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index f39c1129ce5f..08f53d86dedc 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -8,6 +8,7 @@ cin=""
cout=""
ksft_skip=4
timeout=30
+mptcp_connect=""
capture=0
TEST_COUNT=0
@@ -132,6 +133,8 @@ do_transfer()
cl_proto="$3"
srv_proto="$4"
connect_addr="$5"
+ rm_nr_ns1="$6"
+ rm_nr_ns2="$7"
port=$((10000+$TEST_COUNT))
TEST_COUNT=$((TEST_COUNT+1))
@@ -156,14 +159,44 @@ do_transfer()
sleep 1
fi
- ip netns exec ${listener_ns} ./mptcp_connect -j -t $timeout -l -p $port -s ${srv_proto} 0.0.0.0 < "$sin" > "$sout" &
+ if [[ $rm_nr_ns1 -eq 0 && $rm_nr_ns2 -eq 0 ]]; then
+ mptcp_connect="./mptcp_connect -j"
+ else
+ mptcp_connect="./mptcp_connect -r"
+ fi
+
+ ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port -s ${srv_proto} 0.0.0.0 < "$sin" > "$sout" &
spid=$!
sleep 1
- ip netns exec ${connector_ns} ./mptcp_connect -j -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
cpid=$!
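+	# while the transfer is running, remove the configured endpoints
+	# (one per second) from each namespace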
+ if [ $rm_nr_ns1 -gt 0 ]; then
+ counter=1
+ sleep 1
+
+ while [ $counter -le $rm_nr_ns1 ]
+ do
+ ip netns exec ${listener_ns} ./pm_nl_ctl del $counter
+ sleep 1
+ let counter+=1
+ done
+ fi
+
+ if [ $rm_nr_ns2 -gt 0 ]; then
+ counter=1
+ sleep 1
+
+ while [ $counter -le $rm_nr_ns2 ]
+ do
+ ip netns exec ${connector_ns} ./pm_nl_ctl del $counter
+ sleep 1
+ let counter+=1
+ done
+ fi
+
wait $cpid
retc=$?
wait $spid
@@ -176,9 +209,9 @@ do_transfer()
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
echo " client exit code $retc, server $rets" 1>&2
- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
cat "$capout"
@@ -219,7 +252,24 @@ run_tests()
connect_addr="$3"
lret=0
- do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} 0 0
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ return
+ fi
+}
+
+run_remove_tests()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ connect_addr="$3"
+ rm_nr_ns1="$4"
+ rm_nr_ns2="$5"
+ lret=0
+
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} ${rm_nr_ns1} ${rm_nr_ns2}
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
@@ -276,6 +326,80 @@ chk_join_nr()
fi
}
+chk_add_nr()
+{
+ local add_nr=$1
+ local echo_nr=$2
+ local count
+ local dump_stats
+
+ printf "%-39s %s" " " "add"
+ count=`ip netns exec $ns2 nstat -as | grep MPTcpExtAddAddr | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$add_nr" ]; then
+ echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - echo "
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtEchoAdd | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$echo_nr" ]; then
+ echo "[fail] got $count ADD_ADDR echo[s] expected $echo_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ if [ "${dump_stats}" = 1 ]; then
+ echo Server ns stats
+ ip netns exec $ns1 nstat -as | grep MPTcp
+ echo Client ns stats
+ ip netns exec $ns2 nstat -as | grep MPTcp
+ fi
+}
+
+chk_rm_nr()
+{
+ local rm_addr_nr=$1
+ local rm_subflow_nr=$2
+ local count
+ local dump_stats
+
+ printf "%-39s %s" " " "rm "
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$rm_addr_nr" ]; then
+ echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - sf "
+ count=`ip netns exec $ns2 nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$rm_subflow_nr" ]; then
+ echo "[fail] got $count RM_SUBFLOW[s] expected $rm_subflow_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ if [ "${dump_stats}" = 1 ]; then
+ echo Server ns stats
+ ip netns exec $ns1 nstat -as | grep MPTcp
+ echo Client ns stats
+ ip netns exec $ns2 nstat -as | grep MPTcp
+ fi
+}
+
sin=$(mktemp)
sout=$(mktemp)
cin=$(mktemp)
@@ -332,6 +456,7 @@ reset
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "unused signal address" 0 0 0
+chk_add_nr 1 1
# accept and use add_addr
reset
@@ -340,6 +465,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "signal address" 1 1 1
+chk_add_nr 1 1
# accept and use add_addr with an additional subflow
# note: signal address in server ns and local addresses in client ns must
@@ -352,6 +478,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 2
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "subflow and signal" 2 2 2
+chk_add_nr 1 1
# accept and use add_addr with additional subflows
reset
@@ -362,6 +489,59 @@ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "multiple subflows and signal" 3 3 3
+chk_add_nr 1 1
+
+# single subflow, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 0 1
+chk_join_nr "remove single subflow" 1 1 1
+chk_rm_nr 1 1
+
+# multiple subflows, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 0 2
+chk_join_nr "remove multiple subflows" 2 2 2
+chk_rm_nr 2 2
+
+# single address, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+run_remove_tests $ns1 $ns2 10.0.1.1 1 0
+chk_join_nr "remove single address" 1 1 1
+chk_add_nr 1 1
+chk_rm_nr 0 0
+
+# subflow and signal, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 1 1
+chk_join_nr "remove subflow and signal" 2 2 2
+chk_add_nr 1 1
+chk_rm_nr 1 1
+
+# subflows and signal, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 3
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 3
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 1 2
+chk_join_nr "remove subflows and signal" 3 3 3
+chk_add_nr 1 1
+chk_rm_nr 2 2
# single subflow, syncookies
reset_with_cookies
@@ -396,6 +576,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "signal address with syn cookies" 1 1 1
+chk_add_nr 1 1
# test cookie with subflow and signal
reset_with_cookies
@@ -405,6 +586,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 2
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "subflow and signal w cookies" 2 2 2
+chk_add_nr 1 1
# accept and use add_addr with additional subflows
reset_with_cookies
@@ -415,5 +597,6 @@ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "subflows and signal w. cookies" 3 3 3
+chk_add_nr 1 1
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
new file mode 100755
index 000000000000..0d88225daa02
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -0,0 +1,293 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+sec=$(date +%s)
+rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+ns2="ns2-$rndh"
+ns3="ns3-$rndh"
+capture=false
+ksft_skip=4
+timeout=30
+test_cnt=1
+ret=0
+bail=0
+
+usage() {
+ echo "Usage: $0 [ -b ] [ -c ] [ -d ]"
+ echo -e "\t-b: bail out after first error, otherwise runs al testcases"
+ echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
+ echo -e "\t-d: debug this script"
+}
+
+cleanup()
+{
+ rm -f "$cin" "$cout"
+ rm -f "$sin" "$sout"
+ rm -f "$capout"
+
+ local netns
+	for netns in "$ns1" "$ns2" "$ns3"; do
+ ip netns del $netns
+ done
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+# "$ns1" ns2 ns3
+# ns1eth1 ns2eth1 ns2eth3 ns3eth1
+# netem
+# ns1eth2 ns2eth2
+# netem
+
+setup()
+{
+ large=$(mktemp)
+ small=$(mktemp)
+ sout=$(mktemp)
+ cout=$(mktemp)
+ capout=$(mktemp)
+ size=$((2048 * 4096))
+ dd if=/dev/zero of=$small bs=4096 count=20 >/dev/null 2>&1
+ dd if=/dev/zero of=$large bs=4096 count=$((size / 4096)) >/dev/null 2>&1
+
+ trap cleanup EXIT
+
+	for i in "$ns1" "$ns2" "$ns3"; do
+ ip netns add $i || exit $ksft_skip
+ ip -net $i link set lo up
+ done
+
+ ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
+ ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth2 netns "$ns2"
+ ip link add ns2eth3 netns "$ns2" type veth peer name ns3eth1 netns "$ns3"
+
+ ip -net "$ns1" addr add 10.0.1.1/24 dev ns1eth1
+ ip -net "$ns1" addr add dead:beef:1::1/64 dev ns1eth1 nodad
+ ip -net "$ns1" link set ns1eth1 up mtu 1500
+ ip -net "$ns1" route add default via 10.0.1.2
+ ip -net "$ns1" route add default via dead:beef:1::2
+
+ ip -net "$ns1" addr add 10.0.2.1/24 dev ns1eth2
+ ip -net "$ns1" addr add dead:beef:2::1/64 dev ns1eth2 nodad
+ ip -net "$ns1" link set ns1eth2 up mtu 1500
+ ip -net "$ns1" route add default via 10.0.2.2 metric 101
+ ip -net "$ns1" route add default via dead:beef:2::2 metric 101
+
+ ip netns exec "$ns1" ./pm_nl_ctl limits 1 1
+ ip netns exec "$ns1" ./pm_nl_ctl add 10.0.2.1 dev ns1eth2 flags subflow
+ ip netns exec "$ns1" sysctl -q net.ipv4.conf.all.rp_filter=0
+
+ ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
+ ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
+ ip -net "$ns2" link set ns2eth1 up mtu 1500
+
+ ip -net "$ns2" addr add 10.0.2.2/24 dev ns2eth2
+ ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth2 nodad
+ ip -net "$ns2" link set ns2eth2 up mtu 1500
+
+ ip -net "$ns2" addr add 10.0.3.2/24 dev ns2eth3
+ ip -net "$ns2" addr add dead:beef:3::2/64 dev ns2eth3 nodad
+ ip -net "$ns2" link set ns2eth3 up mtu 1500
+ ip netns exec "$ns2" sysctl -q net.ipv4.ip_forward=1
+ ip netns exec "$ns2" sysctl -q net.ipv6.conf.all.forwarding=1
+
+ ip -net "$ns3" addr add 10.0.3.3/24 dev ns3eth1
+ ip -net "$ns3" addr add dead:beef:3::3/64 dev ns3eth1 nodad
+ ip -net "$ns3" link set ns3eth1 up mtu 1500
+ ip -net "$ns3" route add default via 10.0.3.2
+ ip -net "$ns3" route add default via dead:beef:3::2
+
+ ip netns exec "$ns3" ./pm_nl_ctl limits 1 1
+}
+
+# $1: ns, $2: port
+wait_local_port_listen()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex i
+
+ port_hex="$(printf "%04X" "${port}")"
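+	# poll /proc/net/tcp* until a socket in LISTEN state (st == 0A)
+	# bound to the given port shows up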
+ for i in $(seq 10); do
+ ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
+ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
+ break
+ sleep 0.1
+ done
+}
+
+do_transfer()
+{
+ local cin=$1
+ local sin=$2
+ local max_time=$3
+ local port
+ port=$((10000+$test_cnt))
+ test_cnt=$((test_cnt+1))
+
+ :> "$cout"
+ :> "$sout"
+ :> "$capout"
+
+ local addr_port
+ addr_port=$(printf "%s:%d" ${connect_addr} ${port})
+
+ if $capture; then
+ local capuser
+		if [ -z "$SUDO_USER" ]; then
+ capuser=""
+ else
+ capuser="-Z $SUDO_USER"
+ fi
+
+ local capfile="${rndh}-${port}"
+ local capopt="-i any -s 65535 -B 32768 ${capuser}"
+
+ ip netns exec ${ns3} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
+ local cappid_listener=$!
+
+ ip netns exec ${ns1} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
+ local cappid_connector=$!
+
+ sleep 1
+ fi
+
+ ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
+ local spid=$!
+
+ wait_local_port_listen "${ns3}" "${port}"
+
+ local start
+ start=$(date +%s%3N)
+ ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
+ local cpid=$!
+
+ wait $cpid
+ local retc=$?
+ wait $spid
+ local rets=$?
+
+ local stop
+ stop=$(date +%s%3N)
+
+ if $capture; then
+ sleep 1
+ kill ${cappid_listener}
+ kill ${cappid_connector}
+ fi
+
+ local duration
+ duration=$((stop-start))
+
+ cmp $sin $cout > /dev/null 2>&1
+ local cmps=$?
+ cmp $cin $sout > /dev/null 2>&1
+ local cmpc=$?
+
+ printf "%16s" "$duration max $max_time "
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
+ [ $cmpc -eq 0 ] && [ $cmps -eq 0 ] && \
+ [ $duration -lt $max_time ]; then
+ echo "[ OK ]"
+ cat "$capout"
+ return 0
+ fi
+
+ echo " [ fail ]"
+ echo "client exit code $retc, server $rets" 1>&2
+ echo "\nnetns ${ns3} socket stat for $port:" 1>&2
+ ip netns exec ${ns3} ss -nita 1>&2 -o "sport = :$port"
+ echo "\nnetns ${ns1} socket stat for $port:" 1>&2
+ ip netns exec ${ns1} ss -nita 1>&2 -o "dport = :$port"
+ ls -l $sin $cout
+ ls -l $cin $sout
+
+ cat "$capout"
+ return 1
+}
+
+run_test()
+{
+ local rate1=$1
+ local rate2=$2
+ local delay1=$3
+ local delay2=$4
+ local lret
+ local dev
+ shift 4
+ local msg=$*
+
+ [ $delay1 -gt 0 ] && delay1="delay $delay1" || delay1=""
+ [ $delay2 -gt 0 ] && delay2="delay $delay2" || delay2=""
+
+ for dev in ns1eth1 ns1eth2; do
+ tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
+ done
+ for dev in ns2eth1 ns2eth2; do
+ tc -n $ns2 qdisc del dev $dev root >/dev/null 2>&1
+ done
+ tc -n $ns1 qdisc add dev ns1eth1 root netem rate ${rate1}mbit $delay1
+ tc -n $ns1 qdisc add dev ns1eth2 root netem rate ${rate2}mbit $delay2
+ tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
+ tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
+
+	# time is measured in ms
+	local time=$((size * 8 * 1000 / ((rate1 + rate2) * 1024 * 1024)))
+
+	# mptcp_connect does some sleeps to allow the mp_join handshake
+	# to complete
+ time=$((time + 1350))
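+	# e.g. the default 8MB transfer over two 10mbit links:
+	# 8388608 * 8 * 1000 / (20 * 1024 * 1024) = 3200ms, + 1350 = 4550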
+
+ printf "%-50s" "$msg"
+ do_transfer $small $large $((time * 11 / 10))
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ [ $bail -eq 0 ] || exit $ret
+ fi
+
+ printf "%-50s" "$msg - reverse direction"
+ do_transfer $large $small $((time * 11 / 10))
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ [ $bail -eq 0 ] || exit $ret
+ fi
+}
+
+while getopts "bcdh" option;do
+ case "$option" in
+ "h")
+ usage $0
+ exit 0
+ ;;
+ "b")
+ bail=1
+ ;;
+ "c")
+ capture=true
+ ;;
+ "d")
+ set -x
+ ;;
+ "?")
+ usage $0
+ exit 1
+ ;;
+ esac
+done
+
+setup
+run_test 10 10 0 0 "balanced bwidth"
+run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
+
+# we still need some additional infrastructure to pass the following test-cases
+# run_test 30 10 0 0 "unbalanced bwidth"
+# run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
+# run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
+exit $ret
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index 93208caacbe6..f75c53ce0a2d 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -1667,6 +1667,8 @@ int main(int argc, char *argv[])
case 'R':
args.type = SOCK_RAW;
args.port = 0;
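+			/* raw sockets have no port; make sure a protocol is
+			 * always set, -P can still override it
+			 */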
+ if (!args.protocol)
+ args.protocol = IPPROTO_RAW;
break;
case 'P':
pe = getprotobyname(optarg);
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 7c38a909f8b8..8a2fe6d64bf2 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -1175,6 +1175,51 @@ kci_test_neigh_get()
echo "PASS: neigh get"
}
+kci_test_bridge_parent_id()
+{
+ local ret=0
+ sysfsnet=/sys/bus/netdevsim/devices/netdevsim
+ probed=false
+
+ if [ ! -w /sys/bus/netdevsim/new_device ] ; then
+ modprobe -q netdevsim
+ check_err $?
+ if [ $ret -ne 0 ]; then
+ echo "SKIP: bridge_parent_id can't load netdevsim"
+ return $ksft_skip
+ fi
+ probed=true
+ fi
+
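+	# "new_device" takes "<id> <port count>": create two single-port
+	# netdevsim devices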
+ echo "10 1" > /sys/bus/netdevsim/new_device
+ while [ ! -d ${sysfsnet}10 ] ; do :; done
+ echo "20 1" > /sys/bus/netdevsim/new_device
+ while [ ! -d ${sysfsnet}20 ] ; do :; done
+ udevadm settle
+ dev10=`ls ${sysfsnet}10/net/`
+ dev20=`ls ${sysfsnet}20/net/`
+
+ ip link add name test-bond0 type bond mode 802.3ad
+ ip link set dev $dev10 master test-bond0
+ ip link set dev $dev20 master test-bond0
+ ip link add name test-br0 type bridge
+ ip link set dev test-bond0 master test-br0
+ check_err $?
+
+ # clean up any leftovers
+ ip link del dev test-br0
+ ip link del dev test-bond0
+ echo 20 > /sys/bus/netdevsim/del_device
+ echo 10 > /sys/bus/netdevsim/del_device
+ $probed && rmmod netdevsim
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: bridge_parent_id"
+ return 1
+ fi
+ echo "PASS: bridge_parent_id"
+}
+
kci_test_rtnl()
{
local ret=0
@@ -1224,6 +1269,8 @@ kci_test_rtnl()
check_err $?
kci_test_neigh_get
check_err $?
+ kci_test_bridge_parent_id
+ check_err $?
kci_del_dummy
return $ret
diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c
index e0cf8ebbf8cd..30b71b1d78d5 100644
--- a/tools/testing/selftests/powerpc/mm/prot_sao.c
+++ b/tools/testing/selftests/powerpc/mm/prot_sao.c
@@ -7,6 +7,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
+#include <unistd.h>
#include <asm/cputable.h>
@@ -18,9 +19,13 @@ int test_prot_sao(void)
{
char *p;
- /* SAO was introduced in 2.06 and removed in 3.1 */
+ /*
+ * SAO was introduced in 2.06 and removed in 3.1. It's disabled in
+ * guests/LPARs by default, so also skip if we are running in a guest.
+ */
SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
- have_hwcap2(PPC_FEATURE2_ARCH_3_1));
+ have_hwcap2(PPC_FEATURE2_ARCH_3_1) ||
+ access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0);
/*
* Ensure we can ask for PROT_SAO.
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 7656c7ce79d9..0e73a16874c4 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \
TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS)
+TEST_FILES := settings
include ../lib.mk
diff --git a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/timers/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/vm/map_hugetlb.c b/tools/testing/selftests/vm/map_hugetlb.c
index 6af951900aa3..312889edb84a 100644
--- a/tools/testing/selftests/vm/map_hugetlb.c
+++ b/tools/testing/selftests/vm/map_hugetlb.c
@@ -83,7 +83,7 @@ int main(int argc, char **argv)
}
if (shift)
- printf("%u kB hugepages\n", 1 << shift);
+ printf("%u kB hugepages\n", 1 << (shift - 10));
else
printf("Default size hugepages\n");
printf("Mapping %lu Mbytes\n", (unsigned long)length >> 20);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 67cd0b88a6b6..cf88233b819a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4332,7 +4332,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
- int i;
+ int i, j;
struct kvm_io_bus *new_bus, *bus;
bus = kvm_get_bus(kvm, bus_idx);
@@ -4349,17 +4349,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
GFP_KERNEL_ACCOUNT);
- if (!new_bus) {
+ if (new_bus) {
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+ } else {
pr_err("kvm: failed to shrink bus, removing it completely\n");
- goto broken;
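+		/* no replacement bus can be installed, so the bus is dropped
+		 * entirely; release every device except the one being removed
+		 */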
+ for (j = 0; j < bus->dev_count; j++) {
+ if (j == i)
+ continue;
+ kvm_iodevice_destructor(bus->range[j].dev);
+ }
}
- memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
- new_bus->dev_count--;
- memcpy(new_bus->range + i, bus->range + i + 1,
- (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
-
-broken:
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);