-rw-r--r--.mailmap2
-rw-r--r--Documentation/admin-guide/blockdev/drbd/figures.rst4
-rw-r--r--Documentation/admin-guide/blockdev/drbd/peer-states-8.dot (renamed from Documentation/admin-guide/blockdev/drbd/node-states-8.dot)5
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt10
-rw-r--r--Documentation/conf.py15
-rw-r--r--Documentation/devicetree/bindings/i2c/apple,i2c.yaml8
-rw-r--r--Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml14
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-phy.yaml8
-rw-r--r--Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25980.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml25
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8962.yaml3
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rockchip.yaml1
-rw-r--r--Documentation/i2c/summary.rst8
-rw-r--r--Documentation/locking/locktypes.rst9
-rw-r--r--Documentation/networking/bonding.rst11
-rw-r--r--Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst1
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst16
-rw-r--r--Documentation/networking/ip-sysctl.rst6
-rw-r--r--Documentation/networking/timestamping.rst4
-rw-r--r--Documentation/process/changes.rst11
-rw-r--r--Documentation/process/submitting-patches.rst3
-rw-r--r--Documentation/sound/hd-audio/models.rst2
-rw-r--r--MAINTAINERS37
-rw-r--r--Makefile14
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6qp-prtwd3.dts2
-rw-r--r--arch/arm/boot/dts/imx6ull-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/ls1021a-tsn.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_arria5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socdk.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sockit.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_socrates.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_sodia.dts2
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts4
-rw-r--r--arch/arm/include/asm/efi.h1
-rw-r--r--arch/arm/kernel/entry-armv.S8
-rw-r--r--arch/arm/kernel/head-nommu.S1
-rw-r--r--arch/arm/mach-rockchip/platsmp.c2
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts30
-rw-r--r--arch/arm64/boot/dts/apple/t8103-j274.dts2
-rw-r--r--arch/arm64/boot/dts/apple/t8103.dtsi11
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi2
-rw-r--r--arch/arm64/include/asm/efi.h1
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c1
-rw-r--r--arch/csky/kernel/traps.c4
-rw-r--r--arch/mips/include/asm/mach-ralink/spaces.h2
-rw-r--r--arch/mips/include/asm/pci.h4
-rw-r--r--arch/mips/net/bpf_jit_comp.h2
-rw-r--r--arch/mips/pci/pci-generic.c2
-rw-r--r--arch/parisc/Kconfig5
-rw-r--r--arch/parisc/include/asm/futex.h4
-rw-r--r--arch/parisc/kernel/syscall.S2
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/powerpc/kernel/module_64.c42
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c2
-rw-r--r--arch/powerpc/platforms/85xx/smp.c4
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts1
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts113
-rw-r--r--arch/riscv/include/asm/efi.h1
-rw-r--r--arch/s390/configs/debug_defconfig2
-rw-r--r--arch/s390/configs/defconfig2
-rw-r--r--arch/s390/kernel/ftrace.c2
-rw-r--r--arch/s390/kernel/irq.c9
-rw-r--r--arch/s390/kernel/machine_kexec_file.c38
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/include/asm/efi.h2
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h1
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/pkru.h4
-rw-r--r--arch/x86/kernel/setup.c72
-rw-r--r--arch/x86/kernel/smpboot.c14
-rw-r--r--arch/x86/kvm/hyperv.c7
-rw-r--r--arch/x86/kvm/mmu/mmu.c16
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c6
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h6
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c29
-rw-r--r--arch/x86/kvm/svm/svm.c21
-rw-r--r--arch/x86/kvm/vmx/vmx.c67
-rw-r--r--arch/x86/kvm/x86.c25
-rw-r--r--arch/x86/net/bpf_jit_comp.c51
-rw-r--r--arch/x86/platform/efi/quirks.c3
-rw-r--r--arch/x86/tools/relocs.c2
-rw-r--r--block/blk-iocost.c9
-rw-r--r--block/fops.c4
-rw-r--r--block/ioprio.c3
-rw-r--r--drivers/Makefile3
-rw-r--r--drivers/android/binder.c21
-rw-r--r--drivers/android/binder_alloc.c2
-rw-r--r--drivers/ata/ahci_ceva.c3
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/libata-scsi.c15
-rw-r--r--drivers/auxdisplay/charlcd.c5
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/block/xen-blkfront.c15
-rw-r--r--drivers/bus/mhi/core/pm.c21
-rw-r--r--drivers/bus/mhi/pci_generic.c2
-rw-r--r--drivers/bus/sunxi-rsb.c8
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c21
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c7
-rw-r--r--drivers/clk/clk.c15
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c2
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c2
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c9
-rw-r--r--drivers/clk/qcom/clk-regmap-mux.c2
-rw-r--r--drivers/clk/qcom/common.c12
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/gcc-sm6125.c4
-rw-r--r--drivers/clk/versatile/clk-icst.c2
-rw-r--r--drivers/clocksource/arm_arch_timer.c9
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c2
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c7
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c4
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c10
-rw-r--r--drivers/dma/idxd/irq.c2
-rw-r--r--drivers/dma/idxd/submit.c18
-rw-r--r--drivers/dma/st_fdma.c2
-rw-r--r--drivers/dma/ti/k3-udma.c157
-rw-r--r--drivers/firmware/scpi_pm_domain.c10
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c5
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c2
-rw-r--r--drivers/gpio/gpio-dln2.c19
-rw-r--r--drivers/gpio/gpio-virtio.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c7
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h31
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h49
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c5
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c1
-rw-r--r--drivers/gpu/drm/drm_syncobj.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c6
-rw-r--r--drivers/gpu/drm/i915/i915_request.c1
-rw-r--r--drivers/gpu/drm/lima/lima_device.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c12
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c28
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/hid/Kconfig10
-rw-r--r--drivers/hid/hid-asus.c6
-rw-r--r--drivers/hid/hid-bigbenff.c2
-rw-r--r--drivers/hid/hid-chicony.c3
-rw-r--r--drivers/hid/hid-corsair.c7
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-elo.c3
-rw-r--r--drivers/hid/hid-ft260.c3
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-holtek-kbd.c9
-rw-r--r--drivers/hid/hid-holtek-mouse.c24
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-lg.c10
-rw-r--r--drivers/hid/hid-logitech-dj.c2
-rw-r--r--drivers/hid/hid-prodikeys.c10
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-roccat-arvo.c3
-rw-r--r--drivers/hid/hid-roccat-isku.c3
-rw-r--r--drivers/hid/hid-roccat-kone.c3
-rw-r--r--drivers/hid/hid-roccat-koneplus.c3
-rw-r--r--drivers/hid/hid-roccat-konepure.c3
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c3
-rw-r--r--drivers/hid/hid-roccat-lua.c3
-rw-r--r--drivers/hid/hid-roccat-pyra.c3
-rw-r--r--drivers/hid/hid-roccat-ryos.c3
-rw-r--r--drivers/hid/hid-roccat-savu.c3
-rw-r--r--drivers/hid/hid-samsung.c3
-rw-r--r--drivers/hid/hid-sony.c24
-rw-r--r--drivers/hid/hid-thrustmaster.c3
-rw-r--r--drivers/hid/hid-u2fzero.c2
-rw-r--r--drivers/hid/hid-uclogic-core.c3
-rw-r--r--drivers/hid/hid-uclogic-params.c3
-rw-r--r--drivers/hid/hid-vivaldi.c3
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c6
-rw-r--r--drivers/hid/wacom_sys.c19
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hwmon/corsair-psu.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/lm90.c106
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/hwmon/pwm-fan.c2
-rw-r--r--drivers/hwmon/sht4x.c4
-rw-r--r--drivers/i2c/busses/i2c-mpc.c2
-rw-r--r--drivers/i2c/busses/i2c-virtio.c32
-rw-r--r--drivers/i2c/i2c-dev.c3
-rw-r--r--drivers/iio/accel/kxcjk-1013.c5
-rw-r--r--drivers/iio/accel/kxsd9.c6
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ad7768-1.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c3
-rw-r--r--drivers/iio/adc/axp20x_adc.c18
-rw-r--r--drivers/iio/adc/dln2-adc.c21
-rw-r--r--drivers/iio/adc/stm32-adc.c3
-rw-r--r--drivers/iio/gyro/adxrs290.c5
-rw-r--r--drivers/iio/gyro/itg3200_buffer.c2
-rw-r--r--drivers/iio/industrialio-trigger.c1
-rw-r--r--drivers/iio/light/ltr501.c2
-rw-r--r--drivers/iio/light/stk3310.c6
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c2
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c2
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c40
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c78
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c2
-rw-r--r--drivers/infiniband/hw/irdma/hw.c7
-rw-r--r--drivers/infiniband/hw/irdma/main.h1
-rw-r--r--drivers/infiniband/hw/irdma/pble.c8
-rw-r--r--drivers/infiniband/hw/irdma/pble.h1
-rw-r--r--drivers/infiniband/hw/irdma/utils.c24
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c23
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c1
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c9
-rw-r--r--drivers/input/joystick/spaceball.c11
-rw-r--r--drivers/input/misc/iqs626a.c21
-rw-r--r--drivers/input/mouse/appletouch.c4
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h21
-rw-r--r--drivers/input/serio/i8042.c54
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c2
-rw-r--r--drivers/input/touchscreen/elants_i2c.c46
-rw-r--r--drivers/input/touchscreen/goodix.c31
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/input/touchscreen/goodix_fwupload.c2
-rw-r--r--drivers/irqchip/irq-apple-aic.c2
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c16
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c4
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c4
-rw-r--r--drivers/irqchip/irq-nvic.c2
-rw-r--r--drivers/isdn/mISDN/core.c6
-rw-r--r--drivers/isdn/mISDN/core.h4
-rw-r--r--drivers/isdn/mISDN/layer1.c4
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c2
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c4
-rw-r--r--drivers/misc/eeprom/at25.c38
-rw-r--r--drivers/misc/fastrpc.c10
-rw-r--r--drivers/mmc/core/core.c7
-rw-r--r--drivers/mmc/core/core.h1
-rw-r--r--drivers/mmc/core/host.c9
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-mmc.c16
-rw-r--r--drivers/mmc/host/mmci_stm32_sdmmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c2
-rw-r--r--drivers/mmc/host/sdhci-tegra.c43
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c8
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c36
-rw-r--r--drivers/mtd/nand/raw/nand_base.c6
-rw-r--r--drivers/net/bonding/bond_alb.c14
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/can/kvaser_pciefd.c8
-rw-r--r--drivers/net/can/m_can/m_can.c42
-rw-r--r--drivers/net/can/m_can/m_can.h3
-rw-r--r--drivers/net/can/m_can/m_can_pci.c62
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c7
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c101
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c89
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c8
-rw-r--r--drivers/net/dsa/ocelot/felix.c5
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c9
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c49
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c8
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c23
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c5
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c12
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c1
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c115
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c43
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c66
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c47
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c6
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c3
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c36
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c19
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c10
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c10
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c86
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c29
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/ieee802154/atusb.c10
-rw-r--r--drivers/net/netdevsim/bpf.c1
-rw-r--r--drivers/net/netdevsim/ethtool.c5
-rw-r--r--drivers/net/phy/mdio_bus.c3
-rw-r--r--drivers/net/phy/phylink.c1
-rw-r--r--drivers/net/tun.c115
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/cdc_ncm.c2
-rw-r--r--drivers/net/usb/lan78xx.c6
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c52
-rw-r--r--drivers/net/usb/rndis_host.c5
-rw-r--r--drivers/net/veth.c8
-rw-r--r--drivers/net/virtio_net.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c13
-rw-r--r--drivers/net/vrf.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/Kconfig4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c26
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c7
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/rx.c77
-rw-r--r--drivers/net/xen-netfront.c125
-rw-r--r--drivers/nfc/st21nfca/i2c.c29
-rw-r--r--drivers/nvme/host/core.c23
-rw-r--r--drivers/nvme/host/multipath.c3
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/zns.c5
-rw-r--r--drivers/nvme/target/tcp.c9
-rw-r--r--drivers/of/irq.c27
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c1
-rw-r--r--drivers/pci/controller/pci-aardvark.c9
-rw-r--r--drivers/pci/controller/pcie-apple.c14
-rw-r--r--drivers/pci/msi.c15
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-pcie.c4
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c26
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hsic.c2
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c6
-rw-r--r--drivers/phy/ti/phy-tusb1210.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c29
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c8
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c8
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c4
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/amd-pmc.c5
-rw-r--r--drivers/platform/x86/apple-gmux.c2
-rw-r--r--drivers/platform/x86/intel/Kconfig15
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/pmc/pltdrv.c2
-rw-r--r--drivers/platform/x86/lg-laptop.c12
-rw-r--r--drivers/platform/x86/system76_acpi.c58
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c6
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c18
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/scsi/libiscsi.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c6
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c38
-rw-r--r--drivers/scsi/qedi/qedi_fw.c37
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.c7
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c19
-rw-r--r--drivers/soc/imx/soc-imx.c4
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse.h2
-rw-r--r--drivers/spi/spi-armada-3700.c2
-rw-r--r--drivers/tee/amdtee/core.c5
-rw-r--r--drivers/tee/optee/core.c6
-rw-r--r--drivers/tee/optee/smc_abi.c2
-rw-r--r--drivers/tee/tee_shm.c174
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c30
-rw-r--r--drivers/tty/n_hdlc.c23
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c20
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c12
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c11
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h4
-rw-r--r--drivers/usb/cdns3/host.c1
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c15
-rw-r--r--drivers/usb/early/xhci-dbc.c15
-rw-r--r--drivers/usb/gadget/composite.c14
-rw-r--r--drivers/usb/gadget/function/f_fs.c9
-rw-r--r--drivers/usb/gadget/function/u_ether.c16
-rw-r--r--drivers/usb/gadget/legacy/dbgp.c15
-rw-r--r--drivers/usb/gadget/legacy/inode.c16
-rw-r--r--drivers/usb/host/xhci-hub.c1
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c2
-rw-r--r--drivers/usb/host/xhci-pci.c11
-rw-r--r--drivers/usb/host/xhci-ring.c1
-rw-r--r--drivers/usb/host/xhci.c26
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c12
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c7
-rw-r--r--drivers/usb/serial/cp210x.c6
-rw-r--r--drivers/usb/serial/option.c8
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c18
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c4
-rw-r--r--drivers/vdpa/vdpa.c3
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c6
-rw-r--r--drivers/vhost/vdpa.c2
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c5
-rw-r--r--drivers/virtio/virtio_ring.c2
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--fs/afs/file.c5
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/aio.c186
-rw-r--r--fs/btrfs/ctree.c17
-rw-r--r--fs/btrfs/ctree.h7
-rw-r--r--fs/btrfs/delalloc-space.c12
-rw-r--r--fs/btrfs/disk-io.c8
-rw-r--r--fs/btrfs/extent-tree.c16
-rw-r--r--fs/btrfs/extent_io.c22
-rw-r--r--fs/btrfs/free-space-tree.c4
-rw-r--r--fs/btrfs/ioctl.c16
-rw-r--r--fs/btrfs/qgroup.c3
-rw-r--r--fs/btrfs/root-tree.c3
-rw-r--r--fs/btrfs/tree-log.c7
-rw-r--r--fs/btrfs/volumes.c6
-rw-r--r--fs/btrfs/zoned.c2
-rw-r--r--fs/ceph/caps.c16
-rw-r--r--fs/ceph/file.c20
-rw-r--r--fs/ceph/mds_client.c3
-rw-r--r--fs/cifs/connect.c7
-rw-r--r--fs/cifs/fs_context.c38
-rw-r--r--fs/cifs/inode.c13
-rw-r--r--fs/cifs/sess.c54
-rw-r--r--fs/file.c72
-rw-r--r--fs/io-wq.c31
-rw-r--r--fs/io_uring.c16
-rw-r--r--fs/ksmbd/ndr.c2
-rw-r--r--fs/ksmbd/smb2ops.c3
-rw-r--r--fs/ksmbd/smb2pdu.c29
-rw-r--r--fs/namespace.c9
-rw-r--r--fs/netfs/read_helper.c21
-rw-r--r--fs/nfsd/nfs3proc.c11
-rw-r--r--fs/nfsd/nfs4recover.c1
-rw-r--r--fs/nfsd/nfs4state.c9
-rw-r--r--fs/nfsd/nfsctl.c14
-rw-r--r--fs/nfsd/nfsproc.c8
-rw-r--r--fs/signalfd.c12
-rw-r--r--fs/smbfs_common/cifs_arc4.c13
-rw-r--r--fs/xfs/xfs_super.c14
-rw-r--r--fs/zonefs/super.c1
-rw-r--r--include/linux/bpf.h17
-rw-r--r--include/linux/btf.h14
-rw-r--r--include/linux/cacheinfo.h1
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/delay.h14
-rw-r--r--include/linux/device/driver.h1
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/filter.h5
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/instrumentation.h4
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/memblock.h4
-rw-r--r--include/linux/mhi.h13
-rw-r--r--include/linux/mmzone.h1
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/pagemap.h1
-rw-r--r--include/linux/percpu-refcount.h2
-rw-r--r--include/linux/phy.h11
-rw-r--r--include/linux/pm_runtime.h2
-rw-r--r--include/linux/regulator/driver.h14
-rw-r--r--include/linux/skbuff.h3
-rw-r--r--include/linux/tee_drv.h4
-rw-r--r--include/linux/virtio_net.h25
-rw-r--r--include/linux/wait.h26
-rw-r--r--include/net/bond_alb.h2
-rw-r--r--include/net/busy_poll.h13
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/net/pkt_sched.h16
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h9
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/seg6.h21
-rw-r--r--include/net/sock.h2
-rw-r--r--include/trace/events/vmscan.h4
-rw-r--r--include/uapi/asm-generic/poll.h2
-rw-r--r--include/uapi/linux/byteorder/big_endian.h1
-rw-r--r--include/uapi/linux/byteorder/little_endian.h1
-rw-r--r--include/uapi/linux/mptcp.h18
-rw-r--r--include/uapi/linux/nfc.h6
-rw-r--r--include/uapi/linux/resource.h13
-rw-r--r--include/xen/events.h1
-rw-r--r--kernel/audit.c21
-rw-r--r--kernel/bpf/btf.c11
-rw-r--r--kernel/bpf/verifier.c55
-rw-r--r--kernel/crash_core.c11
-rw-r--r--kernel/locking/rtmutex.c2
-rw-r--r--kernel/sched/wait.c7
-rw-r--r--kernel/signal.c9
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/time/timer.c16
-rw-r--r--kernel/ucount.c15
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/backing-dev.c7
-rw-r--r--mm/damon/core.c20
-rw-r--r--mm/damon/dbgfs.c15
-rw-r--r--mm/damon/vaddr-test.h79
-rw-r--r--mm/damon/vaddr.c2
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/kfence/core.c1
-rw-r--r--mm/memcontrol.c106
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/memory_hotplug.c1
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/slub.c15
-rw-r--r--mm/swap_slots.c1
-rw-r--r--mm/vmscan.c65
-rw-r--r--net/ax25/af_ax25.c4
-rw-r--r--net/batman-adv/multicast.c15
-rw-r--r--net/batman-adv/multicast.h10
-rw-r--r--net/batman-adv/soft-interface.c7
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_multicast.c32
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h12
-rw-r--r--net/bridge/br_sysfs_br.c4
-rw-r--r--net/bridge/br_vlan_options.c4
-rw-r--r--net/core/dev.c8
-rw-r--r--net/core/devlink.c16
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/lwtunnel.c4
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/skmsg.c5
-rw-r--r--net/core/sock_map.c15
-rw-r--r--net/dsa/tag_ocelot.c6
-rw-r--r--net/ethtool/netlink.c3
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/fib_semantics.c49
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c11
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv6/icmp.c6
-rw-r--r--net/ipv6/ip6_vti.c2
-rw-r--r--net/ipv6/raw.c3
-rw-r--r--net/ipv6/route.c32
-rw-r--r--net/ipv6/seg6.c59
-rw-r--r--net/ipv6/seg6_iptunnel.c8
-rw-r--r--net/ipv6/seg6_local.c33
-rw-r--r--net/ipv6/sit.c1
-rw-r--r--net/ipv6/tcp_ipv6.c11
-rw-r--r--net/ipv6/udp.c9
-rw-r--r--net/mac80211/agg-rx.c5
-rw-r--r--net/mac80211/agg-tx.c16
-rw-r--r--net/mac80211/cfg.c3
-rw-r--r--net/mac80211/driver-ops.h5
-rw-r--r--net/mac80211/ieee80211_i.h24
-rw-r--r--net/mac80211/mesh.h22
-rw-r--r--net/mac80211/mesh_pathtbl.c89
-rw-r--r--net/mac80211/mlme.c15
-rw-r--r--net/mac80211/rx.c1
-rw-r--r--net/mac80211/sta_info.c21
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/tx.c10
-rw-r--r--net/mac80211/util.c23
-rw-r--r--net/mctp/neigh.c9
-rw-r--r--net/mptcp/pm_netlink.c3
-rw-r--r--net/mptcp/protocol.c6
-rw-r--r--net/mptcp/sockopt.c1
-rw-r--r--net/ncsi/ncsi-netlink.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c6
-rw-r--r--net/netfilter/nf_conntrack_netlink.c7
-rw-r--r--net/netfilter/nf_flow_table_core.c4
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c5
-rw-r--r--net/netfilter/nft_exthdr.c11
-rw-r--r--net/netfilter/nft_set_pipapo_avx2.c2
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/nfc/netlink.c12
-rw-r--r--net/openvswitch/flow.c8
-rw-r--r--net/packet/af_packet.c5
-rw-r--r--net/phonet/pep.c3
-rw-r--r--net/rds/connection.c1
-rw-r--r--net/sched/act_ct.c15
-rw-r--r--net/sched/cls_api.c8
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sched/sch_cake.c6
-rw-r--r--net/sched/sch_ets.c4
-rw-r--r--net/sched/sch_fq_pie.c1
-rw-r--r--net/sched/sch_frag.c3
-rw-r--r--net/sched/sch_qfq.c6
-rw-r--r--net/sctp/diag.c58
-rw-r--r--net/sctp/endpointola.c23
-rw-r--r--net/sctp/socket.c45
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/smc/smc.h5
-rw-r--r--net/smc/smc_cdc.c52
-rw-r--r--net/smc/smc_cdc.h2
-rw-r--r--net/smc/smc_core.c27
-rw-r--r--net/smc/smc_core.h6
-rw-r--r--net/smc/smc_ib.c4
-rw-r--r--net/smc/smc_ib.h1
-rw-r--r--net/smc/smc_llc.c2
-rw-r--r--net/smc/smc_wr.c51
-rw-r--r--net/smc/smc_wr.h5
-rw-r--r--net/tipc/crypto.c8
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/vmw_vsock/virtio_transport_common.c3
-rw-r--r--net/wireless/reg.c30
-rw-r--r--net/xdp/xsk_buff_pool.c1
-rwxr-xr-xscripts/recordmcount.pl2
-rw-r--r--security/selinux/hooks.c35
-rw-r--r--security/tomoyo/util.c31
-rw-r--r--sound/core/control_compat.c3
-rw-r--r--sound/core/jack.c4
-rw-r--r--sound/core/oss/pcm_oss.c37
-rw-r--r--sound/core/rawmidi.c1
-rw-r--r--sound/drivers/opl3/opl3_midi.c2
-rw-r--r--sound/hda/intel-sdw-acpi.c13
-rw-r--r--sound/pci/hda/patch_hdmi.c21
-rw-r--r--sound/pci/hda/patch_realtek.c109
-rw-r--r--sound/soc/amd/yc/pci-acp6x.c3
-rw-r--r--sound/soc/codecs/rt5682.c14
-rw-r--r--sound/soc/codecs/rt5682s.c10
-rw-r--r--sound/soc/codecs/tas2770.c4
-rw-r--r--sound/soc/codecs/wcd934x.c126
-rw-r--r--sound/soc/codecs/wsa881x.c16
-rw-r--r--sound/soc/meson/aiu-encoder-i2s.c33
-rw-r--r--sound/soc/meson/aiu-fifo-i2s.c19
-rw-r--r--sound/soc/meson/aiu-fifo.c6
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c8
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c52
-rw-r--r--sound/soc/sof/intel/hda-codec.c14
-rw-r--r--sound/soc/sof/intel/pci-tgl.c4
-rw-r--r--sound/soc/tegra/tegra210_adx.c4
-rw-r--r--sound/soc/tegra/tegra210_amx.c4
-rw-r--r--sound/soc/tegra/tegra210_mixer.c4
-rw-r--r--sound/soc/tegra/tegra210_mvc.c8
-rw-r--r--sound/soc/tegra/tegra210_sfc.c4
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.c11
-rw-r--r--sound/soc/tegra/tegra_asoc_machine.h1
-rw-r--r--sound/usb/mixer_quirks.c10
-rw-r--r--tools/bpf/resolve_btfids/main.c8
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-libpython-version.c11
-rw-r--r--tools/include/linux/debug_locks.h14
-rw-r--r--tools/include/linux/hardirq.h12
-rw-r--r--tools/include/linux/irqflags.h39
-rw-r--r--tools/include/linux/lockdep.h72
-rw-r--r--tools/include/linux/proc_fs.h4
-rw-r--r--tools/include/linux/spinlock.h2
-rw-r--r--tools/include/linux/stacktrace.h33
-rw-r--r--tools/perf/Makefile.config2
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/bench/sched-messaging.c4
-rw-r--r--tools/perf/builtin-inject.c15
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py23
-rw-r--r--tools/perf/tests/expr.c4
-rw-r--r--tools/perf/tests/parse-metric.c1
-rw-r--r--tools/perf/ui/tui/setup.c8
-rw-r--r--tools/perf/util/bpf_skel/bperf.h14
-rw-r--r--tools/perf/util/bpf_skel/bperf_follower.bpf.c19
-rw-r--r--tools/perf/util/bpf_skel/bperf_leader.bpf.c19
-rw-r--r--tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c2
-rw-r--r--tools/perf/util/event.h5
-rw-r--r--tools/perf/util/expr.c12
-rw-r--r--tools/perf/util/header.c15
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c85
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/perf_regs.c3
-rw-r--r--tools/perf/util/pmu.c23
-rw-r--r--tools/perf/util/python.c2
-rw-r--r--tools/perf/util/smt.c2
-rw-r--r--tools/power/acpi/Makefile.config1
-rw-r--r--tools/power/acpi/Makefile.rules1
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c20
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c12
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c86
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c71
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c32
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c23
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c632
-rw-r--r--tools/testing/selftests/damon/.gitignore2
-rw-r--r--tools/testing/selftests/damon/Makefile7
-rw-r--r--tools/testing/selftests/damon/_debugfs_common.sh52
-rw-r--r--tools/testing/selftests/damon/debugfs_attrs.sh73
-rw-r--r--tools/testing/selftests/damon/debugfs_empty_targets.sh13
-rw-r--r--tools/testing/selftests/damon/debugfs_huge_count_read_write.sh22
-rw-r--r--tools/testing/selftests/damon/debugfs_schemes.sh19
-rw-r--r--tools/testing/selftests/damon/debugfs_target_ids.sh19
-rw-r--r--tools/testing/selftests/damon/huge_count_read_write.c39
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh30
-rw-r--r--tools/testing/selftests/kvm/.gitignore2
-rw-r--r--tools/testing/selftests/kvm/Makefile2
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c7
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c68
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_io_test.c114
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c105
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c17
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/net/amt.sh0
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh53
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh59
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample2
-rwxr-xr-xtools/testing/selftests/net/icmp_redirect.sh2
-rw-r--r--tools/testing/selftests/net/mptcp/config1
-rw-r--r--tools/testing/selftests/net/tls.c36
-rw-r--r--tools/testing/selftests/net/toeplitz.c2
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh7
-rw-r--r--tools/testing/selftests/net/udpgso.c12
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c8
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_vrf.sh30
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh24
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh19
-rw-r--r--tools/testing/selftests/tc-testing/config2
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py8
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh1
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c16
858 files changed, 8118 insertions, 3668 deletions
diff --git a/.mailmap b/.mailmap
index 6277bb27b4bf..b344067e0acb 100644
--- a/.mailmap
+++ b/.mailmap
@@ -126,6 +126,8 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
+Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
Gustavo Padovan <gustavo@las.ic.unicamp.br>
Gustavo Padovan <padovan@profusion.mobi>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
diff --git a/Documentation/admin-guide/blockdev/drbd/figures.rst b/Documentation/admin-guide/blockdev/drbd/figures.rst
index bd9a4901fe46..9f73253ea353 100644
--- a/Documentation/admin-guide/blockdev/drbd/figures.rst
+++ b/Documentation/admin-guide/blockdev/drbd/figures.rst
@@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions
:alt: disk-states-8.dot
:align: center
-.. kernel-figure:: node-states-8.dot
- :alt: node-states-8.dot
+.. kernel-figure:: peer-states-8.dot
+ :alt: peer-states-8.dot
:align: center
diff --git a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
index bfa54e1f8016..6dc3954954d6 100644
--- a/Documentation/admin-guide/blockdev/drbd/node-states-8.dot
+++ b/Documentation/admin-guide/blockdev/drbd/peer-states-8.dot
@@ -1,8 +1,3 @@
-digraph node_states {
- Secondary -> Primary [ label = "ioctl_set_state()" ]
- Primary -> Secondary [ label = "ioctl_set_state()" ]
-}
-
digraph peer_states {
Secondary -> Primary [ label = "recv state packet" ]
Primary -> Secondary [ label = "recv state packet" ]
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9725c546a0d4..2fba82431efb 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1689,6 +1689,8 @@
architectures force reset to be always executed
i8042.unlock [HW] Unlock (ignore) the keylock
i8042.kbdreset [HW] Reset device connected to KBD port
+ i8042.probe_defer
+ [HW] Allow deferred probing upon i8042 probe errors
i810= [HW,DRM]
@@ -2413,8 +2415,12 @@
Default is 1 (enabled)
kvm-intel.emulate_invalid_guest_state=
- [KVM,Intel] Enable emulation of invalid guest states
- Default is 0 (disabled)
+ [KVM,Intel] Control whether to emulate invalid guest state.
+ Ignored if kvm-intel.enable_unrestricted_guest=1, as
+ guest state is never invalid for unrestricted guests.
+ This param doesn't apply to nested guests (L2), as KVM
+ never emulates invalid L2 guest state.
+ Default is 1 (enabled)
kvm-intel.flexpriority=
[KVM,Intel] Disable FlexPriority feature (TPR shadow).
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 17f7cee56987..76e5eb5cb62b 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -249,11 +249,16 @@ except ImportError:
html_static_path = ['sphinx-static']
-html_context = {
- 'css_files': [
- '_static/theme_overrides.css',
- ],
-}
+html_css_files = [
+ 'theme_overrides.css',
+]
+
+if major <= 1 and minor < 8:
+ html_context = {
+ 'css_files': [
+ '_static/theme_overrides.css',
+ ],
+ }
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
diff --git a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
index 22fc8483256f..82b953181a52 100644
--- a/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/apple,i2c.yaml
@@ -20,9 +20,9 @@ allOf:
properties:
compatible:
- enum:
- - apple,t8103-i2c
- - apple,i2c
+ items:
+ - const: apple,t8103-i2c
+ - const: apple,i2c
reg:
maxItems: 1
@@ -51,7 +51,7 @@ unevaluatedProperties: false
examples:
- |
i2c@35010000 {
- compatible = "apple,t8103-i2c";
+ compatible = "apple,t8103-i2c", "apple,i2c";
reg = <0x35010000 0x4000>;
interrupt-parent = <&aic>;
interrupts = <0 627 4>;
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
index c65921e66dc1..81c87295912c 100644
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
@@ -136,7 +136,7 @@ examples:
samsung,syscon-phandle = <&pmu_system_controller>;
/* NTC thermistor is a hwmon device */
- ncp15wb473 {
+ thermistor {
compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <47000>;
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml
index 060a309ff8e7..dbe7ecc19ccb 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.yaml
+++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml
@@ -142,7 +142,7 @@ examples:
down {
label = "GPIO Key DOWN";
linux,code = <108>;
- interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
};
};
diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
index 877183cf4278..1ef849dc74d7 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
@@ -79,6 +79,8 @@ properties:
properties:
data-lanes:
+ description:
+ Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines.
items:
minItems: 1
maxItems: 4
@@ -91,18 +93,6 @@ properties:
required:
- data-lanes
- allOf:
- - if:
- properties:
- compatible:
- contains:
- const: fsl,imx7-mipi-csi2
- then:
- properties:
- data-lanes:
- items:
- maxItems: 2
-
port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index 2766fe45bb98..ee42328a109d 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -91,6 +91,14 @@ properties:
compensate for the board being designed with the lanes
swapped.
+ enet-phy-lane-no-swap:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ If set, indicates that PHY will disable swap of the
+ TX/RX lanes. This property allows the PHY to work correctly after
+ e.g. wrong bootstrap configuration caused by issues in PCB
+ layout design.
+
eee-broken-100tx:
$ref: /schemas/types.yaml#/definitions/flag
description:
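
The new enet-phy-lane-no-swap flag is a plain boolean property, so a PHY driver only needs to test for its presence. A minimal sketch of the consumer side (illustrative only, not a hunk from this commit; the register write that actually disables lane swapping is PHY specific)::

    #include <linux/of.h>
    #include <linux/phy.h>

    /* Returns true if the DT asks the driver to leave lane swapping off. */
    static bool example_lane_swap_disabled(struct phy_device *phydev)
    {
            struct device_node *np = phydev->mdio.dev.of_node;

            return of_property_read_bool(np, "enet-phy-lane-no-swap");
    }
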
diff --git a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
index 04d5654efb38..79906519c652 100644
--- a/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
+++ b/Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
@@ -29,7 +29,7 @@ properties:
- PHY_TYPE_PCIE
- PHY_TYPE_SATA
- PHY_TYPE_SGMII
- - PHY_TYPE_USB
+ - PHY_TYPE_USB3
- description: The PHY instance
minimum: 0
maximum: 1 # for DP, SATA or USB
diff --git a/Documentation/devicetree/bindings/power/supply/bq25980.yaml b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
index 06eca6667f67..8367a1fd4057 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25980.yaml
+++ b/Documentation/devicetree/bindings/power/supply/bq25980.yaml
@@ -105,7 +105,7 @@ examples:
reg = <0x65>;
interrupt-parent = <&gpio1>;
interrupts = <16 IRQ_TYPE_EDGE_FALLING>;
- ti,watchdog-timer = <0>;
+ ti,watchdog-timeout-ms = <0>;
ti,sc-ocp-limit-microamp = <2000000>;
ti,sc-ovp-limit-microvolt = <17800000>;
monitored-battery = <&bat>;
diff --git a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
index 80a63d47790a..c98929a213e9 100644
--- a/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
+++ b/Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
@@ -51,6 +51,19 @@ patternProperties:
description:
Properties for single BUCK regulator.
+ properties:
+ op_mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ default: 1
+ description: |
+ Describes the different operating modes of the regulator with power
+ mode change in SOC. The different possible values are:
+ 0 - always off mode
+ 1 - on in normal mode
+ 2 - low power mode
+ 3 - suspend mode
+
required:
- regulator-name
@@ -63,6 +76,18 @@ patternProperties:
Properties for single BUCK regulator.
properties:
+ op_mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+ default: 1
+ description: |
+ Describes the different operating modes of the regulator with power
+ mode change in SOC. The different possible values are:
+ 0 - always off mode
+ 1 - on in normal mode
+ 2 - low power mode
+ 3 - suspend mode
+
s5m8767,pmic-ext-control-gpios:
maxItems: 1
description: |
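
Since op_mode is a plain uint32 with a constrained range, the consuming PMIC driver can validate it while parsing each regulator node. A rough sketch, assuming reg_np is the regulator's device_node and dev the PMIC device (names are illustrative, not the actual s5m8767 driver code)::

    u32 op_mode = 1;        /* default: on in normal mode */

    if (!of_property_read_u32(reg_np, "op_mode", &op_mode) && op_mode > 3) {
            dev_warn(dev, "invalid op_mode %u, falling back to normal mode\n",
                     op_mode);
            op_mode = 1;
    }
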
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
index 0e6249d7c133..5e172e9462b9 100644
--- a/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8962.yaml
@@ -19,6 +19,9 @@ properties:
clocks:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
"#sound-dai-cells":
const: 0
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 7f987e79337c..52a78a2e362e 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -33,6 +33,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
+ - rockchip,rk3568-spi
- rockchip,rv1126-spi
- const: rockchip,rk3066-spi
diff --git a/Documentation/i2c/summary.rst b/Documentation/i2c/summary.rst
index 136c4e333be7..786c618ba3be 100644
--- a/Documentation/i2c/summary.rst
+++ b/Documentation/i2c/summary.rst
@@ -11,9 +11,11 @@ systems. Some systems use variants that don't meet branding requirements,
and so are not advertised as being I2C but come under different names,
e.g. TWI (Two Wire Interface), IIC.
-The official I2C specification is the `"I2C-bus specification and user
-manual" (UM10204) <https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_
-published by NXP Semiconductors.
+The latest official I2C specification is the `"I2C-bus specification and user
+manual" (UM10204) <https://www.nxp.com/webapp/Download?colCode=UM10204>`_
+published by NXP Semiconductors. However, you need to log-in to the site to
+access the PDF. An older version of the specification (revision 6) is archived
+`here <https://web.archive.org/web/20210813122132/https://www.nxp.com/docs/en/user-guide/UM10204.pdf>`_.
SMBus (System Management Bus) is based on the I2C protocol, and is mostly
a subset of I2C protocols and signaling. Many I2C devices will work on an
diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst
index ddada4a53749..4fd7b70fcde1 100644
--- a/Documentation/locking/locktypes.rst
+++ b/Documentation/locking/locktypes.rst
@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
spin_lock(&p->lock);
p->count += this_cpu_read(var2);
-On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
-which makes the above code fully equivalent. On a PREEMPT_RT kernel
migrate_disable() ensures that the task is pinned on the current CPU which
in turn guarantees that the per-CPU access to var1 and var2 are staying on
-the same CPU.
+the same CPU while the task remains preemptible.
The migrate_disable() substitution is not valid for the following
scenario::
@@ -456,9 +454,8 @@ scenario::
p = this_cpu_ptr(&var1);
p->val = func2();
-While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
-here migrate_disable() does not protect against reentrancy from a
-preempting task. A correct substitution for this case is::
+This breaks because migrate_disable() does not protect against reentrancy from
+a preempting task. A correct substitution for this case is::
func()
{
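
The hunk ends just as the corrected listing begins. The general shape of that substitution is to serialize the per-CPU access with a real lock, since only a lock (not migrate_disable()) excludes a preempting task on the same CPU. An illustrative sketch, not the exact listing from locktypes.rst::

    func()
    {
            struct foo *p;

            spin_lock(&global_lock);        /* also excludes preempting tasks */
            p = this_cpu_ptr(&var1);
            p->val = func2();
            spin_unlock(&global_lock);
    }
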
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index 31cfd7d674a6..c0a789b00806 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -196,11 +196,12 @@ ad_actor_sys_prio
ad_actor_system
In an AD system, this specifies the mac-address for the actor in
- protocol packet exchanges (LACPDUs). The value cannot be NULL or
- multicast. It is preferred to have the local-admin bit set for this
- mac but driver does not enforce it. If the value is not given then
- system defaults to using the masters' mac address as actors' system
- address.
+ protocol packet exchanges (LACPDUs). The value cannot be a multicast
+ address. If the all-zeroes MAC is specified, bonding will internally
+ use the MAC of the bond itself. It is preferred to have the
+ local-admin bit set for this mac but driver does not enforce it. If
+ the value is not given then system defaults to using the masters'
+ mac address as actors' system address.
This parameter has effect only in 802.3ad mode and is available through
SysFs interface.
diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
index d638b5a8aadd..199647729251 100644
--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
+++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
@@ -183,6 +183,7 @@ PHY and allows physical transmission and reception of Ethernet frames.
IRQ config, enable, reset
DPNI (Datapath Network Interface)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains TX/RX queues, network interface configuration, and RX buffer pool
configuration mechanisms. The TX/RX queues are in memory and are identified
by queue number.
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
index f1d5233e5e51..0a233b17c664 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst
@@ -440,6 +440,22 @@ NOTE: For 82599-based network connections, if you are enabling jumbo frames in
a virtual function (VF), jumbo frames must first be enabled in the physical
function (PF). The VF MTU setting cannot be larger than the PF MTU.
+NBASE-T Support
+---------------
+The ixgbe driver supports NBASE-T on some devices. However, the advertisement
+of NBASE-T speeds is suppressed by default, to accommodate broken network
+switches which cannot cope with advertised NBASE-T speeds. Use the ethtool
+command to enable advertising NBASE-T speeds on devices which support it::
+
+ ethtool -s eth? advertise 0x1800000001028
+
+On Linux systems with INTERFACES(5), this can be specified as a pre-up command
+in /etc/network/interfaces so that the interface is always brought up with
+NBASE-T support, e.g.::
+
+ iface eth? inet dhcp
+ pre-up ethtool -s eth? advertise 0x1800000001028 || true
+
Generic Receive Offload, aka GRO
--------------------------------
The driver supports the in-kernel software implementation of GRO. GRO has
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index c04431144f7a..2572eecc3e86 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -25,7 +25,8 @@ ip_default_ttl - INTEGER
ip_no_pmtu_disc - INTEGER
Disable Path MTU Discovery. If enabled in mode 1 and a
fragmentation-required ICMP is received, the PMTU to this
- destination will be set to min_pmtu (see below). You will need
+ destination will be set to the smallest of the old MTU to
+ this destination and min_pmtu (see below). You will need
to raise min_pmtu to the smallest interface MTU on your system
manually if you want to avoid locally generated fragments.
@@ -49,7 +50,8 @@ ip_no_pmtu_disc - INTEGER
Default: FALSE
min_pmtu - INTEGER
- default 552 - minimum discovered Path MTU
+ default 552 - minimum Path MTU. Unless this is changed manually,
+ each cached pmtu will never be lower than this setting.
ip_forward_use_pmtu - BOOLEAN
By default we don't trust protocol path MTUs while forwarding
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index 80b13353254a..f5809206eb93 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -582,8 +582,8 @@ Time stamps for outgoing packets are to be generated as follows:
and hardware timestamping is not possible (SKBTX_IN_PROGRESS not set).
- As soon as the driver has sent the packet and/or obtained a
hardware time stamp for it, it passes the time stamp back by
- calling skb_hwtstamp_tx() with the original skb, the raw
- hardware time stamp. skb_hwtstamp_tx() clones the original skb and
+ calling skb_tstamp_tx() with the original skb, the raw
+ hardware time stamp. skb_tstamp_tx() clones the original skb and
adds the timestamps, therefore the original skb has to be freed now.
If obtaining the hardware time stamp somehow fails, then the driver
should not fall back to software time stamping. The rationale is that
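
For reference, the driver-side sequence that the corrected text describes looks roughly like this, assuming hw_ns already holds the raw hardware timestamp in nanoseconds (a sketch, not a hunk from this commit)::

    struct skb_shared_hwtstamps shhwtstamps = { };

    shhwtstamps.hwtstamp = ns_to_ktime(hw_ns);
    skb_tstamp_tx(skb, &shhwtstamps);   /* clones skb and queues the timestamp */
    dev_kfree_skb_any(skb);             /* the original skb must be freed now */
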
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index b398b8576417..cf908d79666e 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -35,6 +35,7 @@ GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
+pahole 1.16 pahole --version
util-linux 2.10o fdformat --version
kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
@@ -108,6 +109,16 @@ Bison
Since Linux 4.16, the build system generates parsers
during build. This requires bison 2.0 or later.
+pahole:
+-------
+
+Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system
+generates BTF (BPF Type Format) from DWARF in vmlinux, a bit later from kernel
+modules as well. This requires pahole v1.16 or later.
+
+It is found in the 'dwarves' or 'pahole' distro packages or from
+https://fedorapeople.org/~acme/dwarves/.
+
Perl
----
diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst
index da085d63af9b..6b3aaed66fba 100644
--- a/Documentation/process/submitting-patches.rst
+++ b/Documentation/process/submitting-patches.rst
@@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read
Documentation/process/submit-checklist.rst
for a list of items to check before submitting code. If you are submitting
a driver, also read Documentation/process/submitting-drivers.rst; for device
-tree binding patches, read Documentation/process/submitting-patches.rst.
+tree binding patches, read
+Documentation/devicetree/bindings/submitting-patches.rst.
This documentation assumes that you're using ``git`` to prepare your patches.
If you're unfamiliar with ``git``, you would be well-advised to learn how to
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index 0ea967d34583..d25335993e55 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -326,6 +326,8 @@ usi-headset
Headset support on USI machines
dual-codecs
Lenovo laptops with dual codecs
+alc285-hp-amp-init
+ HP laptops which require speaker amplifier initialization (ALC285)
ALC680
======
diff --git a/MAINTAINERS b/MAINTAINERS
index 43007f2d29e0..dd36acc87ce6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3066,7 +3066,7 @@ F: Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
F: drivers/phy/qualcomm/phy-ath79-usb.c
ATHEROS ATH GENERIC UTILITIES
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
@@ -3081,7 +3081,7 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
@@ -3769,7 +3769,8 @@ S: Supported
F: drivers/net/wireless/broadcom/brcm80211/
BROADCOM BRCMSTB GPIO DRIVER
-M: Gregory Fong <gregory.0xf0@gmail.com>
+M: Doug Berger <opendmb@gmail.com>
+M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -9329,7 +9330,6 @@ S: Maintained
F: drivers/iio/pressure/dps310.c
INFINIBAND SUBSYSTEM
-M: Doug Ledford <dledford@redhat.com>
M: Jason Gunthorpe <jgg@nvidia.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -10280,9 +10280,9 @@ F: lib/Kconfig.kcsan
F: scripts/Makefile.kcsan
KDUMP
-M: Dave Young <dyoung@redhat.com>
M: Baoquan He <bhe@redhat.com>
R: Vivek Goyal <vgoyal@redhat.com>
+R: Dave Young <dyoung@redhat.com>
L: kexec@lists.infradead.org
S: Maintained
W: http://lse.sourceforge.net/kdump/
@@ -12180,8 +12180,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F: include/linux/mlx5/mlx5_ifc_fpga.h
MELLANOX ETHERNET SWITCH DRIVERS
-M: Jiri Pirko <jiri@nvidia.com>
M: Ido Schimmel <idosch@nvidia.com>
+M: Petr Machata <petrm@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@@ -13249,7 +13249,7 @@ F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETWORKING DRIVERS (WIRELESS)
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Maintained
Q: http://patchwork.kernel.org/project/linux-wireless/list/
@@ -14846,7 +14846,7 @@ PCIE DRIVER FOR MEDIATEK
M: Ryder Lee <ryder.lee@mediatek.com>
M: Jianjun Wang <jianjun.wang@mediatek.com>
L: linux-pci@vger.kernel.org
-L: linux-mediatek@lists.infradead.org
+L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/pci/mediatek*
F: drivers/pci/controller/*mediatek*
@@ -15705,7 +15705,7 @@ T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/qt1010*
QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: ath10k@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
@@ -15713,7 +15713,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: ath11k@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@ -15771,6 +15771,15 @@ S: Maintained
F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+QUALCOMM FASTRPC DRIVER
+M: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M: Amol Maheshwari <amahesh@qti.qualcomm.com>
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
+F: drivers/misc/fastrpc.c
+F: include/uapi/misc/fastrpc.h
+
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Akash Asthana <akashast@codeaurora.org>
M: Mukesh Savaliya <msavaliy@codeaurora.org>
@@ -15877,7 +15886,7 @@ F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
-M: Kalle Valo <kvalo@codeaurora.org>
+M: Kalle Valo <kvalo@kernel.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
@@ -16629,7 +16638,6 @@ W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
-M: Julian Wiedmann <jwi@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
@@ -16641,7 +16649,6 @@ F: include/net/iucv/
F: net/iucv/
S390 NETWORK DRIVERS
-M: Julian Wiedmann <jwi@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
@@ -17417,7 +17424,7 @@ F: drivers/video/fbdev/sm712*
SILVACO I3C DUAL-ROLE MASTER
M: Miquel Raynal <miquel.raynal@bootlin.com>
M: Conor Culhane <conor.culhane@silvaco.com>
-L: linux-i3c@lists.infradead.org
+L: linux-i3c@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/i3c/silvaco,i3c-master.yaml
F: drivers/i3c/master/svc-i3c-master.c
@@ -21053,7 +21060,7 @@ S: Maintained
F: arch/x86/kernel/cpu/zhaoxin.c
ZONEFS FILESYSTEM
-M: Damien Le Moal <damien.lemoal@wdc.com>
+M: Damien Le Moal <damien.lemoal@opensource.wdc.com>
M: Naohiro Aota <naohiro.aota@wdc.com>
R: Johannes Thumshirn <jth@kernel.org>
L: linux-fsdevel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 8e35d7804fef..16d7f83ac368 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc8
NAME = Gobble Gobble
# *DOCUMENTATION*
@@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments
@@ -1374,17 +1374,17 @@ endif
ifneq ($(dtstree),)
-%.dtb: dt_binding_check include/config/kernel.release scripts_dtc
- $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtb: include/config/kernel.release scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
-%.dtbo: dt_binding_check include/config/kernel.release scripts_dtc
- $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@ $(dtstree)/$*.dt.yaml
+%.dtbo: include/config/kernel.release scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
PHONY += dtbs dtbs_install dtbs_check
dtbs: include/config/kernel.release scripts_dtc
$(Q)$(MAKE) $(build)=$(dtstree)
-ifneq ($(filter dtbs_check %.dtb %.dtbo, $(MAKECMDGOALS)),)
+ifneq ($(filter dtbs_check, $(MAKECMDGOALS)),)
export CHECK_DTBS=y
dtbs: dt_binding_check
endif
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index b62a0dbb033f..ec6fba5ee8fd 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -309,6 +309,7 @@
ethphy: ethernet-phy@1 {
reg = <1>;
+ qca,clk-out-frequency = <125000000>;
};
};
};
diff --git a/arch/arm/boot/dts/imx6qp-prtwd3.dts b/arch/arm/boot/dts/imx6qp-prtwd3.dts
index 7648e8a02000..cf6571cc4682 100644
--- a/arch/arm/boot/dts/imx6qp-prtwd3.dts
+++ b/arch/arm/boot/dts/imx6qp-prtwd3.dts
@@ -178,6 +178,8 @@
label = "cpu";
ethernet = <&fec>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <2000>;
+ tx-internal-delay-ps = <2000>;
fixed-link {
speed = <100>;
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h
index eb025a9d4759..7328d4ef8559 100644
--- a/arch/arm/boot/dts/imx6ull-pinfunc.h
+++ b/arch/arm/boot/dts/imx6ull-pinfunc.h
@@ -82,6 +82,6 @@
#define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0
#define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0
#define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0 0x01FC 0x0488 0x0000 0x9 0x0
-#define MX6ULL_PAD_CSI_DATA07__ESAI_T0 0x0200 0x048C 0x0000 0x9 0x0
+#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0 0x0200 0x048C 0x0000 0x9 0x0
#endif /* __DTS_IMX6ULL_PINFUNC_H */
diff --git a/arch/arm/boot/dts/ls1021a-tsn.dts b/arch/arm/boot/dts/ls1021a-tsn.dts
index ff0ffb22768b..1ea32fff4120 100644
--- a/arch/arm/boot/dts/ls1021a-tsn.dts
+++ b/arch/arm/boot/dts/ls1021a-tsn.dts
@@ -91,6 +91,8 @@
/* Internal port connected to eth2 */
ethernet = <&enet2>;
phy-mode = "rgmii";
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <0>;
reg = <4>;
fixed-link {
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
index 2b645642b935..2a745522404d 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
@@ -12,7 +12,7 @@
flash0: n25q00@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00aa";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index 90e676e7019f..1b02d46496a8 100644
--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -119,7 +119,7 @@
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q256a";
+ compatible = "micron,n25q256a", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index 6f138b2b2616..51bb436784e2 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -124,7 +124,7 @@
flash0: n25q00@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>; /* chip select */
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index c155ff02eb6e..cae9ddd5ed38 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -169,7 +169,7 @@
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
index 8d5d3996f6f2..ca18b959e655 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
@@ -80,7 +80,7 @@
flash: flash@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q256a";
+ compatible = "micron,n25q256a", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
m25p,fast-read;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
index 99a71757cdf4..3f7aa7bf0863 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
@@ -116,7 +116,7 @@
flash0: n25q512a@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q512a";
+ compatible = "micron,n25q512a", "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <100000000>;
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
index a060718758b6..25874e1b9c82 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
@@ -224,7 +224,7 @@
n25q128@0 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q128";
+ compatible = "micron,n25q128", "jedec,spi-nor";
reg = <0>; /* chip select */
spi-max-frequency = <100000000>;
m25p,fast-read;
@@ -241,7 +241,7 @@
n25q00@1 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "n25q00";
+ compatible = "micron,mt25qu02g", "jedec,spi-nor";
reg = <1>; /* chip select */
spi-max-frequency = <100000000>;
m25p,fast-read;
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index a6f3b179e8a9..27218eabbf9a 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -17,7 +17,6 @@
#ifdef CONFIG_EFI
void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index deff286eb5ea..5cd057859fe9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -596,11 +596,9 @@ call_fpe:
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
reteq lr
and r8, r0, #0x00000f00 @ mask out CP number
- THUMB( lsr r8, r8, #8 )
mov r7, #1
- add r6, r10, #TI_USED_CP
- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
+ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
+ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -609,7 +607,7 @@ call_fpe:
bcs iwmmxt_task_enable
#endif
ARM( add pc, pc, r8, lsr #6 )
- THUMB( lsl r8, r8, #2 )
+ THUMB( lsr r8, r8, #6 )
THUMB( add pc, r8 )
nop
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index fadfee9e2b45..950bef83339f 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -114,6 +114,7 @@ ENTRY(secondary_startup)
add r12, r12, r10
ret r12
1: bl __after_proc_init
+ ldr r7, __secondary_data @ reload r7
ldr sp, [r7, #12] @ set up the stack pointer
ldr r0, [r7, #16] @ set up task pointer
mov fp, #0
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index d60856898d97..5ec58d004b7d 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -189,7 +189,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
rockchip_boot_fn = __pa_symbol(secondary_startup);
/* copy the trampoline to sram, that runs during startup of the core */
- memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
+ memcpy_toio(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
flush_cache_all();
outer_clean_range(0, trampoline_sz);
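The platsmp.c hunk above only swaps memcpy() for memcpy_toio(), because the SRAM is an __iomem mapping that must go through the MMIO accessors. A stand-alone sketch of that pattern follows; MY_SRAM_PHYS, MY_SRAM_SIZE and the trampoline symbols are purely hypothetical and not taken from this patch.

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/sizes.h>

#define MY_SRAM_PHYS	0x10080000UL	/* hypothetical SRAM base */
#define MY_SRAM_SIZE	SZ_4K		/* hypothetical SRAM size */

extern char my_trampoline_start[], my_trampoline_end[];	/* hypothetical */

static int my_copy_trampoline(void)
{
	size_t sz = my_trampoline_end - my_trampoline_start;
	void __iomem *sram = ioremap(MY_SRAM_PHYS, MY_SRAM_SIZE);

	if (!sram)
		return -ENOMEM;

	/* __iomem memory must not be written with a plain memcpy(). */
	memcpy_toio(sram, my_trampoline_start, sz);

	iounmap(sram);
	return 0;
}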
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 1aa8b7073218..54e3910e8b9b 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -161,7 +161,6 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
- select COMMON_CLK
help
This enables support for the arm64 based Amlogic SoCs
such as the s905, S905X/D, S912, A113X/D or S905X/D2
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
index d13980ed7a79..7ec5ac850a0d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus.dts
@@ -69,7 +69,7 @@
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
index 52ebe371df26..561eec21b4de 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-jethome-jethub-j100.dts
@@ -134,23 +134,23 @@
type = "critical";
};
};
- };
- cpu_cooling_maps: cooling-maps {
- map0 {
- trip = <&cpu_passive>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
- };
+ cpu_cooling_maps: cooling-maps {
+ map0 {
+ trip = <&cpu_passive>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
- map1 {
- trip = <&cpu_hot>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
- <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ map1 {
+ trip = <&cpu_hot>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
};
};
};
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
index 33a80f9501dc..02c36301e985 100644
--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -60,7 +60,7 @@
&port02 {
bus-range = <3 3>;
- ethernet0: pci@0,0 {
+ ethernet0: ethernet@0,0 {
reg = <0x30000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 10 18 00 00 00];
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
index fc8b2bb06ffe..8b61e7fd3e9c 100644
--- a/arch/arm64/boot/dts/apple/t8103.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -7,6 +7,7 @@
* Copyright The Asahi Linux Contributors
*/
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>
@@ -143,6 +144,7 @@
apple,npins = <212>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 190 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 191 IRQ_TYPE_LEVEL_HIGH>,
@@ -169,6 +171,7 @@
apple,npins = <42>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 268 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 269 IRQ_TYPE_LEVEL_HIGH>,
@@ -189,6 +192,7 @@
apple,npins = <23>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 330 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 331 IRQ_TYPE_LEVEL_HIGH>,
@@ -209,6 +213,7 @@
apple,npins = <16>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&aic>;
interrupts = <AIC_IRQ 391 IRQ_TYPE_LEVEL_HIGH>,
<AIC_IRQ 392 IRQ_TYPE_LEVEL_HIGH>,
@@ -281,7 +286,7 @@
port00: pci@0,0 {
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
- reset-gpios = <&pinctrl_ap 152 0>;
+ reset-gpios = <&pinctrl_ap 152 GPIO_ACTIVE_LOW>;
max-link-speed = <2>;
#address-cells = <3>;
@@ -301,7 +306,7 @@
port01: pci@1,0 {
device_type = "pci";
reg = <0x800 0x0 0x0 0x0 0x0>;
- reset-gpios = <&pinctrl_ap 153 0>;
+ reset-gpios = <&pinctrl_ap 153 GPIO_ACTIVE_LOW>;
max-link-speed = <2>;
#address-cells = <3>;
@@ -321,7 +326,7 @@
port02: pci@2,0 {
device_type = "pci";
reg = <0x1000 0x0 0x0 0x0 0x0>;
- reset-gpios = <&pinctrl_ap 33 0>;
+ reset-gpios = <&pinctrl_ap 33 GPIO_ACTIVE_LOW>;
max-link-speed = <1>;
#address-cells = <3>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
index 3063851c2fb9..d3f03dcbb8c3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
@@ -38,7 +38,6 @@
powerdn {
label = "External Power Down";
gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
- interrupts = <&gpio1 17 IRQ_TYPE_EDGE_FALLING>;
linux,code = <KEY_POWER>;
};
@@ -46,7 +45,6 @@
admin {
label = "ADMIN button";
gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
- interrupts = <&gpio3 8 IRQ_TYPE_EDGE_RISING>;
linux,code = <KEY_WPS_BUTTON>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
index b21be03da0af..042c486bdda2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-bluebox3.dts
@@ -386,6 +386,8 @@
reg = <2>;
ethernet = <&dpmac17>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <2000>;
+ tx-internal-delay-ps = <2000>;
fixed-link {
speed = <1000>;
@@ -529,6 +531,8 @@
reg = <2>;
ethernet = <&dpmac18>;
phy-mode = "rgmii-id";
+ rx-internal-delay-ps = <2000>;
+ tx-internal-delay-ps = <2000>;
fixed-link {
speed = <1000>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
index dc8661ebd1f6..2433e6f2eda8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
@@ -719,7 +719,7 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
- scl-gpio = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -768,7 +768,7 @@
clock-names = "i2c";
clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL
QORIQ_CLK_PLL_DIV(16)>;
- scl-gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>;
+ scl-gpios = <&gpio2 16 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 972766b67a15..71bf497f99c2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -524,8 +524,6 @@
<&clk IMX8MQ_VIDEO_PLL1>,
<&clk IMX8MQ_VIDEO_PLL1_OUT>;
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
- interconnects = <&noc IMX8MQ_ICM_LCDIF &noc IMX8MQ_ICS_DRAM>;
- interconnect-names = "dram";
status = "disabled";
port@0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index 665b2e69455d..ea6820902ede 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -97,7 +97,7 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-boot-on;
- vim-supply = <&vcc_io>;
+ vin-supply = <&vcc_io>;
};
vdd_core: vdd-core {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
index d5c7648c841d..f1fcc6b5b402 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
@@ -705,7 +705,6 @@
&sdhci {
bus-width = <8>;
mmc-hs400-1_8v;
- mmc-hs400-enhanced-strobe;
non-removable;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index 63c7681843da..b6ac00f64613 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -276,6 +276,7 @@
clock-output-names = "xin32k", "rk808-clkout2";
pinctrl-names = "default";
pinctrl-0 = <&pmic_int_l>;
+ rockchip,system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
vcc3-supply = <&vcc5v0_sys>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
index 7c93f840bc64..e890166e7fd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
@@ -55,7 +55,7 @@
regulator-boot-on;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- vim-supply = <&vcc3v3_sys>;
+ vin-supply = <&vcc3v3_sys>;
};
vcc3v3_sys: vcc3v3-sys {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index 98136c88fa49..6a434be62819 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -502,7 +502,7 @@
status = "okay";
bt656-supply = <&vcc_3v0>;
- audio-supply = <&vcc_3v0>;
+ audio-supply = <&vcc1v8_codec>;
sdmmc-supply = <&vcc_sdio>;
gpio1830-supply = <&vcc_3v0>;
};
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index d3e1825337be..ad55079abe47 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -14,7 +14,6 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 63634b4d72c1..59c648d51848 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -149,6 +149,7 @@ int load_other_segments(struct kimage *image,
initrd_len, cmdline, 0);
if (!dtb) {
pr_err("Preparing for new dtb failed\n");
+ ret = -EINVAL;
goto out_err;
}
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index e5fbf8653a21..2020af88b636 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -209,7 +209,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
asmlinkage void do_trap_fpe(struct pt_regs *regs)
{
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
return fpu_fpe(regs);
#else
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
@@ -219,7 +219,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
asmlinkage void do_trap_priv(struct pt_regs *regs)
{
-#ifdef CONFIG_CPU_HAS_FP
+#ifdef CONFIG_CPU_HAS_FPU
if (user_mode(regs) && fpu_libc_helper(regs))
return;
#endif
diff --git a/arch/mips/include/asm/mach-ralink/spaces.h b/arch/mips/include/asm/mach-ralink/spaces.h
index 05d14c21c417..f7af11ea2d61 100644
--- a/arch/mips/include/asm/mach-ralink/spaces.h
+++ b/arch/mips/include/asm/mach-ralink/spaces.h
@@ -6,5 +6,7 @@
#define PCI_IOSIZE SZ_64K
#define IO_SPACE_LIMIT (PCI_IOSIZE - 1)
+#define pci_remap_iospace pci_remap_iospace
+
#include <asm/mach-generic/spaces.h>
#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 421231f55935..9ffc8192adae 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -20,10 +20,6 @@
#include <linux/list.h>
#include <linux/of.h>
-#ifdef CONFIG_PCI_DRIVERS_GENERIC
-#define pci_remap_iospace pci_remap_iospace
-#endif
-
#ifdef CONFIG_PCI_DRIVERS_LEGACY
/*
diff --git a/arch/mips/net/bpf_jit_comp.h b/arch/mips/net/bpf_jit_comp.h
index 6f3a7b07294b..a37fe20818eb 100644
--- a/arch/mips/net/bpf_jit_comp.h
+++ b/arch/mips/net/bpf_jit_comp.h
@@ -98,7 +98,7 @@ do { \
#define emit(...) __emit(__VA_ARGS__)
/* Workaround for R10000 ll/sc errata */
-#ifdef CONFIG_WAR_R10000
+#ifdef CONFIG_WAR_R10000_LLSC
#define LLSC_beqz beqzl
#else
#define LLSC_beqz beqz
diff --git a/arch/mips/pci/pci-generic.c b/arch/mips/pci/pci-generic.c
index 18eb8a453a86..d2d68bac3d25 100644
--- a/arch/mips/pci/pci-generic.c
+++ b/arch/mips/pci/pci-generic.c
@@ -47,6 +47,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
pci_read_bridge_bases(bus);
}
+#ifdef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
unsigned long vaddr;
@@ -60,3 +61,4 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
set_io_port_base(vaddr);
return 0;
}
+#endif
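The two MIPS hunks above rely on a common kernel preprocessor idiom: a platform header defines the symbol to its own name (#define pci_remap_iospace pci_remap_iospace), and generic code then tests that name with #ifdef/#ifndef to decide whether to build its own implementation. A self-contained userspace sketch of the idiom, using a hypothetical my_op() rather than any real kernel interface:

#include <stdio.h>

/* A platform that wants its own implementation defines the marker ... */
#define my_op my_op

/* ... which compiles out the generic inline fallback ... */
#ifndef my_op
static inline int my_op(int x) { return x; }
#endif

/* ... and compiles in the out-of-line override instead. */
#ifdef my_op
int my_op(int x)
{
	return 2 * x;
}
#endif

int main(void)
{
	printf("my_op(21) = %d\n", my_op(21));	/* uses the override: 42 */
	return 0;
}

Moving the #define from asm/pci.h into mach-ralink/spaces.h therefore limits the override in pci-generic.c to the Ralink platform, which appears to be the intent of the change.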
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index b2188da09c73..011dc32fdb4d 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -85,11 +85,6 @@ config MMU
config STACK_GROWSUP
def_bool y
-config ARCH_DEFCONFIG
- string
- default "arch/parisc/configs/generic-32bit_defconfig" if !64BIT
- default "arch/parisc/configs/generic-64bit_defconfig" if 64BIT
-
config GENERIC_LOCKBREAK
bool
default y
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 70cf8f0a7617..9cd4dd6e63ad 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -14,7 +14,7 @@ static inline void
_futex_spin_lock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x3f8) >> 1;
+ long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
preempt_disable();
arch_spin_lock(s);
@@ -24,7 +24,7 @@ static inline void
_futex_spin_unlock(u32 __user *uaddr)
{
extern u32 lws_lock_start[];
- long index = ((long)uaddr & 0x3f8) >> 1;
+ long index = ((long)uaddr & 0x7f8) >> 1;
arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
arch_spin_unlock(s);
preempt_enable();
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index d2497b339d13..65c88ca7a7ac 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -472,7 +472,7 @@ lws_start:
extrd,u %r1,PSW_W_BIT,1,%r1
/* sp must be aligned on 4, so deposit the W bit setting into
* the bottom of sp temporarily */
- or,ev %r1,%r30,%r30
+ or,od %r1,%r30,%r30
/* Clip LWS number to a 32-bit value for 32-bit processes */
depdi 0, 31, 32, %r20
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b11fb26ce299..892b7fc8f3c4 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -730,6 +730,8 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
mmap_read_unlock(current->mm);
}
+ /* CPU could not fetch instruction, so clear stale IIR value. */
+ regs->iir = 0xbaadf00d;
fallthrough;
case 27:
/* Data memory protection ID trap */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 6baa676e7cb6..5d77d3f5fbb5 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -422,11 +422,17 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
const char *name)
{
long reladdr;
+ func_desc_t desc;
+ int i;
if (is_mprofile_ftrace_call(name))
return create_ftrace_stub(entry, addr, me);
- memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
+ for (i = 0; i < sizeof(ppc64_stub_insns) / sizeof(u32); i++) {
+ if (patch_instruction(&entry->jump[i],
+ ppc_inst(ppc64_stub_insns[i])))
+ return 0;
+ }
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -437,10 +443,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
}
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
- entry->jump[0] |= PPC_HA(reladdr);
- entry->jump[1] |= PPC_LO(reladdr);
- entry->funcdata = func_desc(addr);
- entry->magic = STUB_MAGIC;
+ if (patch_instruction(&entry->jump[0],
+ ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
+ return 0;
+
+ if (patch_instruction(&entry->jump[1],
+ ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
+ return 0;
+
+ // func_desc_t is 8 bytes if ABIv2, else 16 bytes
+ desc = func_desc(addr);
+ for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
+ if (patch_instruction(((u32 *)&entry->funcdata) + i,
+ ppc_inst(((u32 *)(&desc))[i])))
+ return 0;
+ }
+
+ if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
+ return 0;
return 1;
}
@@ -495,8 +515,11 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
me->name, *instruction, instruction);
return 0;
}
+
/* ld r2,R2_STACK_OFFSET(r1) */
- *instruction = PPC_INST_LD_TOC;
+ if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
+ return 0;
+
return 1;
}
@@ -636,9 +659,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
- *(uint32_t *)location
- = (*(uint32_t *)location & ~0x03fffffc)
+ value = (*(uint32_t *)location & ~0x03fffffc)
| (value & 0x03fffffc);
+
+ if (patch_instruction((u32 *)location, ppc_inst(value)))
+ return -EFAULT;
+
break;
case R_PPC64_REL64:
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index bf251191e78d..32bfb215c485 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -183,7 +183,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
pte_t pte = __pte(st->current_flags);
- if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
+ if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
return;
if (!pte_write(pte) || !pte_exec(pte))
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 83f4a6389a28..d7081e9af65c 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu)
local_irq_save(flags);
hard_irq_disable();
- if (qoriq_pm_ops)
+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(cpu);
/* if cpu is not spinning, reset it */
@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr)
booting_thread_hwid = cpu_thread_in_core(nr);
primary = cpu_first_thread_sibling(nr);
- if (qoriq_pm_ops)
+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(nr);
/*
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index ba304d4c455c..ced0d4e47938 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -76,6 +76,7 @@
spi-max-frequency = <20000000>;
voltage-ranges = <3300 3300>;
disable-wp;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 4f66919215f6..6bfa1f24d3de 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -2,6 +2,7 @@
/* Copyright (c) 2020 SiFive, Inc */
#include "fu740-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
@@ -54,10 +55,21 @@
temperature-sensor@4c {
compatible = "ti,tmp451";
reg = <0x4c>;
+ vcc-supply = <&vdd_bpro>;
interrupt-parent = <&gpio>;
interrupts = <6 IRQ_TYPE_LEVEL_LOW>;
};
+ eeprom@54 {
+ compatible = "microchip,24c02", "atmel,24c02";
+ reg = <0x54>;
+ vcc-supply = <&vdd_bpro>;
+ label = "board-id";
+ pagesize = <16>;
+ read-only;
+ size = <256>;
+ };
+
pmic@58 {
compatible = "dlg,da9063";
reg = <0x58>;
@@ -65,48 +77,44 @@
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
- regulators {
- vdd_bcore1: bcore1 {
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <900000>;
- regulator-min-microamp = <5000000>;
- regulator-max-microamp = <5000000>;
- regulator-always-on;
- };
+ onkey {
+ compatible = "dlg,da9063-onkey";
+ };
- vdd_bcore2: bcore2 {
- regulator-min-microvolt = <900000>;
- regulator-max-microvolt = <900000>;
- regulator-min-microamp = <5000000>;
- regulator-max-microamp = <5000000>;
+ rtc {
+ compatible = "dlg,da9063-rtc";
+ };
+
+ wdt {
+ compatible = "dlg,da9063-watchdog";
+ };
+
+ regulators {
+ vdd_bcore: bcores-merged {
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-min-microamp = <4800000>;
+ regulator-max-microamp = <4800000>;
regulator-always-on;
};
vdd_bpro: bpro {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <2500000>;
- regulator-max-microamp = <2500000>;
+ regulator-min-microamp = <2400000>;
+ regulator-max-microamp = <2400000>;
regulator-always-on;
};
vdd_bperi: bperi {
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
+ regulator-min-microvolt = <1060000>;
+ regulator-max-microvolt = <1060000>;
regulator-min-microamp = <1500000>;
regulator-max-microamp = <1500000>;
regulator-always-on;
};
- vdd_bmem: bmem {
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
- regulator-min-microamp = <3000000>;
- regulator-max-microamp = <3000000>;
- regulator-always-on;
- };
-
- vdd_bio: bio {
+ vdd_bmem_bio: bmem-bio-merged {
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-min-microamp = <3000000>;
@@ -117,86 +125,66 @@
vdd_ldo1: ldo1 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <100000>;
- regulator-max-microamp = <100000>;
regulator-always-on;
};
vdd_ldo2: ldo2 {
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
regulator-always-on;
};
vdd_ldo3: ldo3 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo4: ldo4 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
regulator-always-on;
};
vdd_ldo5: ldo5 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <100000>;
- regulator-max-microamp = <100000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo6: ldo6 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-always-on;
};
vdd_ldo7: ldo7 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ldo8: ldo8 {
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <1800000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
};
vdd_ld09: ldo9 {
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
- regulator-min-microamp = <200000>;
- regulator-max-microamp = <200000>;
+ regulator-always-on;
};
vdd_ldo10: ldo10 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1000000>;
- regulator-min-microamp = <300000>;
- regulator-max-microamp = <300000>;
+ regulator-always-on;
};
vdd_ldo11: ldo11 {
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
- regulator-min-microamp = <300000>;
- regulator-max-microamp = <300000>;
regulator-always-on;
};
};
@@ -223,6 +211,7 @@
spi-max-frequency = <20000000>;
voltage-ranges = <3300 3300>;
disable-wp;
+ gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
};
};
@@ -245,4 +234,8 @@
&gpio {
status = "okay";
+ gpio-line-names = "J29.1", "PMICNTB", "PMICSHDN", "J8.1", "J8.3",
+ "PCIe_PWREN", "THERM", "UBRDG_RSTN", "PCIe_PERSTN",
+ "ULPI_RSTN", "J8.2", "UHUB_RSTN", "GEMGXL_RST", "J8.4",
+ "EN_VDD_SD", "SD_CD";
};
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
index 49b398fe99f1..cc4f6787f937 100644
--- a/arch/riscv/include/asm/efi.h
+++ b/arch/riscv/include/asm/efi.h
@@ -13,7 +13,6 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index b626bc6e0eaf..e45cc27716de 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -117,6 +117,7 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
@@ -511,6 +512,7 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 0056cab27372..1c750bfca2d8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -109,6 +109,7 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
@@ -502,6 +503,7 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 5510c7d10ddc..21d62d8b6b9a 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -290,7 +290,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
return;
regs = ftrace_get_regs(fregs);
- preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
goto out;
@@ -318,7 +317,6 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
}
__this_cpu_write(current_kprobe, NULL);
out:
- preempt_enable_notrace();
ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 0df83ecaa2e0..cb7099682340 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -138,7 +138,7 @@ void noinstr do_io_irq(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
int from_idle;
- irq_enter();
+ irq_enter_rcu();
if (user_mode(regs)) {
update_timer_sys();
@@ -158,7 +158,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
do_irq_async(regs, IO_INTERRUPT);
} while (MACHINE_IS_LPAR && irq_pending(regs));
- irq_exit();
+ irq_exit_rcu();
+
set_irq_regs(old_regs);
irqentry_exit(regs, state);
@@ -172,7 +173,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
int from_idle;
- irq_enter();
+ irq_enter_rcu();
if (user_mode(regs)) {
update_timer_sys();
@@ -190,7 +191,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
do_irq_async(regs, EXT_INTERRUPT);
- irq_exit();
+ irq_exit_rcu();
set_irq_regs(old_regs);
irqentry_exit(regs, state);
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 9975ad200d74..8f43575a4dd3 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -7,6 +7,8 @@
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
+#define pr_fmt(fmt) "kexec: " fmt
+
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
@@ -290,8 +292,16 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab)
{
+ const char *strtab, *name, *shstrtab;
+ const Elf_Shdr *sechdrs;
Elf_Rela *relas;
int i, r_type;
+ int ret;
+
+ /* String & section header string table */
+ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
+ strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
+ shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;
relas = (void *)pi->ehdr + relsec->sh_offset;
@@ -304,15 +314,27 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
sym = (void *)pi->ehdr + symtab->sh_offset;
sym += ELF64_R_SYM(relas[i].r_info);
- if (sym->st_shndx == SHN_UNDEF)
+ if (sym->st_name)
+ name = strtab + sym->st_name;
+ else
+ name = shstrtab + sechdrs[sym->st_shndx].sh_name;
+
+ if (sym->st_shndx == SHN_UNDEF) {
+ pr_err("Undefined symbol: %s\n", name);
return -ENOEXEC;
+ }
- if (sym->st_shndx == SHN_COMMON)
+ if (sym->st_shndx == SHN_COMMON) {
+ pr_err("symbol '%s' in common section\n", name);
return -ENOEXEC;
+ }
if (sym->st_shndx >= pi->ehdr->e_shnum &&
- sym->st_shndx != SHN_ABS)
+ sym->st_shndx != SHN_ABS) {
+ pr_err("Invalid section %d for symbol %s\n",
+ sym->st_shndx, name);
return -ENOEXEC;
+ }
loc = pi->purgatory_buf;
loc += section->sh_offset;
@@ -326,7 +348,15 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
addr = section->sh_addr + relas[i].r_offset;
r_type = ELF64_R_TYPE(relas[i].r_info);
- arch_kexec_do_relocs(r_type, loc, val, addr);
+
+ if (r_type == R_390_PLT32DBL)
+ r_type = R_390_PC32DBL;
+
+ ret = arch_kexec_do_relocs(r_type, loc, val, addr);
+ if (ret) {
+ pr_err("Unknown rela relocation: %d\n", r_type);
+ return -ENOEXEC;
+ }
}
return 0;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7399327d1eff..5c2ccb85f2ef 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1932,6 +1932,7 @@ config EFI
depends on ACPI
select UCS2_STRING
select EFI_RUNTIME_WRAPPERS
+ select ARCH_USE_MEMREMAP_PROT
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 4d0b126835b8..63158fd55856 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -197,8 +197,6 @@ static inline bool efi_runtime_supported(void)
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
-extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
-
extern void efi_thunk_runtime_setup(void);
efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
unsigned long descriptor_size,
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index cefe1d81e2e8..9e50da3ed01a 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -47,6 +47,7 @@ KVM_X86_OP(set_dr7)
KVM_X86_OP(cache_reg)
KVM_X86_OP(get_rflags)
KVM_X86_OP(set_rflags)
+KVM_X86_OP(get_if_flag)
KVM_X86_OP(tlb_flush_all)
KVM_X86_OP(tlb_flush_current)
KVM_X86_OP_NULL(tlb_remote_flush)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 860ed500580c..555f4de47ef2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -97,7 +97,7 @@
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
#define KVM_REQ_TLB_FLUSH_GUEST \
- KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+ KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
@@ -1349,6 +1349,7 @@ struct kvm_x86_ops {
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+ bool (*get_if_flag)(struct kvm_vcpu *vcpu);
void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/pkru.h b/arch/x86/include/asm/pkru.h
index 4cd49afa0ca4..74f0a2d34ffd 100644
--- a/arch/x86/include/asm/pkru.h
+++ b/arch/x86/include/asm/pkru.h
@@ -4,8 +4,8 @@
#include <asm/cpufeature.h>
-#define PKRU_AD_BIT 0x1
-#define PKRU_WD_BIT 0x2
+#define PKRU_AD_BIT 0x1u
+#define PKRU_WD_BIT 0x2u
#define PKRU_BITS_PER_PKEY 2
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
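The pkru.h hunk adds a 'u' suffix because these bits are shifted left by up to 30 positions when a full 32-bit PKRU value is assembled; with a plain signed constant the shift can land in the sign bit of an int, and the resulting negative value sign-extends if it is ever widened. A tiny userspace illustration of the widening difference, not kernel code:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	/* "Bit 31 set" held in a signed int sign-extends when widened ... */
	int32_t  as_signed = INT32_MIN;
	uint64_t widened   = (uint64_t)as_signed;	/* 0xffffffff80000000 */

	/* ... whereas the unsigned constant zero-extends as intended. */
	uint64_t intended  = (uint64_t)(UINT32_C(1) << 31);	/* 0x0000000080000000 */

	printf("signed widened:   %#018" PRIx64 "\n", widened);
	printf("unsigned widened: %#018" PRIx64 "\n", intended);
	return 0;
}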
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6a190c7f4d71..e04f5e6eb33f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -713,9 +713,6 @@ static void __init early_reserve_memory(void)
early_reserve_initrd();
- if (efi_enabled(EFI_BOOT))
- efi_memblock_x86_reserve_range();
-
memblock_x86_reserve_range_setup_data();
reserve_ibft_region();
@@ -742,28 +739,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
-static char * __init prepare_command_line(void)
-{
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- if (builtin_cmdline[0]) {
- /* append boot loader cmdline to builtin */
- strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
- strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
- }
-#endif
-#endif
-
- strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-
- parse_early_param();
-
- return command_line;
-}
-
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -853,23 +828,6 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
/*
- * x86_configure_nx() is called before parse_early_param() (called by
- * prepare_command_line()) to detect whether hardware doesn't support
- * NX (so that the early EHCI debug console setup can safely call
- * set_fixmap()). It may then be called again from within noexec_setup()
- * during parsing early parameters to honor the respective command line
- * option.
- */
- x86_configure_nx();
-
- /*
- * This parses early params and it needs to run before
- * early_reserve_memory() because latter relies on such settings
- * supplied as early params.
- */
- *cmdline_p = prepare_command_line();
-
- /*
* Do some memory reservations *before* memory is added to memblock, so
* memblock allocations won't overwrite it.
*
@@ -902,6 +860,36 @@ void __init setup_arch(char **cmdline_p)
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ /* append boot loader cmdline to builtin */
+ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+#endif
+#endif
+
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ /*
+ * x86_configure_nx() is called before parse_early_param() to detect
+ * whether hardware doesn't support NX (so that the early EHCI debug
+ * console setup can safely call set_fixmap()). It may then be called
+ * again from within noexec_setup() during parsing early parameters
+ * to honor the respective command line option.
+ */
+ x86_configure_nx();
+
+ parse_early_param();
+
+ if (efi_enabled(EFI_BOOT))
+ efi_memblock_x86_reserve_range();
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ac2909f0cab3..617012f4619f 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -579,6 +579,17 @@ static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
{ NULL, },
};
+static struct sched_domain_topology_level x86_hybrid_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+#ifdef CONFIG_SCHED_MC
+ { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
+#endif
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
@@ -1469,8 +1480,11 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
calculate_max_logical_packages();
+ /* XXX for now assume numa-in-package and hybrid don't overlap */
if (x86_has_numa_in_package)
set_sched_topology(x86_numa_in_package_topology);
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ set_sched_topology(x86_hybrid_topology);
nmi_selftest();
impress_friends();
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 5e19e6e4c2ce..8d8c1cc7cb53 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1922,11 +1922,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
+ if (all_cpus)
+ goto check_and_send_ipi;
+
if (!sparse_banks_len)
goto ret_success;
- if (!all_cpus &&
- kvm_read_guest(kvm,
+ if (kvm_read_guest(kvm,
hc->ingpa + offsetof(struct hv_send_ipi_ex,
vp_set.bank_contents),
sparse_banks,
@@ -1934,6 +1936,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
+check_and_send_ipi:
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e2e1d012df22..fcdf3f8bb59a 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3987,7 +3987,21 @@ out_retry:
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault, int mmu_seq)
{
- if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+ struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root_hpa);
+
+ /* Special roots, e.g. pae_root, are not backed by shadow pages. */
+ if (sp && is_obsolete_sp(vcpu->kvm, sp))
+ return true;
+
+ /*
+ * Roots without an associated shadow page are considered invalid if
+ * there is a pending request to free obsolete roots. The request is
+ * only a hint that the current root _may_ be obsolete and needs to be
+ * reloaded, e.g. if the guest frees a PGD that KVM is tracking as a
+ * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
+ * to reload even if no vCPU is actively using the root.
+ */
+ if (!sp && kvm_test_request(KVM_REQ_MMU_RELOAD, vcpu))
return true;
return fault->slot &&
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index b3ed302c1a35..caa96c270b95 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -26,6 +26,7 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
*/
void tdp_iter_restart(struct tdp_iter *iter)
{
+ iter->yielded = false;
iter->yielded_gfn = iter->next_last_level_gfn;
iter->level = iter->root_level;
@@ -160,6 +161,11 @@ static bool try_step_up(struct tdp_iter *iter)
*/
void tdp_iter_next(struct tdp_iter *iter)
{
+ if (iter->yielded) {
+ tdp_iter_restart(iter);
+ return;
+ }
+
if (try_step_down(iter))
return;
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index b1748b988d3a..e19cabbcb65c 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -45,6 +45,12 @@ struct tdp_iter {
* iterator walks off the end of the paging structure.
*/
bool valid;
+ /*
+ * True if KVM dropped mmu_lock and yielded in the middle of a walk, in
+ * which case tdp_iter_next() needs to restart the walk at the root
+ * level instead of advancing to the next entry.
+ */
+ bool yielded;
};
/*
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 1db8496259ad..1beb4ca90560 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -502,6 +502,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
struct tdp_iter *iter,
u64 new_spte)
{
+ WARN_ON_ONCE(iter->yielded);
+
lockdep_assert_held_read(&kvm->mmu_lock);
/*
@@ -575,6 +577,8 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte, bool record_acc_track,
bool record_dirty_log)
{
+ WARN_ON_ONCE(iter->yielded);
+
lockdep_assert_held_write(&kvm->mmu_lock);
/*
@@ -640,18 +644,19 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
* If this function should yield and flush is set, it will perform a remote
* TLB flush before yielding.
*
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should skip to the next
- * iteration to allow the iterator to continue its traversal from the
- * paging structure root.
+ * If this function yields, iter->yielded is set and the caller must skip to
+ * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
+ * over the paging structures to allow the iterator to continue its traversal
+ * from the paging structure root.
*
- * Return true if this function yielded and the iterator's traversal was reset.
- * Return false if a yield was not needed.
+ * Returns true if this function yielded.
*/
-static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
- struct tdp_iter *iter, bool flush,
- bool shared)
+static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
+ struct tdp_iter *iter,
+ bool flush, bool shared)
{
+ WARN_ON(iter->yielded);
+
/* Ensure forward progress has been made before yielding. */
if (iter->next_last_level_gfn == iter->yielded_gfn)
return false;
@@ -671,12 +676,10 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
WARN_ON(iter->gfn > iter->next_last_level_gfn);
- tdp_iter_restart(iter);
-
- return true;
+ iter->yielded = true;
}
- return false;
+ return iter->yielded;
}
/*
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d0f68d11ec70..5151efa424ac 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1585,6 +1585,15 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
+static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
+{
+ struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+
+ return sev_es_guest(vcpu->kvm)
+ ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
+ : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
@@ -3568,14 +3577,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm))
return true;
- if (sev_es_guest(vcpu->kvm)) {
- /*
- * SEV-ES guests to not expose RFLAGS. Use the VMCB interrupt mask
- * bit to determine the state of the IF flag.
- */
- if (!(vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK))
- return true;
- } else if (is_guest_mode(vcpu)) {
+ if (is_guest_mode(vcpu)) {
/* As long as interrupts are being delivered... */
if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
@@ -3586,7 +3588,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (nested_exit_on_intr(svm))
return false;
} else {
- if (!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
+ if (!svm_get_if_flag(vcpu))
return true;
}
@@ -4621,6 +4623,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
+ .get_if_flag = svm_get_if_flag,
.tlb_flush_all = svm_flush_tlb,
.tlb_flush_current = svm_flush_tlb,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9453743ce0c4..0dbf94eb954f 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1363,6 +1363,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
vmx->emulation_required = vmx_emulation_required(vcpu);
}
+static bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
+{
+ return vmx_get_rflags(vcpu) & X86_EFLAGS_IF;
+}
+
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -2646,15 +2651,6 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
if (!loaded_vmcs->msr_bitmap)
goto out_vmcs;
memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_HYPERV) &&
- static_branch_unlikely(&enable_evmcs) &&
- (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
- struct hv_enlightened_vmcs *evmcs =
- (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
- evmcs->hv_enlightenments_control.msr_bitmap = 1;
- }
}
memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
@@ -3968,8 +3964,7 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (pi_test_and_set_on(&vmx->pi_desc))
return 0;
- if (vcpu != kvm_get_running_vcpu() &&
- !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
return 0;
@@ -5886,18 +5881,14 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
vmx_flush_pml_buffer(vcpu);
/*
- * We should never reach this point with a pending nested VM-Enter, and
- * more specifically emulation of L2 due to invalid guest state (see
- * below) should never happen as that means we incorrectly allowed a
- * nested VM-Enter with an invalid vmcs12.
+ * KVM should never reach this point with a pending nested VM-Enter.
+ * More specifically, short-circuiting VM-Entry to emulate L2 due to
+ * invalid guest state should never happen as that means KVM knowingly
+ * allowed a nested VM-Enter with an invalid vmcs12. More below.
*/
if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
return -EIO;
- /* If guest state is invalid, start emulating */
- if (vmx->emulation_required)
- return handle_invalid_guest_state(vcpu);
-
if (is_guest_mode(vcpu)) {
/*
* PML is never enabled when running L2, bail immediately if a
@@ -5919,10 +5910,30 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
*/
nested_mark_vmcs12_pages_dirty(vcpu);
+ /*
+ * Synthesize a triple fault if L2 state is invalid. In normal
+ * operation, nested VM-Enter rejects any attempt to enter L2
+ * with invalid state. However, those checks are skipped if
+ * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If
+ * L2 state is invalid, it means either L1 modified SMRAM state
+ * or userspace provided bad state. Synthesize TRIPLE_FAULT as
+ * doing so is architecturally allowed in the RSM case, and is
+ * the least awful solution for the userspace case without
+ * risking false positives.
+ */
+ if (vmx->emulation_required) {
+ nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
+ return 1;
+ }
+
if (nested_vmx_reflect_vmexit(vcpu))
return 1;
}
+ /* If guest state is invalid, start emulating. L2 is handled above. */
+ if (vmx->emulation_required)
+ return handle_invalid_guest_state(vcpu);
+
if (exit_reason.failed_vmentry) {
dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -6617,9 +6628,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
* consistency check VM-Exit due to invalid guest state and bail.
*/
if (unlikely(vmx->emulation_required)) {
-
- /* We don't emulate invalid state of a nested guest */
- vmx->fail = is_guest_mode(vcpu);
+ vmx->fail = 0;
vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
vmx->exit_reason.failed_vmentry = 1;
@@ -6842,6 +6851,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (err < 0)
goto free_pml;
+ /*
+ * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
+ * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
+ * feature only for vmcs01, KVM currently isn't equipped to realize any
+ * performance benefits from enabling it for vmcs02.
+ */
+ if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+ (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+ struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
+
+ evmcs->hv_enlightenments_control.msr_bitmap = 1;
+ }
+
/* The MSR bitmap starts with all ones */
bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -7575,6 +7597,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
+ .get_if_flag = vmx_get_if_flag,
.tlb_flush_all = vmx_flush_tlb_all,
.tlb_flush_current = vmx_flush_tlb_current,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e0aa4dd53c7f..e50e97ac4408 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -890,7 +890,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
return 1;
- if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+ if (!(cr0 & X86_CR0_PG) &&
+ (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
return 1;
static_call(kvm_x86_set_cr0)(vcpu, cr0);
@@ -1330,7 +1331,7 @@ static const u32 msrs_to_save_all[] = {
MSR_IA32_UMWAIT_CONTROL,
MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
- MSR_ARCH_PERFMON_FIXED_CTR0 + 2, MSR_ARCH_PERFMON_FIXED_CTR0 + 3,
+ MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
@@ -3412,7 +3413,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated)
return 1;
- if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) && kvm_get_msr_feature(&msr_ent))
+ if (kvm_get_msr_feature(&msr_ent))
return 1;
if (data & ~msr_ent.data)
return 1;
@@ -7121,7 +7122,13 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val, unsigned int count)
{
if (vcpu->arch.pio.count) {
- /* Complete previous iteration. */
+ /*
+ * Complete a previous iteration that required userspace I/O.
+ * Note, @count isn't guaranteed to match pio.count as userspace
+ * can modify ECX before rerunning the vCPU. Ignore any such
+ * shenanigans as KVM doesn't support modifying the rep count,
+ * and the emulator ensures @count doesn't overflow the buffer.
+ */
} else {
int r = __emulator_pio_in(vcpu, size, port, count);
if (!r)
@@ -7130,7 +7137,6 @@ static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
/* Results already available, fall through. */
}
- WARN_ON(count != vcpu->arch.pio.count);
complete_emulator_pio_in(vcpu, val);
return 1;
}
@@ -8995,14 +9001,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
- /*
- * if_flag is obsolete and useless, so do not bother
- * setting it for SEV-ES guests. Userspace can just
- * use kvm_run->ready_for_interrupt_injection.
- */
- kvm_run->if_flag = !vcpu->arch.guest_state_protected
- && (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
-
+ kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 726700fabca6..bafe36e69227 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1252,19 +1252,54 @@ st: if (is_imm8(insn->off))
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
- /* test src_reg, src_reg */
- maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
- EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
- /* jne start_of_ldx */
- EMIT2(X86_JNE, 0);
+ /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
+ * add abs(insn->off) to the limit to make sure that negative
+ * offset won't be an issue.
+ * insn->off is s16, so it won't affect valid pointers.
+ */
+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
+ u8 *end_of_jmp1, *end_of_jmp2;
+
+ /* Conservatively check that src_reg + insn->off is a kernel address:
+ * 1. src_reg + insn->off >= limit
+ * 2. src_reg + insn->off doesn't become small positive.
+ * Cannot do src_reg + insn->off >= limit in one branch,
+ * since it needs two spare registers, but JIT has only one.
+ */
+
+ /* movabsq r11, limit */
+ EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
+ EMIT((u32)limit, 4);
+ EMIT(limit >> 32, 4);
+ /* cmp src_reg, r11 */
+ maybe_emit_mod(&prog, src_reg, AUX_REG, true);
+ EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+ /* if unsigned '<' goto end_of_jmp2 */
+ EMIT2(X86_JB, 0);
+ end_of_jmp1 = prog;
+
+ /* mov r11, src_reg */
+ emit_mov_reg(&prog, true, AUX_REG, src_reg);
+ /* add r11, insn->off */
+ maybe_emit_1mod(&prog, AUX_REG, true);
+ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
+ /* jmp if not carry to start_of_ldx
+ * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
+ * that has to be rejected.
+ */
+ EMIT2(0x73 /* JNC */, 0);
+ end_of_jmp2 = prog;
+
/* xor dst_reg, dst_reg */
emit_mov_imm32(&prog, false, dst_reg, 0);
/* jmp byte_after_ldx */
EMIT2(0xEB, 0);
- /* populate jmp_offset for JNE above */
- temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
+ /* populate jmp_offset for JB above to jump to xor dst_reg */
+ end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
+ /* populate jmp_offset for JNC above to jump to start_of_ldx */
start_of_ldx = prog;
+ end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
}
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
@@ -1305,7 +1340,7 @@ st: if (is_imm8(insn->off))
* End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
* of 4 bytes will be ignored and rbx will be zero inited.
*/
- ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+ ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
}
break;
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index b15ebfe40a73..b0b848d6933a 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
- new = early_memremap(data.phys_map, data.size);
+ new = early_memremap_prot(data.phys_map, data.size,
+ pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
if (!new) {
pr_err("Failed to map new boot services memmap\n");
return;
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index c736cf2ac76b..e2c5b296120d 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -68,7 +68,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
"(__parainstructions|__alt_instructions)(_end)?|"
"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
"__(start|end)_pci_.*|"
-#if CONFIG_FW_LOADER_BUILTIN
+#if CONFIG_FW_LOADER
"__(start|end)_builtin_fw|"
#endif
"__(start|stop)___ksymtab(_gpl)?|"
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index a5b37cc65b17..769b64394298 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2311,7 +2311,14 @@ static void ioc_timer_fn(struct timer_list *timer)
hwm = current_hweight_max(iocg);
new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
usage, &now);
- if (new_hwi < hwm) {
+ /*
+ * Donation calculation assumes hweight_after_donation
+ * to be positive, a condition that a donor w/ hwa < 2
+ * can't meet. Don't bother with donation if hwa is
+ * below 2. It's not gonna make a meaningful difference
+ * anyway.
+ */
+ if (new_hwi < hwm && hwa >= 2) {
iocg->hweight_donating = hwa;
iocg->hweight_after_donation = new_hwi;
list_add(&iocg->surplus_list, &surpluses);
diff --git a/block/fops.c b/block/fops.c
index ad732a36f9b3..0da147edbd18 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -15,6 +15,7 @@
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
+#include <linux/module.h>
#include "blk.h"
static inline struct inode *bdev_file_inode(struct file *file)
@@ -340,8 +341,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
} else {
ret = bio_iov_iter_get_pages(bio, iter);
if (unlikely(ret)) {
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
+ bio_put(bio);
return ret;
}
}
diff --git a/block/ioprio.c b/block/ioprio.c
index 313c14a70bbd..6f01d35a5145 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -220,6 +220,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
pgrp = task_pgrp(current);
else
pgrp = find_vpid(who);
+ read_lock(&tasklist_lock);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
tmpio = get_task_ioprio(p);
if (tmpio < 0)
@@ -229,6 +230,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
else
ret = ioprio_best(ret, tmpio);
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ read_unlock(&tasklist_lock);
+
break;
case IOPRIO_WHO_USER:
uid = make_kuid(current_user_ns(), who);
diff --git a/drivers/Makefile b/drivers/Makefile
index be5d40ae1488..a110338c860c 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -41,8 +41,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
# SOC specific infrastructure drivers.
obj-y += soc/
-obj-$(CONFIG_VIRTIO) += virtio/
-obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio/
+obj-y += virtio/
obj-$(CONFIG_VDPA) += vdpa/
obj-$(CONFIG_XEN) += xen/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cffbe57a8e08..c75fb600740c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4422,23 +4422,20 @@ static int binder_thread_release(struct binder_proc *proc,
__release(&t->lock);
/*
- * If this thread used poll, make sure we remove the waitqueue
- * from any epoll data structures holding it with POLLFREE.
- * waitqueue_active() is safe to use here because we're holding
- * the inner lock.
+ * If this thread used poll, make sure we remove the waitqueue from any
+ * poll data structures holding it.
*/
- if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
- waitqueue_active(&thread->wait)) {
- wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
- }
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ wake_up_pollfree(&thread->wait);
binder_inner_proc_unlock(thread->proc);
/*
- * This is needed to avoid races between wake_up_poll() above and
- * and ep_remove_waitqueue() called for other reasons (eg the epoll file
- * descriptor being closed); ep_remove_waitqueue() holds an RCU read
- * lock, so we can be sure it's done after calling synchronize_rcu().
+ * This is needed to avoid races between wake_up_pollfree() above and
+ * someone else removing the last entry from the queue for other reasons
+ * (e.g. ep_remove_wait_queue() being called due to an epoll file
+ * descriptor being closed). Such other users hold an RCU read lock, so
+ * we can be sure they're done after we call synchronize_rcu().
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL)
synchronize_rcu();
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 340515f54498..47bc74a8c7b6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -671,7 +671,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
- alloc->free_async_space += size + sizeof(struct binder_buffer);
+ alloc->free_async_space += buffer_size + sizeof(struct binder_buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index 50b56cd0039d..e9c7c07fd84c 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -94,6 +94,7 @@ struct ceva_ahci_priv {
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id)
{
+ __le16 *__id = (__le16 *)id;
u32 err_mask;
err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -103,7 +104,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
* Since CEVA controller does not support device sleep feature, we
* need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
*/
- id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+ __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 59ad8c979cb3..aba0c67d1bd6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3920,6 +3920,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
+ /* Similar story with ASMedia 1092 */
+ { "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1b84d5526d77..313e9475507b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2859,8 +2859,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
goto invalid_fld;
}
- if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
- tf->protocol = ATA_PROT_NCQ_NODATA;
+ if ((cdb[2 + cdb_offset] & 0x3) == 0) {
+ /*
+ * When T_LENGTH is zero (No data is transferred), dir should
+ * be DMA_NONE.
+ */
+ if (scmd->sc_data_direction != DMA_NONE) {
+ fp = 2 + cdb_offset;
+ goto invalid_fld;
+ }
+
+ if (ata_is_ncq(tf->protocol))
+ tf->protocol = ATA_PROT_NCQ_NODATA;
+ }
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 304accde365c..6d309e4971b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -37,7 +37,7 @@ struct charlcd_priv {
bool must_clear;
/* contains the LCD config state */
- unsigned long int flags;
+ unsigned long flags;
/* Current escape sequence and it's length or -1 if outside */
struct {
@@ -578,6 +578,9 @@ static int charlcd_init(struct charlcd *lcd)
* Since charlcd_init_display() needs to write data, we have to
* enable mark the LCD initialized just before.
*/
+ if (WARN_ON(!lcd->ops->init_display))
+ return -EINVAL;
+
ret = lcd->ops->init_display(lcd);
if (ret)
return ret;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index f4d0c555de29..04ea92cbd9cf 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1902,7 +1902,7 @@ int dpm_prepare(pm_message_t state)
device_block_probing();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
+ while (!list_empty(&dpm_list) && !error) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8e3983e456f3..286cf1afad78 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1512,9 +1512,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
unsigned long flags;
struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
struct blkfront_info *info = rinfo->dev_info;
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
- if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
+ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
return IRQ_HANDLED;
+ }
spin_lock_irqsave(&rinfo->ring_lock, flags);
again:
@@ -1530,6 +1533,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
unsigned long id;
unsigned int op;
+ eoiflag = 0;
+
RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
id = bret.id;
@@ -1646,6 +1651,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
err:
@@ -1653,6 +1660,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+ /* No EOI in order to avoid further interrupts. */
+
pr_alert("%s disabled for further use\n", info->gd->disk_name);
return IRQ_HANDLED;
}
@@ -1692,8 +1701,8 @@ static int setup_blkring(struct xenbus_device *dev,
if (err)
goto fail;
- err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
- "blkif", rinfo);
+ err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
+ 0, "blkif", rinfo);
if (err <= 0) {
xenbus_dev_fatal(dev, err,
"bind_evtchn_to_irqhandler failed");
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index fb99e3727155..547e6e769546 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -881,7 +881,7 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);
-int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
{
struct mhi_chan *itr, *tmp;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
@@ -898,8 +898,12 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
return -EIO;
- if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
- return -EINVAL;
+ if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
+ dev_warn(dev, "Resuming from non M3 state (%s)\n",
+ TO_MHI_STATE_STR(mhi_get_mhi_state(mhi_cntrl)));
+ if (!force)
+ return -EINVAL;
+ }
/* Notify clients about exiting LPM */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
@@ -940,8 +944,19 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
return 0;
}
+
+int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, false);
+}
EXPORT_SYMBOL_GPL(mhi_pm_resume);
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
+{
+ return __mhi_pm_resume(mhi_cntrl, true);
+}
+EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
+
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
int ret;
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 59a4896a8030..4c577a731709 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -20,7 +20,7 @@
#define MHI_PCI_DEFAULT_BAR_NUM 0
-#define MHI_POST_RESET_DELAY_MS 500
+#define MHI_POST_RESET_DELAY_MS 2000
#define HEALTH_CHECK_PERIOD (HZ * 2)
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 6f225dddc74f..4566e730ef2b 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -687,11 +687,11 @@ err_clk_disable:
static void sunxi_rsb_hw_exit(struct sunxi_rsb *rsb)
{
- /* Keep the clock and PM reference counts consistent. */
- if (pm_runtime_status_suspended(rsb->dev))
- pm_runtime_resume(rsb->dev);
reset_control_assert(rsb->rstc);
- clk_disable_unprepare(rsb->clk);
+
+ /* Keep the clock and PM reference counts consistent. */
+ if (!pm_runtime_status_suspended(rsb->dev))
+ clk_disable_unprepare(rsb->clk);
}
static int __maybe_unused sunxi_rsb_runtime_suspend(struct device *dev)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c837d5416e0e..c59265146e9c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3031,7 +3031,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- schedule_work(&bmc->remove_work);
+ queue_work(remove_work_wq, &bmc->remove_work);
}
/*
@@ -5392,22 +5392,27 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- init_srcu_struct(&ipmi_interfaces_srcu);
-
- timer_setup(&ipmi_timer, ipmi_timeout, 0);
- mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
-
- atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+ rv = init_srcu_struct(&ipmi_interfaces_srcu);
+ if (rv)
+ goto out;
remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
if (!remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out;
+ goto out_wq;
}
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
initialized = true;
+out_wq:
+ if (rv)
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0c62e578749e..48aab77abebf 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1659,6 +1659,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
rv = ssif_check_and_remove(client, ssif_info);
/* If rv is 0 and addr source is not SI_ACPI, continue probing */
if (!rv && ssif_info->addr_source == SI_ACPI) {
@@ -1679,9 +1682,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
- ssif_info->client = client;
- i2c_set_clientdata(client, ssif_info);
-
/* Now check for system interface capabilities */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -1881,6 +1881,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_err(&ssif_info->client->dev,
"Unable to start IPMI SSIF: %d\n", rv);
+ i2c_set_clientdata(client, NULL);
kfree(ssif_info);
}
kfree(resp);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f467d63bbf1e..566ee2c78709 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3418,6 +3418,14 @@ static int __clk_core_init(struct clk_core *core)
clk_prepare_lock();
+ /*
+ * Set hw->core after grabbing the prepare_lock to synchronize with
+ * callers of clk_core_fill_parent_index() where we treat hw->core
+ * being NULL as the clk not being registered yet. This is crucial so
+ * that clks aren't parented until their parent is fully registered.
+ */
+ core->hw->core = core;
+
ret = clk_pm_runtime_get(core);
if (ret)
goto unlock;
@@ -3582,8 +3590,10 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
- if (ret)
+ if (ret) {
hlist_del_init(&core->child_node);
+ core->hw->core = NULL;
+ }
clk_prepare_unlock();
@@ -3847,7 +3857,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
core->num_parents = init->num_parents;
core->min_rate = 0;
core->max_rate = ULONG_MAX;
- hw->core = core;
ret = clk_core_populate_parent_map(core, init);
if (ret)
@@ -3865,7 +3874,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_create_clk;
}
- clk_core_link_consumer(hw->core, hw->clk);
+ clk_core_link_consumer(core, hw->clk);
ret = __clk_core_init(core);
if (!ret)
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index d3e905cf867d..b23758083ce5 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -370,7 +370,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
.probe = imx8qxp_lpcg_clk_probe,
};
-builtin_platform_driver(imx8qxp_lpcg_clk_driver);
+module_platform_driver(imx8qxp_lpcg_clk_driver);
MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index c53a688d8ccc..40a2efb1329b 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -308,7 +308,7 @@ static struct platform_driver imx8qxp_clk_driver = {
},
.probe = imx8qxp_clk_probe,
};
-builtin_platform_driver(imx8qxp_clk_driver);
+module_platform_driver(imx8qxp_clk_driver);
MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index eaedcceb766f..8f65b9bdafce 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1429,6 +1429,15 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
+ /*
+ * If the bootloader left the PLL enabled it's likely that there are
+ * RCGs that will lock up if we disable the PLL below.
+ */
+ if (trion_pll_is_enabled(pll, regmap)) {
+ pr_debug("Trion PLL is already enabled, skipping configuration\n");
+ return;
+ }
+
clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
index b2d00b451963..45d9cca28064 100644
--- a/drivers/clk/qcom/clk-regmap-mux.c
+++ b/drivers/clk/qcom/clk-regmap-mux.c
@@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
val &= mask;
if (mux->parent_map)
- return qcom_find_src_index(hw, mux->parent_map, val);
+ return qcom_find_cfg_index(hw, mux->parent_map, val);
return val;
}
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 0932e019dd12..75f09e6e057e 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
}
EXPORT_SYMBOL_GPL(qcom_find_src_index);
+int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
+{
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == map[i].cfg)
+ return i;
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
+
struct regmap *
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
{
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index bb39a7e106d8..9c8f7b798d9f 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -49,6 +49,8 @@ extern void
qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
u8 src);
+extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
+ u8 cfg);
extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
const char *name, unsigned long rate);
diff --git a/drivers/clk/qcom/gcc-sm6125.c b/drivers/clk/qcom/gcc-sm6125.c
index 543cfab7561f..431b55bb0d2f 100644
--- a/drivers/clk/qcom/gcc-sm6125.c
+++ b/drivers/clk/qcom/gcc-sm6125.c
@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index d52f976dc875..d5cb372f0901 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -543,8 +543,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype);
if (IS_ERR(regclk)) {
- kfree(name);
pr_err("error setting up syscon ICST clock %s\n", name);
+ kfree(name);
return;
}
of_clk_add_provider(np, of_clk_src_simple_get, regclk);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 9a04eacc4412..1ecd52f903b8 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -394,8 +394,13 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
-static void erratum_set_next_event_generic(const int access, unsigned long evt,
- struct clock_event_device *clk)
+/*
+ * Force the inlining of this function so that the register accesses
+ * can be themselves correctly inlined.
+ */
+static __always_inline
+void erratum_set_next_event_generic(const int access, unsigned long evt,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cval;
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 3819ef5b7098..3245eb0c602d 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -47,7 +47,7 @@ static int __init timer_get_base_and_rate(struct device_node *np,
pr_warn("pclk for %pOFn is present, but could not be activated\n",
np);
- if (!of_property_read_u32(np, "clock-freq", rate) &&
+ if (!of_property_read_u32(np, "clock-freq", rate) ||
!of_property_read_u32(np, "clock-frequency", rate))
return 0;
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index fa768f10635f..fd29861526d6 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -211,6 +211,12 @@ static u32 uof_get_ae_mask(u32 obj_num)
return adf_4xxx_fw_config[obj_num].ae_mask;
}
+static u32 get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ /* For the moment do not report vf2pf sources */
+ return 0;
+}
+
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &adf_4xxx_class;
@@ -254,6 +260,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->enable_pfvf_comms = pfvf_comms_disabled;
+ hw_data->get_vf2pf_sources = get_vf2pf_sources;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index cd0d745eb071..33baf1591a49 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -373,7 +373,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
struct axi_dma_desc *first)
{
u32 priority = chan->chip->dw->hdata->priority[chan->id];
- struct axi_dma_chan_config config;
+ struct axi_dma_chan_config config = {};
u32 irq_mask;
u8 lms = 0; /* Select AXI0 master for LLI fetching */
@@ -391,7 +391,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
config.prior = priority;
config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
- config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
+ config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
switch (chan->direction) {
case DMA_MEM_TO_DEV:
dw_axi_dma_set_byte_halfword(chan, true);
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 198f6cd8ac1b..cee7aa231d7b 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -187,17 +187,9 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* DMA configuration */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
+ if (err) {
pci_err(pdev, "DMA mask 64 set failed\n");
return err;
- } else {
- pci_err(pdev, "DMA mask 64 set failed\n");
-
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pci_err(pdev, "DMA mask 32 set failed\n");
- return err;
- }
}
/* Data structure allocation */
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 17f2f8a31b63..cf2c8bc4f147 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -137,10 +137,10 @@ halt:
INIT_WORK(&idxd->work, idxd_device_reinit);
queue_work(idxd->wq, &idxd->work);
} else {
- spin_lock(&idxd->dev_lock);
idxd->state = IDXD_DEV_HALTED;
idxd_wqs_quiesce(idxd);
idxd_wqs_unmap_portal(idxd);
+ spin_lock(&idxd->dev_lock);
idxd_device_clear_state(idxd);
dev_err(&idxd->pdev->dev,
"idxd halted, need %s.\n",
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index de76fb4abac2..83452fbbb168 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -106,6 +106,7 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
{
struct idxd_desc *d, *t, *found = NULL;
struct llist_node *head;
+ LIST_HEAD(flist);
desc->completion->status = IDXD_COMP_DESC_ABORT;
/*
@@ -120,7 +121,11 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
found = desc;
continue;
}
- list_add_tail(&desc->list, &ie->work_list);
+
+ if (d->completion->status)
+ list_add_tail(&d->list, &flist);
+ else
+ list_add_tail(&d->list, &ie->work_list);
}
}
@@ -130,6 +135,17 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
if (found)
complete_desc(found, IDXD_COMPLETE_ABORT);
+
+ /*
+ * complete_desc() will return desc to allocator and the desc can be
+ * acquired by a different process and the desc->list can be modified.
+ * Delete desc from list so the list traversing does not get corrupted
+ * by the other process.
+ */
+ list_for_each_entry_safe(d, t, &flist, list) {
+ list_del_init(&d->list);
+ complete_desc(d, IDXD_COMPLETE_NORMAL);
+ }
}
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index 962b6e05287b..d95c421877fb 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -874,4 +874,4 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 041d8e32d630..6e56d1cef5ee 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -4534,45 +4534,60 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ irq_res.sets = 1;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i], "tchan");
+ irq_res.sets = rm_res->sets;
}
- irq_res.sets = rm_res->sets;
/* rchan and matching default flow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i], "rchan");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start;
- irq_res.desc[i].num = rm_res->desc[i].num;
- irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
- irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = 0;
+ irq_res.desc[0].num = ud->tchan_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+ }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
- for (j = 0; j < rm_res->sets; j++, i++) {
- if (rm_res->desc[j].num) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- ud->soc_data->oes.udma_rchan;
- irq_res.desc[i].num = rm_res->desc[j].num;
- }
- if (rm_res->desc[j].num_sec) {
- irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
- ud->soc_data->oes.udma_rchan;
- irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = 0;
+ irq_res.desc[i].num = ud->rchan_cnt;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ if (rm_res->desc[j].num) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -4690,14 +4705,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->bchan_map, ud->bchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->bchan_map,
&rm_res->desc[i],
"bchan");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
}
/* tchan ranges */
@@ -4705,14 +4721,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ irq_res.sets += 2;
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tchan_map,
&rm_res->desc[i],
"tchan");
+ irq_res.sets += rm_res->sets * 2;
}
- irq_res.sets += rm_res->sets * 2;
}
/* rchan ranges */
@@ -4720,47 +4737,72 @@ static int bcdma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
if (IS_ERR(rm_res)) {
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ irq_res.sets += 2;
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rchan_map,
&rm_res->desc[i],
"rchan");
+ irq_res.sets += rm_res->sets * 2;
}
- irq_res.sets += rm_res->sets * 2;
}
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
if (ud->bchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start +
- oes->bcdma_bchan_ring;
- irq_res.desc[i].num = rm_res->desc[i].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = oes->bcdma_bchan_ring;
+ irq_res.desc[0].num = ud->bchan_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
}
}
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
- for (j = 0; j < rm_res->sets; j++, i += 2) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->bcdma_tchan_data;
- irq_res.desc[i].num = rm_res->desc[j].num;
-
- irq_res.desc[i + 1].start = rm_res->desc[j].start +
- oes->bcdma_tchan_ring;
- irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->bcdma_tchan_data;
+ irq_res.desc[i].num = ud->tchan_cnt;
+ irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = ud->tchan_cnt;
+ i += 2;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
}
}
if (ud->rchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
- for (j = 0; j < rm_res->sets; j++, i += 2) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->bcdma_rchan_data;
- irq_res.desc[i].num = rm_res->desc[j].num;
-
- irq_res.desc[i + 1].start = rm_res->desc[j].start +
- oes->bcdma_rchan_ring;
- irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->bcdma_rchan_data;
+ irq_res.desc[i].num = ud->rchan_cnt;
+ irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = ud->rchan_cnt;
+ i += 2;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
}
}
@@ -4858,39 +4900,54 @@ static int pktdma_setup_resources(struct udma_dev *ud)
if (IS_ERR(rm_res)) {
/* all rflows are assigned exclusively to Linux */
bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+ irq_res.sets = 1;
} else {
bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->rflow_in_use,
&rm_res->desc[i], "rflow");
+ irq_res.sets = rm_res->sets;
}
- irq_res.sets = rm_res->sets;
/* tflow ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
if (IS_ERR(rm_res)) {
/* all tflows are assigned exclusively to Linux */
bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ irq_res.sets++;
} else {
bitmap_fill(ud->tflow_map, ud->tflow_cnt);
for (i = 0; i < rm_res->sets; i++)
udma_mark_resource_ranges(ud, ud->tflow_map,
&rm_res->desc[i], "tflow");
+ irq_res.sets += rm_res->sets;
}
- irq_res.sets += rm_res->sets;
irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (!irq_res.desc)
+ return -ENOMEM;
rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
- for (i = 0; i < rm_res->sets; i++) {
- irq_res.desc[i].start = rm_res->desc[i].start +
- oes->pktdma_tchan_flow;
- irq_res.desc[i].num = rm_res->desc[i].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[0].start = oes->pktdma_tchan_flow;
+ irq_res.desc[0].num = ud->tflow_cnt;
+ i = 1;
+ } else {
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
- for (j = 0; j < rm_res->sets; j++, i++) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- oes->pktdma_rchan_flow;
- irq_res.desc[i].num = rm_res->desc[j].num;
+ if (IS_ERR(rm_res)) {
+ irq_res.desc[i].start = oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = ud->rflow_cnt;
+ } else {
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
index 51201600d789..800673910b51 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -16,7 +16,6 @@ struct scpi_pm_domain {
struct generic_pm_domain genpd;
struct scpi_ops *ops;
u32 domain;
- char name[30];
};
/*
@@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
scpi_pd->domain = i;
scpi_pd->ops = scpi_ops;
- sprintf(scpi_pd->name, "%pOFn.%d", np, i);
- scpi_pd->genpd.name = scpi_pd->name;
+ scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%pOFn.%d", np, i);
+ if (!scpi_pd->genpd.name) {
+ dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
+ np, i);
+ continue;
+ }
scpi_pd->genpd.power_off = scpi_pd_power_off;
scpi_pd->genpd.power_on = scpi_pd_power_on;
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 6d66fe03fb6a..fd89899aeeed 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -77,13 +77,14 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
const char *root_path, *filename = NULL;
char *root_path_buf;
size_t root_len;
+ size_t root_path_buf_len = 512;
- root_path_buf = kzalloc(512, GFP_KERNEL);
+ root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
goto out;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
- sizeof(root_path_buf));
+ root_path_buf_len);
if (IS_ERR(root_path))
goto out;
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 3d6ef37a7702..b3a9b8488f11 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32)
- generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
+ generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2);
}
chained_irq_exit(ic, desc);
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 026903e3ef54..08b9e2cf4f2d 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -46,6 +46,7 @@
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
+ struct irq_chip irqchip;
/*
* Cache pin direction to save us one transfer, since the hardware has
@@ -383,15 +384,6 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd)
mutex_unlock(&dln2->irq_lock);
}
-static struct irq_chip dln2_gpio_irqchip = {
- .name = "dln2-irq",
- .irq_mask = dln2_irq_mask,
- .irq_unmask = dln2_irq_unmask,
- .irq_set_type = dln2_irq_set_type,
- .irq_bus_lock = dln2_irq_bus_lock,
- .irq_bus_sync_unlock = dln2_irq_bus_unlock,
-};
-
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
const void *data, int len)
{
@@ -473,8 +465,15 @@ static int dln2_gpio_probe(struct platform_device *pdev)
dln2->gpio.direction_output = dln2_gpio_direction_output;
dln2->gpio.set_config = dln2_gpio_set_config;
+ dln2->irqchip.name = "dln2-irq",
+ dln2->irqchip.irq_mask = dln2_irq_mask,
+ dln2->irqchip.irq_unmask = dln2_irq_unmask,
+ dln2->irqchip.irq_set_type = dln2_irq_set_type,
+ dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock,
+ dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock,
+
girq = &dln2->gpio.irq;
- girq->chip = &dln2_gpio_irqchip;
+ girq->chip = &dln2->irqchip;
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 84f96b78f32a..9f4941bc5760 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -100,11 +100,7 @@ static int _virtio_gpio_req(struct virtio_gpio *vgpio, u16 type, u16 gpio,
virtqueue_kick(vgpio->request_vq);
mutex_unlock(&vgpio->lock);
- if (!wait_for_completion_timeout(&line->completion, HZ)) {
- dev_err(dev, "GPIO operation timed out\n");
- ret = -ETIMEDOUT;
- goto out;
- }
+ wait_for_completion(&line->completion);
if (unlikely(res->status != VIRTIO_GPIO_STATUS_OK)) {
dev_err(dev, "GPIO request failed: %d\n", gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e651b959141..694c3726e0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3166,6 +3166,12 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -4461,7 +4467,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
- int i, j, r = 0;
+ int i, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4483,15 +4489,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
/*clear job fence from fence drv to avoid force_completion
*leave NULL and vm flush fence in fence drv */
- for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
- struct dma_fence *old, **ptr;
+ amdgpu_fence_driver_clear_job_fences(ring);
- ptr = &ring->fence_drv.fences[j];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
- RCU_INIT_POINTER(*ptr, NULL);
- }
- }
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index ea00090b3fb3..bcc9343353b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -526,10 +526,15 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
}
}
+union gc_info {
+ struct gc_info_v1_0 v1;
+ struct gc_info_v2_0 v2;
+};
+
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
- struct gc_info_v1_0 *gc_info;
+ union gc_info *gc_info;
if (!adev->mman.discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
@@ -537,28 +542,55 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
- gc_info = (struct gc_info_v1_0 *)(adev->mman.discovery_bin +
+ gc_info = (union gc_info *)(adev->mman.discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
-
- adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
- adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->gc_num_wgp0_per_sa) +
- le32_to_cpu(gc_info->gc_num_wgp1_per_sa));
- adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->gc_num_sa_per_se);
- adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->gc_num_rb_per_se);
- adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->gc_num_gl2c);
- adev->gfx.config.max_gprs = le32_to_cpu(gc_info->gc_num_gprs);
- adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->gc_num_max_gs_thds);
- adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->gc_gs_table_depth);
- adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->gc_gsprim_buff_depth);
- adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->gc_double_offchip_lds_buffer);
- adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->gc_wave_size);
- adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->gc_max_waves_per_simd);
- adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->gc_max_scratch_slots_per_cu);
- adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->gc_lds_size);
- adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->gc_num_sc_per_se) /
- le32_to_cpu(gc_info->gc_num_sa_per_se);
- adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->gc_num_packer_per_sc);
-
+ switch (gc_info->v1.header.version_major) {
+ case 1:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+ break;
+ case 2:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled GC info table %d.%d\n",
+ gc_info->v1.header.version_major,
+ gc_info->v1.header.version_minor);
+ return -EINVAL;
+ }
return 0;
}
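The hunk above switches amdgpu_discovery_get_gfx_info() from a single struct to a union of table revisions and dispatches on the common header's version_major. A minimal userspace sketch of that versioned-union pattern follows; the tbl_* names and field meanings are invented for illustration and are not the amdgpu layouts.

#include <stdint.h>
#include <stdio.h>

/* Common header shared by every table revision. */
struct tbl_header {
        uint16_t version_major;
        uint16_t version_minor;
};

struct tbl_v1 {
        struct tbl_header header;
        uint32_t num_units;      /* v1 counts units directly */
};

struct tbl_v2 {
        struct tbl_header header;
        uint32_t num_groups;     /* v2 counts groups of 4 units */
};

/* Overlay both layouts; the header is valid regardless of revision. */
union tbl {
        struct tbl_v1 v1;
        struct tbl_v2 v2;
};

static int parse_units(const union tbl *t, uint32_t *units)
{
        switch (t->v1.header.version_major) {
        case 1:
                *units = t->v1.num_units;
                return 0;
        case 2:
                *units = t->v2.num_groups * 4;
                return 0;
        default:
                fprintf(stderr, "Unhandled table %u.%u\n",
                        t->v1.header.version_major,
                        t->v1.header.version_minor);
                return -1;
        }
}

int main(void)
{
        union tbl t = { .v2 = { .header = { 2, 0 }, .num_groups = 3 } };
        uint32_t units;

        if (!parse_units(&t, &units))
                printf("units = %u\n", units);  /* prints "units = 12" */
        return 0;
}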
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ae6ab93c868b..7444484a12bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -384,7 +384,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct amdgpu_vm_bo_base *bo_base;
int r;
- if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
+ if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;
r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ad95de6399af..86ca80da9eea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -328,10 +328,11 @@ module_param_named(aspm, amdgpu_aspm, int, 0444);
/**
* DOC: runpm (int)
- * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
- * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
*/
-MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
/**
@@ -2153,7 +2154,10 @@ static int amdgpu_pmops_suspend(struct device *dev)
adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s3 = false;
-
+ if (r)
+ return r;
+ if (!adev->in_s0ix)
+ r = amdgpu_asic_reset(adev);
return r;
}
@@ -2234,12 +2238,27 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /*
+ * By setting mp1_state to PP_MP1_STATE_UNLOAD, MP1 will do the
+ * proper cleanup and put itself into a state ready for PNP. That
+ * can address some random resume failures observed on BOCO-capable
+ * platforms.
+ * TODO: this may also be needed for PX-capable platforms.
+ */
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
ret = amdgpu_device_suspend(drm_dev, false);
if (ret) {
adev->in_runpm = false;
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_NONE;
return ret;
}
+ if (amdgpu_device_supports_boco(drm_dev))
+ adev->mp1_state = PP_MP1_STATE_NONE;
+
if (amdgpu_device_supports_px(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
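The runtime-suspend change above brackets the suspend call: mp1_state is set to PP_MP1_STATE_UNLOAD before suspending on BOCO-capable parts and restored to PP_MP1_STATE_NONE on both the error and success paths. A small, self-contained sketch of that set-before/restore-after ordering, using invented names (dev_ctx, hw_suspend) rather than the amdgpu symbols:

#include <stdio.h>

enum mp1_state { MP1_STATE_NONE, MP1_STATE_UNLOAD };

struct dev_ctx {
        enum mp1_state mp1_state;
        int supports_boco;
};

/* Stand-in for the real hardware suspend call; it may fail. */
static int hw_suspend(struct dev_ctx *ctx)
{
        (void)ctx;
        return 0;
}

static int do_runtime_suspend(struct dev_ctx *ctx)
{
        int ret;

        /* Ask firmware to clean up before the hardware goes down. */
        if (ctx->supports_boco)
                ctx->mp1_state = MP1_STATE_UNLOAD;

        ret = hw_suspend(ctx);

        /* Restore the marker on both the error and the success path. */
        if (ctx->supports_boco)
                ctx->mp1_state = MP1_STATE_NONE;

        return ret;
}

int main(void)
{
        struct dev_ctx ctx = { .supports_boco = 1 };

        printf("suspend returned %d, state %d\n",
               do_runtime_suspend(&ctx), ctx.mp1_state);
        return 0;
}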
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3b7e86ea7167..9afd11ca2709 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -77,11 +77,13 @@ void amdgpu_fence_slab_fini(void)
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops)
+ if (__f->base.ops == &amdgpu_fence_ops ||
+ __f->base.ops == &amdgpu_job_fence_ops)
return __f;
return NULL;
@@ -158,19 +160,18 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
}
seq = ++ring->fence_drv.sync_seq;
- if (job != NULL && job->job_run_counter) {
+ if (job && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
} else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
- }
-
- if (job != NULL) {
- /* mark this fence has a parent job */
- set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+ if (job)
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ else
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
@@ -621,6 +622,25 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
+ *
+ * @ring: ring whose job-embedded fences are to be cleared
+ *
+ */
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
+{
+ int i;
+ struct dma_fence *old, **ptr;
+
+ for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+ ptr = &ring->fence_drv.fences[i];
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && old->ops == &amdgpu_job_fence_ops)
+ RCU_INIT_POINTER(*ptr, NULL);
+ }
+}
+
+/**
* amdgpu_fence_driver_force_completion - force signal latest fence of ring
*
* @ring: fence of the ring to signal
@@ -643,16 +663,14 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_ring *ring;
+ return (const char *)to_amdgpu_fence(f)->ring->name;
+}
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
- ring = to_amdgpu_ring(job->base.sched);
- } else {
- ring = to_amdgpu_fence(f)->ring;
- }
- return (const char *)ring->name;
+ return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
/**
@@ -665,18 +683,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_ring *ring;
+ if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+ return true;
+}
- ring = to_amdgpu_ring(job->base.sched);
- } else {
- ring = to_amdgpu_fence(f)->ring;
- }
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above, but it
+ * only handles the job-embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
- if (!timer_pending(&ring->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(ring);
+ if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
return true;
}
@@ -692,19 +717,23 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
- /* free job if fence has a parent job */
- struct amdgpu_job *job;
-
- job = container_of(f, struct amdgpu_job, hw_fence);
- kfree(job);
- } else {
/* free fence_slab if it's separated fence*/
- struct amdgpu_fence *fence;
+ kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f));
+}
- fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
- }
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+ /* free job if fence has a parent job */
+ kfree(container_of(f, struct amdgpu_job, hw_fence));
}
/**
@@ -720,6 +749,19 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above, but it
+ * only handles the job-embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -727,6 +769,12 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+ .get_driver_name = amdgpu_fence_get_driver_name,
+ .get_timeline_name = amdgpu_job_fence_get_timeline_name,
+ .enable_signaling = amdgpu_job_fence_enable_signaling,
+ .release = amdgpu_job_fence_release,
+};
/*
* Fence debugfs
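The amdgpu_fence.c changes above drop the AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT flag in favour of a second dma_fence_ops table, so a fence's kind is identified by its ops pointer and each ops table selects the right containing object to free. A standalone sketch of that ops-pointer-as-type-tag idea is below; the fence/job structs and ops are illustrative, not the dma_fence API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fence;

struct fence_ops {
        const char *(*name)(struct fence *f);
        void (*release)(struct fence *f);
};

struct fence {
        const struct fence_ops *ops;
};

struct plain_fence {
        struct fence base;
};

struct job {
        int job_id;
        struct fence hw_fence;   /* fence embedded in a larger object */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static const char *plain_name(struct fence *f) { (void)f; return "plain"; }

static void plain_release(struct fence *f)
{
        free(container_of(f, struct plain_fence, base));
}

static const char *job_name(struct fence *f) { (void)f; return "job"; }

static void job_release(struct fence *f)
{
        /* the embedded fence frees its parent job, not itself */
        free(container_of(f, struct job, hw_fence));
}

static const struct fence_ops plain_ops = { plain_name, plain_release };
static const struct fence_ops job_ops = { job_name, job_release };

int main(void)
{
        struct plain_fence *p = calloc(1, sizeof(*p));
        struct job *j = calloc(1, sizeof(*j));

        if (!p || !j)
                return 1;
        p->base.ops = &plain_ops;
        j->hw_fence.ops = &job_ops;

        /* the ops pointer identifies the fence kind; no flag bit needed */
        printf("%s / %s\n", p->base.ops->name(&p->base),
               j->hw_fence.ops->name(&j->hw_fence));
        p->base.ops->release(&p->base);
        j->hw_fence.ops->release(&j->hw_fence);
        return 0;
}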
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4d380e79752c..fae7d185ad0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -53,9 +53,6 @@ enum amdgpu_ring_priority_level {
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
-/* fence flag bit to indicate the face is embedded in job*/
-#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT (DMA_FENCE_FLAG_USER_BITS + 1)
-
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
@@ -114,6 +111,7 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b305fd39874f..edb3e3b08eed 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3070,8 +3070,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GDS |
AMD_PG_SUPPORT_RLC_SMU_HS)) {
- WREG32(mmRLC_JUMP_TABLE_RESTORE,
- adev->gfx.rlc.cp_table_gpu_addr >> 8);
+ WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
+ adev->gfx.rlc.cp_table_gpu_addr >> 8);
gfx_v9_0_init_gfx_power_gating(adev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 480e41847d7c..ec4d5e15b766 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -162,7 +162,6 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 14c1c1a297dd..6e0ace2fbfab 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -196,7 +196,6 @@ static void gfxhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index e80d1dc43079..b4eddf6e98a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -197,7 +197,6 @@ static void gfxhub_v2_1_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, GCMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cb82404df534..d84523cf5f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1808,6 +1808,14 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
+ /*
+ * Pair the operations done in gmc_v9_0_hw_init and thus maintain
+ * a correct cached state for GMC. Otherwise, re-gating on S3
+ * resume will fail due to a stale cached state.
+ */
+ if (adev->mmhub.funcs->update_power_gating)
+ adev->mmhub.funcs->update_power_gating(adev, false);
+
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a99953833820..1da2ec692057 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -145,7 +145,6 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
@@ -302,10 +301,10 @@ static void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return;
- if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
- amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
-
- }
+ if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
+ amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GMC,
+ enable);
}
static int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index f80a14a1b82d..f5f7181f9af5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -165,7 +165,6 @@ static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 25f8e93e5ec3..3718ff610ab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -267,7 +267,6 @@ static void mmhub_v2_0_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index a11d60ec6321..9e16da28505a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -194,7 +194,6 @@ static void mmhub_v2_3_init_tlb_regs(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 1);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
- tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC); /* UC, uncached */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index c4ef822bbe8c..ff49eeaf7882 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -190,8 +190,6 @@ static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid)
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
- ECO_BITS, 0);
- tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
MTYPE, MTYPE_UC);/* XXX for emulation. */
tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL,
ATC_EN, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index d54d720b3cf6..3799226defc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -246,6 +246,13 @@ static int vcn_v1_0_suspend(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool idle_work_unexecuted;
+
+ idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
+ if (idle_work_unexecuted) {
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, false);
+ }
r = vcn_v1_0_hw_fini(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1cd6b9f4a568..e727f1dd2a9a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1051,6 +1051,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
fw_inst_const = dmub_fw->data +
@@ -2576,7 +2581,8 @@ static int dm_resume(void *handle)
*/
link_enc_cfg_init(dm->dc, dc_state);
- amdgpu_dm_outbox_init(adev);
+ if (dc_enable_dmub_notifications(adev->dm.dc))
+ amdgpu_dm_outbox_init(adev);
r = dm_dmub_hw_init(adev);
if (r)
@@ -2625,6 +2631,10 @@ static int dm_resume(void *handle)
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_enable_dmub_notifications(adev->dm.dc))
+ amdgpu_dm_outbox_init(adev);
+
/* Before powering on DC we need to re-initialize DMUB. */
r = dm_dmub_hw_init(adev);
if (r)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index f4c9a458ace8..9df38e2ee4f4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
union display_idle_optimization_u idle_info = { 0 };
idle_info.idle_info.df_request_disabled = 1;
idle_info.idle_info.phy_ref_clk_off = 1;
+ idle_info.idle_info.s0i2_rdy = 1;
dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c8457babfdea..c0bdc23702c8 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3945,12 +3945,9 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
#if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-
+
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- link_enc = pipe_ctx->stream->link->link_enc;
- config.dio_output_type = pipe_ctx->stream->link->ep_type;
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
link_enc = pipe_ctx->stream->link->link_enc;
else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index b01077a6af0e..fad3d883ed89 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -226,6 +226,8 @@ static inline void get_edp_links(const struct dc *dc,
*edp_num = 0;
for (i = 0; i < dc->link_count; i++) {
// report any eDP links, even unconnected DDI's
+ if (!dc->links[i])
+ continue;
if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {
edp_links[*edp_num] = dc->links[i];
if (++(*edp_num) == MAX_NUM_EDP)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 34001a30d449..10e613ec7d24 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 3883f918b3bb..83f5d9aaffcb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
index 0fa381088d1d..faec0297ec0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index d452a0d1777e..79313d1ab5d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -874,7 +874,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 79a66e0c4303..98852b586295 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index fbaa03f26d8b..e472b729d869 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index fcf96cf08c76..16e7059393fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4a9b64023675..87cec14b7870 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 05335a8c3c2d..17e2f2bb29ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -101,6 +101,8 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 18896294ae12..27afbe6ec0fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {
clk_src_regs(3, D),
clk_src_regs(4, E)
};
+/* pll_id is remapped in DMUB; in the driver it is the logical instance */
+static const struct dce110_clk_src_regs clk_src_regs_b0[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, F),
+ clk_src_regs(3, G),
+ clk_src_regs(4, E)
+};
static const struct dce110_clk_src_shift cs_shift = {
CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
@@ -994,7 +1002,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = false,
- .pipe_split_policy = MPC_SPLIT_AVOID,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -2276,14 +2284,27 @@ static bool dcn31_resource_construct(
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
- pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+ /* move phypllx_pixclk_resync to DMUB next */
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs_b0[2], false);
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+ dcn30_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs_b0[3], false);
+ } else {
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
- pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
+ pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
+ }
+
pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =
dcn30_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 416fe7a721d8..a513363b3326 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
+/* temp: B0-specific until the switch to dcn313 headers */
+#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
+#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f
+#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+
+//PHYPLLF_PIXCLK_RESYNC_CNTL
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+
+//PHYPLLG_PIXCLK_RESYNC_CNTL
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+#endif
#endif /* _DCN31_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 7ec4331e67f2..a486769b66c6 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -143,6 +143,55 @@ struct gc_info_v1_0 {
uint32_t gc_num_gl2a;
};
+struct gc_info_v1_1 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_wgp0_per_sa;
+ uint32_t gc_num_wgp1_per_sa;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_gl2c;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_sa_per_se;
+ uint32_t gc_num_packer_per_sc;
+ uint32_t gc_num_gl2a;
+ uint32_t gc_num_tcp_per_sa;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_tcps;
+};
+
+struct gc_info_v2_0 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_cu_per_sh;
+ uint32_t gc_num_sh_per_se;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_tccs;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_packer_per_sc;
+};
+
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 8d796ed3b7d1..619f8d305292 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -1328,7 +1328,12 @@ static int pp_set_powergating_by_smu(void *handle,
pp_dpm_powergate_vce(handle, gate);
break;
case AMD_IP_BLOCK_TYPE_GMC:
- pp_dpm_powergate_mmhub(handle);
+ /*
+ * For now, this is only used on PICASSO.
+ * And only "gate" operation is supported.
+ */
+ if (gate)
+ pp_dpm_powergate_mmhub(handle);
break;
case AMD_IP_BLOCK_TYPE_GFX:
ret = pp_dpm_powergate_gfx(handle, gate);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8a3244585d80..8a817932acdf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1568,9 +1568,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- /* skip CGPG when in S0ix */
- if (smu->is_apu && !adev->in_s0ix)
- smu_set_gfx_cgpg(&adev->smu, false);
+ smu_set_gfx_cgpg(&adev->smu, false);
return 0;
}
@@ -1601,8 +1599,7 @@ static int smu_resume(void *handle)
return ret;
}
- if (smu->is_apu)
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index d60b8c5e8715..9c91e79c955f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -120,7 +120,8 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
{
- if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ /* So far SMU12 is only implemented for the Renoir series, so no APU check is needed here. */
+ if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) || smu->adev->in_s0ix)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -191,6 +192,9 @@ int smu_v12_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 59a7d276541d..7d50827cf0a8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1621,7 +1621,7 @@ static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
- en ? 1 : 0,
+ en ? 0 : 1,
NULL);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 35145db6eedf..19a5d2c39c8d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -198,6 +198,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
int smu_v13_0_check_fw_version(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
uint16_t smu_major;
uint8_t smu_minor, smu_debug;
@@ -210,6 +211,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_major = (smu_version >> 16) & 0xffff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
+ if (smu->is_apu)
+ adev->pm.fw_version = smu_version;
switch (smu->adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 2):
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 1e30eaeb0e1b..d5c98f79d58d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1121,7 +1121,10 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+ if (ast_state)
+ __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static struct drm_crtc_state *
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8e7a124d6c5a..22bf690910b2 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1743,7 +1743,13 @@ void drm_fb_helper_fill_info(struct fb_info *info,
sizes->fb_width, sizes->fb_height);
info->par = fb_helper;
- snprintf(info->fix.id, sizeof(info->fix.id), "%s",
+ /*
+ * The DRM driver's fbdev emulation device name can be confusing if the
+ * driver name also has a "drm" suffix in it, leading to names such as
+ * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's uAPI and can't
+ * be changed because user-space tools (e.g. pm-utils) match against it.
+ */
+ snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
fb_helper->dev->driver->name);
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 7b9f69f21f1e..bca0de92802e 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -9,6 +9,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index c9a9d74f338c..c313a5b4549c 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
- if (!ret)
+ if (!ret) {
+ /* If the requested seqno is already signaled
+ * drm_syncobj_find_fence may return a NULL
+ * fence. To make sure the recipient gets
+ * signalled, use a new fence instead.
+ */
+ if (!*fence)
+ *fence = dma_fence_get_stub();
+
goto out;
+ }
dma_fence_put(*fence);
} else {
ret = -EINVAL;
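The drm_syncobj fix above handles the case where dma_fence_chain_find_seqno() succeeds but leaves the fence pointer NULL because the requested point already signalled; the caller is handed an already-signalled stub instead of NULL. A toy sketch of that substitution, with made-up names (find_fence, stub_fence) rather than the DRM API:

#include <stdio.h>

struct fence { int signalled; };

static struct fence stub_fence = { .signalled = 1 };

/* Pretend lookup: returns 0 on success but may leave *out NULL when the
 * requested point has already signalled. */
static int find_fence(int point, struct fence **out)
{
        (void)point;
        *out = NULL;              /* "already signalled" in this toy case */
        return 0;
}

int main(void)
{
        struct fence *f;

        if (find_fence(3, &f) == 0 && !f)
                f = &stub_fence;  /* caller always gets something to wait on */

        printf("signalled = %d\n", f->signalled);
        return 0;
}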
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 2dc9d632969d..aef69522f0be 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -596,7 +596,7 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
continue;
offset = readcount + dmc->dmc_info[id].dmc_offset * 4;
- if (fw->size - offset < 0) {
+ if (offset > fw->size) {
drm_err(&dev_priv->drm, "Reading beyond the fw_size\n");
continue;
}
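The intel_dmc change replaces a bounds check that could never fire: fw->size is unsigned, so fw->size - offset wraps to a huge positive value instead of going negative. The self-contained snippet below demonstrates the pitfall and the corrected comparison with plain size_t values:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t size = 100;     /* unsigned, like firmware->size */
        size_t offset = 200;   /* past the end */

        /* Never triggers: the unsigned subtraction wraps, and many
         * compilers warn that the comparison is always false. */
        if (size - offset < 0)
                puts("old check caught the overrun");

        /* Correct bounds check. */
        if (offset > size)
                puts("new check caught the overrun");

        return 0;
}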
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index fb33d0322960..c37c9f0d8167 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -564,6 +564,7 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
container_of_user(base, typeof(*ext), base);
const struct set_proto_ctx_engines *set = data;
struct drm_i915_private *i915 = set->i915;
+ struct i915_engine_class_instance prev_engine;
u64 flags;
int err = 0, n, i, j;
u16 slot, width, num_siblings;
@@ -629,7 +630,6 @@ set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
/* Create contexts / engines */
for (i = 0; i < width; ++i) {
intel_engine_mask_t current_mask = 0;
- struct i915_engine_class_instance prev_engine;
for (j = 0; j < num_siblings; ++j) {
struct i915_engine_class_instance ci;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 4d7da07442f2..cb0bf6ffd0e3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -3017,7 +3017,7 @@ eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
fence_array = dma_fence_array_create(eb->num_batches,
fences,
eb->context->parallel.fence_context,
- eb->context->parallel.seqno,
+ eb->context->parallel.seqno++,
false);
if (!fence_array) {
kfree(fences);
@@ -3277,6 +3277,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
if (IS_ERR(out_fence)) {
err = PTR_ERR(out_fence);
+ out_fence = NULL;
if (eb.requests[0])
goto err_request;
else
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 67d14afa6623..b67f620c3d93 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -6,6 +6,7 @@
#include <linux/slab.h> /* fault-inject.h is not standalone! */
#include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index ed73d9bc9d40..2400d6423ba5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1127,6 +1127,15 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
/* Wa_1607087056:icl,ehl,jsl */
if (IS_ICELAKE(i915) ||
IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1852,15 +1861,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
- /* Wa_1407352427:icl,ehl */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
- PSDUNIT_CLKGATE_DIS);
-
- /* Wa_1406680159:icl,ehl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index c48557dfa04c..302e9ff0602c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1662,11 +1662,11 @@ static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_parent(cn));
list_del_init(&cn->guc_id.link);
- ce->guc_id = cn->guc_id;
+ ce->guc_id.id = cn->guc_id.id;
- spin_lock(&ce->guc_state.lock);
+ spin_lock(&cn->guc_state.lock);
clr_context_registered(cn);
- spin_unlock(&ce->guc_state.lock);
+ spin_unlock(&cn->guc_state.lock);
set_context_guc_id_invalid(cn);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 820a1f38b271..89cccefeea63 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 65fdca366e41..f74f8048af8f 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -4,6 +4,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5838c44cbf6f..3196189429bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1224,12 +1224,14 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_BAD;
}
- if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
- return MODE_BAD;
+ if (hdmi->conf) {
+ if (hdmi->conf->cea_modes_only && !drm_match_cea_mode(mode))
+ return MODE_BAD;
- if (hdmi->conf->max_mode_clock &&
- mode->clock > hdmi->conf->max_mode_clock)
- return MODE_CLOCK_HIGH;
+ if (hdmi->conf->max_mode_clock &&
+ mode->clock > hdmi->conf->max_mode_clock)
+ return MODE_CLOCK_HIGH;
+ }
if (mode->clock < 27000)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 4a1420b05e97..086dacf2f26a 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -5,6 +5,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/sched/mm.h>
#include "msm_drv.h"
#include "msm_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 05d0b3eb3690..0ae416aa76dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -353,15 +353,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
if (ret)
return ret;
- }
- fobj = dma_resv_shared_list(resv);
- fence = dma_resv_excl_fence(resv);
+ fobj = NULL;
+ } else {
+ fobj = dma_resv_shared_list(resv);
+ }
- if (fence) {
+ /* Waiting for the exclusive fence first causes performance regressions
+ * under some circumstances. So manually wait for the shared ones first.
+ */
+ for (i = 0; i < (fobj ? fobj->shared_count : 0) && !ret; ++i) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;
+ fence = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(resv));
+
f = nouveau_local_fence(fence, chan->drm);
if (f) {
rcu_read_lock();
@@ -373,20 +380,13 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
if (must_wait)
ret = dma_fence_wait(fence, intr);
-
- return ret;
}
- if (!exclusive || !fobj)
- return ret;
-
- for (i = 0; i < fobj->shared_count && !ret; ++i) {
+ fence = dma_resv_excl_fence(resv);
+ if (fence) {
struct nouveau_channel *prev = NULL;
bool must_wait = true;
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(resv));
-
f = nouveau_local_fence(fence, chan->drm);
if (f) {
rcu_read_lock();
@@ -398,6 +398,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
if (must_wait)
ret = dma_fence_wait(fence, intr);
+
+ return ret;
}
return ret;
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 481b48bde047..5a6e89825bc2 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -458,7 +458,7 @@ static struct drm_display_mode simpledrm_mode(unsigned int width,
{
struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
- mode.clock = 60 /* Hz */ * mode.hdisplay * mode.vdisplay;
+ mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000 /* kHz */;
drm_mode_set_name(&mode);
return mode;
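DRM mode clocks are stored in kHz, so the fixed formula is pixels per frame times 60 frames per second divided by 1000; the old code produced a value in Hz, a thousand times too large. A worked example for a 1920x1080 mode (the resolution is only an illustration):

#include <stdio.h>

int main(void)
{
        unsigned int hdisplay = 1920, vdisplay = 1080;

        /* pixels per frame * 60 frames/s / 1000 = pixel clock in kHz */
        unsigned int clock_khz = hdisplay * vdisplay * 60 / 1000;

        /* prints "124416 kHz (~124 MHz)" */
        printf("%u kHz (~%u MHz)\n", clock_khz, clock_khz / 1000);
        return 0;
}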
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 739f11c0109c..047adc42d9a0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1103,7 +1103,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
* as an indication that we're about to swap out.
*/
memset(&place, 0, sizeof(place));
- place.mem_type = TTM_PL_SYSTEM;
+ place.mem_type = bo->resource->mem_type;
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL))
return -EBUSY;
@@ -1135,6 +1135,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
struct ttm_place hop;
memset(&hop, 0, sizeof(hop));
+ place.mem_type = TTM_PL_SYSTEM;
ret = ttm_resource_alloc(bo, &place, &evict_mem);
if (unlikely(ret))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7e83c00a3f48..79c870a3bef8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
+#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 9f5435b55949..a7c78ac96270 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -207,14 +207,14 @@ config HID_CHERRY
config HID_CHICONY
tristate "Chicony devices"
- depends on HID
+ depends on USB_HID
default !EXPERT
help
Support for Chicony Tactical pad and special keys on Chicony keyboards.
config HID_CORSAIR
tristate "Corsair devices"
- depends on HID && USB && LEDS_CLASS
+ depends on USB_HID && LEDS_CLASS
help
Support for Corsair devices that are not fully compliant with the
HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
- depends on HID && SND
+ depends on USB_HID && SND
select SND_RAWMIDI
help
Support for Prodikeys PC-MIDI Keyboard device support.
@@ -560,7 +560,7 @@ config HID_LENOVO
config HID_LOGITECH
tristate "Logitech devices"
- depends on HID
+ depends on USB_HID
depends on LEDS_CLASS
default !EXPERT
help
@@ -951,7 +951,7 @@ config HID_SAITEK
config HID_SAMSUNG
tristate "Samsung InfraRed remote control or keyboards"
- depends on HID
+ depends on USB_HID
help
Support for Samsung InfraRed remote control or keyboards.
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index f3ecddc519ee..08c9a9a60ae4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
- if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
- hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
- if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
- hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
struct usb_host_interface *alt =
to_usb_interface(hdev->dev.parent)->altsetting;
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index db6da21ade06..74ad8bf98bfd 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
struct bigben_device, worker);
struct hid_field *report_field = bigben->report->field[0];
- if (bigben->removed)
+ if (bigben->removed || !report_field)
return;
if (bigben->work_led) {
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index ca556d39da2a..f04d2aa23efe 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
ret = hid_parse(hdev);
if (ret) {
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 902a60e249ed..8c895c820b67 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
int ret;
unsigned long quirks = id->driver_data;
struct corsair_drvdata *drvdata;
- struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
+ struct usb_interface *usbif;
+
+ if (!hid_is_usb(dev))
+ return -EINVAL;
+
+ usbif = to_usb_interface(dev->dev.parent);
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
GFP_KERNEL);
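Several of the HID fixes in this series share one pattern: probe() used to call to_usb_interface(hdev->dev.parent) unconditionally, which is only valid on the USB transport, and now checks hid_is_usb() first and returns -EINVAL otherwise. The standalone analogue below illustrates checking the transport before downcasting; the types and names are invented, not the HID API.

#include <stdio.h>

enum bus { BUS_USB_LIKE, BUS_I2C_LIKE };

struct device { enum bus bus; };

struct usb_like_device {
        struct device dev;       /* embedded as the first member */
        int interface_number;
};

static int probe(struct device *d)
{
        struct usb_like_device *usb;

        if (d->bus != BUS_USB_LIKE)
                return -22;      /* like -EINVAL: refuse other transports */

        /* Safe only after the transport check above. */
        usb = (struct usb_like_device *)d;
        printf("interface %d\n", usb->interface_number);
        return 0;
}

int main(void)
{
        struct usb_like_device u = { { BUS_USB_LIKE }, 1 };
        struct device i2c = { BUS_I2C_LIKE };

        probe(&u.dev);
        probe(&i2c);             /* rejected instead of mis-cast */
        return 0;
}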
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 021049805bb7..3091355d48df 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -50,7 +50,7 @@ struct elan_drvdata {
static int is_not_elan_touchpad(struct hid_device *hdev)
{
- if (hdev->bus == BUS_USB) {
+ if (hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
return (intf->altsetting->desc.bInterfaceNumber !=
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 383dfda8c12f..8e960d7b233b 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -230,6 +230,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
int ret;
struct usb_device *udev;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index 8ee77f4afe9f..79505c64dbfe 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -915,6 +915,9 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct ft260_get_chip_version_report version;
int ret;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 8123b871a3eb..0403beb3104b 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -586,6 +586,8 @@ static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 0a38e8e9bc78..403506b9697e 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
static int holtek_kbd_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- int ret = hid_parse(hdev);
+ struct usb_interface *intf;
+ int ret;
+
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+ ret = hid_parse(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ intf = to_usb_interface(hdev->dev.parent);
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
struct hid_input *hidinput;
list_for_each_entry(hidinput, &hdev->inputs, list) {
diff --git a/drivers/hid/hid-holtek-mouse.c b/drivers/hid/hid-holtek-mouse.c
index 195b735b001d..7c907939bfae 100644
--- a/drivers/hid/hid-holtek-mouse.c
+++ b/drivers/hid/hid-holtek-mouse.c
@@ -62,6 +62,29 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
return rdesc;
}
+static int holtek_mouse_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid parse failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hw start failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct hid_device_id holtek_mouse_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
@@ -83,6 +106,7 @@ static struct hid_driver holtek_mouse_driver = {
.name = "holtek_mouse",
.id_table = holtek_mouse_devices,
.report_fixup = holtek_mouse_report_fixup,
+ .probe = holtek_mouse_probe,
};
module_hid_driver(holtek_mouse_driver);
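The chicony, corsair, elan, elo, ft260 and holtek changes above all add the same guard: a driver that needs the USB transport must check hid_is_usb() before casting hdev->dev.parent to a usb_interface, otherwise a synthetic HID device (for example one created through uhid) can make probe dereference the wrong structure. A minimal sketch of the pattern, using a hypothetical example_probe() rather than any of the drivers above:

static int example_probe(struct hid_device *hdev,
                         const struct hid_device_id *id)
{
        struct usb_interface *intf;
        int ret;

        /* Reject non-USB transports (uhid, I2C, Bluetooth) up front. */
        if (!hid_is_usb(hdev))
                return -EINVAL;

        /* Only valid once the transport check above has passed. */
        intf = to_usb_interface(hdev->dev.parent);
        if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
                return -ENODEV;

        ret = hid_parse(hdev);
        if (ret)
                return ret;

        return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}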
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 96a455921c67..19da07777d62 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -399,6 +399,7 @@
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@@ -501,6 +502,7 @@
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
+#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -886,6 +888,7 @@
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
+#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 217f2d1b91c5..03f994541981 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -325,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index d40af911df63..fb3f7258009c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -749,12 +749,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
- __u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_interface *iface;
+ __u8 iface_num;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
struct lg_drv_data *drv_data;
int ret;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ iface = to_usb_interface(hdev->dev.parent);
+ iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
+
/* G29 only work with the 1st interface */
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
(iface_num != 0)) {
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index a0017b010c34..7106b921b53c 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1777,7 +1777,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
case recvr_type_dinovo: no_dj_interfaces = 2; break;
}
- if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if (hid_is_usb(hdev)) {
intf = to_usb_interface(hdev->dev.parent);
if (intf && intf->altsetting->desc.bInterfaceNumber >=
no_dj_interfaces) {
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index 2666af02d5c1..e4e9471d0f1e 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_interface *intf;
+ unsigned short ifnum;
unsigned long quirks = id->driver_data;
struct pk_device *pk;
struct pcmidi_snd *pm = NULL;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
+ intf = to_usb_interface(hdev->dev.parent);
+ ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
if (pk == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 06b7908c874c..ee7e504e7279 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
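The hid-ids.h, hid-input.c and hid-quirks.c hunks show the usual two-step way a new device is wired up: a #define for the product ID, then an entry in whichever table applies (hammer_devices, hid_battery_quirks or hid_quirks). The entry shape is the same everywhere; the product ID below is purely illustrative, not one of the IDs added here:

#define USB_DEVICE_ID_EXAMPLE_KEYBOARD 0x1234   /* hypothetical */

static const struct hid_device_id example_quirks[] = {
        /* HID_USB_DEVICE() fills in bus, vendor and product; the trailing
         * value lands in .driver_data and carries the quirk bits. */
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT,
                         USB_DEVICE_ID_EXAMPLE_KEYBOARD),
          HID_QUIRK_NO_INIT_REPORTS },
        { }
};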
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 4556d2a50f75..d94ee0539421 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index ce5f22519956..e95d59cd8d07 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index ea17abc7ad52..76da04801ca9 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 0316edf8c5bb..1896c69ea512 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index 5248b3c7cf78..cf8eeb33a125 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 960012881570..6fb9b9563769 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index 4a88a76d5c62..d5ddf0d68346 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 989927defe8d..4fcc8e7d276f 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
index 3956a6c9c521..5bf1971a2b14 100644
--- a/drivers/hid/hid-roccat-ryos.c
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 818701f7a028..a784bb4ee651 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
{
int retval;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
retval = hid_parse(hdev);
if (retval) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 2e1c31156eca..cf5992e97094 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
int ret;
unsigned int cmask = HID_CONNECT_DEFAULT;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index d1b107d547f5..60ec2b29d54d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -3000,7 +3000,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
sc->quirks = quirks;
hid_set_drvdata(hdev, sc);
sc->hdev = hdev;
- usbdev = to_usb_device(sc->hdev->dev.parent->parent);
ret = hid_parse(hdev);
if (ret) {
@@ -3038,14 +3037,23 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
if (!(hdev->claimed & HID_CLAIMED_INPUT)) {
hid_err(hdev, "failed to claim input\n");
- hid_hw_stop(hdev);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) {
+ if (!hid_is_usb(hdev)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ usbdev = to_usb_device(sc->hdev->dev.parent->parent);
+
sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!sc->ghl_urb)
- return -ENOMEM;
+ if (!sc->ghl_urb) {
+ ret = -ENOMEM;
+ goto err;
+ }
if (sc->quirks & GHL_GUITAR_PS3WIIU)
ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data,
@@ -3055,7 +3063,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
ARRAY_SIZE(ghl_ps4_magic_data));
if (ret) {
hid_err(hdev, "error preparing URB\n");
- return ret;
+ goto err;
}
timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0);
@@ -3064,6 +3072,10 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
return ret;
+
+err:
+ hid_hw_stop(hdev);
+ return ret;
}
static void sony_remove(struct hid_device *hdev)
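The sony_probe() rework above replaces scattered early returns with a single err: label, so every failure after hid_hw_start() also runs hid_hw_stop(), which the old -ENOMEM and URB-preparation paths skipped. Schematically, with a hypothetical helper standing in for the guitar-hero URB setup:

        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret)
                return ret;

        ret = example_setup_extra_resources(sc);  /* hypothetical helper */
        if (ret)
                goto err;                         /* must unwind hid_hw_start() */

        return 0;

err:
        hid_hw_stop(hdev);
        return ret;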
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 3a5333424aa3..03b935ff02d5 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -274,6 +274,9 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i
int ret = 0;
struct tm_wheel *tm_wheel = NULL;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed with error %d\n", ret);
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
index 31ea7fc69916..ad489caf53ad 100644
--- a/drivers/hid/hid-u2fzero.c
+++ b/drivers/hid/hid-u2fzero.c
@@ -311,7 +311,7 @@ static int u2fzero_probe(struct hid_device *hdev,
unsigned int minor;
int ret;
- if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ if (!hid_is_usb(hdev))
return -EINVAL;
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 6a9865dd703c..d8ab0139e5cd 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
struct uclogic_drvdata *drvdata = NULL;
bool params_initialized = false;
+ if (!hid_is_usb(hdev))
+ return -EINVAL;
+
/*
* libinput requires the pad interface to be on a different node
* than the pen, so use QUIRK_MULTI_INPUT for all tablets.
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 3d67b748a3b9..adff1bd68d9f 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -843,8 +843,7 @@ int uclogic_params_init(struct uclogic_params *params,
struct uclogic_params p = {0, };
/* Check arguments */
- if (params == NULL || hdev == NULL ||
- !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+ if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
rc = -EINVAL;
goto cleanup;
}
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index cd7ada48b1d9..72957a9f7117 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -57,6 +57,9 @@ static int vivaldi_probe(struct hid_device *hdev,
int ret;
drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
hid_set_drvdata(hdev, drvdata);
ret = hid_parse(hdev);
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 1c5039081db2..8e9d9450cb83 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -266,7 +266,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
&& IPC_IS_ISH_ILUP(fwsts)) {
- disable_irq_wake(pdev->irq);
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(pdev->irq);
ish_set_host_ready(dev);
@@ -337,7 +338,8 @@ static int __maybe_unused ish_suspend(struct device *device)
*/
pci_save_state(pdev);
- enable_irq_wake(pdev->irq);
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(pdev->irq);
}
} else {
/*
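The pci-ish.c change pairs enable_irq_wake() and disable_irq_wake() behind device_may_wakeup(), so the IRQ is only armed as a wake source when wakeup is actually enabled for the device and the wake depth stays balanced across suspend/resume. The shape of that pairing, sketched with hypothetical suspend/resume callbacks:

static int example_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        /* Arm the IRQ as a wake source only if wakeup is enabled. */
        if (device_may_wakeup(dev))
                enable_irq_wake(pdev->irq);
        return 0;
}

static int example_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        /* Mirror the suspend path so the wake depth stays balanced. */
        if (device_may_wakeup(dev))
                disable_irq_wake(pdev->irq);
        return 0;
}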
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 2717d39600b4..066c567dbaa2 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
* Skip the query for this type and modify defaults based on
* interface number.
*/
- if (features->type == WIRELESS) {
+ if (features->type == WIRELESS && intf) {
if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
else
@@ -2214,7 +2214,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
char *product_name = wacom->hdev->name;
- if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+ if (hid_is_usb(wacom->hdev)) {
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
product_name = dev->product;
@@ -2451,6 +2451,9 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_destroy_battery(wacom);
+ if (!usbdev)
+ return;
+
/* Stylus interface */
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
wacom1 = hid_get_drvdata(hdev1);
@@ -2730,8 +2733,6 @@ static void wacom_mode_change_work(struct work_struct *work)
static int wacom_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
- struct usb_device *dev = interface_to_usbdev(intf);
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
@@ -2766,8 +2767,14 @@ static int wacom_probe(struct hid_device *hdev,
wacom_wac->hid_data.inputmode = -1;
wacom_wac->mode_report = -1;
- wacom->usbdev = dev;
- wacom->intf = intf;
+ if (hid_is_usb(hdev)) {
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *dev = interface_to_usbdev(intf);
+
+ wacom->usbdev = dev;
+ wacom->intf = intf;
+ }
+
mutex_init(&wacom->lock);
INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index dd12af20e467..0747a8f1fcee 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -19,6 +19,7 @@ config HYPERV_TIMER
config HYPERV_UTILS
tristate "Microsoft Hyper-V Utilities driver"
depends on HYPERV && CONNECTOR && NLS
+ depends on PTP_1588_CLOCK_OPTIONAL
help
Select this option to enable the Hyper-V Utilities.
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 731d5117f9f1..14389fd7afb8 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
corsairpsu_check_cmd_support(priv);
priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
- &corsairpsu_chip_info, 0);
+ &corsairpsu_chip_info, NULL);
if (IS_ERR(priv->hwmon_dev)) {
ret = PTR_ERR(priv->hwmon_dev);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index eaace478f508..5596c211f38d 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -627,10 +627,9 @@ static void __init i8k_init_procfs(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
- /* Register the proc entry */
- proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data);
-
- devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
+ /* Only register exit function if creation was successful */
+ if (proc_create_data("i8k", 0, NULL, &i8k_proc_ops, data))
+ devm_add_action_or_reset(dev, i8k_exit_procfs, NULL);
}
#else
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 618052c6cdb6..74019dff2550 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -35,13 +35,14 @@
* explicitly as max6659, or if its address is not 0x4c.
* These chips lack the remote temperature offset feature.
*
- * This driver also supports the MAX6654 chip made by Maxim. This chip can
- * be at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is
- * otherwise similar to MAX6657/MAX6658/MAX6659. Extended range is available
- * by setting the configuration register accordingly, and is done during
- * initialization. Extended precision is only available at conversion rates
- * of 1 Hz and slower. Note that extended precision is not enabled by
- * default, as this driver initializes all chips to 2 Hz by design.
+ * This driver also supports the MAX6654 chip made by Maxim. This chip can be
+ * at 9 different addresses, similar to MAX6680/MAX6681. The MAX6654 is similar
+ * to MAX6657/MAX6658/MAX6659, but does not support critical temperature
+ * limits. Extended range is available by setting the configuration register
+ * accordingly, and is done during initialization. Extended precision is only
+ * available at conversion rates of 1 Hz and slower. Note that extended
+ * precision is not enabled by default, as this driver initializes all chips
+ * to 2 Hz by design.
*
* This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
* MAX6692 chips made by Maxim. These are again similar to the LM86,
@@ -188,6 +189,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
#define LM90_HAVE_EXTENDED_TEMP (1 << 8) /* extended temperature support*/
#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */
+#define LM90_HAVE_CRIT (1 << 10)/* Chip supports CRIT/OVERT register */
+#define LM90_HAVE_CRIT_ALRM_SWP (1 << 11)/* critical alarm bits swapped */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@@ -197,6 +200,7 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_STATUS_RHIGH (1 << 4) /* remote high temp limit tripped */
#define LM90_STATUS_LLOW (1 << 5) /* local low temp limit tripped */
#define LM90_STATUS_LHIGH (1 << 6) /* local high temp limit tripped */
+#define LM90_STATUS_BUSY (1 << 7) /* conversion is ongoing */
#define MAX6696_STATUS2_R2THRM (1 << 1) /* remote2 THERM limit tripped */
#define MAX6696_STATUS2_R2OPEN (1 << 2) /* remote2 is an open circuit */
@@ -354,38 +358,43 @@ struct lm90_params {
static const struct lm90_params lm90_params[] = {
[adm1032] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[adt7461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
[g781] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[lm86] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[lm90] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[lm99] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
},
[max6646] = {
+ .flags = LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -396,50 +405,51 @@ static const struct lm90_params lm90_params[] = {
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6657] = {
- .flags = LM90_PAUSE_FOR_CONFIG,
+ .flags = LM90_PAUSE_FOR_CONFIG | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6659] = {
- .flags = LM90_HAVE_EMERGENCY,
+ .flags = LM90_HAVE_EMERGENCY | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6680] = {
- .flags = LM90_HAVE_OFFSET,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
+ | LM90_HAVE_CRIT_ALRM_SWP,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
[max6696] = {
.flags = LM90_HAVE_EMERGENCY
- | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3,
+ | LM90_HAVE_EMERGENCY_ALARM | LM90_HAVE_TEMP3 | LM90_HAVE_CRIT,
.alert_alarms = 0x1c7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[w83l771] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 8,
},
[sa56004] = {
- .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT,
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT | LM90_HAVE_CRIT,
.alert_alarms = 0x7b,
.max_convrate = 9,
.reg_local_ext = SA56004_REG_R_LOCAL_TEMPL,
},
[tmp451] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
},
[tmp461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@@ -668,20 +678,22 @@ static int lm90_update_limits(struct device *dev)
struct i2c_client *client = data->client;
int val;
- val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
- if (val < 0)
- return val;
- data->temp8[LOCAL_CRIT] = val;
+ if (data->flags & LM90_HAVE_CRIT) {
+ val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[LOCAL_CRIT] = val;
- val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
- if (val < 0)
- return val;
- data->temp8[REMOTE_CRIT] = val;
+ val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+ if (val < 0)
+ return val;
+ data->temp8[REMOTE_CRIT] = val;
- val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
- if (val < 0)
- return val;
- data->temp_hyst = val;
+ val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+ if (val < 0)
+ return val;
+ data->temp_hyst = val;
+ }
val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
if (val < 0)
@@ -809,7 +821,7 @@ static int lm90_update_device(struct device *dev)
val = lm90_read_reg(client, LM90_REG_R_STATUS);
if (val < 0)
return val;
- data->alarms = val; /* lower 8 bit of alarms */
+ data->alarms = val & ~LM90_STATUS_BUSY;
if (data->kind == max6696) {
val = lm90_select_remote_channel(data, 1);
@@ -1160,8 +1172,8 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
else
temp = temp_from_s8(data->temp8[LOCAL_CRIT]);
- /* prevent integer underflow */
- val = max(val, -128000l);
+ /* prevent integer overflow/underflow */
+ val = clamp_val(val, -128000l, 255000l);
data->temp_hyst = hyst_to_reg(temp - val);
err = i2c_smbus_write_byte_data(client, LM90_REG_W_TCRIT_HYST,
@@ -1192,6 +1204,7 @@ static const u8 lm90_temp_emerg_index[3] = {
static const u8 lm90_min_alarm_bits[3] = { 5, 3, 11 };
static const u8 lm90_max_alarm_bits[3] = { 6, 4, 12 };
static const u8 lm90_crit_alarm_bits[3] = { 0, 1, 9 };
+static const u8 lm90_crit_alarm_bits_swapped[3] = { 1, 0, 9 };
static const u8 lm90_emergency_alarm_bits[3] = { 15, 13, 14 };
static const u8 lm90_fault_bits[3] = { 0, 2, 10 };
@@ -1217,7 +1230,10 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
*val = (data->alarms >> lm90_max_alarm_bits[channel]) & 1;
break;
case hwmon_temp_crit_alarm:
- *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
+ if (data->flags & LM90_HAVE_CRIT_ALRM_SWP)
+ *val = (data->alarms >> lm90_crit_alarm_bits_swapped[channel]) & 1;
+ else
+ *val = (data->alarms >> lm90_crit_alarm_bits[channel]) & 1;
break;
case hwmon_temp_emergency_alarm:
*val = (data->alarms >> lm90_emergency_alarm_bits[channel]) & 1;
@@ -1465,12 +1481,11 @@ static int lm90_detect(struct i2c_client *client,
if (man_id < 0 || chip_id < 0 || config1 < 0 || convrate < 0)
return -ENODEV;
- if (man_id == 0x01 || man_id == 0x5C || man_id == 0x41) {
+ if (man_id == 0x01 || man_id == 0x5C || man_id == 0xA1) {
config2 = i2c_smbus_read_byte_data(client, LM90_REG_R_CONFIG2);
if (config2 < 0)
return -ENODEV;
- } else
- config2 = 0; /* Make compiler happy */
+ }
if ((address == 0x4C || address == 0x4D)
&& man_id == 0x01) { /* National Semiconductor */
@@ -1903,11 +1918,14 @@ static int lm90_probe(struct i2c_client *client)
info->config = data->channel_config;
data->channel_config[0] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
- HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
- HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM;
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM;
data->channel_config[1] = HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
- HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_MIN_ALARM |
- HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM | HWMON_T_FAULT;
+ HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | HWMON_T_FAULT;
+
+ if (data->flags & LM90_HAVE_CRIT) {
+ data->channel_config[0] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+ data->channel_config[1] |= HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_CRIT_HYST;
+ }
if (data->flags & LM90_HAVE_OFFSET)
data->channel_config[1] |= HWMON_T_OFFSET;
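The lm90 changes above move critical-limit support behind a new LM90_HAVE_CRIT capability bit: the per-chip table declares the bit, and every consumer (limit reads, hwmon channel config, alarm decoding) tests it instead of assuming the registers exist. The gating pattern, sketched with hypothetical names:

#define EXAMPLE_HAVE_CRIT       BIT(10)        /* chip has CRIT/OVERT regs */

static int example_update_limits(struct example_data *data)
{
        int val;

        /* Skip the optional registers on chips that do not implement them;
         * reading them there would return garbage or an error. */
        if (data->flags & EXAMPLE_HAVE_CRIT) {
                val = example_read_reg(data->client, EXAMPLE_REG_LOCAL_CRIT);
                if (val < 0)
                        return val;
                data->temp_crit = val;
        }
        return 0;
}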
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 93dca471972e..57ce8633a725 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1527,7 +1527,7 @@ static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
nct6775_wmi_set_bank(data, reg);
- err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+ err = nct6775_asuswmi_read(data->bank, reg & 0xff, &tmp);
if (err)
return 0;
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 17518b4cab1b..f12b9a28a232 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -336,8 +336,6 @@ static int pwm_fan_probe(struct platform_device *pdev)
return ret;
}
- ctx->pwm_value = MAX_PWM;
-
pwm_init_state(ctx->pwm, &ctx->pwm_state);
/*
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 09c2a0b06444..3415d7a0e0fc 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -23,7 +23,7 @@
/*
* I2C command delays (in microseconds)
*/
-#define SHT4X_MEAS_DELAY 1000
+#define SHT4X_MEAS_DELAY_HPM 8200 /* see t_MEAS,h in datasheet */
#define SHT4X_DELAY_EXTRA 10000
/*
@@ -90,7 +90,7 @@ static int sht4x_read_values(struct sht4x_data *data)
if (ret < 0)
goto unlock;
- usleep_range(SHT4X_MEAS_DELAY, SHT4X_MEAS_DELAY + SHT4X_DELAY_EXTRA);
+ usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
ret = i2c_master_recv(client, raw_data, SHT4X_RESPONSE_LENGTH);
if (ret != SHT4X_RESPONSE_LENGTH) {
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index a6ea1eb1394e..53b8da6dbb23 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -636,7 +636,7 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id)
status = readb(i2c->base + MPC_I2C_SR);
if (status & CSR_MIF) {
/* Wait up to 100us for transfer to properly complete */
- readb_poll_timeout(i2c->base + MPC_I2C_SR, status, !(status & CSR_MCF), 0, 100);
+ readb_poll_timeout_atomic(i2c->base + MPC_I2C_SR, status, status & CSR_MCF, 0, 100);
writeb(0, i2c->base + MPC_I2C_SR);
mpc_i2c_do_intr(i2c, status);
return IRQ_HANDLED;
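The i2c-mpc fix swaps readb_poll_timeout() for its _atomic variant because the poll runs inside the interrupt handler, where the sleeping helper must not be used, and it also corrects the polarity so the loop waits for the transfer-complete bit to become set. The general shape of the helper, with hypothetical register and bit names:

#include <linux/iopoll.h>

        u8 status;
        int err;

        /* Busy-waits (never sleeps): re-read every 5us, give up after 100us.
         * Returns 0 once the condition is true, -ETIMEDOUT otherwise;
         * 'status' holds the last value read in either case. */
        err = readb_poll_timeout_atomic(regs + EXAMPLE_STATUS, status,
                                        status & EXAMPLE_DONE, 5, 100);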
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 95378780da6d..41eb0dcc3204 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -22,24 +22,24 @@
/**
* struct virtio_i2c - virtio I2C data
* @vdev: virtio device for this controller
- * @completion: completion of virtio I2C message
* @adap: I2C adapter for this controller
* @vq: the virtio virtqueue for communication
*/
struct virtio_i2c {
struct virtio_device *vdev;
- struct completion completion;
struct i2c_adapter adap;
struct virtqueue *vq;
};
/**
* struct virtio_i2c_req - the virtio I2C request structure
+ * @completion: completion of virtio I2C message
* @out_hdr: the OUT header of the virtio I2C message
* @buf: the buffer into which data is read, or from which it's written
* @in_hdr: the IN header of the virtio I2C message
*/
struct virtio_i2c_req {
+ struct completion completion;
struct virtio_i2c_out_hdr out_hdr ____cacheline_aligned;
uint8_t *buf ____cacheline_aligned;
struct virtio_i2c_in_hdr in_hdr ____cacheline_aligned;
@@ -47,9 +47,11 @@ struct virtio_i2c_req {
static void virtio_i2c_msg_done(struct virtqueue *vq)
{
- struct virtio_i2c *vi = vq->vdev->priv;
+ struct virtio_i2c_req *req;
+ unsigned int len;
- complete(&vi->completion);
+ while ((req = virtqueue_get_buf(vq, &len)))
+ complete(&req->completion);
}
static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
for (i = 0; i < num; i++) {
int outcnt = 0, incnt = 0;
+ init_completion(&reqs[i].completion);
+
/*
* Only 7-bit mode supported for this moment. For the address
* format, Please check the Virtio I2C Specification.
@@ -106,21 +110,15 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
struct virtio_i2c_req *reqs,
struct i2c_msg *msgs, int num)
{
- struct virtio_i2c_req *req;
bool failed = false;
- unsigned int len;
int i, j = 0;
for (i = 0; i < num; i++) {
- /* Detach the ith request from the vq */
- req = virtqueue_get_buf(vq, &len);
+ struct virtio_i2c_req *req = &reqs[i];
- /*
- * Condition req == &reqs[i] should always meet since we have
- * total num requests in the vq. reqs[i] can never be NULL here.
- */
- if (!failed && (WARN_ON(req != &reqs[i]) ||
- req->in_hdr.status != VIRTIO_I2C_MSG_OK))
+ wait_for_completion(&req->completion);
+
+ if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
failed = true;
i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
@@ -156,12 +154,8 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
* remote here to clear the virtqueue, so we can try another set of
* messages later on.
*/
-
- reinit_completion(&vi->completion);
virtqueue_kick(vq);
- wait_for_completion(&vi->completion);
-
count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
err_free:
@@ -210,8 +204,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
vdev->priv = vi;
vi->vdev = vdev;
- init_completion(&vi->completion);
-
ret = virtio_i2c_setup_vqs(vi);
if (ret)
return ret;
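The virtio-i2c rework replaces the single adapter-wide completion with one completion per request: each request is init_completion()'d before it is queued, the virtqueue callback completes exactly the buffers the device has returned, and the transfer path then waits on each request in turn. The callback side of that scheme, as a sketch with a hypothetical request type:

static void example_msg_done(struct virtqueue *vq)
{
        struct example_req *req;   /* hypothetical, embeds a struct completion */
        unsigned int len;

        /* Complete only what the device actually handed back, one request
         * at a time, instead of waking the submitter for the whole batch. */
        while ((req = virtqueue_get_buf(vq, &len)))
                complete(&req->completion);
}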
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index bce0e8bb7852..cf5d049342ea 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -535,6 +535,9 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
sizeof(rdwr_arg)))
return -EFAULT;
+ if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
+ return -EINVAL;
+
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index a51fdd3c9b5b..24c9387c2968 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1595,8 +1595,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
return 0;
err_buffer_cleanup:
- if (data->dready_trig)
- iio_triggered_buffer_cleanup(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -1618,8 +1617,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
+ iio_triggered_buffer_cleanup(indio_dev);
if (data->dready_trig) {
- iio_triggered_buffer_cleanup(indio_dev);
iio_trigger_unregister(data->dready_trig);
iio_trigger_unregister(data->motion_trig);
}
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 2faf85ca996e..552eba5e8b4f 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
hw_values.chan,
sizeof(hw_values.chan));
if (ret) {
- dev_err(st->dev,
- "error reading data\n");
- return ret;
+ dev_err(st->dev, "error reading data: %d\n", ret);
+ goto out;
}
iio_push_to_buffers_with_timestamp(indio_dev,
&hw_values,
iio_get_time_ns(indio_dev));
+out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 715b8138fb71..09c7f10fefb6 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1470,7 +1470,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
- indio_dev->trig = trig;
+ indio_dev->trig = iio_trigger_get(trig);
return 0;
}
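The mma8452 fix (together with the removal of the extra get_device() in viio_trigger_alloc() further down) is about trigger reference counting: when a driver parks its own trigger in indio_dev->trig it must take a reference, because the IIO core drops one when the device is torn down. A sketch of the usual setup sequence:

        trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
                                      iio_device_id(indio_dev));
        if (!trig)
                return -ENOMEM;

        ret = devm_iio_trigger_register(dev, trig);
        if (ret)
                return ret;

        /* Take a reference before storing the pointer; the core will put
         * one reference on indio_dev->trig at teardown. */
        indio_dev->trig = iio_trigger_get(trig);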
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 8bf5b62a73f4..3363af15a43f 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -532,7 +532,7 @@ config IMX7D_ADC
config IMX8QXP_ADC
tristate "NXP IMX8QXP ADC driver"
- depends on ARCH_MXC_ARM64 || COMPILE_TEST
+ depends on ARCH_MXC || COMPILE_TEST
depends on HAS_IOMEM
help
Say yes here to build support for IMX8QXP ADC.
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 2c5c8a3672b2..aa42ba759fa1 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -480,8 +480,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
- iio_trigger_notify_done(indio_dev->trig);
err_unlock:
+ iio_trigger_notify_done(indio_dev->trig);
mutex_unlock(&st->lock);
return IRQ_HANDLED;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 4c922ef634f8..92a57cf10fba 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1586,7 +1586,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
*val = st->conversion_value;
ret = at91_adc_adjust_val_osr(st, val);
if (chan->scan_type.sign == 's')
- *val = sign_extend32(*val, 11);
+ *val = sign_extend32(*val,
+ chan->scan_type.realbits - 1);
st->conversion_done = false;
}
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 3e0c0233b431..df99f1365c39 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
struct axp20x_adc_iio *info = iio_priv(indio_dev);
- int size;
- /*
- * N.B.: Unlike the Chinese datasheets tell, the charging current is
- * stored on 12 bits, not 13 bits. Only discharging current is on 13
- * bits.
- */
- if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
- size = 13;
- else
- size = 12;
-
- *val = axp20x_read_variable_width(info->regmap, chan->address, size);
+ *val = axp20x_read_variable_width(info->regmap, chan->address, 12);
if (*val < 0)
return *val;
@@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CURRENT:
- *val = 0;
- *val2 = 500000;
- return IIO_VAL_INT_PLUS_MICRO;
+ *val = 1;
+ return IIO_VAL_INT;
case IIO_TEMP:
*val = 100;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 16407664182c..97d162a3cba4 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
int ret, i;
- struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
u16 conflict;
__le16 value;
int olen = sizeof(value);
@@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
.chan = channel,
};
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
-
ret = dln2_adc_set_chan_enabled(dln2, channel, true);
if (ret < 0)
- goto release_direct;
+ return ret;
ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
if (ret < 0) {
@@ -300,8 +295,6 @@ disable_port:
dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
dln2_adc_set_chan_enabled(dln2, channel, false);
-release_direct:
- iio_device_release_direct_mode(indio_dev);
return ret;
}
@@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret < 0)
+ return ret;
+
mutex_lock(&dln2->mutex);
ret = dln2_adc_read(dln2, chan->channel);
mutex_unlock(&dln2->mutex);
+ iio_device_release_direct_mode(indio_dev);
+
if (ret < 0)
return ret;
@@ -656,7 +655,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
return -ENOMEM;
}
iio_trigger_set_drvdata(dln2->trig, dln2);
- devm_iio_trigger_register(dev, dln2->trig);
+ ret = devm_iio_trigger_register(dev, dln2->trig);
+ if (ret) {
+ dev_err(dev, "failed to register trigger: %d\n", ret);
+ return ret;
+ }
iio_trigger_set_immutable(indio_dev, dln2->trig);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 6245434f8377..8cd258cb2682 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1117,6 +1117,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
{
struct stm32_adc *adc = iio_priv(indio_dev);
+ stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
stm32h7_adc_disable(indio_dev);
stm32_adc_int_ch_disable(adc);
stm32h7_adc_enter_pwr_down(adc);
@@ -1986,7 +1987,7 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n
/* Get calibration data for vrefint channel */
ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint);
if (ret && ret != -ENOENT) {
- return dev_err_probe(&indio_dev->dev, ret,
+ return dev_err_probe(indio_dev->dev.parent, ret,
"nvmem access error\n");
}
if (ret == -ENOENT)
diff --git a/drivers/iio/gyro/adxrs290.c b/drivers/iio/gyro/adxrs290.c
index 3e0734ddafe3..600e9725da78 100644
--- a/drivers/iio/gyro/adxrs290.c
+++ b/drivers/iio/gyro/adxrs290.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
goto err_unlock;
}
- *val = temp;
+ *val = sign_extend32(temp, 15);
err_unlock:
mutex_unlock(&st->lock);
@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
}
/* extract lower 12 bits temperature reading */
- *val = temp & 0x0FFF;
+ *val = sign_extend32(temp, 11);
err_unlock:
mutex_unlock(&st->lock);
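Both adxrs290 reads above are signed, two's-complement values narrower than 32 bits, so simply masking (temp & 0x0FFF) loses the sign of negative readings. sign_extend32() takes the raw value and the bit position of the sign bit; a short sketch with a hypothetical raw register value:

#include <linux/bitops.h>

        u16 raw = example_read_raw(st);   /* hypothetical 16-bit register read */
        int rate, temp;

        /* Full 16-bit sample: the sign bit is bit 15. */
        rate = sign_extend32(raw, 15);

        /* 12-bit field in the low bits: the sign bit is bit 11, so a plain
         * 0x0fff mask would turn small negative values into large positives. */
        temp = sign_extend32(raw, 11);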
diff --git a/drivers/iio/gyro/itg3200_buffer.c b/drivers/iio/gyro/itg3200_buffer.c
index 04dd6a7969ea..4cfa0d439560 100644
--- a/drivers/iio/gyro/itg3200_buffer.c
+++ b/drivers/iio/gyro/itg3200_buffer.c
@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+error_ret:
iio_trigger_notify_done(indio_dev->trig);
-error_ret:
return IRQ_HANDLED;
}
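Several of the IIO fixes in this series (kxsd9, ad7768-1, ltr501, itg3200) converge on the same rule: a triggered-buffer handler must reach iio_trigger_notify_done() and return IRQ_HANDLED on every path, including error paths, or the trigger is never re-armed and sampling stalls. A sketch of a handler written that way, with a hypothetical read helper:

static irqreturn_t example_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct {
                __le16 channels[3];
                s64 ts __aligned(8);
        } scan;

        if (example_read_scan(indio_dev, &scan))   /* hypothetical */
                goto done;

        iio_push_to_buffers_with_timestamp(indio_dev, &scan,
                                           iio_get_time_ns(indio_dev));
done:
        /* Runs on the error path too, so the trigger can fire again. */
        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}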
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index b23caa2f2aa1..93990ff1dfe3 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -556,7 +556,6 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
irq_modify_status(trig->subirq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
- get_device(&trig->dev);
return trig;
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 7e51aaac0bf8..b2983b1a9ed1 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1275,7 +1275,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
als_buf, sizeof(als_buf));
if (ret < 0)
- return ret;
+ goto done;
if (test_bit(0, indio_dev->active_scan_mask))
scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 07e91846307c..fc63856ed54d 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
mutex_lock(&data->lock);
ret = regmap_field_read(data->reg_flag_nf, &dir);
if (ret < 0) {
- dev_err(&data->client->dev, "register read failed\n");
- mutex_unlock(&data->lock);
- return ret;
+ dev_err(&data->client->dev, "register read failed: %d\n", ret);
+ goto out;
}
event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
IIO_EV_TYPE_THRESH,
@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
ret = regmap_field_write(data->reg_flag_psint, 0);
if (ret < 0)
dev_err(&data->client->dev, "failed to reset interrupts\n");
+out:
mutex_unlock(&data->lock);
return IRQ_HANDLED;
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 33083877cd19..4353b749ecef 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
};
module_platform_driver(stm32_timer_trigger_driver);
-MODULE_ALIAS("platform: stm32-timer-trigger");
+MODULE_ALIAS("platform:stm32-timer-trigger");
MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
MODULE_LICENSE("GPL v2");
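The stm32-timer-trigger change is a one-character autoload fix: the platform bus emits MODALIAS=platform:<device-name> on uevents, and alias matching is a plain string comparison, so the stray space after "platform:" kept modprobe from finding the module. The corrected form:

/* Must match the uevent string exactly, with no space after the colon. */
MODULE_ALIAS("platform:stm32-timer-trigger");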
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index ec37f4fd8e96..f1245c94ae26 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
*/
static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
{
+ if (!rcd->rcvhdrq)
+ return;
clear_recv_intr(rcd);
if (check_packet_present(rcd))
force_recv_intr(rcd);
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 61f341c3005c..e2c634af40e9 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
struct hfi1_packet packet;
int skip_pkt = 0;
+ if (!rcd->rcvhdrq)
+ return RCV_PKT_OK;
/* Control context will always use the slow path interrupt handler */
needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index dbd1c31830b9..4436ed41547c 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
rcd->fast_handler = get_dma_rtail_setting(rcd) ?
handle_receive_interrupt_dma_rtail :
handle_receive_interrupt_nodma_rtail;
- rcd->slow_handler = handle_receive_interrupt;
hfi1_set_seq_cnt(rcd, 1);
@@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
+ rcd->slow_handler = handle_receive_interrupt;
+ rcd->do_interrupt = rcd->slow_handler;
rcd->msix_intr = CCE_NUM_MSIX_VECTORS;
mutex_init(&rcd->exp_mutex);
@@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (ret)
goto done;
- /* allocate dummy tail memory for all receive contexts */
- dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
- sizeof(u64),
- &dd->rcvhdrtail_dummy_dma,
- GFP_KERNEL);
-
- if (!dd->rcvhdrtail_dummy_kvaddr) {
- dd_dev_err(dd, "cannot allocate dummy tail memory\n");
- ret = -ENOMEM;
- goto done;
- }
-
/* dd->rcd can be NULL if early initialization failed */
for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
/*
@@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (!rcd)
continue;
- rcd->do_interrupt = &handle_receive_interrupt;
-
lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
rcd->egrbufs.rcvtids = NULL;
for (e = 0; e < rcd->egrbufs.alloced; e++) {
- if (rcd->egrbufs.buffers[e].dma)
+ if (rcd->egrbufs.buffers[e].addr)
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[e].len,
rcd->egrbufs.buffers[e].addr,
@@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
dd->tx_opstats = NULL;
kfree(dd->comp_vect);
dd->comp_vect = NULL;
+ if (dd->rcvhdrtail_dummy_kvaddr)
+ dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
+ (void *)dd->rcvhdrtail_dummy_kvaddr,
+ dd->rcvhdrtail_dummy_dma);
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
sdma_clean(dd, dd->num_sdma);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}
+ /* allocate dummy tail memory for all receive contexts */
+ dd->rcvhdrtail_dummy_kvaddr =
+ dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
+ &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
+ if (!dd->rcvhdrtail_dummy_kvaddr) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
atomic_set(&dd->ipoib_rsm_usr_num, 0);
return dd;
@@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
free_credit_return(dd);
- if (dd->rcvhdrtail_dummy_kvaddr) {
- dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
- (void *)dd->rcvhdrtail_dummy_kvaddr,
- dd->rcvhdrtail_dummy_dma);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
- }
-
/*
* Free any resources still in use (usually just kernel contexts)
* at unload; we do for ctxtcnt, because that's what we allocate.
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 2b6c24b7b586..f07d328689d3 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
if (current->nr_cpus_allowed != 1)
goto out;
- cpu_id = smp_processor_id();
rcu_read_lock();
+ cpu_id = smp_processor_id();
rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
sdma_rht_params);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 9bfbaddd1763..eb0defa80d0d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
@@ -1050,9 +1051,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long val;
+ int ret;
/* When hardware reset is detected, we should stop sending mailbox&cmq&
* doorbell to hardware. If now in .init_instance() function, we should
@@ -1064,7 +1070,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
* again.
*/
hr_dev->dis_db = true;
- if (!ops->get_hw_reset_stat(handle))
+
+ ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+ val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+ HW_RESET_TIMEOUT_US, false, handle);
+ if (!ret)
hr_dev->is_reset = true;
if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -1584,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 clock_cycles_of_1us;
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
false);
- hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
+ else
+ clock_cycles_of_1us = HNS_ROCE_1US_CFG;
+
+ hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -4792,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
return ret;
}
+static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
+{
+#define QP_ACK_TIMEOUT_MAX_HIP08 20
+#define QP_ACK_TIMEOUT_OFFSET 10
+#define QP_ACK_TIMEOUT_MAX 31
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
+ ibdev_warn(&hr_dev->ib_dev,
+ "Local ACK timeout shall be 0 to 20.\n");
+ return false;
+ }
+ *timeout += QP_ACK_TIMEOUT_OFFSET;
+ } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
+ if (*timeout > QP_ACK_TIMEOUT_MAX) {
+ ibdev_warn(&hr_dev->ib_dev,
+ "Local ACK timeout shall be 0 to 31.\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4801,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
int ret = 0;
+ u8 timeout;
if (attr_mask & IB_QP_AV) {
ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@@ -4810,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
}
if (attr_mask & IB_QP_TIMEOUT) {
- if (attr->timeout < 31) {
- hr_reg_write(context, QPC_AT, attr->timeout);
+ timeout = attr->timeout;
+ if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
+ hr_reg_write(context, QPC_AT, timeout);
hr_reg_clear(qpc_mask, QPC_AT);
- } else {
- ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 30.\n");
}
}
@@ -4872,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
- hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
+ hr_reg_write(context, QPC_MIN_RNR_TIME,
+ hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
+ HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
}
@@ -5489,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);
+
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+ dev_info(hr_dev->dev,
+ "cq_period(%u) reached the upper limit, adjusted to 65.\n",
+ cq_period);
+ cq_period = HNS_ROCE_MAX_CQ_PERIOD;
+ }
+ cq_period *= HNS_ROCE_CLOCK_ADJUST;
+ }
hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);
@@ -5884,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
+ if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
+ dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
+ eq->eq_period);
+ eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
+ }
+ eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
+ }
+
hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
@@ -6387,10 +6447,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
if (!hr_dev)
return 0;
- hr_dev->is_reset = true;
hr_dev->active = false;
hr_dev->dis_db = true;
-
hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;
return 0;
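The hns_roce reset handling above stops trusting a single get_hw_reset_stat() snapshot and instead polls the ae_dev reset counter with read_poll_timeout() until it advances past the count recorded before the reset. The helper sleeps between reads and gives up after the timeout; its argument order (op, output value, condition, sleep and timeout in microseconds, sleep-before-read flag, then the op's arguments) is easy to get wrong, so a short sketch:

#include <linux/iopoll.h>

        unsigned long cnt;
        int ret;

        /* Re-invoke ops->ae_dev_reset_cnt(handle) every HW_RESET_SLEEP_US
         * microseconds, up to HW_RESET_TIMEOUT_US; 'false' skips the sleep
         * before the first read. Returns 0 once the condition holds. */
        ret = read_poll_timeout(ops->ae_dev_reset_cnt, cnt,
                                cnt > hr_dev->reset_cnt,
                                HW_RESET_SLEEP_US, HW_RESET_TIMEOUT_US,
                                false, handle);
        if (!ret)
                hr_dev->is_reset = true;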
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 4d904d5e82be..35c61da7ba15 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1444,6 +1444,14 @@ struct hns_roce_dip {
struct list_head node; /* all dips are on a list */
};
+/* only for RNR timeout issue of HIP08 */
+#define HNS_ROCE_CLOCK_ADJUST 1000
+#define HNS_ROCE_MAX_CQ_PERIOD 65
+#define HNS_ROCE_MAX_EQ_PERIOD 65
+#define HNS_ROCE_RNR_TIMER_10NS 1
+#define HNS_ROCE_1US_CFG 999
+#define HNS_ROCE_1NS_CFG 0
+
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 6eee9deadd12..e64ef6903fb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
static void free_srq_wrid(struct hns_roce_srq *srq)
{
- kfree(srq->wrid);
+ kvfree(srq->wrid);
srq->wrid = NULL;
}
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 4108dcabece2..b4c657f5f2f9 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
{
struct irdma_cq *cq = iwcq->back_cq;
+ if (!cq->user_mode)
+ cq->armed = false;
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->flush_code = FLUSH_PROT_ERR;
break;
case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
break;
case IRDMA_AE_AMP_BAD_STAG_KEY:
@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_IB_INVALID_REQUEST:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR;
break;
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ break;
default:
qp->flush_code = FLUSH_FATAL_ERR;
break;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 91a497139ba3..cb218cab79ac 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -542,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index aeeb1c310965..fed49da770f3 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
list_del(&chunk->list);
if (chunk->type == PBLE_SD_PAGED)
irdma_pble_free_paged_mem(chunk);
- if (chunk->bitmapbuf)
- kfree(chunk->bitmapmem.va);
+ bitmap_free(chunk->bitmapbuf);
kfree(chunk->chunkmem.va);
}
}
@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
"PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
goto error;
}
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
sd_entry->valid = true;
return 0;
error:
- if (chunk->bitmapbuf)
- kfree(chunk->bitmapmem.va);
+ bitmap_free(chunk->bitmapbuf);
kfree(chunk->chunkmem.va);
return ret_code;
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index e1b3b8118a2c..aa20827dcc9d 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -78,7 +78,6 @@ struct irdma_chunk {
u32 pg_cnt;
enum irdma_alloc_type type;
struct irdma_sc_dev *dev;
- struct irdma_virt_mem bitmapmem;
struct irdma_virt_mem chunkmem;
};
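The pble changes above replace an open-coded, byte-sized allocation plus bitmap_zero() with the dedicated bitmap allocator. A minimal sketch of that pattern with made-up names (not the irdma code itself):

	#include <linux/bitmap.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	struct demo_chunk {
		unsigned long *bitmapbuf;
		unsigned int sizeofbitmap;
	};

	/* bitmap_zalloc() takes the size in bits and returns zeroed storage,
	 * so the separate size-in-bytes bookkeeping goes away. */
	static int demo_chunk_init(struct demo_chunk *c, unsigned int nbits)
	{
		c->bitmapbuf = bitmap_zalloc(nbits, GFP_KERNEL);
		if (!c->bitmapbuf)
			return -ENOMEM;
		c->sizeofbitmap = nbits;
		return 0;
	}

	/* bitmap_free(NULL) is a no-op, so no NULL check is needed here. */
	static void demo_chunk_destroy(struct demo_chunk *c)
	{
		bitmap_free(c->bitmapbuf);
		c->bitmapbuf = NULL;
	}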
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8b42c43fc14f..398736d8c78a 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2239,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
- pchunk->bitmapmem.size = sizeofbitmap >> 3;
- pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
- if (!pchunk->bitmapmem.va)
+ pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+ if (!pchunk->bitmapbuf)
return IRDMA_ERR_NO_MEMORY;
- pchunk->bitmapbuf = pchunk->bitmapmem.va;
- bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
pchunk->sizeofbitmap = sizeofbitmap;
/* each pble is 8 bytes hence shift by 3 */
pprm->total_pble_alloc += pchunk->size >> 3;
@@ -2491,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
ibevent.element.qp = &iwqp->ibqp;
iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq;
+ u64 qword3;
+ __le64 *cqe;
+ u8 polarity;
+
+ ukcq = &iwcq->sc_cq.cq_uk;
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != ukcq->polarity;
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 0f66e809d418..8cd5f9261692 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -3584,18 +3584,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
struct irdma_cq *iwcq;
struct irdma_cq_uk *ukcq;
unsigned long flags;
- enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+ enum irdma_cmpl_notify cq_notify;
+ bool promo_event = false;
+ int ret = 0;
+ cq_notify = notify_flags == IB_CQ_SOLICITED ?
+ IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
iwcq = to_iwcq(ibcq);
ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_SOLICITED)
- cq_notify = IRDMA_CQ_COMPL_SOLICITED;
spin_lock_irqsave(&iwcq->lock, flags);
- irdma_uk_cq_request_notification(ukcq, cq_notify);
+ /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+ if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+ promo_event = true;
+
+ if (!iwcq->armed || promo_event) {
+ iwcq->armed = true;
+ iwcq->last_notify = cq_notify;
+ irdma_uk_cq_request_notification(ukcq, cq_notify);
+ }
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+ ret = 1;
spin_unlock_irqrestore(&iwcq->lock, flags);
- return 0;
+ return ret;
}
static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index 5c244cd321a3..d0fdef8d09ea 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -110,6 +110,8 @@ struct irdma_cq {
u16 cq_size;
u16 cq_num;
bool user_mode;
+ bool armed;
+ enum irdma_cmpl_notify last_notify;
u32 polled_cmpls;
u32 cq_mem_size;
struct irdma_dma_mem kmem;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e636e954f6bf..4a7a56ed740b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -664,7 +664,6 @@ struct mlx5_ib_mr {
/* User MR data */
struct mlx5_cache_ent *cache_ent;
- struct ib_umem *umem;
/* This is zero'd when the MR is allocated */
union {
@@ -676,7 +675,7 @@ struct mlx5_ib_mr {
struct list_head list;
};
- /* Used only by kernel MRs (umem == NULL) */
+ /* Used only by kernel MRs */
struct {
void *descs;
void *descs_alloc;
@@ -697,8 +696,9 @@ struct mlx5_ib_mr {
int data_length;
};
- /* Used only by User MRs (umem != NULL) */
+ /* Used only by User MRs */
struct {
+ struct ib_umem *umem;
unsigned int page_shift;
/* Current access_flags */
int access_flags;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 157d862fb864..63e2129f1142 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1904,19 +1904,18 @@ err:
return ret;
}
-static void
-mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && mr->descs) {
- struct ib_device *device = mr->ibmr.device;
- int size = mr->max_descs * mr->desc_size;
- struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ int size = mr->max_descs * mr->desc_size;
- dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
- DMA_TO_DEVICE);
- kfree(mr->descs_alloc);
- mr->descs = NULL;
- }
+ if (!mr->descs)
+ return;
+
+ dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+ DMA_TO_DEVICE);
+ kfree(mr->descs_alloc);
+ mr->descs = NULL;
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
@@ -1992,7 +1991,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (mr->cache_ent) {
mlx5_mr_cache_free(dev, mr);
} else {
- mlx5_free_priv_descs(mr);
+ if (!udata)
+ mlx5_free_priv_descs(mr);
kfree(mr);
}
return 0;
@@ -2079,7 +2079,6 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
if (err)
goto err_free_in;
- mr->umem = NULL;
kfree(in);
return mr;
@@ -2206,7 +2205,6 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
}
mr->ibmr.device = pd->device;
- mr->umem = NULL;
switch (mr_type) {
case IB_MR_TYPE_MEM_REG:
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index ac11943a5ddb..bf2f30d67949 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
&addrlimit) ||
addrlimit > type_max(typeof(pkt->addrlimit))) {
ret = -EINVAL;
- goto free_pbc;
+ goto free_pkt;
}
pkt->addrlimit = addrlimit;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 975321812c87..54b8711321c1 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -359,6 +359,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
+ qp->sq.queue = NULL;
err1:
qp->pd = NULL;
qp->rcq = NULL;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index f7e459fe68be..76e4352fe3f6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -19,7 +19,7 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
int cpu;
cpu = raw_smp_processor_id();
- s = this_cpu_ptr(stats->pcpu_stats);
+ s = get_cpu_ptr(stats->pcpu_stats);
if (con->cpu != cpu) {
s->cpu_migr.to++;
@@ -27,14 +27,16 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
atomic_inc(&s->cpu_migr.from);
}
+ put_cpu_ptr(stats->pcpu_stats);
}
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
struct rtrs_clt_stats_pcpu *s;
- s = this_cpu_ptr(stats->pcpu_stats);
+ s = get_cpu_ptr(stats->pcpu_stats);
s->rdma.failover_cnt++;
+ put_cpu_ptr(stats->pcpu_stats);
}
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
@@ -169,9 +171,10 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
{
struct rtrs_clt_stats_pcpu *s;
- s = this_cpu_ptr(stats->pcpu_stats);
+ s = get_cpu_ptr(stats->pcpu_stats);
s->rdma.dir[d].cnt++;
s->rdma.dir[d].size_total += size;
+ put_cpu_ptr(stats->pcpu_stats);
}
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
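The rtrs-clt-stats hunks above swap this_cpu_ptr() for the get_cpu_ptr()/put_cpu_ptr() pair so the counters are updated with preemption disabled. A minimal, hypothetical sketch of that pattern:

	#include <linux/percpu.h>
	#include <linux/types.h>

	struct demo_stats {
		u64 events;
	};

	struct demo_ctx {
		struct demo_stats __percpu *pcpu_stats;	/* from alloc_percpu() */
	};

	/* get_cpu_ptr() disables preemption before handing back this CPU's
	 * slot, so the task cannot migrate mid-update; put_cpu_ptr()
	 * re-enables preemption afterwards. */
	static void demo_count_event(struct demo_ctx *ctx)
	{
		struct demo_stats *s = get_cpu_ptr(ctx->pcpu_stats);

		s->events++;
		put_cpu_ptr(ctx->pcpu_stats);
	}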
diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c
index 429411c6c0a8..a85a4f33aea8 100644
--- a/drivers/input/joystick/spaceball.c
+++ b/drivers/input/joystick/spaceball.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/serio.h>
+#include <asm/unaligned.h>
#define DRIVER_DESC "SpaceTec SpaceBall 2003/3003/4000 FLX driver"
@@ -75,9 +76,15 @@ static void spaceball_process_packet(struct spaceball* spaceball)
case 'D': /* Ball data */
if (spaceball->idx != 15) return;
- for (i = 0; i < 6; i++)
+ /*
+ * Skip first three bytes; read six axes worth of data.
+ * Axis values are signed 16-bit big-endian.
+ */
+ data += 3;
+ for (i = 0; i < ARRAY_SIZE(spaceball_axes); i++) {
input_report_abs(dev, spaceball_axes[i],
- (__s16)((data[2 * i + 3] << 8) | data[2 * i + 2]));
+ (__s16)get_unaligned_be16(&data[i * 2]));
+ }
break;
case 'K': /* Button data */
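A small, self-contained sketch of the byte handling the new comment describes; the packet layout matches the comment, but the function name and buffer are illustrative only:

	#include <linux/types.h>
	#include <asm/unaligned.h>

	/* Three header bytes, then six signed 16-bit big-endian axis values. */
	static void demo_parse_axes(const unsigned char *data, s16 axes[6])
	{
		int i;

		data += 3;	/* skip the header bytes */
		for (i = 0; i < 6; i++)
			axes[i] = (s16)get_unaligned_be16(&data[i * 2]);
	}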
diff --git a/drivers/input/misc/iqs626a.c b/drivers/input/misc/iqs626a.c
index d57e996732cf..23b5dd9552dc 100644
--- a/drivers/input/misc/iqs626a.c
+++ b/drivers/input/misc/iqs626a.c
@@ -456,9 +456,10 @@ struct iqs626_private {
unsigned int suspend_mode;
};
-static int iqs626_parse_events(struct iqs626_private *iqs626,
- const struct fwnode_handle *ch_node,
- enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_events(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
@@ -604,9 +605,10 @@ static int iqs626_parse_events(struct iqs626_private *iqs626,
return 0;
}
-static int iqs626_parse_ati_target(struct iqs626_private *iqs626,
- const struct fwnode_handle *ch_node,
- enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_ati_target(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
@@ -885,9 +887,10 @@ static int iqs626_parse_trackpad(struct iqs626_private *iqs626,
return 0;
}
-static int iqs626_parse_channel(struct iqs626_private *iqs626,
- const struct fwnode_handle *ch_node,
- enum iqs626_ch_id ch_id)
+static noinline_for_stack int
+iqs626_parse_channel(struct iqs626_private *iqs626,
+ const struct fwnode_handle *ch_node,
+ enum iqs626_ch_id ch_id)
{
struct iqs626_sys_reg *sys_reg = &iqs626->sys_reg;
struct i2c_client *client = iqs626->client;
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index bfa26651c0be..627048bc6a12 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -916,6 +916,8 @@ static int atp_probe(struct usb_interface *iface,
set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit);
set_bit(BTN_LEFT, input_dev->keybit);
+ INIT_WORK(&dev->work, atp_reinit);
+
error = input_register_device(dev->input);
if (error)
goto err_free_buffer;
@@ -923,8 +925,6 @@ static int atp_probe(struct usb_interface *iface,
/* save our data pointer in this interface device */
usb_set_intfdata(iface, dev);
- INIT_WORK(&dev->work, atp_reinit);
-
return 0;
err_free_buffer:
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 956d9cd34796..ece97f8c6a3e 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1588,7 +1588,13 @@ static const struct dmi_system_id no_hw_res_dmi_table[] = {
*/
static int elantech_change_report_id(struct psmouse *psmouse)
{
- unsigned char param[2] = { 0x10, 0x03 };
+ /*
+ * NOTE: the code is expecting to receive param[] as an array of 3
+ * items (see __ps2_command()), even if in this case only 2 are
+ * actually needed. Make sure the array size is 3 to avoid potential
+ * stack out-of-bound accesses.
+ */
+ unsigned char param[3] = { 0x10, 0x03 };
if (elantech_write_reg_params(psmouse, 0x7, param) ||
elantech_read_reg_params(psmouse, 0x7, param) ||
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index aedd05541044..148a7c5fd0e2 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -995,6 +995,24 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
{ }
};
+static const struct dmi_system_id i8042_dmi_probe_defer_table[] __initconst = {
+ {
+ /* ASUS ZenBook UX425UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"),
+ },
+ },
+ {
+ /* ASUS ZenBook UM325UA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"),
+ },
+ },
+ { }
+};
+
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@@ -1315,6 +1333,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_kbdreset_table))
i8042_kbdreset = true;
+ if (dmi_check_system(i8042_dmi_probe_defer_table))
+ i8042_probe_defer = true;
+
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 0b9f1d0a8f8b..3fc0a89cc785 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -45,6 +45,10 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
+static bool i8042_probe_defer;
+module_param_named(probe_defer, i8042_probe_defer, bool, 0);
+MODULE_PARM_DESC(probe_defer, "Allow deferred probing.");
+
enum i8042_controller_reset_mode {
I8042_RESET_NEVER,
I8042_RESET_ALWAYS,
@@ -711,7 +715,7 @@ static int i8042_set_mux_mode(bool multiplex, unsigned char *mux_version)
* LCS/Telegraphics.
*/
-static int __init i8042_check_mux(void)
+static int i8042_check_mux(void)
{
unsigned char mux_version;
@@ -740,10 +744,10 @@ static int __init i8042_check_mux(void)
/*
* The following is used to test AUX IRQ delivery.
*/
-static struct completion i8042_aux_irq_delivered __initdata;
-static bool i8042_irq_being_tested __initdata;
+static struct completion i8042_aux_irq_delivered;
+static bool i8042_irq_being_tested;
-static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
+static irqreturn_t i8042_aux_test_irq(int irq, void *dev_id)
{
unsigned long flags;
unsigned char str, data;
@@ -770,7 +774,7 @@ static irqreturn_t __init i8042_aux_test_irq(int irq, void *dev_id)
* verifies success by reading CTR. Used when testing for presence of AUX
* port.
*/
-static int __init i8042_toggle_aux(bool on)
+static int i8042_toggle_aux(bool on)
{
unsigned char param;
int i;
@@ -798,7 +802,7 @@ static int __init i8042_toggle_aux(bool on)
* the presence of an AUX interface.
*/
-static int __init i8042_check_aux(void)
+static int i8042_check_aux(void)
{
int retval = -1;
bool irq_registered = false;
@@ -1005,7 +1009,7 @@ static int i8042_controller_init(void)
if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) {
pr_err("Can't read CTR while initializing i8042\n");
- return -EIO;
+ return i8042_probe_defer ? -EPROBE_DEFER : -EIO;
}
} while (n < 2 || ctr[0] != ctr[1]);
@@ -1320,7 +1324,7 @@ static void i8042_shutdown(struct platform_device *dev)
i8042_controller_reset(false);
}
-static int __init i8042_create_kbd_port(void)
+static int i8042_create_kbd_port(void)
{
struct serio *serio;
struct i8042_port *port = &i8042_ports[I8042_KBD_PORT_NO];
@@ -1349,7 +1353,7 @@ static int __init i8042_create_kbd_port(void)
return 0;
}
-static int __init i8042_create_aux_port(int idx)
+static int i8042_create_aux_port(int idx)
{
struct serio *serio;
int port_no = idx < 0 ? I8042_AUX_PORT_NO : I8042_MUX_PORT_NO + idx;
@@ -1386,13 +1390,13 @@ static int __init i8042_create_aux_port(int idx)
return 0;
}
-static void __init i8042_free_kbd_port(void)
+static void i8042_free_kbd_port(void)
{
kfree(i8042_ports[I8042_KBD_PORT_NO].serio);
i8042_ports[I8042_KBD_PORT_NO].serio = NULL;
}
-static void __init i8042_free_aux_ports(void)
+static void i8042_free_aux_ports(void)
{
int i;
@@ -1402,7 +1406,7 @@ static void __init i8042_free_aux_ports(void)
}
}
-static void __init i8042_register_ports(void)
+static void i8042_register_ports(void)
{
int i;
@@ -1443,7 +1447,7 @@ static void i8042_free_irqs(void)
i8042_aux_irq_registered = i8042_kbd_irq_registered = false;
}
-static int __init i8042_setup_aux(void)
+static int i8042_setup_aux(void)
{
int (*aux_enable)(void);
int error;
@@ -1485,7 +1489,7 @@ static int __init i8042_setup_aux(void)
return error;
}
-static int __init i8042_setup_kbd(void)
+static int i8042_setup_kbd(void)
{
int error;
@@ -1535,7 +1539,7 @@ static int i8042_kbd_bind_notifier(struct notifier_block *nb,
return 0;
}
-static int __init i8042_probe(struct platform_device *dev)
+static int i8042_probe(struct platform_device *dev)
{
int error;
@@ -1600,6 +1604,7 @@ static struct platform_driver i8042_driver = {
.pm = &i8042_pm_ops,
#endif
},
+ .probe = i8042_probe,
.remove = i8042_remove,
.shutdown = i8042_shutdown,
};
@@ -1610,7 +1615,6 @@ static struct notifier_block i8042_kbd_bind_notifier_block = {
static int __init i8042_init(void)
{
- struct platform_device *pdev;
int err;
dbg_init();
@@ -1626,17 +1630,29 @@ static int __init i8042_init(void)
/* Set this before creating the dev to allow i8042_command to work right away */
i8042_present = true;
- pdev = platform_create_bundle(&i8042_driver, i8042_probe, NULL, 0, NULL, 0);
- if (IS_ERR(pdev)) {
- err = PTR_ERR(pdev);
+ err = platform_driver_register(&i8042_driver);
+ if (err)
goto err_platform_exit;
+
+ i8042_platform_device = platform_device_alloc("i8042", -1);
+ if (!i8042_platform_device) {
+ err = -ENOMEM;
+ goto err_unregister_driver;
}
+ err = platform_device_add(i8042_platform_device);
+ if (err)
+ goto err_free_device;
+
bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
panic_blink = i8042_panic_blink;
return 0;
+err_free_device:
+ platform_device_put(i8042_platform_device);
+err_unregister_driver:
+ platform_driver_unregister(&i8042_driver);
err_platform_exit:
i8042_platform_exit();
return err;
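The i8042_init() rework above replaces platform_create_bundle() with explicit driver and device registration. A generic sketch of the registration order and error unwinding, with placeholder names:

	#include <linux/platform_device.h>
	#include <linux/errno.h>

	static struct platform_driver demo_driver;	/* assumed defined elsewhere */
	static struct platform_device *demo_pdev;

	static int __init demo_init(void)
	{
		int err;

		err = platform_driver_register(&demo_driver);
		if (err)
			return err;

		demo_pdev = platform_device_alloc("demo", -1);
		if (!demo_pdev) {
			err = -ENOMEM;
			goto err_unregister_driver;
		}

		err = platform_device_add(demo_pdev);
		if (err)
			goto err_put_device;

		return 0;

	err_put_device:
		/* a failed platform_device_add() is undone with _put(), not _del() */
		platform_device_put(demo_pdev);
	err_unregister_driver:
		platform_driver_unregister(&demo_driver);
		return err;
	}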
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 05de92c0293b..eb66cd2689b7 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1882,7 +1882,7 @@ static int mxt_read_info_block(struct mxt_data *data)
if (error) {
dev_err(&client->dev, "Error %d parsing object table\n", error);
mxt_free_object_table(data);
- goto err_free_mem;
+ return error;
}
data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 7e13a66a8a95..879a4d984c90 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -117,6 +117,19 @@
#define ELAN_POWERON_DELAY_USEC 500
#define ELAN_RESET_DELAY_MSEC 20
+/* FW boot code version */
+#define BC_VER_H_BYTE_FOR_EKTH3900x1_I2C 0x72
+#define BC_VER_H_BYTE_FOR_EKTH3900x2_I2C 0x82
+#define BC_VER_H_BYTE_FOR_EKTH3900x3_I2C 0x92
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C 0x6D
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C 0x6E
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C 0x77
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C 0x78
+#define BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB 0x67
+#define BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB 0x68
+#define BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB 0x74
+#define BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB 0x75
+
enum elants_chip_id {
EKTH3500,
EKTF3624,
@@ -736,6 +749,37 @@ static int elants_i2c_validate_remark_id(struct elants_data *ts,
return 0;
}
+static bool elants_i2c_should_check_remark_id(struct elants_data *ts)
+{
+ struct i2c_client *client = ts->client;
+ const u8 bootcode_version = ts->iap_version;
+ bool check;
+
+ /* I2C eKTH3900 and eKTH5312 do NOT support Remark ID */
+ if ((bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x1_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x2_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH3900x3_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x1_I2C_USB) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312x2_I2C_USB) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx1_I2C_USB) ||
+ (bootcode_version == BC_VER_H_BYTE_FOR_EKTH5312cx2_I2C_USB)) {
+ dev_dbg(&client->dev,
+ "eKTH3900/eKTH5312(0x%02x) are not support remark id\n",
+ bootcode_version);
+ check = false;
+ } else if (bootcode_version >= 0x60) {
+ check = true;
+ } else {
+ check = false;
+ }
+
+ return check;
+}
+
static int elants_i2c_do_update_firmware(struct i2c_client *client,
const struct firmware *fw,
bool force)
@@ -749,7 +793,7 @@ static int elants_i2c_do_update_firmware(struct i2c_client *client,
u16 send_id;
int page, n_fw_pages;
int error;
- bool check_remark_id = ts->iap_version >= 0x60;
+ bool check_remark_id = elants_i2c_should_check_remark_id(ts);
/* Recovery mode detection! */
if (force) {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b5cc91788195..aaa3c455e01e 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -102,6 +102,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "911", .data = &gt911_chip_data },
{ .id = "9271", .data = &gt911_chip_data },
{ .id = "9110", .data = &gt911_chip_data },
+ { .id = "9111", .data = &gt911_chip_data },
{ .id = "927", .data = &gt911_chip_data },
{ .id = "928", .data = &gt911_chip_data },
@@ -650,10 +651,16 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
usleep_range(6000, 10000); /* T4: > 5ms */
- /* end select I2C slave addr */
- error = gpiod_direction_input(ts->gpiod_rst);
- if (error)
- goto error;
+ /*
+ * Put the reset pin back into input / high-impedance mode to save
+ * power. Only do this in the non-ACPI case since some ACPI boards
+ * don't have a pull-up; on those boards the reset pin must stay active-high.
+ */
+ if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
+ error = gpiod_direction_input(ts->gpiod_rst);
+ if (error)
+ goto error;
+ }
return 0;
@@ -787,6 +794,14 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
return -EINVAL;
}
+ /*
+ * Normally we put the reset pin in input / high-impedance mode to save
+ * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
+ * case, leave the pin as is. This results in the pin not being touched
+ * at all on x86/ACPI boards, except when needed for error recovery.
+ */
+ ts->gpiod_rst_flags = GPIOD_ASIS;
+
return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
}
#else
@@ -812,6 +827,12 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
+ /*
+ * By default we request the reset pin as input, leaving it in
+ * high-impedance when not resetting the controller to save power.
+ */
+ ts->gpiod_rst_flags = GPIOD_IN;
+
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
if (IS_ERR(ts->avdd28)) {
error = PTR_ERR(ts->avdd28);
@@ -849,7 +870,7 @@ retry_get_irq_gpio:
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
- gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_IN);
+ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index 62138f930d1a..02065d1c3263 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -87,6 +87,7 @@ struct goodix_ts_data {
struct gpio_desc *gpiod_rst;
int gpio_count;
int gpio_int_idx;
+ enum gpiod_flags gpiod_rst_flags;
char id[GOODIX_ID_MAX_LEN + 1];
char cfg_name[64];
u16 version;
diff --git a/drivers/input/touchscreen/goodix_fwupload.c b/drivers/input/touchscreen/goodix_fwupload.c
index c1e7a2413078..191d4f38d991 100644
--- a/drivers/input/touchscreen/goodix_fwupload.c
+++ b/drivers/input/touchscreen/goodix_fwupload.c
@@ -207,7 +207,7 @@ static int goodix_firmware_upload(struct goodix_ts_data *ts)
error = goodix_reset_no_int_sync(ts);
if (error)
- return error;
+ goto release;
error = goodix_enter_upload_mode(ts->client);
if (error)
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 3759dc36cc8f..2543ef65825b 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -707,7 +707,7 @@ static const struct irq_domain_ops aic_ipi_domain_ops = {
.free = aic_ipi_free,
};
-static int aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
+static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
struct irq_domain *ipi_domain;
int base_ipi;
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 80906bfec845..5b8d571c041d 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -232,16 +232,12 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
int hwirq, i;
mutex_lock(&msi_used_lock);
+ hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
+ order_base_2(nr_irqs));
+ mutex_unlock(&msi_used_lock);
- hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
- 0, nr_irqs, 0);
- if (hwirq >= PCI_MSI_DOORBELL_NR) {
- mutex_unlock(&msi_used_lock);
+ if (hwirq < 0)
return -ENOSPC;
- }
-
- bitmap_set(msi_used, hwirq, nr_irqs);
- mutex_unlock(&msi_used_lock);
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
NULL, NULL);
}
- return hwirq;
+ return 0;
}
static void armada_370_xp_msi_free(struct irq_domain *domain,
@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
mutex_lock(&msi_used_lock);
- bitmap_clear(msi_used, d->hwirq, nr_irqs);
+ bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
}
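The MSI allocator above now uses the bitmap region helpers, which hand out naturally aligned power-of-two blocks. A minimal sketch (the bitmap size and names are illustrative):

	#include <linux/bitmap.h>
	#include <linux/log2.h>

	#define DEMO_NR_HWIRQS	64

	static DECLARE_BITMAP(demo_used, DEMO_NR_HWIRQS);

	/* Returns the first bit of the reserved block, or a negative errno if
	 * no aligned block of 2^order bits is free -- hence the "hwirq < 0"
	 * check in the hunk above. */
	static int demo_alloc_hwirqs(unsigned int nr_irqs)
	{
		return bitmap_find_free_region(demo_used, DEMO_NR_HWIRQS,
					       order_base_2(nr_irqs));
	}

	static void demo_free_hwirqs(int hwirq, unsigned int nr_irqs)
	{
		bitmap_release_region(demo_used, hwirq, order_base_2(nr_irqs));
	}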
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index f3c6855a4cef..18b77c3e6db4 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -76,8 +76,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
generic_handle_domain_irq(scu_ic->irq_domain,
bit - scu_ic->irq_shift);
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
}
chained_irq_exit(chip, desc);
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index d80e67a6aad2..bb6609cebdbc 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -238,6 +238,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
}
data->num_parent_irqs = platform_irq_count(pdev);
+ put_device(&pdev->dev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index eb0882d15366..0cb584d9815b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return NULL;
+ return desc->its_invall_cmd.col;
}
static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index d02b05a067d9..ff89b36267dd 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "irq-mips-gic: " fmt
+#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
@@ -735,8 +736,7 @@ static int __init gic_of_init(struct device_node *node,
mips_gic_base = ioremap(gic_base, gic_len);
gicconfig = read_gic_config();
- gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
- gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
+ gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
if (cpu_has_veic) {
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 63bac3f78863..ba4759b3e269 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -26,7 +26,7 @@
#define NVIC_ISER 0x000
#define NVIC_ICER 0x080
-#define NVIC_IPR 0x300
+#define NVIC_IPR 0x400
#define NVIC_MAX_BANKS 16
/*
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index 55891e420446..a41b4b264594 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -381,7 +381,7 @@ mISDNInit(void)
err = mISDN_inittimer(&debug);
if (err)
goto error2;
- err = l1_init(&debug);
+ err = Isdnl1_Init(&debug);
if (err)
goto error3;
err = Isdnl2_Init(&debug);
@@ -395,7 +395,7 @@ mISDNInit(void)
error5:
Isdnl2_cleanup();
error4:
- l1_cleanup();
+ Isdnl1_cleanup();
error3:
mISDN_timer_cleanup();
error2:
@@ -408,7 +408,7 @@ static void mISDN_cleanup(void)
{
misdn_sock_cleanup();
Isdnl2_cleanup();
- l1_cleanup();
+ Isdnl1_cleanup();
mISDN_timer_cleanup();
class_unregister(&mISDN_class);
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
index 23b44d303327..42599f49c189 100644
--- a/drivers/isdn/mISDN/core.h
+++ b/drivers/isdn/mISDN/core.h
@@ -60,8 +60,8 @@ struct Bprotocol *get_Bprotocol4id(u_int);
extern int mISDN_inittimer(u_int *);
extern void mISDN_timer_cleanup(void);
-extern int l1_init(u_int *);
-extern void l1_cleanup(void);
+extern int Isdnl1_Init(u_int *);
+extern void Isdnl1_cleanup(void);
extern int Isdnl2_Init(u_int *);
extern void Isdnl2_cleanup(void);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index 98a3bc6c1700..7b31c25a550e 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -398,7 +398,7 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
EXPORT_SYMBOL(create_l1);
int
-l1_init(u_int *deb)
+Isdnl1_Init(u_int *deb)
{
debug = deb;
l1fsm_s.state_count = L1S_STATE_COUNT;
@@ -409,7 +409,7 @@ l1_init(u_int *deb)
}
void
-l1_cleanup(void)
+Isdnl1_cleanup(void)
{
mISDN_FsmFree(&l1fsm_s);
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 86b9e355c583..140f35dc0c45 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1139,6 +1139,7 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
static void cached_dev_detach_finish(struct work_struct *w)
{
struct cached_dev *dc = container_of(w, struct cached_dev, detach);
+ struct cache_set *c = dc->disk.c;
BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
BUG_ON(refcount_read(&dc->count));
@@ -1156,7 +1157,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
- calc_cached_dev_sectors(dc->disk.c);
+ calc_cached_dev_sectors(c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 6319deccbe09..7af242de3202 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1963,7 +1963,7 @@ static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
n_sectors -= bv.bv_len >> SECTOR_SHIFT;
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
- mem = bvec_kmap_local(&bv);
+ mem = kmap_local_page(bv.bv_page);
if (likely(dio->op == REQ_OP_WRITE))
flush_dcache_page(bv.bv_page);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5111ed966947..41d6e2383517 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2189,6 +2189,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
+ rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
@@ -6270,7 +6271,8 @@ static void __md_stop(struct mddev *mddev)
spin_lock(&mddev->lock);
mddev->pers = NULL;
spin_unlock(&mddev->lock);
- pers->free(mddev, mddev->private);
+ if (mddev->private)
+ pers->free(mddev, mddev->private);
mddev->private = NULL;
if (pers->sync_request && mddev->to_remove == NULL)
mddev->to_remove = &md_redundancy_group;
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 70532335c7c7..cb670f16e98e 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
memcpy(n, dm_block_data(child),
dm_bm_block_size(dm_tm_get_bm(info->tm)));
- dm_tm_unlock(info->tm, child);
dm_tm_dec(info->tm, dm_block_location(child));
+ dm_tm_unlock(info->tm, child);
return 0;
}
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index 8c72eb590f79..6ac509c1821c 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1803,8 +1803,6 @@ static int rtsx_pci_runtime_suspend(struct device *device)
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_power_off(pcr, HOST_ENTER_S3);
- free_irq(pcr->irq, (void *)pcr);
-
mutex_unlock(&pcr->pcr_mutex);
pcr->is_runtime_suspended = true;
@@ -1825,8 +1823,6 @@ static int rtsx_pci_runtime_resume(struct device *device)
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
- rtsx_pci_acquire_irq(pcr);
- synchronize_irq(pcr->irq);
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 632325474233..b38978a3b3ff 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -376,7 +376,6 @@ MODULE_DEVICE_TABLE(spi, at25_spi_ids);
static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
- struct spi_eeprom chip;
int err;
int sr;
u8 id[FM25_ID_LEN];
@@ -389,15 +388,18 @@ static int at25_probe(struct spi_device *spi)
if (match && !strcmp(match->compatible, "cypress,fm25"))
is_fram = 1;
+ at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
+ if (!at25)
+ return -ENOMEM;
+
/* Chip description */
- if (!spi->dev.platform_data) {
- if (!is_fram) {
- err = at25_fw_to_chip(&spi->dev, &chip);
- if (err)
- return err;
- }
- } else
- chip = *(struct spi_eeprom *)spi->dev.platform_data;
+ if (spi->dev.platform_data) {
+ memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
+ } else if (!is_fram) {
+ err = at25_fw_to_chip(&spi->dev, &at25->chip);
+ if (err)
+ return err;
+ }
/* Ping the chip ... the status register is pretty portable,
* unlike probing manufacturer IDs. We do expect that system
@@ -409,12 +411,7 @@ static int at25_probe(struct spi_device *spi)
return -ENXIO;
}
- at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
- if (!at25)
- return -ENOMEM;
-
mutex_init(&at25->lock);
- at25->chip = chip;
at25->spi = spi;
spi_set_drvdata(spi, at25);
@@ -431,7 +428,7 @@ static int at25_probe(struct spi_device *spi)
dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
return -ENODEV;
}
- chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
+ at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
if (at25->chip.byte_len > 64 * 1024)
at25->chip.flags |= EE_ADDR3;
@@ -464,7 +461,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.type = is_fram ? NVMEM_TYPE_FRAM : NVMEM_TYPE_EEPROM;
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
- at25->nvmem_config.read_only = chip.flags & EE_READONLY;
+ at25->nvmem_config.read_only = at25->chip.flags & EE_READONLY;
at25->nvmem_config.root_only = true;
at25->nvmem_config.owner = THIS_MODULE;
at25->nvmem_config.compat = true;
@@ -474,17 +471,18 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.priv = at25;
at25->nvmem_config.stride = 1;
at25->nvmem_config.word_size = 1;
- at25->nvmem_config.size = chip.byte_len;
+ at25->nvmem_config.size = at25->chip.byte_len;
at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
dev_info(&spi->dev, "%d %s %s %s%s, pagesize %u\n",
- (chip.byte_len < 1024) ? chip.byte_len : (chip.byte_len / 1024),
- (chip.byte_len < 1024) ? "Byte" : "KByte",
+ (at25->chip.byte_len < 1024) ?
+ at25->chip.byte_len : (at25->chip.byte_len / 1024),
+ (at25->chip.byte_len < 1024) ? "Byte" : "KByte",
at25->chip.name, is_fram ? "fram" : "eeprom",
- (chip.flags & EE_READONLY) ? " (readonly)" : "",
+ (at25->chip.flags & EE_READONLY) ? " (readonly)" : "",
at25->chip.page_size);
return 0;
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 39aca7753719..4ccbf43e6bfa 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -719,16 +719,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
u64 size = 0;
- int i;
+ int oix;
size = ALIGN(metalen, FASTRPC_ALIGN);
- for (i = 0; i < ctx->nscalars; i++) {
+ for (oix = 0; oix < ctx->nbufs; oix++) {
+ int i = ctx->olaps[oix].raix;
+
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
- if (ctx->olaps[i].offset == 0)
+ if (ctx->olaps[oix].offset == 0)
size = ALIGN(size, FASTRPC_ALIGN);
- size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
+ size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
}
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 240c5af793dc..368f10405e13 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2264,7 +2264,7 @@ void mmc_start_host(struct mmc_host *host)
_mmc_detect_change(host, 0, false);
}
-void mmc_stop_host(struct mmc_host *host)
+void __mmc_stop_host(struct mmc_host *host)
{
if (host->slot.cd_irq >= 0) {
mmc_gpio_set_cd_wake(host, false);
@@ -2273,6 +2273,11 @@ void mmc_stop_host(struct mmc_host *host)
host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
+}
+
+void mmc_stop_host(struct mmc_host *host)
+{
+ __mmc_stop_host(host);
/* clear pm flags now and let card drivers set them as needed */
host->pm_flags = 0;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 7931a4f0137d..f5f3f623ea49 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -70,6 +70,7 @@ static inline void mmc_delay(unsigned int ms)
void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
+void __mmc_stop_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index d4683b1d263f..cf140f4ec864 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -80,9 +80,18 @@ static void mmc_host_classdev_release(struct device *dev)
kfree(host);
}
+static int mmc_host_classdev_shutdown(struct device *dev)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ __mmc_stop_host(host);
+ return 0;
+}
+
static struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
+ .shutdown_pre = mmc_host_classdev_shutdown,
.pm = MMC_HOST_CLASS_DEV_PM_OPS,
};
diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c
index 7cd9c0ec2fcf..8fdd0bbbfa21 100644
--- a/drivers/mmc/host/meson-mx-sdhc-mmc.c
+++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c
@@ -135,6 +135,7 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
struct mmc_command *cmd)
{
struct meson_mx_sdhc_host *host = mmc_priv(mmc);
+ bool manual_stop = false;
u32 ictl, send;
int pack_len;
@@ -172,12 +173,27 @@ static void meson_mx_sdhc_start_cmd(struct mmc_host *mmc,
else
/* software flush: */
ictl |= MESON_SDHC_ICTL_DATA_XFER_OK;
+
+ /*
+ * Mimic the logic from the vendor driver where (only)
+ * SD_IO_RW_EXTENDED commands with more than one block set the
+ * MESON_SDHC_MISC_MANUAL_STOP bit. This fixes the firmware
+ * download in the brcmfmac driver for a BCM43362/1 card.
+ * Without this sdio_memcpy_toio() (with a size of 219557
+ * bytes) times out if MESON_SDHC_MISC_MANUAL_STOP is not set.
+ */
+ manual_stop = cmd->data->blocks > 1 &&
+ cmd->opcode == SD_IO_RW_EXTENDED;
} else {
pack_len = 0;
ictl |= MESON_SDHC_ICTL_RESP_OK;
}
+ regmap_update_bits(host->regmap, MESON_SDHC_MISC,
+ MESON_SDHC_MISC_MANUAL_STOP,
+ manual_stop ? MESON_SDHC_MISC_MANUAL_STOP : 0);
+
if (cmd->opcode == MMC_STOP_TRANSMISSION)
send |= MESON_SDHC_SEND_DATA_STOP;
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index fdaa11f92fe6..a75d3dd34d18 100644
--- a/drivers/mmc/host/mmci_stm32_sdmmc.c
+++ b/drivers/mmc/host/mmci_stm32_sdmmc.c
@@ -441,6 +441,8 @@ static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
return -EINVAL;
}
+ writel_relaxed(0, dlyb->base + DLYB_CR);
+
phase = end_of_len - max_len / 2;
sdmmc_dlyb_set_cfgr(dlyb, dlyb->unit, phase, false);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 943940b44e83..632775217d35 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -2291,8 +2291,10 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
sdr_set_field(host->base + PAD_DS_TUNE,
PAD_DS_TUNE_DLY1, i);
ret = mmc_get_ext_csd(card, &ext_csd);
- if (!ret)
+ if (!ret) {
result_dly1 |= (1 << i);
+ kfree(ext_csd);
+ }
}
host->hs400_tuning = false;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index a4407f391f66..f5b2684ad805 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -673,7 +673,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
- int cmd_error;
+ int cmd_error = 0;
/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index a5001875876b..9762ffab2e23 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -356,23 +356,6 @@ static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
}
}
-static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
- struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- u32 val;
-
- val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
- if (ios->enhanced_strobe)
- val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
- else
- val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
-
- sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
-
-}
-
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -793,6 +776,32 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
}
}
+static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 val;
+
+ val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+
+ if (ios->enhanced_strobe) {
+ val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ /*
+ * When CMD13 is sent from mmc_select_hs400es() after
+ * switching to HS400ES mode, the bus is operating at
+ * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
+ * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
+ * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
+ * controller CAR clock and the interface clock are rate matched.
+ */
+ tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
+ } else {
+ val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
+ }
+
+ sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
+}
+
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 9802e265fca8..2b317ed6c103 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -96,6 +96,13 @@ struct dataflash {
struct mtd_info mtd;
};
+static const struct spi_device_id dataflash_dev_ids[] = {
+ { "at45" },
+ { "dataflash" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, dataflash_dev_ids);
+
#ifdef CONFIG_OF
static const struct of_device_id dataflash_dt_ids[] = {
{ .compatible = "atmel,at45", },
@@ -927,6 +934,7 @@ static struct spi_driver dataflash_driver = {
.name = "mtd_dataflash",
.of_match_table = of_match_ptr(dataflash_dt_ids),
},
+ .id_table = dataflash_dev_ids,
.probe = dataflash_probe,
.remove = dataflash_remove,
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 67b7cb67c030..0a45d3c6c15b 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -26,7 +26,7 @@ config MTD_NAND_DENALI_PCI
config MTD_NAND_DENALI_DT
tristate "Denali NAND controller as a DT device"
select MTD_NAND_DENALI
- depends on HAS_DMA && HAVE_CLK && OF
+ depends on HAS_DMA && HAVE_CLK && OF && HAS_IOMEM
help
Enable the driver for NAND flash on platforms using a Denali NAND
controller as a DT device.
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 658f0cbe7ce8..6b2bda815b88 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@@ -93,6 +94,14 @@
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+/*
+ * According to SPEAr300 Reference Manual (RM0082)
+ * TOUDEL = 7ns (Output delay from the flip-flops to the board)
+ * TINDEL = 5ns (Input delay from the board to the flipflop)
+ */
+#define TOUTDEL 7000
+#define TINDEL 5000
+
struct fsmc_nand_timings {
u8 tclr;
u8 tar;
@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
{
unsigned long hclk = clk_get_rate(host->clk);
unsigned long hclkn = NSEC_PER_SEC / hclk;
- u32 thiz, thold, twait, tset;
+ u32 thiz, thold, twait, tset, twait_min;
if (sdrt->tRC_min < 30000)
return -EOPNOTSUPP;
@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
else if (tims->thold > FSMC_THOLD_MASK)
tims->thold = FSMC_THOLD_MASK;
- twait = max(sdrt->tRP_min, sdrt->tWP_min);
- tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
- if (tims->twait == 0)
- tims->twait = 1;
- else if (tims->twait > FSMC_TWAIT_MASK)
- tims->twait = FSMC_TWAIT_MASK;
-
tset = max(sdrt->tCS_min - sdrt->tWP_min,
sdrt->tCEA_max - sdrt->tREA_max);
tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
else if (tims->tset > FSMC_TSET_MASK)
tims->tset = FSMC_TSET_MASK;
+ /*
+ * According to SPEAr300 Reference Manual (RM0082) which gives more
+ * information related to FSMC timings than the SPEAr600 one (RM0305),
+ * twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
+ */
+ twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+ + TOUTDEL + TINDEL;
+ twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
+
+ tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
+ if (tims->twait == 0)
+ tims->twait = 1;
+ else if (tims->twait > FSMC_TWAIT_MASK)
+ tims->twait = FSMC_TWAIT_MASK;
+
return 0;
}
@@ -664,6 +681,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
instr->ctx.waitrdy.timeout_ms);
break;
}
+
+ if (instr->delay_ns)
+ ndelay(instr->delay_ns);
}
return ret;
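As a worked illustration of the relocated twait calculation (the numbers are chosen only for the example, not taken from any datasheet): with hclkn = 6 ns, tims->tset = 1, tCEA_max = 25 ns and tRP_min = tWP_min = 12 ns, twait_min = 25000 - (2 * 6000) + 7000 + 5000 = 25000 ps, so twait = 25 ns and tims->twait = DIV_ROUND_UP(25, 6) - 1 = 4 wait cycles, whereas the old max(tRP_min, tWP_min) rule alone would have programmed only DIV_ROUND_UP(12, 6) - 1 = 1.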
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 3d6c6e880520..a130320de412 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
struct nand_sdr_timings *spec_timings)
{
const struct nand_controller_ops *ops = chip->controller->ops;
- int best_mode = 0, mode, ret;
+ int best_mode = 0, mode, ret = -EOPNOTSUPP;
iface->type = NAND_SDR_IFACE;
@@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip,
struct nand_nvddr_timings *spec_timings)
{
const struct nand_controller_ops *ops = chip->controller->ops;
- int best_mode = 0, mode, ret;
+ int best_mode = 0, mode, ret = -EOPNOTSUPP;
iface->type = NAND_NVDDR_IFACE;
@@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
NAND_OP_CMD(NAND_CMD_ERASE1, 0),
NAND_OP_ADDR(2, addrs, 0),
NAND_OP_CMD(NAND_CMD_ERASE2,
- NAND_COMMON_TIMING_MS(conf, tWB_max)),
+ NAND_COMMON_TIMING_NS(conf, tWB_max)),
NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
0),
};
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 2ec8e015c7b3..533e476988f2 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1501,14 +1501,14 @@ void bond_alb_monitor(struct work_struct *work)
struct slave *slave;
if (!bond_has_slaves(bond)) {
- bond_info->tx_rebalance_counter = 0;
+ atomic_set(&bond_info->tx_rebalance_counter, 0);
bond_info->lp_counter = 0;
goto re_arm;
}
rcu_read_lock();
- bond_info->tx_rebalance_counter++;
+ atomic_inc(&bond_info->tx_rebalance_counter);
bond_info->lp_counter++;
/* send learning packets */
@@ -1530,7 +1530,7 @@ void bond_alb_monitor(struct work_struct *work)
}
/* rebalance tx traffic */
- if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
+ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1540,7 +1540,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->unbalanced_load = 0;
}
}
- bond_info->tx_rebalance_counter = 0;
+ atomic_set(&bond_info->tx_rebalance_counter, 0);
}
if (bond_info->rlb_enabled) {
@@ -1610,7 +1610,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
tlb_init_slave(slave);
/* order a rebalance ASAP */
- bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ atomic_set(&bond->alb_info.tx_rebalance_counter,
+ BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
@@ -1647,7 +1648,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
rlb_clear_slave(bond, slave);
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
- bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+ atomic_set(&bond_info->tx_rebalance_counter,
+ BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a8fde3bc458f..b93337b5a721 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1526,7 +1526,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
mac = (u8 *)&newval->value;
}
- if (!is_valid_ether_addr(mac))
+ if (is_multicast_ether_addr(mac))
goto err;
netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 74d9899fc904..eb74cdf26b88 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
+/* Kvaser KCAN_EPACK second word */
+#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)
+
struct kvaser_pciefd;
struct kvaser_pciefd_can {
@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
can->err_rep_cnt++;
can->can.can_stats.bus_error++;
- stats->rx_errors++;
+ if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
+ stats->tx_errors++;
+ else
+ stats->rx_errors++;
can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2470c47b2e31..c2a8421e7845 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -204,16 +204,16 @@ enum m_can_reg {
/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
-#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
- IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
+ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+ IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
- IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
- IR_RF1L | IR_RF0L)
+#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+ IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
+ IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
@@ -517,7 +517,7 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DATA,
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
- goto out_fail;
+ goto out_free_skb;
}
/* acknowledge rx fifo 0 */
@@ -532,6 +532,8 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
return 0;
+out_free_skb:
+ kfree_skb(skb);
out_fail:
netdev_err(dev, "FIFO read returned %d\n", err);
return err;
@@ -810,8 +812,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
- if (irqstatus & IR_ELO)
- netdev_err(dev, "Error Logging Overflow\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
if (irqstatus & IR_BEC)
@@ -1494,20 +1494,32 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = &m_can_bittiming_const_30X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
+ cdev->can.bittiming_const = cdev->bit_timing ?
+ cdev->bit_timing : &m_can_bittiming_const_30X;
+
+ cdev->can.data_bittiming_const = cdev->data_timing ?
+ cdev->data_timing :
+ &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = cdev->bit_timing ?
+ cdev->bit_timing : &m_can_bittiming_const_31X;
+
+ cdev->can.data_bittiming_const = cdev->data_timing ?
+ cdev->data_timing :
+ &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = &m_can_bittiming_const_31X;
- cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = cdev->bit_timing ?
+ cdev->bit_timing : &m_can_bittiming_const_31X;
+
+ cdev->can.data_bittiming_const = cdev->data_timing ?
+ cdev->data_timing :
+ &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index d18b515e6ccc..2c5d40997168 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -85,6 +85,9 @@ struct m_can_classdev {
struct sk_buff *tx_skb;
struct phy *transceiver;
+ const struct can_bittiming_const *bit_timing;
+ const struct can_bittiming_const *data_timing;
+
struct m_can_ops *ops;
int version;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 89cc3d41e952..b56a54d6c5a9 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,9 +18,14 @@
#define M_CAN_PCI_MMIO_BAR 0
-#define M_CAN_CLOCK_FREQ_EHL 100000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
+struct m_can_pci_config {
+ const struct can_bittiming_const *bit_timing;
+ const struct can_bittiming_const *data_timing;
+ unsigned int clock_freq;
+};
+
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -42,8 +47,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->base + offset;
- ioread32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -61,8 +71,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_pci_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
@@ -74,9 +89,40 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
+static const struct can_bittiming_const m_can_bittiming_const_ehl = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 64,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 512,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static const struct m_can_pci_config m_can_pci_ehl = {
+ .bit_timing = &m_can_bittiming_const_ehl,
+ .data_timing = &m_can_data_bittiming_const_ehl,
+ .clock_freq = 200000000,
+};
+
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
+ const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -104,6 +150,8 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
+ cfg = (const struct m_can_pci_config *)id->driver_data;
+
priv = cdev_to_priv(mcan_class);
priv->base = base;
@@ -115,7 +163,9 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->can.clock.freq = id->driver_data;
+ mcan_class->bit_timing = cfg->bit_timing;
+ mcan_class->data_timing = cfg->data_timing;
+ mcan_class->can.clock.freq = cfg->clock_freq;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
@@ -168,8 +218,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
- { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 92a54a5fd4c5..964c8a09226a 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
cf->data[i + 1] = data_reg >> 8;
}
- netif_receive_skb(skb);
rcv_pkts++;
stats->rx_packets++;
quota--;
stats->rx_bytes += cf->len;
+ netif_receive_skb(skb);
pch_fifo_thresh(priv, obj_num);
obj_num++;
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index e21b169c14c0..4642b6d4aaf7 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -234,7 +234,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
free_sja1000dev(dev);
}
- err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
DRV_NAME, card);
if (!err)
return 0;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 59ba7c7beec0..f7af1bf5ab46 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -28,10 +28,6 @@
#include "kvaser_usb.h"
-/* Forward declaration */
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
-
-#define CAN_USB_CLOCK 8000000
#define MAX_USBCAN_NET_DEVICES 2
/* Command header size */
@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;
#define CMD_LEAF_LOG_MESSAGE 106
+/* Leaf frequency options */
+#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
+#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
+#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
+#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
};
};
+static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
+ .name = "kvaser_usb",
+ .tseg1_min = KVASER_USB_TSEG1_MIN,
+ .tseg1_max = KVASER_USB_TSEG1_MAX,
+ .tseg2_min = KVASER_USB_TSEG2_MIN,
+ .tseg2_max = KVASER_USB_TSEG2_MAX,
+ .sjw_max = KVASER_USB_SJW_MAX,
+ .brp_min = KVASER_USB_BRP_MIN,
+ .brp_max = KVASER_USB_BRP_MAX,
+ .brp_inc = KVASER_USB_BRP_INC,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+ .clock = {
+ .freq = 8000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+ .clock = {
+ .freq = 16000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+ .clock = {
+ .freq = 24000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+ .clock = {
+ .freq = 32000000,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+};
+
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
return rc;
}
+static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
+ const struct leaf_cmd_softinfo *softinfo)
+{
+ u32 sw_options = le32_to_cpu(softinfo->sw_options);
+
+ dev->fw_version = le32_to_cpu(softinfo->fw_version);
+ dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
+
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
+ break;
+ }
+}
+
static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
- dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
- dev->max_tx_urbs =
- le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
+ kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
+ dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
break;
}
@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- dev->cfg = &kvaser_usb_leaf_dev_cfg;
card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
return 0;
}
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
-};
-
static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};
-
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
- .clock = {
- .freq = CAN_USB_CLOCK,
- },
- .timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
-};
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f00cbf5753b9..cd8462d1e27c 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -471,6 +471,12 @@ static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port)
u16 reg;
int err;
+ /* The 88e6250 family does not have the PHY detect bit. Instead,
+ * report whether the port is internal.
+ */
+ if (chip->info->family == MV88E6XXX_FAMILY_6250)
+ return port < chip->info->num_internal_phys;
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err) {
dev_err(chip->dev,
@@ -692,44 +698,48 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port *p;
- int err;
+ int err = 0;
p = &chip->ports[port];
- /* FIXME: is this the correct test? If we're in fixed mode on an
- * internal port, why should we process this any different from
- * PHY mode? On the other hand, the port may be automedia between
- * an internal PHY and the serdes...
- */
- if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port))
- return;
-
mv88e6xxx_reg_lock(chip);
- /* In inband mode, the link may come up at any time while the link
- * is not forced down. Force the link down while we reconfigure the
- * interface mode.
- */
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
- chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
- err = mv88e6xxx_port_config_interface(chip, port, state->interface);
- if (err && err != -EOPNOTSUPP)
- goto err_unlock;
-
- err = mv88e6xxx_serdes_pcs_config(chip, port, mode, state->interface,
- state->advertising);
- /* FIXME: we should restart negotiation if something changed - which
- * is something we get if we convert to using phylinks PCS operations.
- */
- if (err > 0)
- err = 0;
+ if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) {
+ /* In inband mode, the link may come up at any time while the
+ * link is not forced down. Force the link down while we
+ * reconfigure the interface mode.
+ */
+ if (mode == MLO_AN_INBAND &&
+ p->interface != state->interface &&
+ chip->info->ops->port_set_link)
+ chip->info->ops->port_set_link(chip, port,
+ LINK_FORCED_DOWN);
+
+ err = mv88e6xxx_port_config_interface(chip, port,
+ state->interface);
+ if (err && err != -EOPNOTSUPP)
+ goto err_unlock;
+
+ err = mv88e6xxx_serdes_pcs_config(chip, port, mode,
+ state->interface,
+ state->advertising);
+ /* FIXME: we should restart negotiation if something changed -
+ * which is something we get if we convert to using phylinks
+ * PCS operations.
+ */
+ if (err > 0)
+ err = 0;
+ }
/* Undo the forced down state above after completing configuration
- * irrespective of its state on entry, which allows the link to come up.
+ * irrespective of its state on entry, which allows the link to come
+ * up in the in-band case where there is no separate SERDES. Also
+ * ensure that the link can come up if the PPU is in use and we are
+ * in PHY mode (we treat the PPU as an effective in-band mechanism.)
*/
- if (mode == MLO_AN_INBAND && p->interface != state->interface &&
- chip->info->ops->port_set_link)
+ if (chip->info->ops->port_set_link &&
+ ((mode == MLO_AN_INBAND && p->interface != state->interface) ||
+ (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port))))
chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
p->interface = state->interface;
@@ -752,13 +762,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- /* Internal PHYs propagate their configuration directly to the MAC.
- * External PHYs depend on whether the PPU is enabled for this port.
+ /* Force the link down if we know the port may not be automatically
+ * updated by the switch or if we are using fixed-link mode.
*/
- if (((!mv88e6xxx_phy_is_internal(ds, port) &&
- !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) && ops->port_sync_link)
err = ops->port_sync_link(chip, port, mode, false);
+
+ if (!err && ops->port_set_speed_duplex)
+ err = ops->port_set_speed_duplex(chip, port, SPEED_UNFORCED,
+ DUPLEX_UNFORCED);
mv88e6xxx_reg_unlock(chip);
if (err)
@@ -779,11 +792,11 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
ops = chip->info->ops;
mv88e6xxx_reg_lock(chip);
- /* Internal PHYs propagate their configuration directly to the MAC.
- * External PHYs depend on whether the PPU is enabled for this port.
+ /* Configure and force the link up if we know that the port may not
+ * be automatically updated by the switch or if we are using fixed-link
+ * mode.
*/
- if ((!mv88e6xxx_phy_is_internal(ds, port) &&
- !mv88e6xxx_port_ppu_updates(chip, port)) ||
+ if (!mv88e6xxx_port_ppu_updates(chip, port) ||
mode == MLO_AN_FIXED) {
/* FIXME: for an automedia port, should we force the link
* down here - what if the link comes up due to "other" media
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index d9817b20ea64..ab41619a809b 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -283,7 +283,7 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
if (err)
return err;
- if (speed)
+ if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
@@ -516,7 +516,7 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
- if (speed)
+ if (speed != SPEED_UNFORCED)
dev_dbg(chip->dev, "p%d: Speed set to %d Mbps\n", port, speed);
else
dev_dbg(chip->dev, "p%d: Speed unforced\n", port);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 55273013bfb5..2b05ead515cd 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -830,7 +830,7 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool up)
{
u8 cmode = chip->ports[port].cmode;
- int err = 0;
+ int err;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
@@ -842,6 +842,9 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
case MV88E6XXX_PORT_STS_CMODE_RXAUI:
err = mv88e6390_serdes_power_10g(chip, lane, up);
break;
+ default:
+ err = -EINVAL;
+ break;
}
if (!err && up)
@@ -1541,6 +1544,9 @@ int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
case MV88E6393X_PORT_STS_CMODE_10GBASER:
err = mv88e6390_serdes_power_10g(chip, lane, on);
break;
+ default:
+ err = -EINVAL;
+ break;
}
if (err)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 327cc4654806..f1a05e7dc818 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -290,8 +290,11 @@ static int felix_setup_mmio_filtering(struct felix *felix)
}
}
- if (cpu < 0)
+ if (cpu < 0) {
+ kfree(tagging_rule);
+ kfree(redirect_rule);
return -EINVAL;
+ }
tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index d75d95a97dd9..993b2fb42961 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1430,16 +1430,19 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rxdescmem_busaddr = dma_res->start;
} else {
+ ret = -ENODEV;
goto err_free_netdev;
}
- if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
+ if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
dma_set_coherent_mask(priv->device,
DMA_BIT_MASK(priv->dmaops->dmamask));
- else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
+ } else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
- else
+ } else {
+ ret = -EIO;
goto err_free_netdev;
+ }
/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7d5d885d85d5..c72f0c7ff4aa 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < tx_ring->ring_size)) {
- tx_info = &tx_ring->tx_buffer_info[req_id];
- if (likely(tx_info->skb))
- return 0;
- }
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->skb))
+ return 0;
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < xdp_ring->ring_size)) {
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
- }
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
@@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL,
+ false);
break;
+ }
+ /* validate that the request id points to a valid skb */
rc = validate_tx_req_id(tx_ring, req_id);
if (rc)
break;
@@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
u16 *next_to_clean)
{
struct ena_rx_buffer *rx_info;
+ struct ena_adapter *adapter;
u16 len, req_id, buf = 0;
struct sk_buff *skb;
void *page_addr;
@@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) {
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Page is NULL\n");
+ adapter = rx_ring->adapter;
+ netif_err(adapter, rx_err, rx_ring->netdev,
+ "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+ adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ /* Make sure reset reason is set before triggering the reset */
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return NULL;
}
@@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(xdp_ring, req_id, NULL,
+ true);
break;
+ }
+ /* validate that the request id points to a valid xdp_frame */
rc = validate_xdp_req_id(xdp_ring, req_id);
if (rc)
break;
@@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!max_num_io_queues)) {
- dev_err(&pdev->dev, "The device doesn't have io queues\n");
- return -EFAULT;
- }
return max_num_io_queues;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 81b3756417ec..77e76c9efd32 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -366,6 +366,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
if (!buff->is_eop) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -389,6 +393,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 88d2ab748399..4579ddf9c427 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1913,15 +1913,12 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
if (IS_ERR(ag->mac_reset)) {
netif_err(ag, probe, ndev, "missing mac reset\n");
- err = PTR_ERR(ag->mac_reset);
- goto err_free;
+ return PTR_ERR(ag->mac_reset);
}
ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->mac_base)
+ return -ENOMEM;
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1926,7 @@ static int ag71xx_probe(struct platform_device *pdev)
if (err) {
netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
ndev->irq);
- goto err_free;
+ return err;
}
ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,10 +1954,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
- if (!ag->stop_desc) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->stop_desc)
+ return -ENOMEM;
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
@@ -1975,7 +1970,7 @@ static int ag71xx_probe(struct platform_device *pdev)
err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- goto err_free;
+ return err;
}
netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
@@ -1983,7 +1978,7 @@ static int ag71xx_probe(struct platform_device *pdev)
err = clk_prepare_enable(ag->clk_eth);
if (err) {
netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- goto err_free;
+ return err;
}
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2019,8 +2014,6 @@ err_mdio_remove:
ag71xx_mdio_remove(ag);
err_put_clk:
clk_disable_unprepare(ag->clk_eth);
-err_free:
- free_netdev(ndev);
return err;
}
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 7cc5213c575a..b07cb9bc5f2d 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -708,7 +708,9 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
enet->irq_tx = platform_get_irq_byname(pdev, "tx");
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
err = bcm4908_enet_dma_alloc(enet);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 40933bf5a710..60dde29974bf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1309,11 +1309,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct bcm_sysport_tx_ring *ring;
+ unsigned long flags, desc_flags;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 len_status, addr_lo;
unsigned int skb_len;
- unsigned long flags;
dma_addr_t mapping;
u16 queue;
int ret;
@@ -1373,8 +1373,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->desc_count--;
/* Ports are latched, so write upper address first */
+ spin_lock_irqsave(&priv->desc_lock, desc_flags);
tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
+ spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
@@ -2013,6 +2015,7 @@ static int bcm_sysport_open(struct net_device *dev)
}
/* Initialize both hardware and software ring */
+ spin_lock_init(&priv->desc_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
ret = bcm_sysport_init_tx_ring(priv, i);
if (ret) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 984f76e74b43..16b73bb9acc7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -711,6 +711,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
+ spinlock_t desc_lock;
struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5f259641437a..c888ddee1fc4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -589,9 +589,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
* Internal or external PHY with MDIO access
*/
phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
- if (!phydev) {
+ if (IS_ERR(phydev)) {
dev_err(kdev, "failed to register PHY device\n");
- return -ENODEV;
+ return PTR_ERR(phydev);
}
} else {
/*
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 2085844227fe..e54e70ebdd05 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -388,6 +388,8 @@ struct dpaa2_eth_ch_stats {
__u64 bytes_per_cdan;
};
+#define DPAA2_ETH_CH_STATS 7
+
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index adb8ce5306ee..3fdbf87dccb1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -278,7 +278,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
/* Per-channel stats */
for (k = 0; k < priv->num_channels; k++) {
ch_stats = &priv->channel[k]->stats;
- for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
+ for (j = 0; j < DPAA2_ETH_CH_STATS; j++)
*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
i += j;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 7b4961daa254..ed7301b69169 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -377,6 +377,9 @@ struct bufdesc_ex {
#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
+ (((X) == 1) ? FEC_ENET_RXF_1 : \
+ FEC_ENET_RXF_2))
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index bc418b910999..1b1f7f2a6130 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1480,7 +1480,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;
- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+ writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
/* Check for errors. */
status ^= BD_ENET_RX_LAST;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index d9baac0dbc7d..4c9d05c45c03 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1805,7 +1805,7 @@ static int fman_port_probe(struct platform_device *of_dev)
fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1813,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
__func__, port_node);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port_id = (u8)val;
port->dts_params.id = port_id;
@@ -1847,7 +1847,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else {
dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.type = port_type;
@@ -1861,7 +1861,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: incorrect qman-channel-id\n",
__func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.qman_channel_id = qman_channel_id;
}
@@ -1871,7 +1871,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: of_address_to_resource() failed\n",
__func__);
err = -ENOMEM;
- goto return_err;
+ goto put_device;
}
port->dts_params.fman = fman;
@@ -1896,6 +1896,8 @@ static int fman_port_probe(struct platform_device *of_dev)
return 0;
+put_device:
+ put_device(&fm_pdev->dev);
return_err:
of_node_put(port_node);
free_port:
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 0b68852379da..5b8b9bcf41a2 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -47,7 +47,6 @@ struct tgec_mdio_controller {
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) (x & 0xffff)
-#define MDIO_DATA_BSY BIT(31)
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 83ae56c310d3..326b56b49216 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -738,10 +738,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
* is not set to GqiRda, choose the queue format in a priority order:
* DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
*/
- if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
- dev_info(&priv->pdev->dev,
- "Driver is running with GQI RDA queue format.\n");
- } else if (dev_op_dqo_rda) {
+ if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
@@ -753,6 +750,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with GQI RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+ } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
if (dev_op_gqi_qpl)
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 88ca49cbc1e2..d57508bc4307 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -68,6 +68,9 @@ struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
} else {
skb = napi_alloc_skb(napi, len);
+
+ if (unlikely(!skb))
+ return NULL;
set_protocol = true;
}
__skb_put(skb, len);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 3f7a9a4c59d5..63f5abcc6bf4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -839,6 +839,8 @@ struct hnae3_handle {
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+ /* protects concurrent contention between debugfs commands */
+ struct mutex dbgfs_lock;
/* Network interface message level enabled bits */
u32 msg_enable;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 081295bff765..c381f8af67f0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1226,6 +1226,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
if (ret)
return ret;
+ mutex_lock(&handle->dbgfs_lock);
save_buf = &hns3_dbg_cmd[index].buf;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
@@ -1238,15 +1239,15 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
read_buf = *save_buf;
} else {
read_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);
- if (!read_buf)
- return -ENOMEM;
+ if (!read_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
/* save the buffer addr until the last read operation */
*save_buf = read_buf;
- }
- /* get data ready for the first time to read */
- if (!*ppos) {
+ /* get data ready for the first time to read */
ret = hns3_dbg_read_cmd(dbg_data, hns3_dbg_cmd[index].cmd,
read_buf, hns3_dbg_cmd[index].buf_len);
if (ret)
@@ -1255,8 +1256,10 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
size = simple_read_from_buffer(buffer, count, ppos, read_buf,
strlen(read_buf));
- if (size > 0)
+ if (size > 0) {
+ mutex_unlock(&handle->dbgfs_lock);
return size;
+ }
out:
/* free the buffer for the last read operation */
@@ -1265,6 +1268,7 @@ out:
*save_buf = NULL;
}
+ mutex_unlock(&handle->dbgfs_lock);
return ret;
}
@@ -1337,6 +1341,8 @@ int hns3_dbg_init(struct hnae3_handle *handle)
debugfs_create_dir(hns3_dbg_dentry[i].name,
handle->hnae3_dbgfs);
+ mutex_init(&handle->dbgfs_lock);
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
@@ -1363,6 +1369,7 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
return ret;
@@ -1378,6 +1385,7 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
hns3_dbg_cmd[i].buf = NULL;
}
+ mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index fdc66fae0960..c5ac6ecf36e1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -114,7 +114,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
- trace_hclge_vf_mbx_send(hdev, req);
+ if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+ trace_hclge_vf_mbx_send(hdev, req);
/* synchronous send */
if (need_resp) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index a78c398bf5b2..01e7d3c0b68e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/module.h>
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 291e61ac3e44..2c1b1da1220e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
+ if (vsi->type != I40E_VSI_MAIN &&
+ vsi->type != I40E_VSI_FDIR &&
+ vsi->type != I40E_VSI_VMDQ2) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d type %d descriptor rings not available\n",
+ vsi_seid, vsi->type);
+ return;
+ }
if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e118cf9265c7..61afc220fc6c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -99,6 +99,24 @@ MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ struct net_device *netdev, int delta)
+{
+ struct netdev_hw_addr *ha;
+
+ if (!f || !netdev)
+ return;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ ha->refcount += delta;
+ if (ha->refcount <= 0)
+ ha->refcount = 1;
+ break;
+ }
+ }
+}
+
/**
* i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
@@ -2036,6 +2054,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
hlist_for_each_entry_safe(new, h, from, hlist) {
/* We can simply free the wrapper structure */
hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
}
@@ -2383,6 +2402,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
&tmp_add_list,
&tmp_del_list,
vlan_filters);
+
+ hlist_for_each_entry(new, &tmp_add_list, hlist)
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
if (retval)
goto err_no_memory_locked;
@@ -2515,6 +2538,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (new->f->state == I40E_FILTER_NEW)
new->f->state = new->state;
hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -8717,6 +8741,27 @@ int i40e_open(struct net_device *netdev)
}
/**
+ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+ * @vsi: vsi structure
+ *
+ * This updates netdev's number of tx/rx queues
+ *
+ * Returns status of setting tx/rx queues
+ **/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+ int ret;
+
+ ret = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (ret)
+ return ret;
+
+ return netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+}
+
+/**
* i40e_vsi_open -
* @vsi: the VSI to open
*
@@ -8752,13 +8797,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev,
- vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(vsi->netdev,
- vsi->num_queue_pairs);
+ err = i40e_netif_set_realnum_tx_rx_queues(vsi);
if (err)
goto err_set_queues;
@@ -14151,6 +14190,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
+ ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (ret)
+ goto err_netdev;
ret = register_netdev(vsi->netdev);
if (ret)
goto err_netdev;
@@ -15451,8 +15493,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
- dev_info(&pdev->dev,
- "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+ dev_dbg(&pdev->dev,
+ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 80ae264c99ba..048f1678ab8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1877,17 +1877,19 @@ sriov_configure_out:
/***********************virtual channel routines******************/
/**
- * i40e_vc_send_msg_to_vf
+ * i40e_vc_send_msg_to_vf_ex
* @vf: pointer to the VF info
* @v_opcode: virtual channel opcode
* @v_retval: virtual channel return value
* @msg: pointer to the msg buffer
* @msglen: msg length
+ * @is_quiet: true for not printing unsuccessful return values, false otherwise
*
* send msg to VF
**/
-static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
- u32 v_retval, u8 *msg, u16 msglen)
+static int i40e_vc_send_msg_to_vf_ex(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen,
+ bool is_quiet)
{
struct i40e_pf *pf;
struct i40e_hw *hw;
@@ -1903,7 +1905,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* single place to detect unsuccessful return values */
- if (v_retval) {
+ if (v_retval && !is_quiet) {
vf->num_invalid_msgs++;
dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
vf->vf_id, v_opcode, v_retval);
@@ -1934,6 +1936,23 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
}
/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ return i40e_vc_send_msg_to_vf_ex(vf, v_opcode, v_retval,
+ msg, msglen, false);
+}
+
+/**
* i40e_vc_send_resp_to_vf
* @vf: pointer to the VF info
* @opcode: operation code
@@ -1949,6 +1968,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vf_state
+ * @vf: pointer to the VF info
+ * @state: VF state
+ *
+ * Called from a VF message to synchronize the service with a potential
+ * VF reset state
+ **/
+static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
+{
+ int i;
+
+ /* When handling some messages, it needs VF state to be set.
+ * It is possible that this flag is cleared during VF reset,
+ * so there is a need to wait until the end of the reset to
+ * handle the request message correctly.
+ */
+ for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
+ if (test_bit(state, &vf->vf_states))
+ return true;
+ usleep_range(10000, 20000);
+ }
+
+ return test_bit(state, &vf->vf_states);
+}
+
+/**
* i40e_vc_get_version_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2008,7 +2053,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2131,7 +2176,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -2219,7 +2264,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2368,7 +2413,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2540,7 +2585,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2590,7 +2635,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
u8 cur_pairs = vf->num_queue_pairs;
struct i40e_pf *pf = vf->pf;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
return -EINVAL;
if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2635,7 +2680,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
memset(&stats, 0, sizeof(struct i40e_eth_stats));
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2669,6 +2714,7 @@ error_param:
* i40e_check_vf_permission
* @vf: pointer to the VF info
* @al: MAC address list from virtchnl
+ * @is_quiet: set true for printing msg without opcode info, false otherwise
*
* Check that the given list of MAC addresses is allowed. Will return -EPERM
* if any address in the list is not valid. Checks the following conditions:
@@ -2683,13 +2729,15 @@ error_param:
* addresses might not be accurate.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
- struct virtchnl_ether_addr_list *al)
+ struct virtchnl_ether_addr_list *al,
+ bool *is_quiet)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
int mac2add_cnt = 0;
int i;
+ *is_quiet = false;
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
u8 *addr = al->list[i].addr;
@@ -2713,6 +2761,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ *is_quiet = true;
return -EPERM;
}
@@ -2749,10 +2798,11 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
+ bool is_quiet = false;
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2765,7 +2815,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
- ret = i40e_check_vf_permission(vf, al);
+ ret = i40e_check_vf_permission(vf, al, &is_quiet);
if (ret) {
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
@@ -2803,8 +2853,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret);
+ return i40e_vc_send_msg_to_vf_ex(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0, is_quiet);
}
/**
@@ -2824,7 +2874,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2968,7 +3018,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3088,9 +3138,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi = NULL;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
- (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3119,9 +3169,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u16 i;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
- (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3154,7 +3204,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int len = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3190,7 +3240,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3215,7 +3265,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3241,7 +3291,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3468,7 +3518,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3599,7 +3649,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -3708,7 +3758,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u64 speed = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3797,11 +3847,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
/* set this flag only after making sure all inputs are sane */
vf->adq_enabled = true;
- /* num_req_queues is set when user changes number of queues via ethtool
- * and this causes issue for default VSI(which depends on this variable)
- * when ADq is enabled, hence reset it.
- */
- vf->num_req_queues = 0;
/* reset the VF in order to allocate resources */
i40e_vc_reset_vf(vf, true);
@@ -3824,7 +3869,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 091e32c1bb46..49575a640a84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -18,6 +18,8 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
+#define I40E_VF_STATE_WAIT_COUNT 20
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 0cecaff38d04..461f5237a2f8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -615,23 +615,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_tx_count = clamp_t(u32, ring->tx_pending,
- IAVF_MIN_TXD,
- IAVF_MAX_TXD);
- new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (ring->tx_pending > IAVF_MAX_TXD ||
+ ring->tx_pending < IAVF_MIN_TXD ||
+ ring->rx_pending > IAVF_MAX_RXD ||
+ ring->rx_pending < IAVF_MIN_RXD) {
+ netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
+ ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
+ IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ return -EINVAL;
+ }
- new_rx_count = clamp_t(u32, ring->rx_pending,
- IAVF_MIN_RXD,
- IAVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_tx_count != ring->tx_pending)
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
+ new_tx_count);
+
+ new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
+ if (new_rx_count != ring->rx_pending)
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
+ new_rx_count);
/* if nothing to do return success */
if ((new_tx_count == adapter->tx_desc_count) &&
- (new_rx_count == adapter->rx_desc_count))
+ (new_rx_count == adapter->rx_desc_count)) {
+ netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
+ }
- adapter->tx_desc_count = new_tx_count;
- adapter->rx_desc_count = new_rx_count;
+ if (new_tx_count != adapter->tx_desc_count) {
+ netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
+ adapter->tx_desc_count, new_tx_count);
+ adapter->tx_desc_count = new_tx_count;
+ }
+
+ if (new_rx_count != adapter->rx_desc_count) {
+ netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
+ adapter->rx_desc_count, new_rx_count);
+ adapter->rx_desc_count = new_rx_count;
+ }
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 14934a7a13ef..e4439b095533 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2046,6 +2046,7 @@ static void iavf_watchdog_task(struct work_struct *work)
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
@@ -2076,16 +2077,14 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
- mutex_unlock(&adapter->crit_lock);
- return;
default:
+ mutex_unlock(&adapter->crit_lock);
return;
}
/* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
- iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -2248,6 +2247,7 @@ static void iavf_reset_task(struct work_struct *work)
}
pci_set_master(adapter->pdev);
+ pci_restore_msi_state(adapter->pdev);
if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
@@ -2708,8 +2708,11 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
- if (num_qps > IAVF_MAX_REQ_QUEUES)
+ if (num_qps > adapter->num_active_queues) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot support requested number of queues\n");
return -EINVAL;
+ }
ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
return ret;
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1efc635cc0f5..fafe020e46ee 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -6,6 +6,18 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
+ return !!rx_ring->xdp_buf;
+}
+
+static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
+{
+ rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
+ return !!rx_ring->rx_buf;
+}
+
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -492,8 +504,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
+ kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
+ if (!ice_alloc_rx_buf_zc(ring))
+ return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -508,6 +523,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ if (!ice_alloc_rx_buf(ring))
+ return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 7fdeb411b6df..3eb01731e496 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -97,6 +97,9 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
+ if (!bwcfg)
+ new_cfg->etscfg.tcbwtable[0] = 100;
+
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -167,15 +170,18 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
if (mode == pf->dcbx_cap)
return ICE_DCB_NO_HW_CHG;
- pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE) {
- if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
- return ICE_DCB_NO_HW_CHG;
+
+ /* DSCP configuration is not DCBx negotiated */
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
+
+ pf->dcbx_cap = mode;
+
+ if (mode & DCB_CAP_DCBX_VER_CEE)
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- } else {
+ else
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
- }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 38960bcc384c..b6e7f47c8c78 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1268,7 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num, TNL_ALL))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1652,7 +1652,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port, TNL_ALL) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index cbd8424631e3..4dca009bdd50 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -924,7 +924,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, &tnl_port))
+ if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
return ICE_ERR_DOES_NOT_EXIST;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 23cfcceb1536..6ad1c2559724 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1899,9 +1899,11 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
* ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
* @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
*/
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type)
{
bool res = false;
u16 i;
@@ -1909,7 +1911,8 @@ ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
mutex_lock(&hw->tnl_lock);
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+ (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
*port = hw->tnl.tbl[i].port;
res = true;
break;
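Note: ice_get_open_tunnel_port() now takes a tunnel type, with TNL_LAST (and TNL_ALL in the callers above) acting as a wildcard. A hedged sketch of a caller filtering for a specific tunnel type, based only on the signature shown in these hunks; the surrounding context is illustrative:

/* Sketch of a caller using the new signature; error handling is illustrative only. */
static int example_lookup_vxlan_port(struct ice_hw *hw, u16 *port)
{
	/* TNL_VXLAN restricts the search to VXLAN entries; TNL_ALL / TNL_LAST
	 * accept any open tunnel port, as in the fdir callers above.
	 */
	if (!ice_get_open_tunnel_port(hw, port, TNL_VXLAN))
		return -ENOENT;
	return 0;
}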
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 344c2637facd..a2863f38fd1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -33,7 +33,8 @@ enum ice_status
ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
unsigned long *bm, struct list_head *fv_list);
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4d1fc48c9744..73c61cdb036f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5881,6 +5881,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}
+ /* clear this now, and the first stats read will be used as baseline */
+ vsi->stat_offsets_loaded = false;
+
ice_service_task_schedule(pf);
return 0;
@@ -5927,14 +5930,15 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats st
/**
* ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
* @vsi: the VSI to be updated
+ * @vsi_stats: the stats struct to be updated
* @rings: rings to work on
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
- u16 count)
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
+ struct rtnl_link_stats64 *vsi_stats,
+ struct ice_tx_ring **rings, u16 count)
{
- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
@@ -5958,15 +5962,13 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
- struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
+ struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
- /* reset netdev stats */
- vsi_stats->tx_packets = 0;
- vsi_stats->tx_bytes = 0;
- vsi_stats->rx_packets = 0;
- vsi_stats->rx_bytes = 0;
+ vsi_stats = kzalloc(sizeof(*vsi_stats), GFP_ATOMIC);
+ if (!vsi_stats)
+ return;
/* reset non-netdev (extended) stats */
vsi->tx_restart = 0;
@@ -5978,7 +5980,8 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_lock();
/* update Tx rings counters */
- ice_update_vsi_tx_ring_stats(vsi, vsi->tx_rings, vsi->num_txq);
+ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings,
+ vsi->num_txq);
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
@@ -5993,10 +5996,17 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update XDP Tx rings counters */
if (ice_is_xdp_ena_vsi(vsi))
- ice_update_vsi_tx_ring_stats(vsi, vsi->xdp_rings,
+ ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings,
vsi->num_xdp_txq);
rcu_read_unlock();
+
+ vsi->net_stats.tx_packets = vsi_stats->tx_packets;
+ vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
+ vsi->net_stats.rx_packets = vsi_stats->rx_packets;
+ vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+
+ kfree(vsi_stats);
}
/**
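Note: the stats hunk above stops zeroing and accumulating directly into vsi->net_stats and instead accumulates into a temporary struct, copying the finished totals at the end so concurrent readers never observe partially reset counters. A small userspace sketch of the same snapshot-and-publish pattern (types simplified; GFP_ATOMIC and the per-ring u64_stats handling are omitted):

#include <stdlib.h>

struct stats { unsigned long long tx_packets, tx_bytes, rx_packets, rx_bytes; };

/* Accumulate into a private snapshot, then publish complete totals, instead of
 * zeroing the shared counters in place while readers may be running.
 */
static int update_stats(struct stats *shared, const struct stats *rings, int n)
{
	struct stats *snap = calloc(1, sizeof(*snap));
	int i;

	if (!snap)
		return -1;
	for (i = 0; i < n; i++) {
		snap->tx_packets += rings[i].tx_packets;
		snap->tx_bytes   += rings[i].tx_bytes;
		snap->rx_packets += rings[i].rx_packets;
		snap->rx_bytes   += rings[i].rx_bytes;
	}
	*shared = *snap;	/* publish the finished totals in one step */
	free(snap);
	return 0;
}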
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index bf7247c6f58e..442b031b0edc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -705,7 +705,7 @@ static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
scaled_ppm = -scaled_ppm;
}
- while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+ while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
/* handle overflow by scaling down the scaled_ppm and
* the divisor, losing some precision
*/
@@ -1540,19 +1540,16 @@ static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
if (err)
continue;
- /* Check if the timestamp is valid */
- if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
continue;
- /* clear the timestamp register, so that it won't show valid
- * again when re-used.
- */
- ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
-
/* The timestamp is valid, so we'll go ahead and clear this
* index and then send the timestamp up to the stack.
*/
spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
clear_bit(idx, tx->in_use);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index f71ad317d6c8..53c15fc9d996 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -55,15 +55,21 @@ struct ice_perout_channel {
* struct ice_tx_tstamp - Tracking for a single Tx timestamp
* @skb: pointer to the SKB for this timestamp request
* @start: jiffies when the timestamp was first requested
+ * @cached_tstamp: last read timestamp
*
* This structure tracks a single timestamp request. The SKB pointer is
* provided when initiating a request. The start time is used to ensure that
* we discard old requests that were not fulfilled within a 2 second time
* window.
+ * Timestamp values in the PHY are read only and do not get cleared except at
+ * hardware reset or when a new timestamp value is captured. The cached_tstamp
+ * field is used to detect the case where a new timestamp has not yet been
+ * captured, ensuring that we avoid sending stale timestamp data to the stack.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
unsigned long start;
+ u64 cached_tstamp;
};
/**
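Note: the ice_ptp hunks above stop clearing the PHY timestamp register and instead remember the last value read in cached_tstamp, skipping a completion when the register still holds the old value. A minimal sketch of that staleness check; the position of the valid bit here is an assumption for illustration, standing in for ICE_PTP_TS_VALID:

#include <stdbool.h>
#include <stdint.h>

#define TS_VALID 0x1ULL	/* assumed valid-bit position for this sketch */

struct ts_slot {
	uint64_t cached_tstamp;	/* last value read from the PHY register */
};

/* Return true only when the register holds a new, valid timestamp. */
static bool ts_is_fresh(struct ts_slot *slot, uint64_t raw)
{
	if (!(raw & TS_VALID))
		return false;		/* never captured */
	if (raw == slot->cached_tstamp)
		return false;		/* same value as last time: stale */
	slot->cached_tstamp = raw;	/* remember it for the next pass */
	return true;
}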
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 793f4a9fc2cd..183d93033890 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3796,10 +3796,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
+ * @tun_type: type of recipe tunnel
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
-static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+static u16
+ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
+ enum ice_sw_tunnel_type tun_type)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -3860,8 +3863,9 @@ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
}
/* If for "i"th recipe the found was never set to false
* then it means we found our match
+ * The tunnel type of the recipe also needs to be checked
*/
- if (found)
+ if (found && recp[i].tun_type == tun_type)
return i; /* Return the recipe ID */
}
}
@@ -4651,11 +4655,12 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts);
+ *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
if (*rid < ICE_MAX_NUM_RECIPES)
/* Success if found a recipe that match the existing criteria */
goto err_unroll;
+ rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
status = ice_add_sw_recipe(hw, rm, profiles);
if (status)
@@ -4958,11 +4963,13 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) {
case ICE_SW_TUN_VXLAN:
+ if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
+ return ICE_ERR_CFG;
+ break;
case ICE_SW_TUN_GENEVE:
- if (!ice_get_open_tunnel_port(hw, &open_port))
+ if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
return ICE_ERR_CFG;
break;
-
default:
/* Nothing needs to be done for this tunnel type */
return 0;
@@ -5555,7 +5562,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
return status;
- rid = ice_find_recp(hw, &lkup_exts);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e5d23feb6701..25cca5c4ae57 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -74,21 +74,13 @@ static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
}
-static enum ice_protocol_type
-ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
{
- if (inner) {
- switch (ip_proto) {
- case IPPROTO_UDP:
- return ICE_UDP_ILOS;
- }
- } else {
- switch (ip_proto) {
- case IPPROTO_TCP:
- return ICE_TCP_IL;
- case IPPROTO_UDP:
- return ICE_UDP_OF;
- }
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ return ICE_TCP_IL;
+ case IPPROTO_UDP:
+ return ICE_UDP_ILOS;
}
return 0;
@@ -191,8 +183,9 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
- list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+ if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
+ hdr->l3_key.ip_proto == IPPROTO_UDP) {
+ list[i].type = ICE_UDP_OF;
list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
i++;
@@ -317,7 +310,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
struct ice_tc_l4_hdr *l4_key, *l4_mask;
- list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+ list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
l4_key = &headers->l4_key;
l4_mask = &headers->l4_mask;
@@ -802,7 +795,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
headers->l3_mask.ttl = match.mask->ttl;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
+ fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index bc3ba19dc88f..dccf09eefc75 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -419,7 +419,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
}
rx_skip_free:
- memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+ if (rx_ring->xsk_pool)
+ memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+ else
+ memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -446,8 +449,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
- devm_kfree(rx_ring->dev, rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ } else {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ }
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +483,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
- GFP_KERNEL);
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
@@ -505,7 +512,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
return 0;
err:
- devm_kfree(dev, rx_ring->rx_buf);
+ kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c56dd1749903..b7b3bd4816f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -24,7 +24,6 @@
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
-#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Attempt to maximize the headroom available for incoming frames. We use a 2K
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 217ff5e9a6f1..6427e7ec93de 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1617,6 +1617,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vc_set_default_allowlist(vf);
ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VFs since it should be
* setup only when VF creates its first FDIR rule.
*/
@@ -1747,6 +1748,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
}
ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
/* clean VF control VSI when resetting VF since it should be setup
* only when VF creates its first FDIR rule.
*/
@@ -2021,6 +2023,10 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
if (ret)
goto err_unroll_sriov;
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
return 0;
err_unroll_sriov:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index bb9a80847298..c895351b25e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -12,6 +12,11 @@
#include "ice_txrx_lib.h"
#include "ice_lib.h"
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->xdp_buf[idx];
+}
+
/**
* ice_qp_reset_stats - Resets all stats for rings of given index
* @vsi: VSI that contains rings of interest
@@ -372,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
dma_addr_t dma;
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = &rx_ring->xdp_buf[ntu];
+ xdp = ice_xdp_buf(rx_ring, ntu);
nb_buffs = min_t(u16, count, rx_ring->count - ntu);
nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
@@ -390,14 +395,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
}
ntu += nb_buffs;
- if (ntu == rx_ring->count) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- xdp = rx_ring->xdp_buf;
+ if (ntu == rx_ring->count)
ntu = 0;
- }
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
ice_release_rx_desc(rx_ring, ntu);
return count == nb_buffs;
@@ -419,19 +419,18 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_buff *xdp = *xdp_arr;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -445,7 +444,6 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
skb_metadata_set(skb, metasize);
xsk_buff_free(xdp);
- *xdp_arr = NULL;
return skb;
}
@@ -507,7 +505,6 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -522,7 +519,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct xdp_buff **xdp;
+ struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -540,31 +537,35 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
+ xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size)
- break;
+ if (!size) {
+ xdp->data = NULL;
+ xdp->data_end = NULL;
+ xdp->data_hard_start = NULL;
+ xdp->data_meta = NULL;
+ goto construct_skb;
+ }
- xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
- xsk_buff_set_size(*xdp, size);
- xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(*xdp);
+ xsk_buff_free(xdp);
- *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
- cleaned_count++;
ice_bump_ntc(rx_ring);
continue;
}
-
+construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
@@ -572,7 +573,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
break;
}
- cleaned_count++;
ice_bump_ntc(rx_ring);
if (eth_skb_pad(skb)) {
@@ -594,8 +594,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- if (cleaned_count >= ICE_RX_BUF_WRITE)
- failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+ failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -811,15 +810,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!xdp)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
- *xdp = NULL;
+ xsk_buff_free(xdp);
}
}
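Note: the rewritten ice_xsk_clean_rx_ring() above walks only the descriptors between next_to_clean and next_to_use, wrapping the index with "& (count - 1)", which relies on the ring size being a power of two. A tiny standalone illustration of that wrap-around walk:

#include <stdio.h>

int main(void)
{
	unsigned int count = 8;		/* must be a power of two for the mask trick */
	unsigned int mask = count - 1;
	unsigned int ntc = 6, ntu = 2;	/* example: the in-use region wraps past the end */

	for (; ntc != ntu; ntc = (ntc + 1) & mask)
		printf("clean slot %u\n", ntc);	/* prints 6, 7, 0, 1 */
	return 0;
}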
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd54d3ef890b..446894dde182 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7648,6 +7648,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
struct vf_mac_filter *entry = NULL;
int ret = 0;
+ if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
+ !vf_data->trusted) {
+ dev_warn(&pdev->dev,
+ "VF %d requested MAC filter but is administratively denied\n",
+ vf);
+ return -EINVAL;
+ }
+ if (!is_valid_ether_addr(addr)) {
+ dev_warn(&pdev->dev,
+ "VF %d attempted to set invalid MAC filter\n",
+ vf);
+ return -EINVAL;
+ }
+
switch (info) {
case E1000_VF_MAC_FILTER_CLR:
/* remove all unicast MAC filters related to the current VF */
@@ -7661,20 +7675,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
}
break;
case E1000_VF_MAC_FILTER_ADD:
- if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
- !vf_data->trusted) {
- dev_warn(&pdev->dev,
- "VF %d requested MAC filter but is administratively denied\n",
- vf);
- return -EINVAL;
- }
- if (!is_valid_ether_addr(addr)) {
- dev_warn(&pdev->dev,
- "VF %d attempted to set invalid MAC filter\n",
- vf);
- return -EINVAL;
- }
-
/* try to find empty slot in the list */
list_for_each(pos, &adapter->vf_macs.l) {
entry = list_entry(pos, struct vf_mac_filter, l);
@@ -9254,7 +9254,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9297,24 @@ static int __maybe_unused igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- rtnl_lock();
+ if (!rpm)
+ rtnl_lock();
if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ if (!rpm)
+ rtnl_unlock();
return err;
}
+static int __maybe_unused igb_resume(struct device *dev)
+{
+ return __igb_resume(dev, false);
+}
+
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9333,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
static int __maybe_unused igb_runtime_resume(struct device *dev)
{
- return igb_resume(dev);
+ return __igb_resume(dev, true);
}
static void igb_shutdown(struct pci_dev *pdev)
@@ -9442,7 +9449,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
**/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
@@ -9482,7 +9489,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 74ccd622251a..4d988da68394 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2859,6 +2859,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_hw_init:
+ netif_napi_del(&adapter->rx_ring->napi);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
err_sw_init:
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b2ef9fde97b3..b6807e16eea9 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -636,7 +636,7 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
ltrv = rd32(IGC_LTRMAXV);
if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
- (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+ (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
wr32(IGC_LTRMAXV, ltrv);
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e448288ee26..d28a80a00953 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5467,6 +5467,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5510,6 +5513,9 @@ static irqreturn_t igc_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 30568e3544cd..4f9245aa79a1 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -768,7 +768,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
*/
static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
{
- return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+ if (!IS_ENABLED(CONFIG_X86_TSC))
+ return false;
+
+ /* FIXME: it was noticed that enabling support for PCIe PTM in
+ * some i225-V models could cause lockups when bringing the
+ * interface up/down. There should be no downsides to
+ * disabling crosstimestamping support for i225-V, as it
+ * doesn't have any PTP support. That way we gain some time
+ * while root causing the issue.
+ */
+ if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+ return false;
+
+ return pcie_ptm_enabled(adapter->pdev);
}
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0f9f022260d7..45e2ec4d264d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -5531,6 +5531,10 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
if (!speed && hw->mac.ops.get_link_capabilities) {
ret = hw->mac.ops.get_link_capabilities(hw, &speed,
&autoneg);
+ /* remove NBASE-T speeds from default autonegotiation
+ * to accommodate broken network switches in the field
+ * which cannot cope with advertised NBASE-T speeds
+ */
speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
IXGBE_LINK_SPEED_2_5GB_FULL);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 9724ffb16518..e4b50c7781ff 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
+ /* set MDIO speed before talking to the PHY in case it's the 1st time */
+ ixgbe_set_mdio_speed(hw);
+
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 0da09ea81980..80bfaf2fec92 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -71,6 +71,8 @@ struct xrx200_priv {
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;
+ u16 rx_buf_size;
+
struct net_device *net_dev;
struct device *dev;
@@ -97,6 +99,16 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
xrx200_pmac_w32(priv, val, offset);
}
+static int xrx200_max_frame_len(int mtu)
+{
+ return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+ return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -109,8 +121,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
- ETH_FCS_LEN);
+ ch->priv->rx_buf_size;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -158,21 +169,21 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
- int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
+ struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;
- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- len);
+ ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(priv->net_dev,
+ priv->rx_buf_size);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
- mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
+ mapping = dma_map_single(priv->dev, ch->skb[ch->dma.desc]->data,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
ret = -ENOMEM;
@@ -184,7 +195,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
return ret;
}
@@ -213,7 +224,7 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
skb->protocol = eth_type_trans(skb, net_dev);
netif_receive_skb(skb);
net_dev->stats.rx_packets++;
- net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
+ net_dev->stats.rx_bytes += len;
return 0;
}
@@ -356,6 +367,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
int ret = 0;
net_dev->mtu = new_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(new_mtu);
if (new_mtu <= old_mtu)
return ret;
@@ -375,6 +387,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
ret = xrx200_alloc_skb(ch_rx);
if (ret) {
net_dev->mtu = old_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(old_mtu);
break;
}
dev_kfree_skb_any(skb);
@@ -505,7 +518,8 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+ priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6480696c979b..6da8a595026b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2960,11 +2960,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
if (priv->percpu_pools) {
- err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq, 0);
if (err < 0)
goto err_free_dma;
- err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq, 0);
if (err < 0)
goto err_unregister_rxq_short;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 0ef68fdd1f26..61c20907315f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -5,6 +5,8 @@
*
*/
+#include <linux/module.h>
+
#include "otx2_common.h"
#include "otx2_ptp.h"
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4369a3ffad45..c687dc9aa973 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -54,12 +54,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->dev_id == dev_id && port->hw_id == hw_id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -68,12 +70,14 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->id == id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -764,23 +768,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
struct net_device *dev,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
+ struct netdev_notifier_info *info = ptr;
+ struct netdev_notifier_changeupper_info *cu_info;
struct prestera_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
struct net_device *upper;
- extack = netdev_notifier_info_to_extack(&info->info);
- upper = info->upper_dev;
+ extack = netdev_notifier_info_to_extack(info);
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ upper = cu_info->upper_dev;
if (!netif_is_bridge_master(upper) &&
!netif_is_lag_master(upper)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
- if (!info->linking)
+ if (!cu_info->linking)
break;
if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +797,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
}
if (netif_is_lag_master(upper) &&
- !prestera_lag_master_check(upper, info->upper_info, extack))
+ !prestera_lag_master_check(upper, cu_info->upper_info, extack))
return -EOPNOTSUPP;
if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +813,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
break;
case NETDEV_CHANGEUPPER:
+ upper = cu_info->upper_dev;
if (netif_is_bridge_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_bridge_port_join(upper, port,
extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_lag_port_add(port, upper);
else
prestera_lag_port_del(port);
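Note: the prestera lookup hunks above introduce a scratch iterator because, when list_for_each_entry() completes without a break, the loop cursor does not point at a valid element (it is a container_of() of the list head), so returning it directly on a miss is a bug. A sketch of the safe lookup pattern used above, kernel list API assumed:

/* Iterate with a scratch variable so the result is NULL unless a match was
 * found; the cursor after a completed list_for_each_entry() must not be used.
 */
struct item { int id; struct list_head list; };

static struct item *item_find(struct list_head *head, int id)
{
	struct item *found = NULL, *tmp;

	list_for_each_entry(tmp, head, list) {
		if (tmp->id == id) {
			found = tmp;
			break;
		}
	}
	return found;	/* NULL when no element matched */
}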
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f0ac6b0d9653..b47a0d3ef22f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -783,6 +783,8 @@ struct mlx5e_channel {
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
+ /* Sync between icosq recovery and XSK enable/disable. */
+ struct mutex icosq_recovery_lock;
};
struct mlx5e_ptp;
@@ -1014,9 +1016,6 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index d5b7110a4265..0107e4e73bb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -30,6 +30,8 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c);
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index d6c7c81690eb..7c9dd3a75f8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -66,7 +66,7 @@ mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
static inline void
mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
- struct sk_buff *skb) {}
+ struct sk_buff *skb) { napi_gro_receive(rq->cq.napi, skb); }
#endif /* CONFIG_MLX5_CLS_ACT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 74086eb556ae..2684e9da9f41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -62,6 +62,7 @@ static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
{
+ struct mlx5e_rq *xskrq = NULL;
struct mlx5_core_dev *mdev;
struct mlx5e_icosq *icosq;
struct net_device *dev;
@@ -70,7 +71,13 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
int err;
icosq = ctx;
+
+ mutex_lock(&icosq->channel->icosq_recovery_lock);
+
+ /* mlx5e_close_rq cancels this work before RQ and ICOSQ are killed. */
rq = &icosq->channel->rq;
+ if (test_bit(MLX5E_RQ_STATE_ENABLED, &icosq->channel->xskrq.state))
+ xskrq = &icosq->channel->xskrq;
mdev = icosq->channel->mdev;
dev = icosq->channel->netdev;
err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
@@ -84,6 +91,9 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_deactivate_rq(rq);
+ if (xskrq)
+ mlx5e_deactivate_rq(xskrq);
+
err = mlx5e_wait_for_icosq_flush(icosq);
if (err)
goto out;
@@ -97,15 +107,28 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
goto out;
mlx5e_reset_icosq_cc_pc(icosq);
+
mlx5e_free_rx_in_progress_descs(rq);
+ if (xskrq)
+ mlx5e_free_rx_in_progress_descs(xskrq);
+
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
mlx5e_activate_icosq(icosq);
- mlx5e_activate_rq(rq);
+ mlx5e_activate_rq(rq);
rq->stats->recover++;
+
+ if (xskrq) {
+ mlx5e_activate_rq(xskrq);
+ xskrq->stats->recover++;
+ }
+
+ mutex_unlock(&icosq->channel->icosq_recovery_lock);
+
return 0;
out:
clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mutex_unlock(&icosq->channel->icosq_recovery_lock);
return err;
}
@@ -706,6 +729,16 @@ void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
}
+void mlx5e_reporter_icosq_suspend_recovery(struct mlx5e_channel *c)
+{
+ mutex_lock(&c->icosq_recovery_lock);
+}
+
+void mlx5e_reporter_icosq_resume_recovery(struct mlx5e_channel *c)
+{
+ mutex_unlock(&c->icosq_recovery_lock);
+}
+
static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
.name = "rx",
.recover = mlx5e_rx_reporter_recover,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 4f4bc8726ec4..614cd9477600 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -466,6 +466,14 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
+static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ void *ctx)
+{
+ struct mlx5e_tx_timeout_ctx *to_ctx = ctx;
+
+ return mlx5e_tx_reporter_dump_sq(priv, fmsg, to_ctx->sq);
+}
+
static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
@@ -561,7 +569,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
to_ctx.sq = sq;
err_ctx.ctx = &to_ctx;
err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
- err_ctx.dump = mlx5e_tx_reporter_dump_sq;
+ err_ctx.dump = mlx5e_tx_reporter_timeout_dump;
snprintf(err_str, sizeof(err_str),
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u",
sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 538bc2419bd8..8526a5fbbf0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -4,6 +4,7 @@
#include "setup.h"
#include "en/params.h"
#include "en/txrx.h"
+#include "en/health.h"
/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
* change unexpectedly, and mlx5e has a minimum valid stride size for striding
@@ -170,7 +171,13 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
+ /* ICOSQ recovery deactivates RQs. Suspend the recovery to avoid
+ * activating XSKRQ in the middle of recovery.
+ */
+ mlx5e_reporter_icosq_suspend_recovery(c);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ mlx5e_reporter_icosq_resume_recovery(c);
+
/* TX queue is created active. */
spin_lock_bh(&c->async_icosq_lock);
@@ -180,6 +187,13 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
- mlx5e_deactivate_rq(&c->xskrq);
+ /* ICOSQ recovery may reactivate XSKRQ if clear_bit is called in the
+ * middle of recovery. Suspend the recovery to avoid it.
+ */
+ mlx5e_reporter_icosq_suspend_recovery(c);
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
+ mlx5e_reporter_icosq_resume_recovery(c);
+ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+
/* TX queue is disabled on close. */
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 65571593ec5c..41379844eee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1087,8 +1087,6 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
- if (rq->icosq)
- cancel_work_sync(&rq->icosq->recover_work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1216,9 +1214,20 @@ static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_icosq_cqe_err(sq);
}
+static void mlx5e_async_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+ recover_work);
+
+ /* Not implemented yet. */
+
+ netdev_warn(sq->channel->netdev, "async_icosq recovery is not implemented\n");
+}
+
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
- struct mlx5e_icosq *sq)
+ struct mlx5e_icosq *sq,
+ work_func_t recover_work_func)
{
void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
struct mlx5_core_dev *mdev = c->mdev;
@@ -1239,7 +1248,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+ INIT_WORK(&sq->recover_work, recover_work_func);
return 0;
@@ -1575,13 +1584,14 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_tx_err_cqe(sq);
}
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
+static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
+ work_func_t recover_work_func)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_icosq(c, param, sq);
+ err = mlx5e_alloc_icosq(c, param, sq, recover_work_func);
if (err)
return err;
@@ -1620,7 +1630,7 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
synchronize_net(); /* Sync with NAPI. */
}
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
struct mlx5e_channel *c = sq->channel;
@@ -2084,11 +2094,15 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
spin_lock_init(&c->async_icosq_lock);
- err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
+ err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq,
+ mlx5e_async_icosq_err_cqe_work);
if (err)
goto err_close_xdpsq_cq;
- err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+ mutex_init(&c->icosq_recovery_lock);
+
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq,
+ mlx5e_icosq_err_cqe_work);
if (err)
goto err_close_async_icosq;
@@ -2156,9 +2170,12 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_xdpsq(&c->xdpsq);
if (c->xdp)
mlx5e_close_xdpsq(&c->rq_xdpsq);
+ /* The same ICOSQ is used for UMRs for both RQ and XSKRQ. */
+ cancel_work_sync(&c->icosq.recover_work);
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
+ mutex_destroy(&c->icosq_recovery_lock);
mlx5e_close_icosq(&c->async_icosq);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -3724,12 +3741,11 @@ static int set_feature_arfs(struct net_device *netdev, bool enable)
static int mlx5e_handle_feature(struct net_device *netdev,
netdev_features_t *features,
- netdev_features_t wanted_features,
netdev_features_t feature,
mlx5e_feature_handler feature_handler)
{
- netdev_features_t changes = wanted_features ^ netdev->features;
- bool enable = !!(wanted_features & feature);
+ netdev_features_t changes = *features ^ netdev->features;
+ bool enable = !!(*features & feature);
int err;
if (!(changes & feature))
@@ -3737,22 +3753,22 @@ static int mlx5e_handle_feature(struct net_device *netdev,
err = feature_handler(netdev, enable);
if (err) {
+ MLX5E_SET_FEATURE(features, feature, !enable);
netdev_err(netdev, "%s feature %pNF failed, err %d\n",
enable ? "Enable" : "Disable", &feature, err);
return err;
}
- MLX5E_SET_FEATURE(features, feature, enable);
return 0;
}
int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
{
- netdev_features_t oper_features = netdev->features;
+ netdev_features_t oper_features = features;
int err = 0;
#define MLX5E_HANDLE_FEATURE(feature, handler) \
- mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
+ mlx5e_handle_feature(netdev, &oper_features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
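Note: mlx5e_handle_feature() above now works on the requested feature mask directly and, on failure, flips the offending bit back so the caller ends up with the set of features that were actually applied. A compact userspace sketch of that "start from the requested mask, back out what failed" flow (feature bits and the failing handler are invented for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;

/* Toy handler: pretend the second feature cannot be enabled. */
static int apply_feature(features_t feature, bool enable)
{
	return (enable && feature == (1 << 1)) ? -1 : 0;
}

static int handle_feature(features_t current, features_t *wanted, features_t feature)
{
	features_t changes = *wanted ^ current;
	bool enable = *wanted & feature;

	if (!(changes & feature))
		return 0;			/* nothing to do for this bit */
	if (apply_feature(feature, enable)) {
		*wanted ^= feature;		/* revert the bit we failed to apply */
		return -1;
	}
	return 0;
}

int main(void)
{
	features_t current = 0, wanted = (1 << 0) | (1 << 1);

	handle_feature(current, &wanted, 1 << 0);
	handle_feature(current, &wanted, 1 << 1);
	printf("effective features: 0x%llx\n", (unsigned long long)wanted);	/* 0x1 */
	return 0;
}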
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d45f4ae80c0..5e454a14428f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1196,21 +1196,16 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
goto offload_rule_0;
- if (flow_flag_test(flow, CT)) {
- mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
- return;
- }
-
- if (flow_flag_test(flow, SAMPLE)) {
- mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
- return;
- }
-
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else if (flow_flag_test(flow, SAMPLE))
+ mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+ else
offload_rule_0:
- mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
struct mlx5_flow_handle *
@@ -1445,7 +1440,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
metadata);
if (err)
- return err;
+ goto err_out;
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
}
@@ -1461,13 +1458,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule is only supported on chain 0");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
if (attr->dest_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule offload doesn't support goto action");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1475,8 +1474,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow_flag_test(flow, EGRESS) ?
MLX5E_TC_INT_PORT_EGRESS :
MLX5E_TC_INT_PORT_INGRESS);
- if (IS_ERR(int_port))
- return PTR_ERR(int_port);
+ if (IS_ERR(int_port)) {
+ err = PTR_ERR(int_port);
+ goto err_out;
+ }
esw_attr->int_port = int_port;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 97e5845b4cfd..d5e47630e284 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,6 +121,9 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 7df9c7f8d9c8..65083496f913 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1809,12 +1809,13 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
int mlx5_recover_device(struct mlx5_core_dev *dev)
{
- int ret = -EIO;
+ if (!mlx5_core_is_sf(dev)) {
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
+ return -EIO;
+ }
- mlx5_pci_disable_device(dev);
- if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
- ret = mlx5_load_one(dev);
- return ret;
+ return mlx5_load_one(dev);
}
static struct pci_driver mlx5_core_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 830444f927d4..bcee30f5de0a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -356,8 +356,8 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
new_irq = irq_pool_create_irq(pool, affinity);
if (IS_ERR(new_irq)) {
if (!least_loaded_irq) {
- mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
- cpumask_first(affinity));
+ mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
+ PTR_ERR(new_irq));
mutex_unlock(&pool->lock);
return new_irq;
}
@@ -398,7 +398,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
cpumask_copy(irq->mask, affinity);
if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
cpumask_empty(irq->mask))
- cpumask_set_cpu(0, irq->mask);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), irq->mask);
irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
mutex_unlock(&pool->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 8cbd36c82b3b..c54cc45f63dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
#include "dr_types.h"
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
@@ -72,9 +73,9 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
}
dmn->uar = mlx5_get_uars_page(dmn->mdev);
- if (!dmn->uar) {
+ if (IS_ERR(dmn->uar)) {
mlx5dr_err(dmn, "Couldn't allocate UAR\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(dmn->uar);
goto clean_pd;
}
@@ -163,9 +164,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- return dr_domain_query_vport(dmn,
- dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
- false,
+ return dr_domain_query_vport(dmn, 0, false,
&dmn->info.caps.vports.esw_manager_caps);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 217e3b351dfe..c34833ff1dde 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8494,7 +8494,8 @@ mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
u8 mac_profile;
int err;
- if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
+ !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e25798c610e..7f49042484bd 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -321,6 +321,8 @@ static int ks8851_probe_par(struct platform_device *pdev)
return ret;
netdev->irq = platform_get_irq(pdev, 0);
+ if (netdev->irq < 0)
+ return netdev->irq;
return ks8851_probe_common(netdev, dev, msg_enable);
}
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 34b971ff8ef8..078d6a5a0768 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -480,16 +480,16 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
if (err)
goto out;
- err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
- &hwc_wq->msg_buf);
- if (err)
- goto out;
-
hwc_wq->hwc = hwc;
hwc_wq->gdma_wq = queue;
hwc_wq->queue_depth = q_depth;
hwc_wq->hwc_cq = hwc_cq;
+ err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
+ &hwc_wq->msg_buf);
+ if (err)
+ goto out;
+
*hwc_wq_ptr = hwc_wq;
return 0;
out:
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index d7ac0307797f..34c0d2ddf9ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
return -ENOMEM;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
+ if (!cache) {
+ nfp_cpp_area_free(area);
return -ENOMEM;
+ }
cache->id = 0;
cache->addr = 0;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 63f8a8163b5f..2ff7be17e5af 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3135,7 +3135,7 @@ int ionic_lif_init(struct ionic_lif *lif)
return -EINVAL;
}
- lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
+ lif->dbid_inuse = bitmap_zalloc(lif->dbid_count, GFP_KERNEL);
if (!lif->dbid_inuse) {
dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 065e9004598e..999abcfe3310 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1643,6 +1643,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
+ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 1e6d72adfe43..71523d747e93 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3480,20 +3480,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- err = ql_wait_for_drvr_lock(qdev);
- if (err) {
- err = ql_adapter_initialize(qdev);
- if (err) {
- netdev_err(ndev, "Unable to initialize adapter\n");
- goto err_init;
- }
- netdev_err(ndev, "Releasing driver lock\n");
- ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
- } else {
+ if (!ql_wait_for_drvr_lock(qdev)) {
netdev_err(ndev, "Could not acquire driver lock\n");
+ err = -ENODEV;
goto err_lock;
}
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netdev_err(ndev, "Unable to initialize adapter\n");
+ goto err_init;
+ }
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
set_bit(QL_ADAPTER_UP, &qdev->flags);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 7160b42f51dd..d0111cb3b40e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -201,7 +201,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u16);
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
struct qlcnic_vf_info *, u16);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index dd03be3fc82a..42a44c97572a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -432,7 +432,7 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- int i, num_vlans;
+ int i, num_vlans, ret;
u16 *vlans;
if (sriov->allowed_vlans)
@@ -443,7 +443,9 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
sriov->num_allowed_vlans);
- qlcnic_sriov_alloc_vlans(adapter);
+ ret = qlcnic_sriov_alloc_vlans(adapter);
+ if (ret)
+ return ret;
if (!sriov->any_vlan)
return 0;
@@ -2154,7 +2156,7 @@ static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
return err;
}
-void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_vf_info *vf;
@@ -2164,7 +2166,11 @@ void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
vf = &sriov->vf_info[i];
vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ if (!vf->sriov_vlans)
+ return -ENOMEM;
}
+
+ return 0;
}
void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 447720b93e5a..e90fa97c0ae6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -597,7 +597,9 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
if (err)
goto del_flr_queue;
- qlcnic_sriov_alloc_vlans(adapter);
+ err = qlcnic_sriov_alloc_vlans(adapter);
+ if (err)
+ goto del_flr_queue;
return err;
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 6aa81229b68a..e77a5cb4e40d 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -609,6 +609,9 @@ static size_t ef100_update_stats(struct efx_nic *efx,
ef100_common_stat_mask(mask);
ef100_ethtool_stat_mask(mask);
+ if (!mc_stats)
+ return 0;
+
efx_nic_copy_stats(efx, mc_stats);
efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
stats, mc_stats, false);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 966f13e7475d..0c6cc2191369 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -110,6 +110,8 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
struct ef4_rx_page_state *state;
unsigned index;
+ if (unlikely(!rx_queue->page_ring))
+ return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
@@ -293,6 +295,9 @@ static void ef4_recycle_rx_pages(struct ef4_channel *channel,
{
struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
do {
ef4_recycle_rx_page(channel, rx_buf);
rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
@@ -728,7 +733,10 @@ static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index 68fc7d317693..633ca77a26fd 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -45,6 +45,8 @@ static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
unsigned int index;
struct page *page;
+ if (unlikely(!rx_queue->page_ring))
+ return NULL;
index = rx_queue->page_remove & rx_queue->page_ptr_mask;
page = rx_queue->page_ring[index];
if (page == NULL)
@@ -114,6 +116,9 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ if (unlikely(!rx_queue->page_ring))
+ return;
+
do {
efx_recycle_rx_page(channel, rx_buf);
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
@@ -150,7 +155,10 @@ static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
efx->rx_bufs_per_page);
rx_queue->page_ring = kcalloc(page_ring_size,
sizeof(*rx_queue->page_ring), GFP_KERNEL);
- rx_queue->page_ptr_mask = page_ring_size - 1;
+ if (!rx_queue->page_ring)
+ rx_queue->page_ptr_mask = 0;
+ else
+ rx_queue->page_ptr_mask = page_ring_size - 1;
}
static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 89381f796985..dd6f69ced4ee 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2072,6 +2072,11 @@ static int smc911x_drv_probe(struct platform_device *pdev)
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
+ goto release_both;
+ }
+
lp = netdev_priv(ndev);
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6924a6aacbd5..c469abc91fa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -33,6 +33,7 @@ struct rk_gmac_ops {
void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ bool regs_valid;
u32 regs[];
};
@@ -1092,6 +1093,7 @@ static const struct rk_gmac_ops rk3568_ops = {
.set_to_rmii = rk3568_set_to_rmii,
.set_rgmii_speed = rk3568_set_gmac_speed,
.set_rmii_speed = rk3568_set_gmac_speed,
+ .regs_valid = true,
.regs = {
0xfe2a0000, /* gmac0 */
0xfe010000, /* gmac1 */
@@ -1383,7 +1385,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
* to be distinguished.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res) {
+ if (res && ops->regs_valid) {
int i = 0;
while (ops->regs[i]) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 66fc8be34bb7..e2e0f977875d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -26,7 +26,7 @@
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
-#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5f129733aabd..873b9e3e5da2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -172,6 +172,19 @@ struct stmmac_flow_entry {
int is_l4;
};
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+ STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+ unsigned long cookie;
+ int in_use;
+ int type;
+ int tc;
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -289,6 +302,10 @@ struct stmmac_priv {
struct stmmac_tc_entry *tc_entries;
unsigned int flow_entries_max;
struct stmmac_flow_entry *flow_entries;
+ unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_total;
+ struct stmmac_rfs_entry *rfs_entries;
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da8306f60730..8ded4be08b00 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1461,16 +1461,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
return -ENOMEM;
buf->page_offset = stmmac_rx_offset(priv);
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -4482,6 +4486,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
@@ -4494,13 +4502,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
p = rx_q->dma_rx + entry;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
break;
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 580cc035536b..be9b58b2abf9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -102,7 +102,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
time.tv_nsec = priv->plat->est->btr_reserve[0];
time.tv_sec = priv->plat->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
priv->plat->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 1c4ea0b1b845..d0a2b289f460 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -232,11 +232,33 @@ static int tc_setup_cls_u32(struct stmmac_priv *priv,
}
}
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+ int i;
+
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+
+ for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+ priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+ priv->rfs_entries = devm_kcalloc(priv->device,
+ priv->rfs_entries_total,
+ sizeof(*priv->rfs_entries),
+ GFP_KERNEL);
+ if (!priv->rfs_entries)
+ return -ENOMEM;
+
+ dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+ priv->rfs_entries_total);
+
+ return 0;
+}
+
static int tc_init(struct stmmac_priv *priv)
{
struct dma_features *dma_cap = &priv->dma_cap;
unsigned int count;
- int i;
+ int ret, i;
if (dma_cap->l3l4fnum) {
priv->flow_entries_max = dma_cap->l3l4fnum;
@@ -250,10 +272,14 @@ static int tc_init(struct stmmac_priv *priv)
for (i = 0; i < priv->flow_entries_max; i++)
priv->flow_entries[i].idx = i;
- dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
+ dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
priv->flow_entries_max);
}
+ ret = tc_rfs_init(priv);
+ if (ret)
+ return -ENOMEM;
+
if (!priv->plat->fpe_cfg) {
priv->plat->fpe_cfg = devm_kzalloc(priv->device,
sizeof(*priv->plat->fpe_cfg),
@@ -607,16 +633,45 @@ static int tc_del_flow(struct stmmac_priv *priv,
return ret;
}
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ bool get_free)
+{
+ int i;
+
+ for (i = 0; i < priv->rfs_entries_total; i++) {
+ struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+ if (entry->cookie == cls->cookie)
+ return entry;
+ if (get_free && entry->in_use == false)
+ return entry;
+ }
+
+ return NULL;
+}
+
#define VLAN_PRIO_FULL_MASK (0x07)
static int tc_add_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
struct flow_match_vlan match;
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+ return -ENOENT;
+
/* Nothing to do here */
if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
return -EINVAL;
@@ -638,6 +693,12 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
prio = BIT(match.key->vlan_priority);
stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->type = STMMAC_RFS_T_VLAN;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
}
return 0;
@@ -646,20 +707,19 @@ static int tc_add_vlan_flow(struct stmmac_priv *priv,
static int tc_del_vlan_flow(struct stmmac_priv *priv,
struct flow_cls_offload *cls)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
- struct flow_dissector *dissector = rule->match.dissector;
- int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
- /* Nothing to do here */
- if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
- return -EINVAL;
+ if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+ return -ENOENT;
- if (tc < 0) {
- netdev_err(priv->dev, "Invalid traffic class\n");
- return -EINVAL;
- }
+ stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->type = 0;
- stmmac_rx_queue_prio(priv, priv->hw, 0, tc);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index c092cb61416a..ffbbda8f4d41 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1844,13 +1844,14 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (ret < 0) {
dev_err(dev, "%pOF error reading port_id %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
if (!port_id || port_id > common->port_num) {
dev_err(dev, "%pOF has invalid port_id %u %s\n",
port_np, port_id, port_np->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto of_node_put;
}
port = am65_common_get_port(common, port_id);
@@ -1866,8 +1867,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
- if (IS_ERR(port->slave.mac_sl))
- return PTR_ERR(port->slave.mac_sl);
+ if (IS_ERR(port->slave.mac_sl)) {
+ ret = PTR_ERR(port->slave.mac_sl);
+ goto of_node_put;
+ }
port->disabled = !of_device_is_available(port_np);
if (port->disabled) {
@@ -1880,7 +1883,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
ret = PTR_ERR(port->slave.ifphy);
dev_err(dev, "%pOF error retrieving port phy: %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
port->slave.mac_only =
@@ -1889,10 +1892,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
- if (ret)
- return dev_err_probe(dev, ret,
+ if (ret) {
+ ret = dev_err_probe(dev, ret,
"failed to register fixed-link phy %pOF\n",
port_np);
+ goto of_node_put;
+ }
port->slave.phy_node = of_node_get(port_np);
} else {
port->slave.phy_node =
@@ -1902,14 +1907,15 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
if (!port->slave.phy_node) {
dev_err(dev,
"slave[%d] no phy found\n", port_id);
- return -ENODEV;
+ ret = -ENODEV;
+ goto of_node_put;
}
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
port_np, ret);
- return ret;
+ goto of_node_put;
}
ret = of_get_mac_address(port_np, port->slave.mac_addr);
@@ -1932,6 +1938,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
return 0;
+
+of_node_put:
+ of_node_put(port_np);
+ of_node_put(node);
+ return ret;
}
static void am65_cpsw_pcpu_stats_free(void *data)
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index b06c17ac8d4e..ebd287039a54 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1262,6 +1262,11 @@ static int fjes_probe(struct platform_device *plat_dev)
hw->hw_res.start = res->start;
hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+ if (hw->hw_res.irq < 0) {
+ err = hw->hw_res.irq;
+ goto err_free_control_wq;
+ }
+
err = fjes_hw_init(&adapter->hw);
if (err)
goto err_free_control_wq;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 7da2bb8a443c..edde9c3ae12b 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -794,14 +794,14 @@ static void mkiss_close(struct tty_struct *tty)
*/
netif_stop_queue(ax->dev);
- ax->tty = NULL;
-
unregister_netdev(ax->dev);
/* Free all AX25 frame buffers after unreg. */
kfree(ax->rbuff);
kfree(ax->xbuff);
+ ax->tty = NULL;
+
free_netdev(ax->dev);
}
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 23ee0b14cbfa..2f5e7b31032a 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -93,7 +93,9 @@ static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
ret = usb_control_msg(usb_dev, pipe, request, requesttype,
value, index, data, size, timeout);
- if (ret < 0) {
+ if (ret < size) {
+ ret = ret < 0 ? ret : -ENODATA;
+
atusb->err = ret;
dev_err(&usb_dev->dev,
"%s: req 0x%02x val 0x%x idx 0x%x, error %d\n",
@@ -861,9 +863,9 @@ static int atusb_get_and_show_build(struct atusb *atusb)
if (!build)
return -ENOMEM;
- ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
- ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
- build, ATUSB_BUILD_SIZE, 1000);
+ /* We cannot call atusb_control_msg() here, since this request may read various length data */
+ ret = usb_control_msg(atusb->usb_dev, usb_rcvctrlpipe(usb_dev, 0), ATUSB_BUILD,
+ ATUSB_REQ_FROM_DEV, 0, 0, build, ATUSB_BUILD_SIZE, 1000);
if (ret >= 0) {
build[ret] = 0;
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 90aafb56f140..a43820212932 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -514,6 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
goto err_free;
key = nmap->entry[i].key;
*key = i;
+ memset(nmap->entry[i].value, 0, offmap->map.value_size);
}
}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 0ab6a40be611..a6a713b31aad 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -77,7 +77,10 @@ static int nsim_set_ringparam(struct net_device *dev,
{
struct netdevsim *ns = netdev_priv(dev);
- memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring));
+ ns->ethtool.ring.rx_pending = ring->rx_pending;
+ ns->ethtool.ring.rx_jumbo_pending = ring->rx_jumbo_pending;
+ ns->ethtool.ring.rx_mini_pending = ring->rx_mini_pending;
+ ns->ethtool.ring.tx_pending = ring->tx_pending;
return 0;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c204067f1890..c198722e4871 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -460,6 +460,9 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus,
if (addr == mdiodev->addr) {
device_set_node(dev, of_fwnode_handle(child));
+ /* The refcount on "child" is passed to the mdio
+ * device. Do _not_ use of_node_put(child) here.
+ */
return;
}
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5904546acae6..ea82ea5660e7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1388,6 +1388,7 @@ EXPORT_SYMBOL_GPL(phylink_stop);
* @mac_wol: true if the MAC needs to receive packets for Wake-on-Lan
*
* Handle a network device suspend event. There are several cases:
+ *
* - If Wake-on-Lan is not active, we can bring down the link between
* the MAC and PHY by calling phylink_stop().
* - If Wake-on-Lan is active, and being handled only by the PHY, we
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1572878c3403..45a67e72a02c 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -209,6 +209,9 @@ struct tun_struct {
struct tun_prog __rcu *steering_prog;
struct tun_prog __rcu *filter_prog;
struct ethtool_link_ksettings link_ksettings;
+ /* init args */
+ struct file *file;
+ struct ifreq *ifr;
};
struct veth {
@@ -216,6 +219,9 @@ struct veth {
__be16 h_vlan_TCI;
};
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -953,6 +959,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
static const struct ethtool_ops tun_ethtool_ops;
+static int tun_net_init(struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+ struct ifreq *ifr = tun->ifr;
+ int err;
+
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ spin_lock_init(&tun->lock);
+
+ err = security_tun_dev_alloc_security(&tun->security);
+ if (err < 0) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
+ tun_flow_init(tun);
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ dev->features = dev->hw_features | NETIF_F_LLTX;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ tun->flags = (tun->flags & ~TUN_FEATURES) |
+ (ifr->ifr_flags & TUN_FEATURES);
+
+ INIT_LIST_HEAD(&tun->disabled);
+ err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+ if (err < 0) {
+ tun_flow_uninit(tun);
+ security_tun_dev_free_security(tun->security);
+ free_percpu(dev->tstats);
+ return err;
+ }
+ return 0;
+}
+
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
@@ -1169,6 +1218,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
}
static const struct net_device_ops tun_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1252,6 +1302,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
}
static const struct net_device_ops tap_netdev_ops = {
+ .ndo_init = tun_net_init,
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
@@ -1292,7 +1343,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
#define MAX_MTU 65535
/* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -2206,11 +2257,6 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
free_percpu(dev->tstats);
- /* We clear tstats so that tun_set_iff() can tell if
- * tun_free_netdev() has been called from register_netdevice().
- */
- dev->tstats = NULL;
-
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
__tun_set_ebpf(tun, &tun->steering_prog, NULL);
@@ -2716,41 +2762,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->rx_batched = 0;
RCU_INIT_POINTER(tun->steering_prog, NULL);
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats) {
- err = -ENOMEM;
- goto err_free_dev;
- }
-
- spin_lock_init(&tun->lock);
-
- err = security_tun_dev_alloc_security(&tun->security);
- if (err < 0)
- goto err_free_stat;
-
- tun_net_init(dev);
- tun_flow_init(tun);
+ tun->ifr = ifr;
+ tun->file = file;
- dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features | NETIF_F_LLTX;
- dev->vlan_features = dev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
-
- tun->flags = (tun->flags & ~TUN_FEATURES) |
- (ifr->ifr_flags & TUN_FEATURES);
-
- INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
- ifr->ifr_flags & IFF_NAPI_FRAGS, false);
- if (err < 0)
- goto err_free_flow;
+ tun_net_initialize(dev);
err = register_netdevice(tun->dev);
- if (err < 0)
- goto err_detach;
+ if (err < 0) {
+ free_netdev(dev);
+ return err;
+ }
/* free_netdev() won't check refcnt, to avoid race
* with dev_put() we need publish tun after registration.
*/
@@ -2767,24 +2788,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
strcpy(ifr->ifr_name, tun->dev->name);
return 0;
-
-err_detach:
- tun_detach_all(dev);
- /* We are here because register_netdevice() has failed.
- * If register_netdevice() already called tun_free_netdev()
- * while dealing with the error, dev->stats has been cleared.
- */
- if (!dev->tstats)
- goto err_free_dev;
-
-err_free_flow:
- tun_flow_uninit(tun);
- security_tun_dev_free_security(tun->security);
-err_free_stat:
- free_percpu(dev->tstats);
-err_free_dev:
- free_netdev(dev);
- return err;
}
static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 42ba4af68090..71682970be58 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -9,6 +9,8 @@
#include "asix.h"
+#define AX_HOST_EN_RETRIES 30
+
int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
u16 size, void *data, int in_pm)
{
@@ -68,7 +70,7 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
int i, ret;
u8 smsr;
- for (i = 0; i < 30; ++i) {
+ for (i = 0; i < AX_HOST_EN_RETRIES; ++i) {
ret = asix_set_sw_mii(dev, in_pm);
if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
@@ -77,13 +79,13 @@ static int asix_check_host_enable(struct usbnet *dev, int in_pm)
0, 0, 1, &smsr, in_pm);
if (ret == -ENODEV)
break;
- else if (ret < 0)
+ else if (ret < sizeof(smsr))
continue;
else if (smsr & AX_HOST_EN)
break;
}
- return ret;
+ return i >= AX_HOST_EN_RETRIES ? -ETIMEDOUT : ret;
}
static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 24753a4da7e6..e303b522efb5 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
+ if (max == 0)
+ max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8cd265fc1fd9..075f8abde5cd 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -76,6 +76,8 @@
#define LAN7801_USB_PRODUCT_ID (0x7801)
#define LAN78XX_EEPROM_MAGIC (0x78A5)
#define LAN78XX_OTP_MAGIC (0x78F3)
+#define AT29M2AF_USB_VENDOR_ID (0x07C9)
+#define AT29M2AF_USB_PRODUCT_ID (0x0012)
#define MII_READ 1
#define MII_WRITE 0
@@ -4734,6 +4736,10 @@ static const struct usb_device_id products[] = {
/* LAN7801 USB Gigabit Ethernet Device */
USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
},
+ {
+ /* ATM2-AF USB Gigabit Ethernet Device */
+ USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
+ },
{},
};
MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index c4cd40b090fd..feb247e355f7 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -493,11 +493,11 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
rx_status = buf[count - 2];
- if (rx_status & 0x1e) {
+ if (rx_status & 0x1c) {
netif_dbg(pegasus, rx_err, net,
"RX packet error %x\n", rx_status);
net->stats.rx_errors++;
- if (rx_status & 0x06) /* long or runt */
+ if (rx_status & 0x04) /* runt */
net->stats.rx_length_errors++;
if (rx_status & 0x08)
net->stats.rx_crc_errors++;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86b814e99224..f510e8219470 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f9877a3e83ac..ef6010a3d33a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "12"
/* Information for net */
-#define NET_VERSION "11"
+#define NET_VERSION "12"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -4016,6 +4016,11 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
ocp_write_word(tp, type, PLA_BP_BA, 0);
}
+static inline void rtl_reset_ocp_base(struct r8152 *tp)
+{
+ tp->ocp_base = -1;
+}
+
static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
{
u16 data, check;
@@ -4087,8 +4092,6 @@ static int rtl_post_ram_code(struct r8152 *tp, u16 key_addr, bool wait)
rtl_phy_patch_request(tp, false, wait);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
-
return 0;
}
@@ -4800,6 +4803,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
u32 len;
u8 *data;
+ rtl_reset_ocp_base(tp);
+
if (sram_read(tp, SRAM_GPHY_FW_VER) >= __le16_to_cpu(phy->version)) {
dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
return;
@@ -4845,7 +4850,8 @@ static void rtl_ram_code_speed_up(struct r8152 *tp, struct fw_phy_speed_up *phy,
}
}
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, tp->ocp_base);
+ rtl_reset_ocp_base(tp);
+
rtl_phy_patch_request(tp, false, wait);
if (sram_read(tp, SRAM_GPHY_FW_VER) == __le16_to_cpu(phy->version))
@@ -4861,6 +4867,8 @@ static int rtl8152_fw_phy_ver(struct r8152 *tp, struct fw_phy_ver *phy_ver)
ver_addr = __le16_to_cpu(phy_ver->ver.addr);
ver = __le16_to_cpu(phy_ver->ver.data);
+ rtl_reset_ocp_base(tp);
+
if (sram_read(tp, ver_addr) >= ver) {
dev_dbg(&tp->intf->dev, "PHY firmware has been the newest\n");
return 0;
@@ -4877,6 +4885,8 @@ static void rtl8152_fw_phy_fixup(struct r8152 *tp, struct fw_phy_fixup *fix)
{
u16 addr, data;
+ rtl_reset_ocp_base(tp);
+
addr = __le16_to_cpu(fix->setting.addr);
data = ocp_reg_read(tp, addr);
@@ -4908,6 +4918,8 @@ static void rtl8152_fw_phy_union_apply(struct r8152 *tp, struct fw_phy_union *ph
u32 length;
int i, num;
+ rtl_reset_ocp_base(tp);
+
num = phy->pre_num;
for (i = 0; i < num; i++)
sram_write(tp, __le16_to_cpu(phy->pre_set[i].addr),
@@ -4938,6 +4950,8 @@ static void rtl8152_fw_phy_nc_apply(struct r8152 *tp, struct fw_phy_nc *phy)
u32 length, i, num;
__le16 *data;
+ rtl_reset_ocp_base(tp);
+
mode_reg = __le16_to_cpu(phy->mode_reg);
sram_write(tp, mode_reg, __le16_to_cpu(phy->mode_pre));
sram_write(tp, __le16_to_cpu(phy->ba_reg),
@@ -5107,6 +5121,7 @@ post_fw:
if (rtl_fw->post_fw)
rtl_fw->post_fw(tp);
+ rtl_reset_ocp_base(tp);
strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
}
@@ -6584,6 +6599,21 @@ static bool rtl8153_in_nway(struct r8152 *tp)
return true;
}
+static void r8156_mdio_force_mode(struct r8152 *tp)
+{
+ u16 data;
+
+ /* Select force mode through 0xa5b4 bit 15
+ * 0: MDIO force mode
+ * 1: MMD force mode
+ */
+ data = ocp_reg_read(tp, 0xa5b4);
+ if (data & BIT(15)) {
+ data &= ~BIT(15);
+ ocp_reg_write(tp, 0xa5b4, data);
+ }
+}
+
static void set_carrier(struct r8152 *tp)
{
struct net_device *netdev = tp->netdev;
@@ -8016,6 +8046,7 @@ static void r8156_init(struct r8152 *tp)
ocp_data |= ACT_ODMA;
ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_CONFIG, ocp_data);
+ r8156_mdio_force_mode(tp);
rtl_tally_reset(tp);
tp->coalesce = 15000; /* 15 us */
@@ -8145,6 +8176,7 @@ static void r8156b_init(struct r8152 *tp)
ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
+ r8156_mdio_force_mode(tp);
rtl_tally_reset(tp);
tp->coalesce = 15000; /* 15 us */
@@ -8467,6 +8499,8 @@ static int rtl8152_resume(struct usb_interface *intf)
mutex_lock(&tp->control);
+ rtl_reset_ocp_base(tp);
+
if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
ret = rtl8152_runtime_resume(tp);
else
@@ -8482,6 +8516,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ rtl_reset_ocp_base(tp);
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
set_ethernet_addr(tp, true);
@@ -9603,9 +9638,12 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (udev->parent &&
- le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
- tp->lenovo_macpassthru = 1;
+ if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+ switch (le16_to_cpu(udev->descriptor.idProduct)) {
+ case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
+ case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
+ tp->lenovo_macpassthru = 1;
+ }
}
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 4a84f90e377c..247f58cb0f84 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -609,6 +609,11 @@ static const struct usb_device_id products [] = {
USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_poll_status_info,
}, {
+ /* Hytera Communications DMR radios' "Radio to PC Network" */
+ USB_VENDOR_AND_INTERFACE_INFO(0x238b,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long)&rndis_info,
+}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_info,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 50eb43e5bf45..2acdb8ad6c71 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -879,8 +879,12 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
stats->xdp_bytes += skb->len;
skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
- if (skb)
- napi_gro_receive(&rq->xdp_napi, skb);
+ if (skb) {
+ if (skb_shared(skb) || skb_unclone(skb, GFP_ATOMIC))
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&rq->xdp_napi, skb);
+ }
}
done++;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 55db6a336f7e..b107835242ad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -733,7 +733,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
pr_debug("%s: rx error: len %u exceeds max size %d\n",
dev->name, len, GOOD_PACKET_LEN);
dev->stats.rx_length_errors++;
- goto err_len;
+ goto err;
}
if (likely(!vi->xdp_enabled)) {
@@ -825,10 +825,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
skip_xdp:
skb = build_skb(buf, buflen);
- if (!skb) {
- put_page(page);
+ if (!skb)
goto err;
- }
skb_reserve(skb, headroom - delta);
skb_put(skb, len);
if (!xdp_prog) {
@@ -839,13 +837,12 @@ skip_xdp:
if (metasize)
skb_metadata_set(skb, metasize);
-err:
return skb;
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
-err_len:
+err:
stats->drops++;
put_page(page);
xdp_xmit:
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 14fae317bc70..fd407c0e2856 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int i, nvec;
+ int i, nvec, nvec_allocated;
nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
1 : adapter->num_tx_queues;
@@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
for (i = 0; i < nvec; i++)
adapter->intr.msix_entries[i].entry = i;
- nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
- if (nvec < 0)
+ nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec_allocated < 0)
goto msix_err;
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
- if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
+ nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
@@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
}
}
- adapter->intr.num_intrs = nvec;
+ adapter->intr.num_intrs = nvec_allocated;
return;
msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
- "Limiting #rx queues to 1, try MSI.\n", nvec);
+ "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
adapter->intr.type = VMXNET3_IT_MSI;
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 131c745dc701..b2242a082431 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -770,8 +770,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
- vrf_nf_set_untracked(skb);
-
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
@@ -792,6 +790,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;
+ vrf_nf_set_untracked(skb);
+
if (qdisc_tx_is_default(vrf_dev) ||
IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
@@ -1000,8 +1000,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
- vrf_nf_set_untracked(skb);
-
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
@@ -1023,6 +1021,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
+ vrf_nf_set_untracked(skb);
+
if (qdisc_tx_is_default(vrf_dev) ||
IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 26c7ae242db6..49c0b1ad40a0 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -533,7 +533,11 @@ static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_RESUME:
- ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+ /* Do force MHI resume as some devices like QCA6390, WCN6855
+ * are not in M3 state but they are functional. So just ignore
+ * the MHI state while resuming.
+ */
+ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 5bf2318763c5..3a1a35b5672f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -7,16 +7,20 @@ config BRCMSMAC
depends on MAC80211
depends on BCMA_POSSIBLE
select BCMA
- select NEW_LEDS if BCMA_DRIVER_GPIO
- select LEDS_CLASS if BCMA_DRIVER_GPIO
select BRCMUTIL
select FW_LOADER
select CORDIC
help
This module adds support for PCIe wireless adapters based on Broadcom
- IEEE802.11n SoftMAC chipsets. It also has WLAN led support, which will
- be available if you select BCMA_DRIVER_GPIO. If you choose to build a
- module, the driver will be called brcmsmac.ko.
+ IEEE802.11n SoftMAC chipsets. If you choose to build a module, the
+ driver will be called brcmsmac.ko.
+
+config BRCMSMAC_LEDS
+ def_bool BRCMSMAC && BCMA_DRIVER_GPIO && MAC80211_LEDS
+ help
+ The brcmsmac LED support depends on the presence of the
+ BCMA_DRIVER_GPIO driver, and it only works if LED support
+ is enabled and reachable from the driver module.
source "drivers/net/wireless/broadcom/brcm80211/brcmfmac/Kconfig"
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 482d7737764d..090757730ba6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -42,6 +42,6 @@ brcmsmac-y := \
brcms_trace_events.o \
debug.o
-brcmsmac-$(CONFIG_BCMA_DRIVER_GPIO) += led.o
+brcmsmac-$(CONFIG_BRCMSMAC_LEDS) += led.o
obj-$(CONFIG_BRCMSMAC) += brcmsmac.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
index d65f5c268fd7..2a5cbeb9e783 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
@@ -24,7 +24,7 @@ struct brcms_led {
struct gpio_desc *gpiod;
};
-#ifdef CONFIG_BCMA_DRIVER_GPIO
+#ifdef CONFIG_BRCMSMAC_LEDS
void brcms_led_unregister(struct brcms_info *wl);
int brcms_led_register(struct brcms_info *wl);
#else
diff --git a/drivers/net/wireless/intel/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig
index 24fe3f63c321..7eacc8e58ee1 100644
--- a/drivers/net/wireless/intel/iwlegacy/Kconfig
+++ b/drivers/net/wireless/intel/iwlegacy/Kconfig
@@ -2,14 +2,13 @@
config IWLEGACY
tristate
select FW_LOADER
- select NEW_LEDS
- select LEDS_CLASS
select LEDS_TRIGGERS
select MAC80211_LEDS
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select IWLEGACY
help
This option enables support for
@@ -38,6 +37,7 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on PCI && MAC80211
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select IWLEGACY
help
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 1085afbefba8..418ae4f870ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -47,7 +47,7 @@ if IWLWIFI
config IWLWIFI_LEDS
bool
- depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
depends on IWLMVM || IWLDVM
select LEDS_TRIGGERS
select MAC80211_LEDS
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index bdd4ee432548..76e0b7b45980 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -269,17 +269,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
u8 rate_plcp;
u32 rate_flags = 0;
bool is_cck;
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* info->control is only relevant for non HW rate control */
if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
!ieee80211_is_data(fc),
"Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
info->control.rates[0].flags,
info->control.rates[0].idx,
- le16_to_cpu(fc), mvmsta->sta_state);
+ le16_to_cpu(fc), sta ? mvmsta->sta_state : -1);
rate_idx = info->control.rates[0].idx;
}
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 79ab850a45a2..c78ae4b89761 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -34,4 +34,4 @@ obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615_COMMON) += mt7615/
obj-$(CONFIG_MT7915E) += mt7915/
-obj-$(CONFIG_MT7921E) += mt7921/
+obj-$(CONFIG_MT7921_COMMON) += mt7921/
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index cff3b43ca4d7..12c03dacb5dd 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -181,9 +181,9 @@ void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
+ bool hpda_ctrl_pending = false;
struct sk_buff_head *ul_list;
bool hpda_pending = false;
- bool forced_hpdu = false;
struct ipc_pipe *pipe;
int i;
@@ -200,15 +200,19 @@ bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
ul_list = &channel->ul_list;
/* Fill the transfer descriptor with the uplink buffer info. */
- hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ if (!ipc_imem_check_wwan_ips(channel)) {
+ hpda_ctrl_pending |=
+ ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
pipe, ul_list);
-
- /* forced HP update needed for non data channels */
- if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
- forced_hpdu = true;
+ } else {
+ hpda_pending |=
+ ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ pipe, ul_list);
+ }
}
- if (forced_hpdu) {
+ /* forced HP update needed for non data channels */
+ if (hpda_ctrl_pending) {
hpda_pending = false;
ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
IPC_HP_UL_WRITE_TD);
@@ -527,6 +531,9 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
return;
}
+ if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
@@ -1167,7 +1174,7 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
ipc_port_deinit(ipc_imem->ipc_port);
}
- if (ipc_imem->ipc_devlink)
+ if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
ipc_devlink_deinit(ipc_imem->ipc_devlink);
ipc_imem_device_ipc_uninit(ipc_imem);
@@ -1263,7 +1270,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
ipc_imem->pci_device_id = device_id;
- ipc_imem->ev_cdev_write_pending = false;
ipc_imem->cp_version = 0;
ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
@@ -1331,6 +1337,8 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
if (ipc_flash_link_establish(ipc_imem))
goto devlink_channel_fail;
+
+ set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
}
return ipc_imem;
devlink_channel_fail:
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index 6be6708b4eec..6b8a837faef2 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -101,6 +101,7 @@ struct ipc_chnl_cfg;
#define IOSM_CHIP_INFO_SIZE_MAX 100
#define FULLY_FUNCTIONAL 0
+#define IOSM_DEVLINK_INIT 1
/* List of the supported UL/DL pipes. */
enum ipc_mem_pipes {
@@ -335,8 +336,6 @@ enum ipc_phase {
* process the irq actions.
* @flag: Flag to monitor the state of driver
* @td_update_timer_suspended: if true then td update timer suspend
- * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass
- * the accumulated uplink buffers to CP.
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
@@ -374,7 +373,6 @@ struct iosm_imem {
u8 ev_irq_pending[IPC_IRQ_VECTORS];
unsigned long flag;
u8 td_update_timer_suspended:1,
- ev_cdev_write_pending:1,
ev_mux_net_transmit_pending:1,
reset_det_n:1,
pcie_wake_n:1;
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 825e8e5ffb2a..831cdae28e8a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -41,7 +41,6 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
void *msg, size_t size)
{
- ipc_imem->ev_cdev_write_pending = false;
ipc_imem_ul_send(ipc_imem);
return 0;
@@ -50,11 +49,6 @@ static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
/* Through tasklet to do sio write. */
static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
{
- if (ipc_imem->ev_cdev_write_pending)
- return -1;
-
- ipc_imem->ev_cdev_write_pending = true;
-
return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
NULL, 0, false);
}
@@ -450,6 +444,7 @@ void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
/* Release the pipe resources */
ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+ ipc_imem->nr_of_channels--;
}
void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4a16d6e33c09..d9dea4829c86 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
unsigned int rx_queue_max;
unsigned int rx_queue_len;
unsigned long last_rx_time;
+ unsigned int rx_slots_needed;
bool stalled;
struct xenvif_copy_state rx_copy;
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index accc991d153f..dbac4c03d21a 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,28 +33,36 @@
#include <xen/xen.h>
#include <xen/events.h>
-static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+/*
+ * Update the needed ring page slots for the first SKB queued.
+ * Note that any call sequence outside the RX thread calling this function
+ * needs to wake up the RX thread via a call of xenvif_kick_thread()
+ * afterwards in order to avoid a race with putting the thread to sleep.
+ */
+static void xenvif_update_needed_slots(struct xenvif_queue *queue,
+ const struct sk_buff *skb)
{
- RING_IDX prod, cons;
- struct sk_buff *skb;
- int needed;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->rx_queue.lock, flags);
+ unsigned int needed = 0;
- skb = skb_peek(&queue->rx_queue);
- if (!skb) {
- spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
- return false;
+ if (skb) {
+ needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
+ if (skb_is_gso(skb))
+ needed++;
+ if (skb->sw_hash)
+ needed++;
}
- needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
- if (skb_is_gso(skb))
- needed++;
- if (skb->sw_hash)
- needed++;
+ WRITE_ONCE(queue->rx_slots_needed, needed);
+}
- spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
+{
+ RING_IDX prod, cons;
+ unsigned int needed;
+
+ needed = READ_ONCE(queue->rx_slots_needed);
+ if (!needed)
+ return false;
do {
prod = queue->rx.sring->req_prod;
@@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
spin_lock_irqsave(&queue->rx_queue.lock, flags);
- __skb_queue_tail(&queue->rx_queue, skb);
-
- queue->rx_queue_len += skb->len;
- if (queue->rx_queue_len > queue->rx_queue_max) {
+ if (queue->rx_queue_len >= queue->rx_queue_max) {
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
+ kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
+ } else {
+ if (skb_queue_empty(&queue->rx_queue))
+ xenvif_update_needed_slots(queue, skb);
+
+ __skb_queue_tail(&queue->rx_queue, skb);
+
+ queue->rx_queue_len += skb->len;
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
@@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
skb = __skb_dequeue(&queue->rx_queue);
if (skb) {
+ xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
+
queue->rx_queue_len -= skb->len;
if (queue->rx_queue_len < queue->rx_queue_max) {
struct netdev_queue *txq;
@@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
break;
xenvif_rx_dequeue(queue);
kfree_skb(skb);
+ queue->vif->dev->stats.rx_dropped++;
}
}
@@ -487,27 +504,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
xenvif_rx_copy_flush(queue);
}
-static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
+static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
RING_IDX prod, cons;
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
+ return prod - cons;
+}
+
+static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
+{
+ unsigned int needed = READ_ONCE(queue->rx_slots_needed);
+
return !queue->stalled &&
- prod - cons < 1 &&
+ xenvif_rx_queue_slots(queue) < needed &&
time_after(jiffies,
queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
- RING_IDX prod, cons;
-
- prod = queue->rx.sring->req_prod;
- cons = queue->rx.req_cons;
+ unsigned int needed = READ_ONCE(queue->rx_slots_needed);
- return queue->stalled && prod - cons >= 1;
+ return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 911f43986a8c..d514d96027a6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -148,6 +148,9 @@ struct netfront_queue {
grant_ref_t gref_rx_head;
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+ unsigned int rx_rsp_unconsumed;
+ spinlock_t rx_cons_lock;
+
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
};
@@ -376,12 +379,13 @@ static int xennet_open(struct net_device *dev)
return 0;
}
-static void xennet_tx_buf_gc(struct netfront_queue *queue)
+static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
RING_IDX cons, prod;
unsigned short id;
struct sk_buff *skb;
bool more_to_do;
+ bool work_done = false;
const struct device *dev = &queue->info->netdev->dev;
BUG_ON(!netif_carrier_ok(queue->info->netdev));
@@ -398,6 +402,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
struct xen_netif_tx_response txrsp;
+ work_done = true;
+
RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
if (txrsp.status == XEN_NETIF_RSP_NULL)
continue;
@@ -441,11 +447,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
xennet_maybe_wake_tx(queue);
- return;
+ return work_done;
err:
queue->info->broken = true;
dev_alert(dev, "Disabled for further use\n");
+
+ return work_done;
}
struct xennet_gnttab_make_txreq {
@@ -834,6 +842,16 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_cons_lock, flags);
+ queue->rx.rsp_cons = val;
+ queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+}
+
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
grant_ref_t ref)
{
@@ -885,7 +903,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
xennet_move_rx_slot(queue, skb, ref);
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
- queue->rx.rsp_cons = cons;
+ xennet_set_rx_rsp_cons(queue, cons);
return err;
}
@@ -1039,7 +1057,7 @@ next:
}
if (unlikely(err))
- queue->rx.rsp_cons = cons + slots;
+ xennet_set_rx_rsp_cons(queue, cons + slots);
return err;
}
@@ -1093,7 +1111,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
- queue->rx.rsp_cons = ++cons + skb_queue_len(list);
+ xennet_set_rx_rsp_cons(queue,
+ ++cons + skb_queue_len(list));
kfree_skb(nskb);
return -ENOENT;
}
@@ -1106,7 +1125,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
kfree_skb(nskb);
}
- queue->rx.rsp_cons = cons;
+ xennet_set_rx_rsp_cons(queue, cons);
return 0;
}
@@ -1229,7 +1248,9 @@ err:
if (unlikely(xennet_set_skb_gso(skb, gso))) {
__skb_queue_head(&tmpq, skb);
- queue->rx.rsp_cons += skb_queue_len(&tmpq);
+ xennet_set_rx_rsp_cons(queue,
+ queue->rx.rsp_cons +
+ skb_queue_len(&tmpq));
goto err;
}
}
@@ -1253,7 +1274,8 @@ err:
__skb_queue_tail(&rxq, skb);
- i = ++queue->rx.rsp_cons;
+ i = queue->rx.rsp_cons + 1;
+ xennet_set_rx_rsp_cons(queue, i);
work_done++;
}
if (need_xdp_flush)
@@ -1417,40 +1439,79 @@ static int xennet_set_features(struct net_device *dev,
return 0;
}
-static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
- struct netfront_queue *queue = dev_id;
unsigned long flags;
- if (queue->info->broken)
- return IRQ_HANDLED;
+ if (unlikely(queue->info->broken))
+ return false;
spin_lock_irqsave(&queue->tx_lock, flags);
- xennet_tx_buf_gc(queue);
+ if (xennet_tx_buf_gc(queue))
+ *eoi = 0;
spin_unlock_irqrestore(&queue->tx_lock, flags);
+ return true;
+}
+
+static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
+{
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (likely(xennet_handle_tx(dev_id, &eoiflag)))
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
-static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
- struct netfront_queue *queue = dev_id;
- struct net_device *dev = queue->info->netdev;
+ unsigned int work_queued;
+ unsigned long flags;
- if (queue->info->broken)
- return IRQ_HANDLED;
+ if (unlikely(queue->info->broken))
+ return false;
+
+ spin_lock_irqsave(&queue->rx_cons_lock, flags);
+ work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
+ if (work_queued > queue->rx_rsp_unconsumed) {
+ queue->rx_rsp_unconsumed = work_queued;
+ *eoi = 0;
+ } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
+ const struct device *dev = &queue->info->netdev->dev;
+
+ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
+ dev_alert(dev, "RX producer index going backwards\n");
+ dev_alert(dev, "Disabled for further use\n");
+ queue->info->broken = true;
+ return false;
+ }
+ spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
- if (likely(netif_carrier_ok(dev) &&
- RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
+ if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
napi_schedule(&queue->napi);
+ return true;
+}
+
+static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
+{
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (likely(xennet_handle_rx(dev_id, &eoiflag)))
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
- xennet_tx_interrupt(irq, dev_id);
- xennet_rx_interrupt(irq, dev_id);
+ unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
+
+ if (xennet_handle_tx(dev_id, &eoiflag) &&
+ xennet_handle_rx(dev_id, &eoiflag))
+ xen_irq_lateeoi(irq, eoiflag);
+
return IRQ_HANDLED;
}
@@ -1768,9 +1829,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
if (err < 0)
goto fail;
- err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
- xennet_interrupt,
- 0, queue->info->netdev->name, queue);
+ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+ xennet_interrupt, 0,
+ queue->info->netdev->name,
+ queue);
if (err < 0)
goto bind_fail;
queue->rx_evtchn = queue->tx_evtchn;
@@ -1798,18 +1860,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
"%s-tx", queue->name);
- err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
- xennet_tx_interrupt,
- 0, queue->tx_irq_name, queue);
+ err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
+ xennet_tx_interrupt, 0,
+ queue->tx_irq_name, queue);
if (err < 0)
goto bind_tx_fail;
queue->tx_irq = err;
snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
"%s-rx", queue->name);
- err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
- xennet_rx_interrupt,
- 0, queue->rx_irq_name, queue);
+ err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
+ xennet_rx_interrupt, 0,
+ queue->rx_irq_name, queue);
if (err < 0)
goto bind_rx_fail;
queue->rx_irq = err;
@@ -1911,6 +1973,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
+ spin_lock_init(&queue->rx_cons_lock);
timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index f126ce96a7df..35b32fb90906 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -524,7 +524,8 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(phy->gpiod_ena)) {
nfc_err(dev, "Unable to get ENABLE GPIO\n");
- return PTR_ERR(phy->gpiod_ena);
+ r = PTR_ERR(phy->gpiod_ena);
+ goto out_free;
}
phy->se_status.is_ese_present =
@@ -535,7 +536,7 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
r = st21nfca_hci_platform_init(phy);
if (r < 0) {
nfc_err(&client->dev, "Unable to reboot st21nfca\n");
- return r;
+ goto out_free;
}
r = devm_request_threaded_irq(&client->dev, client->irq, NULL,
@@ -544,15 +545,23 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
ST21NFCA_HCI_DRIVER_NAME, phy);
if (r < 0) {
nfc_err(&client->dev, "Unable to register IRQ handler\n");
- return r;
+ goto out_free;
}
- return st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
- ST21NFCA_FRAME_HEADROOM,
- ST21NFCA_FRAME_TAILROOM,
- ST21NFCA_HCI_LLC_MAX_PAYLOAD,
- &phy->hdev,
- &phy->se_status);
+ r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
+ ST21NFCA_FRAME_HEADROOM,
+ ST21NFCA_FRAME_TAILROOM,
+ ST21NFCA_HCI_LLC_MAX_PAYLOAD,
+ &phy->hdev,
+ &phy->se_status);
+ if (r)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ kfree_skb(phy->pending_skb);
+ return r;
}
static int st21nfca_hci_i2c_remove(struct i2c_client *client)
@@ -563,6 +572,8 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
if (phy->powered)
st21nfca_hci_i2c_disable(phy);
+ if (phy->pending_skb)
+ kfree_skb(phy->pending_skb);
return 0;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4c63564adeaa..1af8a4513708 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -666,6 +666,7 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
if (ctrl->state != NVME_CTRL_DELETING_NOIO &&
+ ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DEAD &&
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
@@ -1749,9 +1750,20 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
return -EINVAL;
- if (ctrl->max_integrity_segments)
- ns->features |=
- (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+
+ ns->features |= NVME_NS_EXT_LBAS;
+
+ /*
+ * The current fabrics transport drivers support namespace
+ * metadata formats only if nvme_ns_has_pi() returns true.
+ * Suppress support for all other formats so the namespace will
+ * have a 0 capacity and not be usable through the block stack.
+ *
+ * Note, this check will need to be modified if any drivers
+ * gain the ability to use other metadata formats.
+ */
+ if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
+ ns->features |= NVME_NS_METADATA_SUPPORTED;
} else {
/*
* For PCIe controllers, we can't easily remap the separate
@@ -2696,8 +2708,9 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
if (tmp->cntlid == ctrl->cntlid) {
dev_err(ctrl->device,
- "Duplicate cntlid %u with %s, rejecting\n",
- ctrl->cntlid, dev_name(tmp->device));
+ "Duplicate cntlid %u with %s, subsys %s, rejecting\n",
+ ctrl->cntlid, dev_name(tmp->device),
+ subsys->subnqn);
return false;
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 7f2071f2460c..13e5d503ed07 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -866,7 +866,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
if (ana_log_size > ctrl->ana_log_size) {
nvme_mpath_stop(ctrl);
- kfree(ctrl->ana_log_buf);
+ nvme_mpath_uninit(ctrl);
ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
if (!ctrl->ana_log_buf)
return -ENOMEM;
@@ -886,4 +886,5 @@ void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
kfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = NULL;
+ ctrl->ana_log_size = 0;
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b334af8aa264..9b095ee01364 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -709,7 +709,7 @@ static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
return true;
if (ctrl->ops->flags & NVME_F_FABRICS &&
ctrl->state == NVME_CTRL_DELETING)
- return true;
+ return queue_live;
return __nvme_check_ready(ctrl, rq, queue_live);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index bfc259e0d7b8..9f81beb4df4e 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -166,7 +166,10 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
zone.len = ns->zsze;
zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
- zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
+ if (zone.cond == BLK_ZONE_COND_FULL)
+ zone.wp = zone.start + zone.len;
+ else
+ zone.wp = nvme_lba_to_sect(ns, le64_to_cpu(entry->wp));
return cb(&zone, idx, data);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cb6a473c3eaf..7c1c43ce466b 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -922,7 +922,14 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
int ret;
- if (!nvme_is_write(cmd->req.cmd) ||
+ /*
+ * This command has not been processed yet, hence we are trying to
+ * figure out if there is still pending data left to receive. If
+ * we don't, we can simply prepare for the next pdu and bail out,
+ * otherwise we will need to prepare a buffer and receive the
+ * stale data before continuing forward.
+ */
+ if (!nvme_is_write(cmd->req.cmd) || !data_len ||
data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
return;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index b10f015b2e37..2b07677a386b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -76,6 +76,26 @@ struct device_node *of_irq_find_parent(struct device_node *child)
}
EXPORT_SYMBOL_GPL(of_irq_find_parent);
+/*
+ * These interrupt controllers abuse interrupt-map for unspeakable
+ * reasons and rely on the core code to *ignore* it (the drivers do
+ * their own parsing of the property).
+ *
+ * If you think of adding to the list for something *new*, think
+ * again. There is a high chance that you will be sent back to the
+ * drawing board.
+ */
+static const char * const of_irq_imap_abusers[] = {
+ "CBEA,platform-spider-pic",
+ "sti,platform-spider-pic",
+ "realtek,rtl-intc",
+ "fsl,ls1021a-extirq",
+ "fsl,ls1043a-extirq",
+ "fsl,ls1088a-extirq",
+ "renesas,rza1-irqc",
+ NULL,
+};
+
/**
* of_irq_parse_raw - Low level interrupt tree parsing
* @addr: address specifier (start of "reg" property of the device) in be32 format
@@ -159,12 +179,15 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
/*
* Now check if cursor is an interrupt-controller and
* if it is then we are done, unless there is an
- * interrupt-map which takes precedence.
+ * interrupt-map which takes precedence except on one
+ * of these broken platforms that want to parse
+ * interrupt-map themselves for $reason.
*/
bool intc = of_property_read_bool(ipar, "interrupt-controller");
imap = of_get_property(ipar, "interrupt-map", &imaplen);
- if (imap == NULL && intc) {
+ if (intc &&
+ (!imap || of_device_compatible_match(ipar, of_irq_imap_abusers))) {
pr_debug(" -> got it !\n");
return 0;
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 93b141110537..7fc5135ffbbf 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -332,8 +332,8 @@ config PCIE_APPLE
If unsure, say Y if you have an Apple Silicon system.
config PCIE_MT7621
- tristate "MediaTek MT7621 PCIe Controller"
- depends on (RALINK && SOC_MT7621) || (MIPS && COMPILE_TEST)
+ bool "MediaTek MT7621 PCIe Controller"
+ depends on SOC_MT7621 || (MIPS && COMPILE_TEST)
select PHY_MT7621_PCI
default SOC_MT7621
help
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index c24dab383654..722dacdd5a17 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
+#include <linux/module.h>
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index 7b17da2f9b3f..cfe66bf04c1d 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -18,6 +18,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <linux/module.h>
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c5300d49807a..c3b725afa11f 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -32,7 +32,6 @@
#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_DEV_REV_REG 0x8
-#define PCIE_CORE_EXP_ROM_BAR_REG 0x30
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
@@ -774,10 +773,6 @@ advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
*value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
return PCI_BRIDGE_EMUL_HANDLED;
- case PCI_ROM_ADDRESS1:
- *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG);
- return PCI_BRIDGE_EMUL_HANDLED;
-
case PCI_INTERRUPT_LINE: {
/*
* From the whole 32bit register we support reading from HW only
@@ -810,10 +805,6 @@ advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
break;
- case PCI_ROM_ADDRESS1:
- advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG);
- break;
-
case PCI_INTERRUPT_LINE:
if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index 1bf4d75b61be..b090924b41fe 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -516,7 +516,7 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
int ret, i;
reset = gpiod_get_from_of_node(np, "reset-gpios", 0,
- GPIOD_OUT_LOW, "#PERST");
+ GPIOD_OUT_LOW, "PERST#");
if (IS_ERR(reset))
return PTR_ERR(reset);
@@ -539,12 +539,22 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
rmw_set(PORT_APPCLK_EN, port->base + PORT_APPCLK);
+ /* Assert PERST# before setting up the clock */
+ gpiod_set_value(reset, 1);
+
ret = apple_pcie_setup_refclk(pcie, port);
if (ret < 0)
return ret;
+ /* The minimal Tperst-clk value is 100us (PCIe CEM r5.0, 2.9.2) */
+ usleep_range(100, 200);
+
+ /* Deassert PERST# */
rmw_set(PORT_PERST_OFF, port->base + PORT_PERST);
- gpiod_set_value(reset, 1);
+ gpiod_set_value(reset, 0);
+
+ /* Wait for 100ms after PERST# deassertion (PCIe r5.0, 6.6.1) */
+ msleep(100);
ret = readl_relaxed_poll_timeout(port->base + PORT_STATUS, stat,
stat & PORT_STATUS_READY, 100, 250000);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 48e3f4e47b29..d84cf30bb279 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -722,9 +722,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
goto out_disable;
}
- /* Ensure that all table entries are masked. */
- msix_mask_all(base, tsize);
-
ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
goto out_disable;
@@ -751,6 +748,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
+
+ /*
+ * Ensure that all table entries are masked to prevent
+ * stale entries from firing in a crash kernel.
+ *
+ * Done late to deal with a broken Marvell NVME device
+ * which takes the MSI-X mask bits into account even
+ * when MSI-X is disabled, which prevents MSI delivery.
+ */
+ msix_mask_all(base, tsize);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
pcibios_free_irq(dev);
@@ -777,7 +784,7 @@ out_free:
free_msi_irqs(dev);
out_disable:
- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
return ret;
}
diff --git a/drivers/phy/hisilicon/phy-hi3670-pcie.c b/drivers/phy/hisilicon/phy-hi3670-pcie.c
index c64c6679b1b9..0ac9634b398d 100644
--- a/drivers/phy/hisilicon/phy-hi3670-pcie.c
+++ b/drivers/phy/hisilicon/phy-hi3670-pcie.c
@@ -757,8 +757,8 @@ static int hi3670_pcie_phy_get_resources(struct hi3670_pcie_phy *phy,
return PTR_ERR(phy->sysctrl);
phy->pmctrl = syscon_regmap_lookup_by_compatible("hisilicon,hi3670-pmctrl");
- if (IS_ERR(phy->sysctrl))
- return PTR_ERR(phy->sysctrl);
+ if (IS_ERR(phy->pmctrl))
+ return PTR_ERR(phy->pmctrl);
/* clocks */
phy->phy_ref_clk = devm_clk_get(dev, "phy_ref");
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
index 08d178a4dc13..aa27c7994610 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
@@ -82,9 +82,9 @@
* struct mvebu_cp110_utmi - PHY driver data
*
* @regs: PHY registers
- * @syscom: Regmap with system controller registers
+ * @syscon: Regmap with system controller registers
* @dev: device driver handle
- * @caps: PHY capabilities
+ * @ops: phy ops
*/
struct mvebu_cp110_utmi {
void __iomem *regs;
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index bfff0c8c9130..fec1da470d26 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -127,12 +127,13 @@ struct phy_drvdata {
};
/**
- * Write register and read back masked value to confirm it is written
+ * usb_phy_write_readback() - Write register and read back masked value to
+ * confirm it is written
*
- * @base - QCOM DWC3 PHY base virtual address.
- * @offset - register offset.
- * @mask - register bitmask specifying what should be updated
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @offset: register offset.
+ * @mask: register bitmask specifying what should be updated
+ * @val: value to write.
*/
static inline void usb_phy_write_readback(struct usb_phy *phy_dwc3,
u32 offset,
@@ -171,11 +172,11 @@ static int wait_for_latch(void __iomem *addr)
}
/**
- * Write SSPHY register
+ * usb_ss_write_phycreg() - Write SSPHY register
*
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to write.
- * @val - value to write.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to write.
+ * @val: value to write.
*/
static int usb_ss_write_phycreg(struct usb_phy *phy_dwc3,
u32 addr, u32 val)
@@ -209,10 +210,11 @@ err_wait:
}
/**
- * Read SSPHY register.
+ * usb_ss_read_phycreg() - Read SSPHY register.
*
- * @base - QCOM DWC3 PHY base virtual address.
- * @addr - SSPHY address to read.
+ * @phy_dwc3: QCOM DWC3 phy context
+ * @addr: SSPHY address to read.
+ * @val: pointer in which the read value is stored.
*/
static int usb_ss_read_phycreg(struct usb_phy *phy_dwc3,
u32 addr, u32 *val)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 456a59d8c7d0..c96639d5f581 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2973,6 +2973,9 @@ struct qmp_phy_combo_cfg {
* @qmp: QMP phy to which this lane belongs
* @lane_rst: lane's reset controller
* @mode: current PHY mode
+ * @dp_aux_cfg: Display port aux config
+ * @dp_opts: Display port optional config
+ * @dp_clks: Display port clocks
*/
struct qmp_phy {
struct phy *phy;
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
index 04d18d52f700..716a77748ed8 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hsic.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Copyright (C) 2016 Linaro Ltd
*/
#include <linux/module.h>
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 7df6a63ad37b..e4f4a9be5132 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -478,7 +478,7 @@ static void stm32_usbphyc_phy_tuning(struct stm32_usbphyc *usbphyc,
if (!of_property_read_bool(np, "st,no-lsfs-fb-cap"))
usbphyc_phy->tune |= LFSCAPEN;
- if (of_property_read_bool(np, "st,slow-hs-slew-rate"))
+ if (of_property_read_bool(np, "st,decrease-hs-slew-rate"))
usbphyc_phy->tune |= HSDRVSLEW;
ret = of_property_read_u32(np, "st,tune-hs-dc-level", &val);
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 2ff56ce77b30..c1211c4f863c 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* PCIe SERDES driver for AM654x SoC
*
* Copyright (C) 2018 - 2019 Texas Instruments Incorporated - http://www.ti.com/
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 126f5b8735cc..b3384c31637a 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* Wrapper driver for SERDES used in J721E
*
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index ebceb1520ce8..3a505fe5715a 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -89,9 +89,9 @@ static inline void omap_usb_writel(void __iomem *addr, unsigned int offset,
}
/**
- * omap_usb2_set_comparator - links the comparator present in the system with
- * this phy
- * @comparator - the companion phy(comparator) for this phy
+ * omap_usb2_set_comparator() - links the comparator present in the system with this phy
+ *
+ * @comparator: the companion phy(comparator) for this phy
*
* The phy companion driver should call this API passing the phy_companion
* filled with set_vbus and start_srp to be used by usb phy.
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index a63213f5972a..15c1c79e5c29 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* tusb1210.c - TUSB1210 USB ULPI PHY driver
*
* Copyright (C) 2015 Intel Corporation
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 2abcc6ce4eba..b607d10e4cbd 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1244,6 +1244,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
raw_spin_lock_init(&pc->irq_lock[i]);
}
+ pc->pctl_desc = *pdata->pctl_desc;
+ pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
+ if (IS_ERR(pc->pctl_dev)) {
+ gpiochip_remove(&pc->gpio_chip);
+ return PTR_ERR(pc->pctl_dev);
+ }
+
+ pc->gpio_range = *pdata->gpio_range;
+ pc->gpio_range.base = pc->gpio_chip.base;
+ pc->gpio_range.gc = &pc->gpio_chip;
+ pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
+
girq = &pc->gpio_chip.irq;
girq->chip = &bcm2835_gpio_irq_chip;
girq->parent_handler = bcm2835_gpio_irq_handler;
@@ -1251,8 +1263,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
girq->parents = devm_kcalloc(dev, BCM2835_NUM_IRQS,
sizeof(*girq->parents),
GFP_KERNEL);
- if (!girq->parents)
+ if (!girq->parents) {
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
return -ENOMEM;
+ }
if (is_7211) {
pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
@@ -1307,21 +1321,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
err = gpiochip_add_data(&pc->gpio_chip, pc);
if (err) {
dev_err(dev, "could not add GPIO chip\n");
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
return err;
}
- pc->pctl_desc = *pdata->pctl_desc;
- pc->pctl_dev = devm_pinctrl_register(dev, &pc->pctl_desc, pc);
- if (IS_ERR(pc->pctl_dev)) {
- gpiochip_remove(&pc->gpio_chip);
- return PTR_ERR(pc->pctl_dev);
- }
-
- pc->gpio_range = *pdata->gpio_range;
- pc->gpio_range.base = pc->gpio_chip.base;
- pc->gpio_range.gc = &pc->gpio_chip;
- pinctrl_add_gpio_range(pc->pctl_dev, &pc->gpio_range);
-
return 0;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 91553b2fc160..53779822348d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -285,8 +285,12 @@ static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n,
desc = (const struct mtk_pin_desc *)hw->soc->pins;
*gpio_chip = &hw->chip;
- /* Be greedy to guess first gpio_n is equal to eint_n */
- if (desc[eint_n].eint.eint_n == eint_n)
+ /*
+ * Be greedy to guess first gpio_n is equal to eint_n.
+	 * Only a virtual eint number is greater than the gpio number.
+ */
+ if (hw->soc->npins > eint_n &&
+ desc[eint_n].eint.eint_n == eint_n)
*gpio_n = eint_n;
else
*gpio_n = mtk_xt_find_eint_num(hw, eint_n);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 24764ebcc936..9ed764731570 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1251,10 +1251,10 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.base = args.args[1];
- npins = args.args[2];
- while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
- ++i, &args))
- npins += args.args[2];
+ /* get the last defined gpio line (offset + nb of pins) */
+ npins = args.args[0] + args.args[2];
+ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, ++i, &args))
+ npins = max(npins, (int)(args.args[0] + args.args[2]));
} else {
bank_nr = pctl->nbanks;
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 04bc3b50aa7a..65b4a819f1bd 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -1374,8 +1374,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
pmc->block[i].counters = info[2];
pmc->block[i].type = info[3];
- if (IS_ERR(pmc->block[i].mmio_base))
- return PTR_ERR(pmc->block[i].mmio_base);
+ if (!pmc->block[i].mmio_base)
+ return -ENOMEM;
ret = mlxbf_pmc_create_groups(dev, i);
if (ret)
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 219478061683..253a096b5dd8 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -68,7 +68,7 @@ obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_THINKPAD_LMI) += think-lmi.o
# Intel
-obj-$(CONFIG_X86_PLATFORM_DRIVERS_INTEL) += intel/
+obj-y += intel/
# MSI
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index b7e50ed050a8..230593ae5d6d 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -76,7 +76,7 @@
#define AMD_CPU_ID_CZN AMD_CPU_ID_RN
#define AMD_CPU_ID_YC 0x14B5
-#define PMC_MSG_DELAY_MIN_US 100
+#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
#define SOC_SUBSYSTEM_IP_MAX 12
@@ -508,7 +508,8 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
}
static const struct dev_pm_ops amd_pmc_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(amd_pmc_suspend, amd_pmc_resume)
+ .suspend_noirq = amd_pmc_suspend,
+ .resume_noirq = amd_pmc_resume,
};
static const struct pci_device_id pmc_pci_ids[] = {
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 9aae45a45200..57553f9b4d1d 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -625,7 +625,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
}
gmux_data->iostart = res->start;
- gmux_data->iolen = res->end - res->start;
+ gmux_data->iolen = resource_size(res);
if (gmux_data->iolen < GMUX_MIN_IO_LEN) {
pr_err("gmux I/O region too small (%lu < %u)\n",
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index 38ce3e344589..40096b25994a 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -3,19 +3,6 @@
# Intel x86 Platform Specific Drivers
#
-menuconfig X86_PLATFORM_DRIVERS_INTEL
- bool "Intel x86 Platform Specific Device Drivers"
- default y
- help
- Say Y here to get to see options for device drivers for
- various Intel x86 platforms, including vendor-specific
- drivers. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped
- and disabled.
-
-if X86_PLATFORM_DRIVERS_INTEL
-
source "drivers/platform/x86/intel/atomisp2/Kconfig"
source "drivers/platform/x86/intel/int1092/Kconfig"
source "drivers/platform/x86/intel/int33fe/Kconfig"
@@ -183,5 +170,3 @@ config INTEL_UNCORE_FREQ_CONTROL
To compile this driver as a module, choose M here: the module
will be called intel-uncore-frequency.
-
-endif # X86_PLATFORM_DRIVERS_INTEL
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 08598942a6d7..13f8cf70b9ae 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -99,6 +99,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
},
},
+ {
+ .ident = "Microsoft Surface Go 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c
index 73797680b895..15ca8afdd973 100644
--- a/drivers/platform/x86/intel/pmc/pltdrv.c
+++ b/drivers/platform/x86/intel/pmc/pltdrv.c
@@ -65,7 +65,7 @@ static int __init pmc_core_platform_init(void)
retval = platform_device_register(pmc_core_device);
if (retval)
- kfree(pmc_core_device);
+ platform_device_put(pmc_core_device);
return retval;
}
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index ae9293024c77..a91847a551a7 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -657,6 +657,18 @@ static int acpi_add(struct acpi_device *device)
if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
+ if (strlen(product) > 5)
+ switch (product[5]) {
+ case 'N':
+ year = 2021;
+ break;
+ case '0':
+ year = 2016;
+ break;
+ default:
+ year = 2022;
+ }
+ break;
case '6':
year = 2016;
break;
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index 8b292ee95a14..7299ad08c838 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -35,6 +35,7 @@ struct system76_data {
union acpi_object *nfan;
union acpi_object *ntmp;
struct input_dev *input;
+ bool has_open_ec;
};
static const struct acpi_device_id device_ids[] = {
@@ -279,20 +280,12 @@ static struct acpi_battery_hook system76_battery_hook = {
static void system76_battery_init(void)
{
- acpi_handle handle;
-
- handle = ec_get_handle();
- if (handle && acpi_has_method(handle, "GBCT"))
- battery_hook_register(&system76_battery_hook);
+ battery_hook_register(&system76_battery_hook);
}
static void system76_battery_exit(void)
{
- acpi_handle handle;
-
- handle = ec_get_handle();
- if (handle && acpi_has_method(handle, "GBCT"))
- battery_hook_unregister(&system76_battery_hook);
+ battery_hook_unregister(&system76_battery_hook);
}
// Get the airplane mode LED brightness
@@ -673,6 +666,10 @@ static int system76_add(struct acpi_device *acpi_dev)
acpi_dev->driver_data = data;
data->acpi_dev = acpi_dev;
+ // Some models do not run open EC firmware. Check for an ACPI method
+ // that only exists on open EC to guard functionality specific to it.
+ data->has_open_ec = acpi_has_method(acpi_device_handle(data->acpi_dev), "NFAN");
+
err = system76_get(data, "INIT");
if (err)
return err;
@@ -718,27 +715,31 @@ static int system76_add(struct acpi_device *acpi_dev)
if (err)
goto error;
- err = system76_get_object(data, "NFAN", &data->nfan);
- if (err)
- goto error;
+ if (data->has_open_ec) {
+ err = system76_get_object(data, "NFAN", &data->nfan);
+ if (err)
+ goto error;
- err = system76_get_object(data, "NTMP", &data->ntmp);
- if (err)
- goto error;
+ err = system76_get_object(data, "NTMP", &data->ntmp);
+ if (err)
+ goto error;
- data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
- "system76_acpi", data, &thermal_chip_info, NULL);
- err = PTR_ERR_OR_ZERO(data->therm);
- if (err)
- goto error;
+ data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
+ "system76_acpi", data, &thermal_chip_info, NULL);
+ err = PTR_ERR_OR_ZERO(data->therm);
+ if (err)
+ goto error;
- system76_battery_init();
+ system76_battery_init();
+ }
return 0;
error:
- kfree(data->ntmp);
- kfree(data->nfan);
+ if (data->has_open_ec) {
+ kfree(data->ntmp);
+ kfree(data->nfan);
+ }
return err;
}
@@ -749,14 +750,15 @@ static int system76_remove(struct acpi_device *acpi_dev)
data = acpi_driver_data(acpi_dev);
- system76_battery_exit();
+ if (data->has_open_ec) {
+ system76_battery_exit();
+ kfree(data->nfan);
+ kfree(data->ntmp);
+ }
devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
- kfree(data->nfan);
- kfree(data->ntmp);
-
system76_get(data, "FINI");
return 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index b3ac9c3f3b7c..bb1abb947e1e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3015,6 +3015,8 @@ static struct attribute *hotkey_attributes[] = {
&dev_attr_hotkey_all_mask.attr,
&dev_attr_hotkey_adaptive_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
+ &dev_attr_hotkey_tablet_mode.attr,
+ &dev_attr_hotkey_radio_sw.attr,
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
&dev_attr_hotkey_source_mask.attr,
&dev_attr_hotkey_poll_freq.attr,
@@ -5726,11 +5728,11 @@ static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
"tpacpi::standby",
"tpacpi::dock_status1",
"tpacpi::dock_status2",
- "tpacpi::unknown_led2",
+ "tpacpi::lid_logo_dot",
"tpacpi::unknown_led3",
"tpacpi::thinkvantage",
};
-#define TPACPI_SAFE_LEDS 0x1081U
+#define TPACPI_SAFE_LEDS 0x1481U
static inline bool tpacpi_is_led_restricted(const unsigned int led)
{
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index fa8812039b82..17dd54d4b783 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -905,6 +905,16 @@ static const struct ts_dmi_data trekstor_primetab_t13b_data = {
.properties = trekstor_primetab_t13b_props,
};
+static const struct property_entry trekstor_surftab_duo_w1_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ { }
+};
+
+static const struct ts_dmi_data trekstor_surftab_duo_w1_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = trekstor_surftab_duo_w1_props,
+};
+
static const struct property_entry trekstor_surftab_twin_10_1_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 20),
PROPERTY_ENTRY_U32("touchscreen-min-y", 0),
@@ -1503,6 +1513,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* TrekStor SurfTab duo W1 10.1 ST10432-10b */
+ .driver_data = (void *)&trekstor_surftab_duo_w1_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
+ },
+ },
+ {
/* TrekStor SurfTab twin 10.1 ST10432-8 */
.driver_data = (void *)&trekstor_surftab_twin_10_1_data,
.matches = {
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 4c5bba52b105..24d3395964cc 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,7 +20,6 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
- int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -31,13 +30,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- err = tegra_bpmp_transfer(bpmp, &msg);
- if (err)
- return err;
- if (msg.rx.ret)
- return -EINVAL;
-
- return 0;
+ return tegra_bpmp_transfer(bpmp, &msg);
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 284b939fb1ea..059dae8909ee 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3100,6 +3100,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ char *tmp_persistent_address = conn->persistent_address;
+ char *tmp_local_ipaddr = conn->local_ipaddr;
del_timer_sync(&conn->transport_timer);
@@ -3121,8 +3123,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
spin_lock_bh(&session->frwd_lock);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
- kfree(conn->persistent_address);
- kfree(conn->local_ipaddr);
/* regular RX path uses back_lock */
spin_lock_bh(&session->back_lock);
kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,
@@ -3134,6 +3134,8 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
mutex_unlock(&session->eh_mutex);
iscsi_destroy_conn(cls_conn);
+ kfree(tmp_persistent_address);
+ kfree(tmp_local_ipaddr);
}
EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index bd6d459afce5..08b2e85dcd7d 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -2954,8 +2954,8 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
char mybuf[64];
char *pbuf;
- if (nbytes > 64)
- nbytes = 64;
+ if (nbytes > 63)
+ nbytes = 63;
memset(mybuf, 0, sizeof(mybuf));
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index bed8cc125544..fbfeb0b046dd 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -282,12 +282,12 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
if (rc) {
pm8001_dbg(pm8001_ha, FAIL,
"pm8001_setup_irq failed [ret: %d]\n", rc);
- goto err_out_shost;
+ goto err_out;
}
/* Request Interrupt */
rc = pm8001_request_irq(pm8001_ha);
if (rc)
- goto err_out_shost;
+ goto err_out;
count = pm8001_ha->max_q_num;
/* Queues are chosen based on the number of cores/msix availability */
@@ -423,8 +423,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
pm8001_tag_init(pm8001_ha);
return 0;
-err_out_shost:
- scsi_remove_host(pm8001_ha->shost);
err_out_nodev:
for (i = 0; i < pm8001_ha->max_memcnt; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index b9f6d83ff380..2101fc5761c3 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3053,7 +3053,6 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
struct smp_completion_resp *psmpPayload;
struct task_status_struct *ts;
struct pm8001_device *pm8001_dev;
- char *pdma_respaddr = NULL;
psmpPayload = (struct smp_completion_resp *)(piomb + 4);
status = le32_to_cpu(psmpPayload->status);
@@ -3080,19 +3079,23 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
if (pm8001_dev)
atomic_dec(&pm8001_dev->running_req);
if (pm8001_ha->smp_exp_mode == SMP_DIRECT) {
+ struct scatterlist *sg_resp = &t->smp_task.smp_resp;
+ u8 *payload;
+ void *to;
+
pm8001_dbg(pm8001_ha, IO,
"DIRECT RESPONSE Length:%d\n",
param);
- pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64
- ((u64)sg_dma_address
- (&t->smp_task.smp_resp))));
+ to = kmap_atomic(sg_page(sg_resp));
+ payload = to + sg_resp->offset;
for (i = 0; i < param; i++) {
- *(pdma_respaddr+i) = psmpPayload->_r_a[i];
+ *(payload + i) = psmpPayload->_r_a[i];
pm8001_dbg(pm8001_ha, IO,
"SMP Byte%d DMA data 0x%x psmp 0x%x\n",
- i, *(pdma_respaddr + i),
+ i, *(payload + i),
psmpPayload->_r_a[i]);
}
+ kunmap_atomic(to);
}
break;
case IO_ABORTED:
@@ -4236,14 +4239,14 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_dev = dev->lldd_dev;
- struct scatterlist *sg_req, *sg_resp;
+ struct scatterlist *sg_req, *sg_resp, *smp_req;
u32 req_len, resp_len;
struct smp_req smp_cmd;
u32 opc;
struct inbound_queue_table *circularQ;
- char *preq_dma_addr = NULL;
- __le64 tmp_addr;
u32 i, length;
+ u8 *payload;
+ u8 *to;
memset(&smp_cmd, 0, sizeof(smp_cmd));
/*
@@ -4280,8 +4283,9 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
pm8001_ha->smp_exp_mode = SMP_INDIRECT;
- tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
- preq_dma_addr = (char *)phys_to_virt(tmp_addr);
+ smp_req = &task->smp_task.smp_req;
+ to = kmap_atomic(sg_page(smp_req));
+ payload = to + smp_req->offset;
/* INDIRECT MODE command settings. Use DMA */
if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) {
@@ -4289,7 +4293,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
/* for SPCv indirect mode. Place the top 4 bytes of
* SMP Request header here. */
for (i = 0; i < 4; i++)
- smp_cmd.smp_req16[i] = *(preq_dma_addr + i);
+ smp_cmd.smp_req16[i] = *(payload + i);
/* exclude top 4 bytes for SMP req header */
smp_cmd.long_smp_req.long_req_addr =
cpu_to_le64((u64)sg_dma_address
@@ -4320,20 +4324,20 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, IO, "SMP REQUEST DIRECT MODE\n");
for (i = 0; i < length; i++)
if (i < 16) {
- smp_cmd.smp_req16[i] = *(preq_dma_addr+i);
+ smp_cmd.smp_req16[i] = *(payload + i);
pm8001_dbg(pm8001_ha, IO,
"Byte[%d]:%x (DMA data:%x)\n",
i, smp_cmd.smp_req16[i],
- *(preq_dma_addr));
+ *(payload));
} else {
- smp_cmd.smp_req[i] = *(preq_dma_addr+i);
+ smp_cmd.smp_req[i] = *(payload + i);
pm8001_dbg(pm8001_ha, IO,
"Byte[%d]:%x (DMA data:%x)\n",
i, smp_cmd.smp_req[i],
- *(preq_dma_addr));
+ *(payload));
}
}
-
+ kunmap_atomic(to);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd,
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 84a4204a2cb4..5916ed7662d5 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -732,7 +732,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
{
struct qedi_work_map *work, *work_tmp;
u32 proto_itt = cqe->itid;
- itt_t protoitt = 0;
int found = 0;
struct qedi_cmd *qedi_cmd = NULL;
u32 iscsi_cid;
@@ -812,16 +811,12 @@ unlock:
return;
check_cleanup_reqs:
- if (qedi_conn->cmd_cleanup_req > 0) {
- QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
+ if (atomic_inc_return(&qedi_conn->cmd_cleanup_cmpl) ==
+ qedi_conn->cmd_cleanup_req) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"Freeing tid=0x%x for cid=0x%x\n",
cqe->itid, qedi_conn->iscsi_conn_id);
- qedi_conn->cmd_cleanup_cmpl++;
wake_up(&qedi_conn->wait_queue);
- } else {
- QEDI_ERR(&qedi->dbg_ctx,
- "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x\n",
- protoitt, cqe->itid, qedi_conn->iscsi_conn_id);
}
}
@@ -1163,7 +1158,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
}
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n",
@@ -1215,16 +1210,15 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->iscsi_conn_id);
rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
- ((qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl) ||
- test_bit(QEDI_IN_RECOVERY,
- &qedi->flags)),
- 5 * HZ);
+ (qedi_conn->cmd_cleanup_req ==
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+ 5 * HZ);
if (rval) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
qedi_conn->cmd_cleanup_req,
- qedi_conn->cmd_cleanup_cmpl,
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl),
qedi_conn->iscsi_conn_id);
return 0;
@@ -1233,7 +1227,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n",
qedi_conn->cmd_cleanup_req,
- qedi_conn->cmd_cleanup_cmpl,
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl),
qedi_conn->iscsi_conn_id);
iscsi_host_for_each_session(qedi->shost,
@@ -1242,11 +1236,10 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
/* Enable IOs for all other sessions except current.*/
if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
- (qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl) ||
- test_bit(QEDI_IN_RECOVERY,
- &qedi->flags),
- 5 * HZ)) {
+ (qedi_conn->cmd_cleanup_req ==
+ atomic_read(&qedi_conn->cmd_cleanup_cmpl)) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags),
+ 5 * HZ)) {
iscsi_host_for_each_session(qedi->shost,
qedi_mark_device_available);
return -1;
@@ -1266,7 +1259,7 @@ void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_ep = qedi_conn->ep;
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
if (!qedi_ep) {
QEDI_WARN(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 88aa7d8b11c9..282ecb4e39bb 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -412,7 +412,7 @@ static int qedi_conn_bind(struct iscsi_cls_session *cls_session,
qedi_conn->iscsi_conn_id = qedi_ep->iscsi_cid;
qedi_conn->fw_cid = qedi_ep->fw_cid;
qedi_conn->cmd_cleanup_req = 0;
- qedi_conn->cmd_cleanup_cmpl = 0;
+ atomic_set(&qedi_conn->cmd_cleanup_cmpl, 0);
if (qedi_bind_conn_to_iscsi_cid(qedi, qedi_conn)) {
rc = -EINVAL;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index a282860da0aa..9b9f2e44fdde 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -155,7 +155,7 @@ struct qedi_conn {
spinlock_t list_lock; /* internal conn lock */
u32 active_cmd_count;
u32 cmd_cleanup_req;
- u32 cmd_cleanup_cmpl;
+ atomic_t cmd_cleanup_cmpl;
u32 iscsi_conn_id;
int itt;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 25549a8a2d72..7cf1f78cbaee 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2491,6 +2491,9 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
struct va_format vaf;
char pbuf[64];
+ if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled())
+ return;
+
va_start(va, fmt);
vaf.fmt = fmt;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 3c0da3770edf..2104973a35cd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4342,7 +4342,7 @@ static int resp_report_zones(struct scsi_cmnd *scp,
rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
max_zones);
- arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
+ arr = kzalloc(alloc_len, GFP_ATOMIC);
if (!arr) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c2ba65224633..1f037b8ab904 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -586,9 +586,12 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
* Commands like INQUIRY may transfer less data than
* requested by the initiator via bufflen. Set residual
* count to make upper layer aware of the actual amount
- * of data returned.
+	 * of data returned. There are cases when the controller
+	 * returns a zero dataLen with non-zero data - do not set
+	 * the residual count in that case.
*/
- scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
+ if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
cmd->result = (DID_OK << 16);
break;
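
The vmw_pvscsi hunk above only reports a residual when the completion's dataLen is non-zero and smaller than the requested buffer length, since some controller responses report dataLen == 0 even though data was returned. The computation in isolation, as a small stand-alone sketch (types and values are illustrative):

    #include <stdio.h>

    /* Residual bytes = requested - received, but only when the reported
     * length is non-zero and does not cover the whole buffer. */
    static unsigned int residual(unsigned int bufflen, unsigned int data_len)
    {
        if (data_len && data_len < bufflen)
            return bufflen - data_len;
        return 0;
    }

    int main(void)
    {
        printf("%u\n", residual(4096, 512));  /* 3584 */
        printf("%u\n", residual(4096, 0));    /* 0: zero dataLen is not trusted */
        printf("%u\n", residual(4096, 4096)); /* 0: full transfer */
        return 0;
    }
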
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index 519b3651d1d9..c2f076b56e24 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -17,6 +17,7 @@
#define BLK_SFT_RSTN 0x0
#define BLK_CLK_EN 0x4
+#define BLK_MIPI_RESET_DIV 0x8 /* Mini/Nano DISPLAY_BLK_CTRL only */
struct imx8m_blk_ctrl_domain;
@@ -36,6 +37,15 @@ struct imx8m_blk_ctrl_domain_data {
const char *gpc_name;
u32 rst_mask;
u32 clk_mask;
+
+ /*
+ * i.MX8M Mini and Nano have a third DISPLAY_BLK_CTRL register
+ * which is used to control the reset for the MIPI PHY.
+ * Since it is only present on those SoCs, check
+ * mipi_phy_rst_mask before setting or clearing the register.
+ */
+ u32 mipi_phy_rst_mask;
};
#define DOMAIN_MAX_CLKS 3
@@ -78,6 +88,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
/* put devices into reset */
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ if (data->mipi_phy_rst_mask)
+ regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
/* enable upstream and blk-ctrl clocks to allow reset to propagate */
ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
@@ -99,6 +111,8 @@ static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
/* release reset */
regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ if (data->mipi_phy_rst_mask)
+ regmap_set_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
/* disable upstream clocks */
clk_bulk_disable_unprepare(data->num_clks, domain->clks);
@@ -120,6 +134,9 @@ static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
struct imx8m_blk_ctrl *bc = domain->bc;
/* put devices into reset and disable clocks */
+ if (data->mipi_phy_rst_mask)
+ regmap_clear_bits(bc->regmap, BLK_MIPI_RESET_DIV, data->mipi_phy_rst_mask);
+
regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
@@ -480,6 +497,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
.gpc_name = "mipi-dsi",
.rst_mask = BIT(5),
.clk_mask = BIT(8) | BIT(9),
+ .mipi_phy_rst_mask = BIT(17),
},
[IMX8MM_DISPBLK_PD_MIPI_CSI] = {
.name = "dispblk-mipi-csi",
@@ -488,6 +506,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[]
.gpc_name = "mipi-csi",
.rst_mask = BIT(3) | BIT(4),
.clk_mask = BIT(10) | BIT(11),
+ .mipi_phy_rst_mask = BIT(16),
},
};
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index ac6d856ba228..77bc12039c3d 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -36,6 +36,10 @@ static int __init imx_soc_device_init(void)
int ret;
int i;
+ /* Return early if this is running on devices with different SoCs */
+ if (!__mxc_cpu_type)
+ return 0;
+
if (of_machine_is_compatible("fsl,ls1021a"))
return 0;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index f2151815db58..e714ed3b61bc 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -320,7 +320,7 @@ static struct platform_driver tegra_fuse_driver = {
};
builtin_platform_driver(tegra_fuse_driver);
-bool __init tegra_fuse_read_spare(unsigned int spare)
+u32 __init tegra_fuse_read_spare(unsigned int spare)
{
unsigned int offset = fuse->soc->info->spare + spare * 4;
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index de58feba0435..ecff0c08e959 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -65,7 +65,7 @@ struct tegra_fuse {
void tegra_init_revision(void);
void tegra_init_apbmisc(void);
-bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_spare(unsigned int spare);
u32 __init tegra_fuse_read_early(unsigned int offset);
u8 tegra_get_major_rev(void);
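
Changing tegra_fuse_read_spare() from bool to u32 matters because returning a register value through bool collapses every non-zero reading to 1. A tiny stand-alone illustration of that truncation (the example value is made up, not a real Tegra fuse reading):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool read_spare_as_bool(uint32_t raw) { return raw; }    /* old: 0 or 1 */
    static uint32_t read_spare_as_u32(uint32_t raw) { return raw; } /* new: full value */

    int main(void)
    {
        uint32_t raw = 0x34;                                   /* example value */
        printf("bool: %d\n", (int)read_spare_as_bool(raw));    /* prints 1 */
        printf("u32 : 0x%x\n", read_spare_as_u32(raw));        /* prints 0x34 */
        return 0;
    }
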
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 46feafe4e201..d8cc4b270644 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -901,7 +901,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
return 0;
error_clk:
- clk_disable_unprepare(spi->clk);
+ clk_unprepare(spi->clk);
error:
spi_master_put(master);
out:
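
The armada-3700 error path above switches from clk_disable_unprepare() to clk_unprepare(), which keeps the unwind symmetric with how far setup got: a clock that was only prepared, never enabled, should only be unprepared. The same goto-unwind idiom sketched with hypothetical two-stage resource helpers (res_prepare/res_enable are placeholders, not kernel APIs):

    #include <stdio.h>

    /* Hypothetical two-stage resource: prepare, then enable. */
    static int res_prepare(void) { puts("prepare"); return 0; }
    static void res_unprepare(void) { puts("unprepare"); }
    static int res_enable(void) { puts("enable failed"); return -1; }

    static int probe(void)
    {
        int ret = res_prepare();
        if (ret)
            goto out;

        ret = res_enable();
        if (ret)
            goto err_unprepare;   /* undo only what succeeded */

        return 0;

    err_unprepare:
        res_unprepare();          /* not "disable + unprepare": never enabled */
    out:
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }
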
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index da6b88e80dc0..297dc62bca29 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -203,9 +203,8 @@ static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
*ta_size = roundup(fw->size, PAGE_SIZE);
*ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
- if (IS_ERR(*ta)) {
- pr_err("%s: get_free_pages failed 0x%llx\n", __func__,
- (u64)*ta);
+ if (!*ta) {
+ pr_err("%s: get_free_pages failed\n", __func__);
rc = -ENOMEM;
goto rel_fw;
}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index ab2edfcc6c70..2a66a5203d2f 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -48,10 +48,8 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
goto err;
}
- for (i = 0; i < nr_pages; i++) {
- pages[i] = page;
- page++;
- }
+ for (i = 0; i < nr_pages; i++)
+ pages[i] = page + i;
shm->flags |= TEE_SHM_REGISTER;
rc = shm_register(shm->ctx, shm, pages, nr_pages,
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 6196d7c3888f..cf2e3293567d 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -23,6 +23,7 @@
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
+#include <linux/kmemleak.h>
#define CREATE_TRACE_POINTS
#include "optee_trace.h"
@@ -783,6 +784,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
param->a4 = 0;
param->a5 = 0;
}
+ kmemleak_not_leak(shm);
break;
case OPTEE_SMC_RPC_FUNC_FREE:
shm = reg_pair_to_ptr(param->a1, param->a2);
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 8a8deb95e918..499fccba3d74 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -1,20 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
*/
+#include <linux/anon_inodes.h>
#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/fdtable.h>
#include <linux/idr.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uio.h>
-#include <linux/module.h>
#include "tee_private.h"
-MODULE_IMPORT_NS(DMA_BUF);
-
static void release_registered_pages(struct tee_shm *shm)
{
if (shm->pages) {
@@ -31,16 +28,8 @@ static void release_registered_pages(struct tee_shm *shm)
}
}
-static void tee_shm_release(struct tee_shm *shm)
+static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
- struct tee_device *teedev = shm->ctx->teedev;
-
- if (shm->flags & TEE_SHM_DMA_BUF) {
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
- }
-
if (shm->flags & TEE_SHM_POOL) {
struct tee_shm_pool_mgr *poolm;
@@ -67,45 +56,6 @@ static void tee_shm_release(struct tee_shm *shm)
tee_device_put(teedev);
}
-static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
- *attach, enum dma_data_direction dir)
-{
- return NULL;
-}
-
-static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *table,
- enum dma_data_direction dir)
-{
-}
-
-static void tee_shm_op_release(struct dma_buf *dmabuf)
-{
- struct tee_shm *shm = dmabuf->priv;
-
- tee_shm_release(shm);
-}
-
-static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct tee_shm *shm = dmabuf->priv;
- size_t size = vma->vm_end - vma->vm_start;
-
- /* Refuse sharing shared memory provided by application */
- if (shm->flags & TEE_SHM_USER_MAPPED)
- return -EINVAL;
-
- return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
- size, vma->vm_page_prot);
-}
-
-static const struct dma_buf_ops tee_shm_dma_buf_ops = {
- .map_dma_buf = tee_shm_op_map_dma_buf,
- .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
- .release = tee_shm_op_release,
- .mmap = tee_shm_op_mmap,
-};
-
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
struct tee_device *teedev = ctx->teedev;
@@ -140,6 +90,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_dev_put;
}
+ refcount_set(&shm->refcount, 1);
shm->flags = flags | TEE_SHM_POOL;
shm->ctx = ctx;
if (flags & TEE_SHM_DMA_BUF)
@@ -153,10 +104,7 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
goto err_kfree;
}
-
if (flags & TEE_SHM_DMA_BUF) {
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
mutex_lock(&teedev->mutex);
shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
mutex_unlock(&teedev->mutex);
@@ -164,28 +112,11 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
ret = ERR_PTR(shm->id);
goto err_pool_free;
}
-
- exp_info.ops = &tee_shm_dma_buf_ops;
- exp_info.size = shm->size;
- exp_info.flags = O_RDWR;
- exp_info.priv = shm;
-
- shm->dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(shm->dmabuf)) {
- ret = ERR_CAST(shm->dmabuf);
- goto err_rem;
- }
}
teedev_ctx_get(ctx);
return shm;
-err_rem:
- if (flags & TEE_SHM_DMA_BUF) {
- mutex_lock(&teedev->mutex);
- idr_remove(&teedev->idr, shm->id);
- mutex_unlock(&teedev->mutex);
- }
err_pool_free:
poolm->ops->free(poolm, shm);
err_kfree:
@@ -246,6 +177,7 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
+ refcount_set(&shm->refcount, 1);
shm->flags = flags | TEE_SHM_REGISTER;
shm->ctx = ctx;
shm->id = -1;
@@ -306,22 +238,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
goto err;
}
- if (flags & TEE_SHM_DMA_BUF) {
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &tee_shm_dma_buf_ops;
- exp_info.size = shm->size;
- exp_info.flags = O_RDWR;
- exp_info.priv = shm;
-
- shm->dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(shm->dmabuf)) {
- ret = ERR_CAST(shm->dmabuf);
- teedev->desc->ops->shm_unregister(ctx, shm);
- goto err;
- }
- }
-
return shm;
err:
if (shm) {
@@ -339,6 +255,35 @@ err:
}
EXPORT_SYMBOL_GPL(tee_shm_register);
+static int tee_shm_fop_release(struct inode *inode, struct file *filp)
+{
+ tee_shm_put(filp->private_data);
+ return 0;
+}
+
+static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct tee_shm *shm = filp->private_data;
+ size_t size = vma->vm_end - vma->vm_start;
+
+ /* Refuse sharing shared memory provided by the application */
+ if (shm->flags & TEE_SHM_USER_MAPPED)
+ return -EINVAL;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations tee_shm_fops = {
+ .owner = THIS_MODULE,
+ .release = tee_shm_fop_release,
+ .mmap = tee_shm_fop_mmap,
+};
+
/**
* tee_shm_get_fd() - Increase reference count and return file descriptor
* @shm: Shared memory handle
@@ -351,10 +296,11 @@ int tee_shm_get_fd(struct tee_shm *shm)
if (!(shm->flags & TEE_SHM_DMA_BUF))
return -EINVAL;
- get_dma_buf(shm->dmabuf);
- fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
+ /* matched by tee_shm_put() in tee_shm_fop_release() */
+ refcount_inc(&shm->refcount);
+ fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
if (fd < 0)
- dma_buf_put(shm->dmabuf);
+ tee_shm_put(shm);
return fd;
}
@@ -364,17 +310,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
*/
void tee_shm_free(struct tee_shm *shm)
{
- /*
- * dma_buf_put() decreases the dmabuf reference counter and will
- * call tee_shm_release() when the last reference is gone.
- *
- * In the case of driver private memory we call tee_shm_release
- * directly instead as it doesn't have a reference counter.
- */
- if (shm->flags & TEE_SHM_DMA_BUF)
- dma_buf_put(shm->dmabuf);
- else
- tee_shm_release(shm);
+ tee_shm_put(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);
@@ -481,10 +417,15 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
teedev = ctx->teedev;
mutex_lock(&teedev->mutex);
shm = idr_find(&teedev->idr, id);
+ /*
+ * If the tee_shm was found in the IDR it must have a refcount
+ * larger than 0 due to the guarantee in tee_shm_put() below. So
+ * it's safe to use refcount_inc().
+ */
if (!shm || shm->ctx != ctx)
shm = ERR_PTR(-EINVAL);
- else if (shm->flags & TEE_SHM_DMA_BUF)
- get_dma_buf(shm->dmabuf);
+ else
+ refcount_inc(&shm->refcount);
mutex_unlock(&teedev->mutex);
return shm;
}
@@ -496,7 +437,24 @@ EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
*/
void tee_shm_put(struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_DMA_BUF)
- dma_buf_put(shm->dmabuf);
+ struct tee_device *teedev = shm->ctx->teedev;
+ bool do_release = false;
+
+ mutex_lock(&teedev->mutex);
+ if (refcount_dec_and_test(&shm->refcount)) {
+ /*
+ * refcount has reached 0, we must now remove it from the
+ * IDR before releasing the mutex. This will guarantee that
+ * the refcount_inc() in tee_shm_get_from_id() never starts
+ * from 0.
+ */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ idr_remove(&teedev->idr, shm->id);
+ do_release = true;
+ }
+ mutex_unlock(&teedev->mutex);
+
+ if (do_release)
+ tee_shm_release(teedev, shm);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
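
The tee_shm rework above drops the dma-buf machinery in favour of a plain refcount whose final decrement also removes the entry from the lookup IDR while still holding the device mutex, so tee_shm_get_from_id() can never increment a refcount that has already reached zero. A user-space analogue of that pattern, with a mutex-protected array standing in for the IDR (all names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct shm {
        int id;
        unsigned int refcount;  /* 1 -> 0 transition happens under table_lock */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct shm *table[16];   /* stand-in for the IDR */

    static struct shm *shm_get_from_id(int id)
    {
        struct shm *s;

        pthread_mutex_lock(&table_lock);
        s = table[id];
        if (s)
            s->refcount++;  /* never starts from 0: see shm_put() */
        pthread_mutex_unlock(&table_lock);
        return s;
    }

    static void shm_put(struct shm *s)
    {
        int release = 0;

        pthread_mutex_lock(&table_lock);
        if (--s->refcount == 0) {
            table[s->id] = NULL;  /* unpublish before dropping the lock */
            release = 1;
        }
        pthread_mutex_unlock(&table_lock);

        if (release)
            free(s);
    }

    int main(void)
    {
        struct shm *s = calloc(1, sizeof(*s));

        s->id = 3;
        s->refcount = 1;
        table[s->id] = s;

        struct shm *ref = shm_get_from_id(3);
        shm_put(ref);   /* drops the lookup reference */
        shm_put(s);     /* drops the last reference and unpublishes */
        printf("slot 3 empty: %d\n", table[3] == NULL);
        return 0;
    }
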
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index b25b54d4bac1..e693ec8234fb 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -29,7 +29,7 @@ static const char * const fivr_strings[] = {
};
static const struct mmio_reg tgl_fivr_mmio_regs[] = {
- { 0, 0x5A18, 3, 0x7, 12}, /* vco_ref_code_lo */
+ { 0, 0x5A18, 3, 0x7, 11}, /* vco_ref_code_lo */
{ 0, 0x5A18, 8, 0xFF, 16}, /* vco_ref_code_hi */
{ 0, 0x5A08, 8, 0xFF, 0}, /* spread_spectrum_pct */
{ 0, 0x5A08, 1, 0x1, 8}, /* spread_spectrum_clk_enable */
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 71e0dd2c0ce5..ebaf7500f48f 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -37,6 +37,8 @@ struct xencons_info {
struct xenbus_device *xbdev;
struct xencons_interface *intf;
unsigned int evtchn;
+ XENCONS_RING_IDX out_cons;
+ unsigned int out_cons_same;
struct hvc_struct *hvc;
int irq;
int vtermno;
@@ -138,6 +140,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
XENCONS_RING_IDX cons, prod;
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
+ unsigned int eoiflag = 0;
+
if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;
@@ -157,7 +161,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
mb(); /* read ring before consuming */
intf->in_cons = cons;
- notify_daemon(xencons);
+ /*
+ * When to mark the interrupt as having been spurious:
+ * - there was no new data to be read, and
+ * - the backend did not consume some output bytes, and
+ * - the previous round with no read data didn't see consumed bytes
+ * (we might have a race with an interrupt being in flight while
+ * updating xencons->out_cons, so account for that by allowing one
+ * round without any visible reason)
+ */
+ if (intf->out_cons != xencons->out_cons) {
+ xencons->out_cons = intf->out_cons;
+ xencons->out_cons_same = 0;
+ }
+ if (recv) {
+ notify_daemon(xencons);
+ } else if (xencons->out_cons_same++ > 1) {
+ eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ }
+
+ xen_irq_lateeoi(xencons->irq, eoiflag);
+
return recv;
}
@@ -386,7 +410,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
if (ret)
return ret;
info->evtchn = evtchn;
- irq = bind_evtchn_to_irq(evtchn);
+ irq = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
if (irq < 0)
return irq;
info->irq = irq;
@@ -551,7 +575,7 @@ static int __init xen_hvc_init(void)
return r;
info = vtermno_to_xencons(HVC_COOKIE);
- info->irq = bind_evtchn_to_irq(info->evtchn);
+ info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
}
if (info->irq < 0)
info->irq = 0; /* NO_IRQ */
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 7e0884ecc74f..23ba1fc99df8 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -140,6 +140,8 @@ struct n_hdlc {
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
struct n_hdlc_buf_list rx_free_buf_list;
+ struct work_struct write_work;
+ struct tty_struct *tty_for_write_work;
};
/*
@@ -154,6 +156,7 @@ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
/* Local functions */
static struct n_hdlc *n_hdlc_alloc(void);
+static void n_hdlc_tty_write_work(struct work_struct *work);
/* max frame size for memory allocations */
static int maxframe = 4096;
@@ -210,6 +213,8 @@ static void n_hdlc_tty_close(struct tty_struct *tty)
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
+ cancel_work_sync(&n_hdlc->write_work);
+
n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
@@ -241,6 +246,8 @@ static int n_hdlc_tty_open(struct tty_struct *tty)
return -ENFILE;
}
+ INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
+ n_hdlc->tty_for_write_work = tty;
tty->disc_data = n_hdlc;
tty->receive_room = 65536;
@@ -335,6 +342,20 @@ check_again:
} /* end of n_hdlc_send_frames() */
/**
+ * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
+ * @work: pointer to work_struct
+ *
+ * Called when the low-level device driver can accept more send data.
+ */
+static void n_hdlc_tty_write_work(struct work_struct *work)
+{
+ struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
+ struct tty_struct *tty = n_hdlc->tty_for_write_work;
+
+ n_hdlc_send_frames(n_hdlc, tty);
+} /* end of n_hdlc_tty_write_work() */
+
+/**
* n_hdlc_tty_wakeup - Callback for transmit wakeup
* @tty: pointer to associated tty instance data
*
@@ -344,7 +365,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty->disc_data;
- n_hdlc_send_frames(n_hdlc, tty);
+ schedule_work(&n_hdlc->write_work);
} /* end of n_hdlc_tty_wakeup() */
/**
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 31c9e83ea3cb..251f0018ae8c 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -290,25 +290,6 @@ static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata)
}
}
-static void fintek_8250_goto_highspeed(struct uart_8250_port *uart,
- struct fintek_8250 *pdata)
-{
- sio_write_reg(pdata, LDN, pdata->index);
-
- switch (pdata->pid) {
- case CHIP_ID_F81966:
- case CHIP_ID_F81866: /* set uart clock for high speed serial mode */
- sio_write_mask_reg(pdata, F81866_UART_CLK,
- F81866_UART_CLK_MASK,
- F81866_UART_CLK_14_769MHZ);
-
- uart->port.uartclk = 921600 * 16;
- break;
- default: /* leave clock speed untouched */
- break;
- }
-}
-
static void fintek_8250_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
@@ -430,7 +411,6 @@ static int probe_setup_port(struct fintek_8250 *pdata,
fintek_8250_set_irq_mode(pdata, level_mode);
fintek_8250_set_max_fifo(pdata);
- fintek_8250_goto_highspeed(uart, pdata);
fintek_8250_exit_key(addr[i]);
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index 27df0c697897..e85bf768c66d 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -1541,15 +1541,27 @@ static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
struct cdns *cdns = dev_get_drvdata(pdev->dev);
+ unsigned long flags;
trace_cdnsp_pullup(is_on);
+ /*
+ * Disable event handling while the controller is being
+ * enabled/disabled.
+ */
+ disable_irq(cdns->dev_irq);
+ spin_lock_irqsave(&pdev->lock, flags);
+
if (!is_on) {
cdnsp_reset_device(pdev);
cdns_clear_vbus(cdns);
} else {
cdns_set_vbus(cdns);
}
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ enable_irq(cdns->dev_irq);
+
return 0;
}
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index 1b1438457fb0..e45c3d6e1536 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1029,6 +1029,8 @@ static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
return;
}
+ *status = 0;
+
cdnsp_finish_td(pdev, td, event, pep, status);
}
@@ -1523,7 +1525,14 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
- cdnsp_died(pdev);
+ /*
+ * While removing or stopping the driver there may still be a
+ * deferred, not yet handled interrupt, which should not be
+ * treated as an error. The driver should simply ignore it.
+ */
+ if (pdev->gadget_driver)
+ cdnsp_died(pdev);
+
spin_unlock_irqrestore(&pdev->lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
index 6a2571c6aa9e..5983dfb99653 100644
--- a/drivers/usb/cdns3/cdnsp-trace.h
+++ b/drivers/usb/cdns3/cdnsp-trace.h
@@ -57,9 +57,9 @@ DECLARE_EVENT_CLASS(cdnsp_log_ep,
__entry->first_prime_det = pep->stream_info.first_prime_det;
__entry->drbls_count = pep->stream_info.drbls_count;
),
- TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num %d "
+ TP_printk("%s: SID: %08x, ep state: %x, stream: enabled: %d num %d "
"tds %d, first prime: %d drbls %d",
- __get_str(name), __entry->state, __entry->stream_id,
+ __get_str(name), __entry->stream_id, __entry->state,
__entry->enabled, __entry->num_streams, __entry->td_count,
__entry->first_prime_det, __entry->drbls_count)
);
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index 84dadfa726aa..9643b905e2d8 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -10,6 +10,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "core.h"
#include "drd.h"
#include "host-export.h"
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 16b1fd9dc60c..48bc8a4814ac 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -406,7 +406,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* the USB-2 spec requires such endpoints to have wMaxPacketSize = 0
* (see the end of section 5.6.3), so don't warn about them.
*/
- maxp = usb_endpoint_maxp(&endpoint->desc);
+ maxp = le16_to_cpu(endpoint->desc.wMaxPacketSize);
if (maxp == 0 && !(usb_endpoint_xfer_isoc(d) && asnum == 0)) {
dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid wMaxPacketSize 0\n",
cfgno, inum, asnum, d->bEndpointAddress);
@@ -422,9 +422,9 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
maxpacket_maxes = full_speed_maxpacket_maxes;
break;
case USB_SPEED_HIGH:
- /* Bits 12..11 are allowed only for HS periodic endpoints */
+ /* Multiple-transactions bits are allowed only for HS periodic endpoints */
if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) {
- i = maxp & (BIT(12) | BIT(11));
+ i = maxp & USB_EP_MAXP_MULT_MASK;
maxp &= ~i;
}
fallthrough;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 019351c0b52c..d3c14b5ed4a1 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
+ { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
+
/* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
{ USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c8f18f3ba9e3..c331a5128c2c 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -575,6 +575,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
ggpio |= GGPIO_STM32_OTG_GCCFG_IDEN;
ggpio |= GGPIO_STM32_OTG_GCCFG_VBDEN;
dwc2_writel(hsotg, ggpio, GGPIO);
+
+ /* ID/VBUS detection startup time */
+ usleep_range(5000, 7000);
}
retval = dwc2_drd_init(hsotg);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 9abbd01028c5..3cb01cdd02c2 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -649,7 +649,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node, *dwc3_np;
struct device *dev = &pdev->dev;
- struct property *prop;
int ret;
dwc3_np = of_get_compatible_child(np, "snps,dwc3");
@@ -658,20 +657,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
return -ENODEV;
}
- prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
- if (!prop) {
- ret = -ENOMEM;
- dev_err(dev, "unable to allocate memory for property\n");
- goto node_put;
- }
-
- prop->name = "tx-fifo-resize";
- ret = of_add_property(dwc3_np, prop);
- if (ret) {
- dev_err(dev, "unable to add property\n");
- goto node_put;
- }
-
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to register dwc3 core - %d\n", ret);
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 933d77ad0a64..4502108069cd 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -14,7 +14,6 @@
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
-#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
@@ -136,9 +135,17 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
u32 result;
- return readl_poll_timeout_atomic(ptr, result,
- ((result & mask) == done),
- delay, wait);
+ /* Cannot use readl_poll_timeout_atomic() during early boot */
+ do {
+ result = readl(ptr);
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(delay);
+ wait -= delay;
+ } while (wait > 0);
+
+ return -ETIMEDOUT;
}
static void __init xdbc_bios_handoff(void)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 504c1cbc255d..3789c329183c 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1679,6 +1679,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
struct usb_function *f = NULL;
u8 endp;
+ if (w_length > USB_COMP_EP0_BUFSIZ) {
+ if (ctrl->bRequestType & USB_DIR_IN) {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+ w_length = USB_COMP_EP0_BUFSIZ;
+ } else {
+ goto done;
+ }
+ }
+
/* partial re-init of the response message; the function or the
* gadget might need to intercept e.g. a control-OUT completion
* when we delegate to it.
@@ -2209,7 +2221,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
if (!cdev->req)
return -ENOMEM;
- cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+ cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
if (!cdev->req->buf)
goto fail;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index e20c19a0f106..a7e069b18544 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1773,11 +1773,15 @@ static void ffs_data_clear(struct ffs_data *ffs)
BUG_ON(ffs->gadget);
- if (ffs->epfiles)
+ if (ffs->epfiles) {
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ ffs->epfiles = NULL;
+ }
- if (ffs->ffs_eventfd)
+ if (ffs->ffs_eventfd) {
eventfd_ctx_put(ffs->ffs_eventfd);
+ ffs->ffs_eventfd = NULL;
+ }
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
@@ -1790,7 +1794,6 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs_data_clear(ffs);
- ffs->epfiles = NULL;
ffs->raw_descs_data = NULL;
ffs->raw_descs = NULL;
ffs->raw_strings = NULL;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index e0ad5aed6ac9..6f5d45ef2e39 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
#include "u_ether.h"
@@ -863,19 +864,23 @@ int gether_register_netdev(struct net_device *net)
{
struct eth_dev *dev;
struct usb_gadget *g;
- struct sockaddr sa;
int status;
if (!net->dev.parent)
return -EINVAL;
dev = netdev_priv(net);
g = dev->gadget;
+
+ net->addr_assign_type = NET_ADDR_RANDOM;
+ eth_hw_addr_set(net, dev->dev_mac);
+
status = register_netdev(net);
if (status < 0) {
dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
return status;
} else {
INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+ INFO(dev, "MAC %pM\n", dev->dev_mac);
/* two kinds of host-initiated state changes:
* - iff DATA transfer is active, carrier is "on"
@@ -883,15 +888,6 @@ int gether_register_netdev(struct net_device *net)
*/
netif_carrier_off(net);
}
- sa.sa_family = net->type;
- memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
- rtnl_lock();
- status = dev_set_mac_address(net, &sa, NULL);
- rtnl_unlock();
- if (status)
- pr_warn("cannot set self ethernet address: %d\n", status);
- else
- INFO(dev, "MAC %pM\n", dev->dev_mac);
return status;
}
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index e1d566c9918a..6bcbad382580 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
goto fail_1;
}
- req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+ req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
if (!req->buf) {
err = -ENOMEM;
stp = 2;
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
void *data = NULL;
u16 len = 0;
+ if (length > DBGP_REQ_LEN) {
+ if (ctrl->bRequestType & USB_DIR_IN) {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(DBGP_REQ_LEN);
+ length = DBGP_REQ_LEN;
+ } else {
+ return err;
+ }
+ }
+
+
if (request == USB_REQ_GET_DESCRIPTOR) {
switch (value>>8) {
case USB_DT_DEVICE:
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 78be94750232..3b58f4fc0a80 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -110,6 +110,8 @@ enum ep0_state {
/* enough for the whole queue: most events invalidate others */
#define N_EVENT 5
+#define RBUF_SIZE 256
+
struct dev_data {
spinlock_t lock;
refcount_t count;
@@ -144,7 +146,7 @@ struct dev_data {
struct dentry *dentry;
/* except this scratch i/o buffer for ep0 */
- u8 rbuf [256];
+ u8 rbuf[RBUF_SIZE];
};
static inline void get_dev (struct dev_data *data)
@@ -1331,6 +1333,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
+ if (w_length > RBUF_SIZE) {
+ if (ctrl->bRequestType & USB_DIR_IN) {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(RBUF_SIZE);
+ w_length = RBUF_SIZE;
+ } else {
+ return value;
+ }
+ }
+
spin_lock (&dev->lock);
dev->setup_abort = 0;
if (dev->state == STATE_DEV_UNCONNECTED) {
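
composite_setup(), dbgp_setup() and gadgetfs_setup() above all add the same guard: an IN control request longer than the ep0 scratch buffer is clamped to the buffer size, while an oversized OUT request is refused, because accepting it would let the host overrun the buffer. The clamp in isolation (EP0_BUFSIZ and the helper name are illustrative, not from the gadget code):

    #include <stdint.h>
    #include <stdio.h>

    #define EP0_BUFSIZ 256          /* stand-in for USB_COMP_EP0_BUFSIZ / RBUF_SIZE */
    #define DIR_IN     0x80

    /* Returns the length to use, or -1 if the request must be rejected. */
    static int clamp_ctrl_len(uint8_t bRequestType, uint16_t wLength)
    {
        if (wLength <= EP0_BUFSIZ)
            return wLength;
        if (bRequestType & DIR_IN)
            return EP0_BUFSIZ;      /* IN: send at most what fits in the buffer */
        return -1;                  /* OUT: refuse, host data would overflow */
    }

    int main(void)
    {
        printf("%d\n", clamp_ctrl_len(DIR_IN, 4096)); /* 256 */
        printf("%d\n", clamp_ctrl_len(0x00, 4096));   /* -1 */
        printf("%d\n", clamp_ctrl_len(0x00, 64));     /* 64 */
        return 0;
    }
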
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index af946c42b6f0..df3522dab31b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -717,6 +717,7 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
continue;
retval = xhci_disable_slot(xhci, i);
+ xhci_free_virt_device(xhci, i);
if (retval)
xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
i, retval);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 1edef7527c11..edbfa82c6565 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -781,7 +781,7 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
ret = xhci_check_bandwidth(hcd, udev);
if (!ret)
- INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+ list_del_init(&mtk->bw_ep_chk_list);
return ret;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 92adf6107864..5c351970cdf1 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -71,6 +71,8 @@
#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e
#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6
#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
@@ -121,7 +123,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 ||
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
@@ -156,6 +157,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+ pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
@@ -330,7 +335,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
- pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6))
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index eaa49aef2935..d0b6806275e0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1525,7 +1525,6 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
/* Delete default control endpoint resources */
xhci_free_device_endpoint_resources(xhci, virt_dev, true);
- xhci_free_virt_device(xhci, slot_id);
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 902f410874e8..f5b1bcc875de 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3934,7 +3934,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_slot_ctx *slot_ctx;
int i, ret;
-#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* We called pm_runtime_get_noresume when the device was attached.
* Decrement the counter here to allow controller to runtime suspend
@@ -3942,7 +3941,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_put_noidle(hcd->self.controller);
-#endif
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
/* If the host is halted due to driver unload, we still need to free the
@@ -3961,9 +3959,8 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
virt_dev->udev = NULL;
- ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -3973,7 +3970,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
u32 state;
int ret = 0;
- command = xhci_alloc_command(xhci, false, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, GFP_KERNEL);
if (!command)
return -ENOMEM;
@@ -3998,6 +3995,15 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
+
+ wait_for_completion(command->completion);
+
+ if (command->status != COMP_SUCCESS)
+ xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n",
+ slot_id, command->status);
+
+ xhci_free_command(xhci, command);
+
return ret;
}
@@ -4094,23 +4100,20 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
xhci_debugfs_create_slot(xhci, slot_id);
-#ifndef CONFIG_USB_DEFAULT_PERSIST
/*
* If resetting upon resume, we can't put the controller into runtime
* suspend if there is a device attached.
*/
if (xhci->quirks & XHCI_RESET_ON_RESUME)
pm_runtime_get_noresume(hcd->self.controller);
-#endif
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripheral? */
return 1;
disable_slot:
- ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret)
- xhci_free_virt_device(xhci, udev->slot_id);
+ xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
return 0;
}
@@ -4240,6 +4243,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_unlock(&xhci->mutex);
ret = xhci_disable_slot(xhci, udev->slot_id);
+ xhci_free_virt_device(xhci, udev->slot_id);
if (!ret)
xhci_alloc_dev(hcd, udev);
kfree(command->completion);
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index a9a65b4bbfed..9977600616d7 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -77,7 +77,7 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
if (usb_endpoint_xfer_int(desc) ||
usb_endpoint_xfer_isoc(desc)) {
interval = desc->bInterval;
- interval = clamp_val(interval, 1, 16) - 1;
+ interval = clamp_val(interval, 1, 16);
if (usb_endpoint_xfer_isoc(desc) && comp_desc)
mult = comp_desc->bmAttributes;
}
@@ -89,10 +89,17 @@ static int mtu3_ep_enable(struct mtu3_ep *mep)
if (usb_endpoint_xfer_isoc(desc) ||
usb_endpoint_xfer_int(desc)) {
interval = desc->bInterval;
- interval = clamp_val(interval, 1, 16) - 1;
+ interval = clamp_val(interval, 1, 16);
mult = usb_endpoint_maxp_mult(desc) - 1;
}
break;
+ case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_isoc(desc))
+ interval = clamp_val(desc->bInterval, 1, 16);
+ else if (usb_endpoint_xfer_int(desc))
+ interval = clamp_val(desc->bInterval, 1, 255);
+
+ break;
default:
break; /* others are ignored */
}
@@ -235,6 +242,7 @@ struct usb_request *mtu3_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
mreq->request.dma = DMA_ADDR_INVALID;
mreq->epnum = mep->epnum;
mreq->mep = mep;
+ INIT_LIST_HEAD(&mreq->list);
trace_mtu3_alloc_request(mreq);
return &mreq->request;
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 3f414f91b589..2ea3157ddb6e 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -273,6 +273,8 @@ static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
}
+ /* prevent reordering, make sure the GPD's HWO bit is set last */
+ mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
@@ -306,6 +308,8 @@ static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
gpd->dw3_info = cpu_to_le32(ext_addr);
+ /* prevent reordering, make sure the GPD's HWO bit is set last */
+ mb();
gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);
mreq->gpd = gpd;
@@ -445,7 +449,8 @@ static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
return;
}
mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
-
+ /* prevent reordering, make sure the GPD's HWO bit is set last */
+ mb();
/* bypass the current GPD */
gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
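
The mb() calls added above enforce a publish-last ordering: all other GPD fields must be globally visible before the HWO bit hands the descriptor to the controller. The same idea expressed with a C11 release store, as a minimal sketch rather than the MTU3 code:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct desc {
        uint32_t buf_addr;
        uint32_t buf_len;
        atomic_uint flags;      /* bit 0 plays the role of HWO (hardware owned) */
    };

    #define FLAG_OWNED 0x1u

    static void publish(struct desc *d, uint32_t addr, uint32_t len)
    {
        d->buf_addr = addr;
        d->buf_len = len;
        /* Make all prior stores visible before the ownership bit is set. */
        atomic_store_explicit(&d->flags, FLAG_OWNED, memory_order_release);
    }

    int main(void)
    {
        struct desc d = { 0 };

        publish(&d, 0x1000, 512);
        printf("owned: %u\n", atomic_load(&d.flags) & FLAG_OWNED);
        return 0;
    }
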
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 7705328034ca..8a60c0d56863 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1635,6 +1635,8 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
/* 2 banks of GPIO - One for the pins taken from each serial port */
if (intf_num == 0) {
+ priv->gc.ngpio = 2;
+
if (mode.eci == CP210X_PIN_MODE_MODEM) {
/* mark all GPIOs of this interface as reserved */
priv->gpio_altfunc = 0xff;
@@ -1645,8 +1647,9 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_ECI_GPIO_MODE_MASK) >>
CP210X_ECI_GPIO_MODE_OFFSET);
- priv->gc.ngpio = 2;
} else if (intf_num == 1) {
+ priv->gc.ngpio = 3;
+
if (mode.sci == CP210X_PIN_MODE_MODEM) {
/* mark all GPIOs of this interface as reserved */
priv->gpio_altfunc = 0xff;
@@ -1657,7 +1660,6 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
CP210X_SCI_GPIO_MODE_MASK) >>
CP210X_SCI_GPIO_MODE_OFFSET);
- priv->gc.ngpio = 3;
} else {
return -ENODEV;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 546fce4617a8..42420bfc983c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1219,6 +1219,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 6010b9901126..59d4fa2443f2 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -324,6 +324,7 @@ struct tcpm_port {
bool attached;
bool connected;
+ bool registered;
bool pd_supported;
enum typec_port_type port_type;
@@ -6291,7 +6292,8 @@ static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
- kthread_queue_work(port->wq, &port->state_machine);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->state_machine);
return HRTIMER_NORESTART;
}
@@ -6299,7 +6301,8 @@ static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *time
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
- kthread_queue_work(port->wq, &port->vdm_state_machine);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->vdm_state_machine);
return HRTIMER_NORESTART;
}
@@ -6307,7 +6310,8 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
- kthread_queue_work(port->wq, &port->enable_frs);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->enable_frs);
return HRTIMER_NORESTART;
}
@@ -6315,7 +6319,8 @@ static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
{
struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
- kthread_queue_work(port->wq, &port->send_discover_work);
+ if (port->registered)
+ kthread_queue_work(port->wq, &port->send_discover_work);
return HRTIMER_NORESTART;
}
@@ -6403,6 +6408,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
typec_port_register_altmodes(port->typec_port,
&tcpm_altmode_ops, port,
port->port_altmode, ALTMODE_DISCOVERY_MAX);
+ port->registered = true;
mutex_lock(&port->lock);
tcpm_init(port);
@@ -6424,6 +6430,9 @@ void tcpm_unregister_port(struct tcpm_port *port)
{
int i;
+ port->registered = false;
+ kthread_destroy_worker(port->wq);
+
hrtimer_cancel(&port->send_discover_timer);
hrtimer_cancel(&port->enable_frs_timer);
hrtimer_cancel(&port->vdm_state_machine_timer);
@@ -6435,7 +6444,6 @@ void tcpm_unregister_port(struct tcpm_port *port)
typec_unregister_port(port->typec_port);
usb_role_switch_put(port->role_sw);
tcpm_debugfs_exit(port);
- kthread_destroy_worker(port->wq);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 6aa28384f77f..08561bf7c40c 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1150,7 +1150,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ret = 0;
}
- if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == UCSI_CONSTAT_PWR_OPMODE_PD) {
+ if (con->partner &&
+ UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
+ UCSI_CONSTAT_PWR_OPMODE_PD) {
ucsi_get_src_pdos(con);
ucsi_check_altmodes(con);
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 7332a74a4b00..09bbe53c3ac4 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -404,7 +404,8 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
goto msg_err;
while (mdev->id_table[i].device) {
- supported_classes |= BIT(mdev->id_table[i].device);
+ if (mdev->id_table[i].device <= 63)
+ supported_classes |= BIT_ULL(mdev->id_table[i].device);
i++;
}
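
The vdpa fix above bounds the device id and uses BIT_ULL() because shifting a 32-bit constant by 32 or more is undefined behaviour in C, so large ids cannot be represented with plain BIT(). A plain-C demonstration of why the 64-bit shift plus the bound check is needed (not the vdpa code itself):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t class_bit(unsigned int id)
    {
        /* 1u << 40 would be undefined (shift >= width of a 32-bit int);
         * 1ull << 40 is well defined, and ids above 63 are simply skipped. */
        if (id > 63)
            return 0;
        return 1ull << id;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)class_bit(40)); /* 0x10000000000 */
        printf("0x%llx\n", (unsigned long long)class_bit(70)); /* 0x0, out of range */
        return 0;
    }
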
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index c9204c62f339..eddcb64a910a 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -655,7 +655,8 @@ static void vduse_vdpa_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
- if (len > dev->config_size - offset)
+ if (offset > dev->config_size ||
+ len > dev->config_size - offset)
return;
memcpy(buf, dev->config + offset, len);
@@ -975,7 +976,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
break;
ret = -EINVAL;
- if (config.length == 0 ||
+ if (config.offset > dev->config_size ||
+ config.length == 0 ||
config.length > dev->config_size - config.offset)
break;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 29cced1cd277..e3c4f059b21a 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -197,7 +197,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vdpa_device *vdpa = v->vdpa;
long size = vdpa->config->get_config_size(vdpa);
- if (c->len == 0)
+ if (c->len == 0 || c->off > size)
return -EINVAL;
if (c->len > size - c->off)
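
The vduse and vhost-vdpa checks above validate the offset before computing size - offset: with unsigned arithmetic, an offset past the end would make that subtraction wrap and let an oversized length through. The validation on its own, as a small sketch (names are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    /* Returns 1 when [off, off + len) lies inside a config area of config_size bytes. */
    static int config_access_ok(size_t config_size, size_t off, size_t len)
    {
        if (len == 0)
            return 0;
        if (off > config_size)
            return 0;               /* must be checked first ... */
        if (len > config_size - off)
            return 0;               /* ... or this subtraction wraps around */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", config_access_ok(64, 0, 64));    /* 1 */
        printf("%d\n", config_access_ok(64, 60, 8));    /* 0 */
        printf("%d\n", config_access_ok(64, 1000, 4));  /* 0, caught by the off check */
        return 0;
    }
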
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index 8939612ee0e0..6894ccb868a6 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -886,8 +886,9 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto put_pages;
}
- gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
- ne_mem_region->pages + i, NULL);
+ gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,
+ ne_mem_region->pages + i, FOLL_GET);
+
if (gup_rc < 0) {
rc = gup_rc;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 6d2614e34470..028b05d44546 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -268,7 +268,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev)
size_t max_segment_size = SIZE_MAX;
if (vring_use_dma_api(vdev))
- max_segment_size = dma_max_mapping_size(&vdev->dev);
+ max_segment_size = dma_max_mapping_size(vdev->dev.parent);
return max_segment_size;
}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index a78704ae3618..46d9295d9a6e 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1251,6 +1251,12 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
+{
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+}
+EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
+
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
diff --git a/fs/afs/file.c b/fs/afs/file.c
index cb6ad61eec3b..afe4b803f84b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -514,8 +514,9 @@ static void afs_add_open_mmap(struct afs_vnode *vnode)
if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
down_write(&vnode->volume->cell->fs_open_mmaps_lock);
- list_add_tail(&vnode->cb_mmap_link,
- &vnode->volume->cell->fs_open_mmaps);
+ if (list_empty(&vnode->cb_mmap_link))
+ list_add_tail(&vnode->cb_mmap_link,
+ &vnode->volume->cell->fs_open_mmaps);
up_write(&vnode->volume->cell->fs_open_mmaps_lock);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index d110def8aa8e..34c68724c98b 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -667,6 +667,7 @@ static void afs_i_init_once(void *_vnode)
INIT_LIST_HEAD(&vnode->pending_locks);
INIT_LIST_HEAD(&vnode->granted_locks);
INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
+ INIT_LIST_HEAD(&vnode->cb_mmap_link);
seqlock_init(&vnode->cb_lock);
}
diff --git a/fs/aio.c b/fs/aio.c
index 9c81cf611d65..f6f1cbffef9e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,8 +181,9 @@ struct poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
- bool done;
bool cancelled;
+ bool work_scheduled;
+ bool work_need_resched;
struct wait_queue_entry wait;
struct work_struct work;
};
@@ -1619,6 +1620,51 @@ static void aio_poll_put_work(struct work_struct *work)
iocb_put(iocb);
}
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken. Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+ wait_queue_head_t *head;
+
+ /*
+ * While we hold the waitqueue lock and the waitqueue is nonempty,
+ * wake_up_pollfree() will wait for us. However, taking the waitqueue
+ * lock in the first place can race with the waitqueue being freed.
+ *
+ * We solve this as eventpoll does: by taking advantage of the fact that
+ * all users of wake_up_pollfree() will RCU-delay the actual free. If
+ * we enter rcu_read_lock() and see that the pointer to the queue is
+ * non-NULL, we can then lock it without the memory being freed out from
+ * under us, then check whether the request is still on the queue.
+ *
+ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+ * case the caller deletes the entry from the queue, leaving it empty.
+ * In that case, only RCU prevents the queue memory from being freed.
+ */
+ rcu_read_lock();
+ head = smp_load_acquire(&req->head);
+ if (head) {
+ spin_lock(&head->lock);
+ if (!list_empty(&req->wait.entry))
+ return true;
+ spin_unlock(&head->lock);
+ }
+ rcu_read_unlock();
+ return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+ spin_unlock(&req->head->lock);
+ rcu_read_unlock();
+}
+
static void aio_poll_complete_work(struct work_struct *work)
{
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,14 +1684,27 @@ static void aio_poll_complete_work(struct work_struct *work)
* avoid further branches in the fast path.
*/
spin_lock_irq(&ctx->ctx_lock);
- if (!mask && !READ_ONCE(req->cancelled)) {
- add_wait_queue(req->head, &req->wait);
- spin_unlock_irq(&ctx->ctx_lock);
- return;
- }
+ if (poll_iocb_lock_wq(req)) {
+ if (!mask && !READ_ONCE(req->cancelled)) {
+ /*
+ * The request isn't actually ready to be completed yet.
+ * Reschedule completion if another wakeup came in.
+ */
+ if (req->work_need_resched) {
+ schedule_work(&req->work);
+ req->work_need_resched = false;
+ } else {
+ req->work_scheduled = false;
+ }
+ poll_iocb_unlock_wq(req);
+ spin_unlock_irq(&ctx->ctx_lock);
+ return;
+ }
+ list_del_init(&req->wait.entry);
+ poll_iocb_unlock_wq(req);
+ } /* else, POLLFREE has freed the waitqueue, so we must complete */
list_del_init(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
spin_unlock_irq(&ctx->ctx_lock);
iocb_put(iocb);
@@ -1657,13 +1716,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
struct poll_iocb *req = &aiocb->poll;
- spin_lock(&req->head->lock);
- WRITE_ONCE(req->cancelled, true);
- if (!list_empty(&req->wait.entry)) {
- list_del_init(&req->wait.entry);
- schedule_work(&aiocb->poll.work);
- }
- spin_unlock(&req->head->lock);
+ if (poll_iocb_lock_wq(req)) {
+ WRITE_ONCE(req->cancelled, true);
+ if (!req->work_scheduled) {
+ schedule_work(&aiocb->poll.work);
+ req->work_scheduled = true;
+ }
+ poll_iocb_unlock_wq(req);
+ } /* else, the request was force-cancelled by POLLFREE already */
return 0;
}
@@ -1680,21 +1740,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (mask && !(mask & req->events))
return 0;
- list_del_init(&req->wait.entry);
-
- if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+ /*
+ * Complete the request inline if possible. This requires that three
+ * conditions be met:
+ * 1. An event mask must have been passed. If a plain wakeup was done
+ * instead, then mask == 0 and we have to call vfs_poll() to get
+ * the events, so inline completion isn't possible.
+ * 2. The completion work must not have already been scheduled.
+ * 3. ctx_lock must not be busy. We have to use trylock because we
+ * already hold the waitqueue lock, so this inverts the normal
+ * locking order. Use irqsave/irqrestore because not all
+ * filesystems (e.g. fuse) call this function with IRQs disabled,
+ * yet IRQs have to be disabled before ctx_lock is obtained.
+ */
+ if (mask && !req->work_scheduled &&
+ spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
struct kioctx *ctx = iocb->ki_ctx;
- /*
- * Try to complete the iocb inline if we can. Use
- * irqsave/irqrestore because not all filesystems (e.g. fuse)
- * call this function with IRQs disabled and because IRQs
- * have to be disabled before ctx_lock is obtained.
- */
+ list_del_init(&req->wait.entry);
list_del(&iocb->ki_list);
iocb->ki_res.res = mangle_poll(mask);
- req->done = true;
- if (iocb->ki_eventfd && eventfd_signal_allowed()) {
+ if (iocb->ki_eventfd && !eventfd_signal_allowed()) {
iocb = NULL;
INIT_WORK(&req->work, aio_poll_put_work);
schedule_work(&req->work);
@@ -1703,7 +1769,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
if (iocb)
iocb_put(iocb);
} else {
- schedule_work(&req->work);
+ /*
+ * Schedule the completion work if needed. If it was already
+ * scheduled, record that another wakeup came in.
+ *
+ * Don't remove the request from the waitqueue here, as it might
+ * not actually be complete yet (we won't know until vfs_poll()
+ * is called), and we must not miss any wakeups. POLLFREE is an
+ * exception to this; see below.
+ */
+ if (req->work_scheduled) {
+ req->work_need_resched = true;
+ } else {
+ schedule_work(&req->work);
+ req->work_scheduled = true;
+ }
+
+ /*
+ * If the waitqueue is being freed early but we can't complete
+ * the request inline, we have to tear down the request as best
+ * we can. That means immediately removing the request from its
+ * waitqueue and preventing all further accesses to the
+ * waitqueue via the request. We also need to schedule the
+ * completion work (done above). Also mark the request as
+ * cancelled, to potentially skip an unneeded call to ->poll().
+ */
+ if (mask & POLLFREE) {
+ WRITE_ONCE(req->cancelled, true);
+ list_del_init(&req->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon
+ * as req->head is NULL'ed out, the request can be
+ * completed and freed, since aio_poll_complete_work()
+ * will no longer need to take the waitqueue lock.
+ */
+ smp_store_release(&req->head, NULL);
+ }
}
return 1;
}
@@ -1711,6 +1813,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
struct aio_poll_table {
struct poll_table_struct pt;
struct aio_kiocb *iocb;
+ bool queued;
int error;
};
@@ -1721,11 +1824,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
/* multiple wait queues per file are not supported */
- if (unlikely(pt->iocb->poll.head)) {
+ if (unlikely(pt->queued)) {
pt->error = -EINVAL;
return;
}
+ pt->queued = true;
pt->error = 0;
pt->iocb->poll.head = head;
add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1750,12 +1854,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->head = NULL;
- req->done = false;
req->cancelled = false;
+ req->work_scheduled = false;
+ req->work_need_resched = false;
apt.pt._qproc = aio_poll_queue_proc;
apt.pt._key = req->events;
apt.iocb = aiocb;
+ apt.queued = false;
apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
/* initialized the list so that we can do list_empty checks */
@@ -1764,23 +1870,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
mask = vfs_poll(req->file, &apt.pt) & req->events;
spin_lock_irq(&ctx->ctx_lock);
- if (likely(req->head)) {
- spin_lock(&req->head->lock);
- if (unlikely(list_empty(&req->wait.entry))) {
- if (apt.error)
+ if (likely(apt.queued)) {
+ bool on_queue = poll_iocb_lock_wq(req);
+
+ if (!on_queue || req->work_scheduled) {
+ /*
+ * aio_poll_wake() already either scheduled the async
+ * completion work, or completed the request inline.
+ */
+ if (apt.error) /* unsupported case: multiple queues */
cancel = true;
apt.error = 0;
mask = 0;
}
if (mask || apt.error) {
+ /* Steal to complete synchronously. */
list_del_init(&req->wait.entry);
} else if (cancel) {
+ /* Cancel if possible (may be too late though). */
WRITE_ONCE(req->cancelled, true);
- } else if (!req->done) { /* actually waiting for an event */
+ } else if (on_queue) {
+ /*
+ * Actually waiting for an event, so add the request to
+ * active_reqs so that it can be cancelled if needed.
+ */
list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
aiocb->ki_cancel = aio_poll_cancel;
}
- spin_unlock(&req->head->lock);
+ if (on_queue)
+ poll_iocb_unlock_wq(req);
}
if (mask) { /* no async, we'd stolen it */
aiocb->ki_res.res = mangle_poll(mask);
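The aio_poll_wake() comment above hinges on one point: the waitqueue lock is already held when the wakeup runs, so ctx_lock can only be taken opportunistically with a trylock, falling back to deferred work when it is contended. A minimal user-space sketch of that fall-back pattern, using pthreads and invented names purely for illustration (not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wq_lock  = PTHREAD_MUTEX_INITIALIZER; /* taken first here */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER; /* normally taken before wq_lock */

/* Called with wq_lock held, i.e. in the inverted order for ctx_lock. */
static bool try_complete_inline(void)
{
	if (pthread_mutex_trylock(&ctx_lock) != 0)
		return false;                   /* contended: defer to a work item */
	/* ... complete the request inline ... */
	pthread_mutex_unlock(&ctx_lock);
	return true;
}

int main(void)
{
	pthread_mutex_lock(&wq_lock);
	if (try_complete_inline())
		puts("completed inline");
	else
		puts("would schedule deferred completion");
	pthread_mutex_unlock(&wq_lock);
	return 0;
}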
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index c3983bdaf4b8..f704339c6b86 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -463,8 +463,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(ret < 0);
rcu_assign_pointer(root->node, cow);
- btrfs_free_tree_block(trans, root, buf, parent_start,
- last_ref);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
free_extent_buffer(buf);
add_root_to_dirty_list(root);
} else {
@@ -485,8 +485,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
return ret;
}
}
- btrfs_free_tree_block(trans, root, buf, parent_start,
- last_ref);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
+ parent_start, last_ref);
}
if (unlock_orig)
btrfs_tree_unlock(buf);
@@ -927,7 +927,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
free_extent_buffer(mid);
root_sub_used(root, mid->len);
- btrfs_free_tree_block(trans, root, mid, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
/* once for the root ptr */
free_extent_buffer_stale(mid);
return 0;
@@ -986,7 +986,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(right);
del_ptr(root, path, level + 1, pslot + 1);
root_sub_used(root, right->len);
- btrfs_free_tree_block(trans, root, right, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), right,
+ 0, 1);
free_extent_buffer_stale(right);
right = NULL;
} else {
@@ -1031,7 +1032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(mid);
del_ptr(root, path, level + 1, pslot);
root_sub_used(root, mid->len);
- btrfs_free_tree_block(trans, root, mid, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
} else {
@@ -4032,7 +4033,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
root_sub_used(root, leaf->len);
atomic_inc(&leaf->refs);
- btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
free_extent_buffer_stale(leaf);
}
/*
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7553e9dc5f93..5fe5eccb3c87 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -2257,6 +2257,11 @@ static inline bool btrfs_root_dead(const struct btrfs_root *root)
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}
+static inline u64 btrfs_root_id(const struct btrfs_root *root)
+{
+ return root->root_key.objectid;
+}
+
/* struct btrfs_root_backup */
BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
tree_root, 64);
@@ -2719,7 +2724,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
u64 empty_size,
enum btrfs_lock_nesting nest);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ u64 root_id,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c
index 2059d1504149..40c4d6ba3fb9 100644
--- a/fs/btrfs/delalloc-space.c
+++ b/fs/btrfs/delalloc-space.c
@@ -143,10 +143,13 @@ int btrfs_check_data_free_space(struct btrfs_inode *inode,
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
ret = btrfs_qgroup_reserve_data(inode, reserved, start, len);
- if (ret < 0)
+ if (ret < 0) {
btrfs_free_reserved_data_space_noquota(fs_info, len);
- else
+ extent_changeset_free(*reserved);
+ *reserved = NULL;
+ } else {
ret = 0;
+ }
return ret;
}
@@ -452,8 +455,11 @@ int btrfs_delalloc_reserve_space(struct btrfs_inode *inode,
if (ret < 0)
return ret;
ret = btrfs_delalloc_reserve_metadata(inode, len);
- if (ret < 0)
+ if (ret < 0) {
btrfs_free_reserved_data_space(inode, *reserved, start, len);
+ extent_changeset_free(*reserved);
+ *reserved = NULL;
+ }
return ret;
}
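Both error paths above now free *reserved and reset it to NULL before returning, so a caller that unconditionally releases the changeset cannot double-free it. A small user-space sketch of the same out-parameter convention (the reserve() helper and its failure condition are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* On failure, release and clear the caller-visible out-parameter so the
 * caller cannot double-free it; sizes and the failure condition are made up. */
static int reserve(char **out, size_t len)
{
	*out = malloc(len);
	if (!*out)
		return -1;
	if (len > 4096) {               /* pretend a later reservation step failed */
		free(*out);
		*out = NULL;            /* caller sees NULL, not a dangling pointer */
		return -1;
	}
	memset(*out, 0, len);
	return 0;
}

int main(void)
{
	char *buf = NULL;

	if (reserve(&buf, 8192) < 0)
		printf("reservation failed, buf=%p (safe to free)\n", (void *)buf);
	free(buf);                      /* freeing NULL is a no-op */
	return 0;
}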
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 514ead6e93b6..b3f2e2232326 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1732,6 +1732,14 @@ again:
}
return root;
fail:
+ /*
+	 * If our caller provided us an anonymous device, then it is the caller's
+	 * responsibility to free it in case we fail. So we have to set our
+ * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
+ * and once again by our caller.
+ */
+ if (anon_dev)
+ root->anon_dev = 0;
btrfs_put_root(root);
return ERR_PTR(ret);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3fd736a02c1e..25ef6e3fd306 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3275,20 +3275,20 @@ out_delayed_unlock:
}
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+ u64 root_id,
struct extent_buffer *buf,
u64 parent, int last_ref)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_ref generic_ref = { 0 };
int ret;
btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
buf->start, buf->len, parent);
btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
- root->root_key.objectid, 0, false);
+ root_id, 0, false);
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
BUG_ON(ret); /* -ENOMEM */
@@ -3298,7 +3298,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_block_group *cache;
bool must_pin = false;
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+ if (root_id != BTRFS_TREE_LOG_OBJECTID) {
ret = check_ref_cleanup(trans, buf->start);
if (!ret) {
btrfs_redirty_list_add(trans->transaction, buf);
@@ -5472,7 +5472,8 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
goto owner_mismatch;
}
- btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(root), eb, parent,
+ wc->refs[level] == 1);
out:
wc->refs[level] = 0;
wc->flags[level] = 0;
@@ -6051,6 +6052,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
int dev_ret = 0;
int ret = 0;
+ if (range->start == U64_MAX)
+ return -EINVAL;
+
/*
* Check range overflow if range->len is set.
* The default range->len is U64_MAX.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4e03a6d3aa32..9234d96a7fd5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4314,6 +4314,20 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb)
return;
/*
+ * A read may stumble upon this buffer later, make sure that it gets an
+ * error and knows there was an error.
+ */
+ clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
+
+ /*
+ * We need to set the mapping with the io error as well because a write
+ * error will flip the file system readonly, and then syncfs() will
+ * return a 0 because we are readonly if we don't modify the err seq for
+ * the superblock.
+ */
+ mapping_set_error(page->mapping, -EIO);
+
+ /*
* If we error out, we should add back the dirty_metadata_bytes
* to make it consistent.
*/
@@ -6597,6 +6611,14 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
+ /*
+ * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
+ * operation, which could potentially still be in flight. In this case
+ * we simply want to return an error.
+ */
+ if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
+ return -EIO;
+
if (eb->fs_info->sectorsize < PAGE_SIZE)
return read_extent_buffer_subpage(eb, wait, mirror_num);
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index a33bca94d133..3abec44c6255 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1256,8 +1256,8 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_tree_lock(free_space_root->node);
btrfs_clean_tree_block(free_space_root->node);
btrfs_tree_unlock(free_space_root->node);
- btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
- 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(free_space_root),
+ free_space_root->node, 0, 1);
btrfs_put_root(free_space_root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 92138ac2a4e2..edfecfe62b4b 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -617,11 +617,13 @@ static noinline int create_subvol(struct user_namespace *mnt_userns,
* Since we don't abort the transaction in this case, free the
* tree block so that we don't leak space and leave the
* filesystem in an inconsistent state (an extent item in the
- * extent tree without backreferences). Also no need to have
- * the tree block locked since it is not in any tree at this
- * point, so no other task can find it and use it.
+ * extent tree with a backreference for a root that does not
+	 * exist).
*/
- btrfs_free_tree_block(trans, root, leaf, 0, 1);
+ btrfs_tree_lock(leaf);
+ btrfs_clean_tree_block(leaf);
+ btrfs_tree_unlock(leaf);
+ btrfs_free_tree_block(trans, objectid, leaf, 0, 1);
free_extent_buffer(leaf);
goto fail;
}
@@ -3187,10 +3189,8 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
return -EPERM;
vol_args = memdup_user(arg, sizeof(*vol_args));
- if (IS_ERR(vol_args)) {
- ret = PTR_ERR(vol_args);
- goto out;
- }
+ if (IS_ERR(vol_args))
+ return PTR_ERR(vol_args);
if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) {
ret = -EOPNOTSUPP;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index db680f5be745..6c037f1252b7 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1219,7 +1219,8 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
btrfs_tree_lock(quota_root->node);
btrfs_clean_tree_block(quota_root->node);
btrfs_tree_unlock(quota_root->node);
- btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
+ btrfs_free_tree_block(trans, btrfs_root_id(quota_root),
+ quota_root->node, 0, 1);
btrfs_put_root(quota_root);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 12ceb14a1141..d20166336557 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -334,7 +334,8 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
key.offset = ref_id;
again:
ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- BUG_ON(ret < 0);
+ if (ret < 0)
+ goto out;
if (ret == 0) {
leaf = path->nodes[0];
ref = btrfs_item_ptr(leaf, path->slots[0],
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8ab33caf016f..6993dcdba6f1 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1181,6 +1181,7 @@ again:
parent_objectid, victim_name,
victim_name_len);
if (ret < 0) {
+ kfree(victim_name);
return ret;
} else if (!ret) {
ret = -ENOENT;
@@ -2908,6 +2909,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
path->nodes[*level]->len);
if (ret)
return ret;
+ btrfs_redirty_list_add(trans->transaction,
+ next);
} else {
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
clear_extent_buffer_dirty(next);
@@ -2988,6 +2991,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
next->start, next->len);
if (ret)
goto out;
+ btrfs_redirty_list_add(trans->transaction, next);
} else {
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
clear_extent_buffer_dirty(next);
@@ -3438,8 +3442,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
extent_io_tree_release(&log->log_csum_range);
- if (trans && log->node)
- btrfs_redirty_list_add(trans->transaction, log->node);
btrfs_put_root(log);
}
@@ -3976,6 +3978,7 @@ search:
goto done;
}
if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
+ ctx->last_dir_item_offset = min_key.offset;
ret = overwrite_item(trans, log, dst_path,
path->nodes[0], path->slots[0],
&min_key);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0997e3cd74e9..fd0ced829edb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1370,8 +1370,10 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
bytenr_orig = btrfs_sb_offset(0);
ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
- if (ret)
- return ERR_PTR(ret);
+ if (ret) {
+ device = ERR_PTR(ret);
+ goto error_bdev_put;
+ }
disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
if (IS_ERR(disk_super)) {
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 67d932d70798..678a29469511 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1860,6 +1860,7 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
block_group->alloc_offset = block_group->zone_capacity;
block_group->free_space_ctl->free_space = 0;
btrfs_clear_treelog_bg(block_group);
+ btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
@@ -1942,6 +1943,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 len
ASSERT(block_group->alloc_offset == block_group->zone_capacity);
ASSERT(block_group->free_space_ctl->free_space == 0);
btrfs_clear_treelog_bg(block_group);
+ btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
map = block_group->physical_map;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b9460b6fb76f..c447fa2e2d1f 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4350,7 +4350,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb);
int bits = (fmode << 1) | 1;
- bool is_opened = false;
+ bool already_opened = false;
int i;
if (count == 1)
@@ -4358,19 +4358,19 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count)
spin_lock(&ci->i_ceph_lock);
for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
- if (bits & (1 << i))
- ci->i_nr_by_mode[i] += count;
-
/*
- * If any of the mode ref is larger than 1,
+ * If any of the mode ref is larger than 0,
* that means it has been already opened by
* others. Just skip checking the PIN ref.
*/
- if (i && ci->i_nr_by_mode[i] > 1)
- is_opened = true;
+ if (i && ci->i_nr_by_mode[i])
+ already_opened = true;
+
+ if (bits & (1 << i))
+ ci->i_nr_by_mode[i] += count;
}
- if (!is_opened)
+ if (!already_opened)
percpu_counter_inc(&mdsc->metric.opened_inodes);
spin_unlock(&ci->i_ceph_lock);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 02a0a0fd9ccd..c138e8126286 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -605,13 +605,25 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
in.cap.flags = CEPH_CAP_FLAG_AUTH;
in.ctime = in.mtime = in.atime = iinfo.btime;
- in.mode = cpu_to_le32((u32)mode);
in.truncate_seq = cpu_to_le32(1);
in.truncate_size = cpu_to_le64(-1ULL);
in.xattr_version = cpu_to_le64(1);
in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
- in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
- dir->i_gid : current_fsgid()));
+ if (dir->i_mode & S_ISGID) {
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
+
+ /* Directories always inherit the setgid bit. */
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+ !in_group_p(dir->i_gid) &&
+ !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
+ mode &= ~S_ISGID;
+ } else {
+ in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
+ }
+ in.mode = cpu_to_le32((u32)mode);
+
in.nlink = cpu_to_le32(1);
in.max_size = cpu_to_le64(lo->stripe_unit);
@@ -847,7 +859,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t ret;
u64 off = iocb->ki_pos;
u64 len = iov_iter_count(to);
- u64 i_size;
+ u64 i_size = i_size_read(inode);
dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 250aad330a10..c30eefc0ac19 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -3683,7 +3683,7 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_pagelist *pagelist = recon_state->pagelist;
struct dentry *dentry;
char *path;
- int pathlen, err;
+ int pathlen = 0, err;
u64 pathbase;
u64 snap_follows;
@@ -3703,7 +3703,6 @@ static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
}
} else {
path = NULL;
- pathlen = 0;
pathbase = 0;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 18448dbd762a..1060164b984a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -3064,6 +3064,13 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
(cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
+ /*
+ * The cookie is initialized from volume info returned above.
+ * Inside cifs_fscache_get_super_cookie it checks
+ * that we do not get super cookie twice.
+ */
+ cifs_fscache_get_super_cookie(tcon);
+
out:
mnt_ctx->server = server;
mnt_ctx->ses = ses;
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 6a179ae753c1..e3ed25dc6f3f 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -435,6 +435,42 @@ out:
}
/*
+ * Remove duplicate path delimiters. Windows is supposed to do that
+ * but there are some bugs that prevent rename from working if there are
+ * multiple delimiters.
+ *
+ * Returns a sanitized duplicate of @path. The caller is responsible for
+ * cleaning up the original.
+ */
+#define IS_DELIM(c) ((c) == '/' || (c) == '\\')
+static char *sanitize_path(char *path)
+{
+ char *cursor1 = path, *cursor2 = path;
+
+ /* skip all prepended delimiters */
+ while (IS_DELIM(*cursor1))
+ cursor1++;
+
+ /* copy the first letter */
+ *cursor2 = *cursor1;
+
+ /* copy the remainder... */
+ while (*(cursor1++)) {
+ /* ... skipping all duplicated delimiters */
+ if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2))
+ continue;
+ *(++cursor2) = *cursor1;
+ }
+
+ /* if the last character is a delimiter, skip it */
+ if (IS_DELIM(*(cursor2 - 1)))
+ cursor2--;
+
+ *(cursor2) = '\0';
+ return kstrdup(path, GFP_KERNEL);
+}
+
+/*
* Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
* fields with the result. Returns 0 on success and an error otherwise
* (e.g. ENOMEM or EINVAL)
@@ -493,7 +529,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
if (!*pos)
return 0;
- ctx->prepath = kstrdup(pos, GFP_KERNEL);
+ ctx->prepath = sanitize_path(pos);
if (!ctx->prepath)
return -ENOMEM;
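sanitize_path() above does a single pass over the prepath: leading delimiters are skipped, runs of '/' or '\\' collapse to one, and a trailing delimiter is dropped. A stand-alone user-space sketch of the same walk (illustrative only; it uses strdup() in place of kstrdup() and adds a guard for an all-delimiter input):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IS_DELIM(c) ((c) == '/' || (c) == '\\')

static char *sanitize_path(const char *in)
{
	char *path = strdup(in);
	char *c1 = path, *c2 = path;

	if (!path)
		return NULL;
	while (IS_DELIM(*c1))                   /* skip leading delimiters */
		c1++;
	*c2 = *c1;                              /* copy first character */
	while (*(c1++)) {
		if (IS_DELIM(*c1) && IS_DELIM(*c2))
			continue;               /* drop duplicated delimiter */
		*(++c2) = *c1;
	}
	if (c2 > path && IS_DELIM(*(c2 - 1)))
		c2--;                           /* drop a trailing delimiter */
	*c2 = '\0';
	return path;
}

int main(void)
{
	char *p = sanitize_path("//srv\\\\share//dir/");

	printf("%s\n", p);                      /* prints "srv\share/dir" */
	free(p);
	return 0;
}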
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 96d083db1737..279622e4eb1c 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1356,11 +1356,6 @@ iget_no_retry:
goto out;
}
-#ifdef CONFIG_CIFS_FSCACHE
- /* populate tcon->resource_id */
- tcon->resource_id = CIFS_I(inode)->uniqueid;
-#endif
-
if (rc && tcon->pipe) {
cifs_dbg(FYI, "ipc connection - fake read inode\n");
spin_lock(&inode->i_lock);
@@ -1375,14 +1370,6 @@ iget_no_retry:
iget_failed(inode);
inode = ERR_PTR(rc);
}
-
- /*
- * The cookie is initialized from volume info returned above.
- * Inside cifs_fscache_get_super_cookie it checks
- * that we do not get super cookie twice.
- */
- cifs_fscache_get_super_cookie(tcon);
-
out:
kfree(path);
free_xid(xid);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index af63548eaf26..035dc3e245dc 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -590,8 +590,8 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
{
unsigned int tioffset; /* challenge message target info area */
unsigned int tilen; /* challenge message target info area length */
-
CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr;
+ __u32 server_flags;
if (blob_len < sizeof(CHALLENGE_MESSAGE)) {
cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len);
@@ -609,12 +609,37 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
return -EINVAL;
}
+ server_flags = le32_to_cpu(pblob->NegotiateFlags);
+ cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__,
+ ses->ntlmssp->client_flags, server_flags);
+
+ if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) &&
+ (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) {
+ cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) {
+ cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__);
+ return -EINVAL;
+ }
+ if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) {
+ cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+ if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+ !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH))
+ pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n",
+ __func__);
+
+ ses->ntlmssp->server_flags = server_flags;
+
memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE);
- /* BB we could decode pblob->NegotiateFlags; some may be useful */
/* In particular we can examine sign flags */
/* BB spec says that if AvId field of MsvAvTimestamp is populated then
we must set the MIC field of the AUTHENTICATE_MESSAGE */
- ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
+
tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tioffset > blob_len || tioffset + tilen > blob_len) {
@@ -721,13 +746,13 @@ int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
- NTLMSSP_NEGOTIATE_SEAL;
- if (server->sign)
- flags |= NTLMSSP_NEGOTIATE_SIGN;
+ NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+ NTLMSSP_NEGOTIATE_SIGN;
if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
+ ses->ntlmssp->client_flags = flags;
sec_blob->NegotiateFlags = cpu_to_le32(flags);
/* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
@@ -779,15 +804,8 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmAuthenticate;
- flags = NTLMSSP_NEGOTIATE_56 |
- NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
- NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
- NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
- NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
- if (ses->server->sign)
- flags |= NTLMSSP_NEGOTIATE_SIGN;
- if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
- flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ flags = ses->ntlmssp->server_flags | NTLMSSP_REQUEST_TARGET |
+ NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
@@ -834,9 +852,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
*pbuffer, &tmp,
nls_cp);
- if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
- (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
- && !calc_seckey(ses)) {
+ if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) &&
+ (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) &&
+ !calc_seckey(ses)) {
memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
diff --git a/fs/file.c b/fs/file.c
index ad4a8bf3cf10..97d212a9b814 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -841,28 +841,68 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}
-static struct file *__fget_files(struct files_struct *files, unsigned int fd,
- fmode_t mask, unsigned int refs)
+static inline struct file *__fget_files_rcu(struct files_struct *files,
+ unsigned int fd, fmode_t mask, unsigned int refs)
{
- struct file *file;
+ for (;;) {
+ struct file *file;
+ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+ struct file __rcu **fdentry;
- rcu_read_lock();
-loop:
- file = files_lookup_fd_rcu(files, fd);
- if (file) {
- /* File object ref couldn't be taken.
- * dup2() atomicity guarantee is the reason
- * we loop to catch the new file (or NULL pointer)
+ if (unlikely(fd >= fdt->max_fds))
+ return NULL;
+
+ fdentry = fdt->fd + array_index_nospec(fd, fdt->max_fds);
+ file = rcu_dereference_raw(*fdentry);
+ if (unlikely(!file))
+ return NULL;
+
+ if (unlikely(file->f_mode & mask))
+ return NULL;
+
+ /*
+ * Ok, we have a file pointer. However, because we do
+ * this all locklessly under RCU, we may be racing with
+ * that file being closed.
+ *
+ * Such a race can take two forms:
+ *
+ * (a) the file ref already went down to zero,
+ * and get_file_rcu_many() fails. Just try
+ * again:
*/
- if (file->f_mode & mask)
- file = NULL;
- else if (!get_file_rcu_many(file, refs))
- goto loop;
- else if (files_lookup_fd_raw(files, fd) != file) {
+ if (unlikely(!get_file_rcu_many(file, refs)))
+ continue;
+
+ /*
+ * (b) the file table entry has changed under us.
+ * Note that we don't need to re-check the 'fdt->fd'
+ * pointer having changed, because it always goes
+ * hand-in-hand with 'fdt'.
+ *
+ * If so, we need to put our refs and try again.
+ */
+ if (unlikely(rcu_dereference_raw(files->fdt) != fdt) ||
+ unlikely(rcu_dereference_raw(*fdentry) != file)) {
fput_many(file, refs);
- goto loop;
+ continue;
}
+
+ /*
+ * Ok, we have a ref to the file, and checked that it
+ * still exists.
+ */
+ return file;
}
+}
+
+static struct file *__fget_files(struct files_struct *files, unsigned int fd,
+ fmode_t mask, unsigned int refs)
+{
+ struct file *file;
+
+ rcu_read_lock();
+ file = __fget_files_rcu(files, fd, mask, refs);
rcu_read_unlock();
return file;
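The __fget_files_rcu() loop above is the classic lockless-lookup shape: load the entry, try to take a reference that may already have dropped to zero, then re-check that the entry still points at the same object, retrying on any mismatch. A compilable user-space sketch of just that retry shape, using C11 atomics; it deliberately omits the RCU grace period that, in the kernel, makes it safe to touch the object at all:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	atomic_int refs;                        /* 0 means the object is being torn down */
};

static _Atomic(struct obj *) slot;              /* analogous to one fdtable entry */

/* Take a reference only if the count has not already hit zero. */
static bool get_ref(struct obj *o)
{
	int r = atomic_load(&o->refs);

	while (r > 0)
		if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
			return true;
	return false;
}

static void put_ref(struct obj *o)
{
	atomic_fetch_sub(&o->refs, 1);
}

/* Lockless lookup: load, take a reference, then re-check the slot. */
static struct obj *lookup(void)
{
	for (;;) {
		struct obj *o = atomic_load(&slot);

		if (!o)
			return NULL;
		if (!get_ref(o))
			continue;               /* (a) refcount already hit zero */
		if (atomic_load(&slot) != o) {
			put_ref(o);             /* (b) entry changed under us */
			continue;
		}
		return o;                       /* ref held and entry re-checked */
	}
}

int main(void)
{
	struct obj o = { .refs = 1 };
	struct obj *got;

	atomic_store(&slot, &o);
	got = lookup();
	if (got)
		put_ref(got);
	return 0;
}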
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 50cf9f92da36..5c4f582d6549 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
struct io_wqe_acct *acct,
struct io_cb_cancel_data *match);
static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
static bool io_worker_get(struct io_worker *worker)
{
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
test_and_set_bit_lock(0, &worker->create_state))
goto fail_release;
+ atomic_inc(&wq->worker_refs);
init_task_work(&worker->create_work, func);
worker->create_index = acct->index;
if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
- clear_bit_unlock(0, &worker->create_state);
+ /*
+ * EXIT may have been set after checking it above, check after
+ * adding the task_work and remove any creation item if it is
+ * now set. wq exit does that too, but we can have added this
+ * work item after we canceled in io_wq_exit_workers().
+ */
+ if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+ io_wq_cancel_tw_create(wq);
+ io_worker_ref_put(wq);
return true;
}
+ io_worker_ref_put(wq);
clear_bit_unlock(0, &worker->create_state);
fail_release:
io_worker_release(worker);
@@ -384,7 +395,9 @@ static void io_wqe_dec_running(struct io_worker *worker)
if (atomic_dec_and_test(&acct->nr_running) && io_acct_run_queue(acct)) {
atomic_inc(&acct->nr_running);
atomic_inc(&wqe->wq->worker_refs);
+ raw_spin_unlock(&wqe->lock);
io_queue_worker_create(worker, acct, create_worker_cb);
+ raw_spin_lock(&wqe->lock);
}
}
@@ -1198,13 +1211,9 @@ void io_wq_exit_start(struct io_wq *wq)
set_bit(IO_WQ_BIT_EXIT, &wq->state);
}
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
{
struct callback_head *cb;
- int node;
-
- if (!wq->task)
- return;
while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
struct io_worker *worker;
@@ -1212,6 +1221,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
worker = container_of(cb, struct io_worker, create_work);
io_worker_cancel_cb(worker);
}
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+ int node;
+
+ if (!wq->task)
+ return;
+
+ io_wq_cancel_tw_create(wq);
rcu_read_lock();
for_each_node(node) {
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c4f217613f56..fb2a0cb4aaf8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2891,9 +2891,13 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
kiocb->ki_pos = READ_ONCE(sqe->off);
- if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
- req->flags |= REQ_F_CUR_POS;
- kiocb->ki_pos = file->f_pos;
+ if (kiocb->ki_pos == -1) {
+ if (!(file->f_mode & FMODE_STREAM)) {
+ req->flags |= REQ_F_CUR_POS;
+ kiocb->ki_pos = file->f_pos;
+ } else {
+ kiocb->ki_pos = 0;
+ }
}
kiocb->ki_flags = iocb_flags(file);
ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
@@ -9824,7 +9828,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
/*
* Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
*/
static __cold void io_uring_cancel_generic(bool cancel_all,
struct io_sq_data *sqd)
@@ -9866,8 +9870,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
cancel_all);
}
- prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+ io_run_task_work();
io_uring_drop_tctx_refs(current);
+
/*
* If we've seen completions, retry without waiting. This
* avoids a race where a completion comes in before we did
diff --git a/fs/ksmbd/ndr.c b/fs/ksmbd/ndr.c
index 8317f7ca402b..5052be9261d9 100644
--- a/fs/ksmbd/ndr.c
+++ b/fs/ksmbd/ndr.c
@@ -148,7 +148,7 @@ static int ndr_read_int16(struct ndr *n, __u16 *value)
static int ndr_read_int32(struct ndr *n, __u32 *value)
{
if (n->offset + sizeof(__u32) > n->length)
- return 0;
+ return -EINVAL;
if (value)
*value = le32_to_cpu(*(__le32 *)ndr_get_field(n));
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index 0a5d8450e835..02a44d28bdaf 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -271,9 +271,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
- if (conn->cipher_type)
- conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
-
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 49c9da37315c..b8b3a4c28b74 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -915,6 +915,25 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
}
}
+/**
+ * smb3_encryption_negotiated() - checks if server and client agreed on enabling encryption
+ * @conn: smb connection
+ *
+ * Return: true if connection should be encrypted, else false
+ */
+static bool smb3_encryption_negotiated(struct ksmbd_conn *conn)
+{
+ if (!conn->ops->generate_encryptionkey)
+ return false;
+
+ /*
+ * SMB 3.0 and 3.0.2 dialects use the SMB2_GLOBAL_CAP_ENCRYPTION flag.
+ * SMB 3.1.1 uses the cipher_type field.
+ */
+ return (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) ||
+ conn->cipher_type;
+}
+
static void decode_compress_ctxt(struct ksmbd_conn *conn,
struct smb2_compression_capabilities_context *pneg_ctxt)
{
@@ -1469,8 +1488,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
sess->sign = true;
- if (conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION &&
- conn->ops->generate_encryptionkey &&
+ if (smb3_encryption_negotiated(conn) &&
!(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
rc = conn->ops->generate_encryptionkey(sess);
if (rc) {
@@ -1559,8 +1577,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
sess->sign = true;
- if ((conn->vals->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) &&
- conn->ops->generate_encryptionkey) {
+ if (smb3_encryption_negotiated(conn)) {
retval = conn->ops->generate_encryptionkey(sess);
if (retval) {
ksmbd_debug(SMB,
@@ -2962,6 +2979,10 @@ int smb2_open(struct ksmbd_work *work)
&pntsd_size, &fattr);
posix_acl_release(fattr.cf_acls);
posix_acl_release(fattr.cf_dacls);
+ if (rc) {
+ kfree(pntsd);
+ goto err_out;
+ }
rc = ksmbd_vfs_set_sd_xattr(conn,
user_ns,
diff --git a/fs/namespace.c b/fs/namespace.c
index 659a8f39c61a..b696543adab8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4263,12 +4263,11 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
err = user_path_at(dfd, path, kattr.lookup_flags, &target);
- if (err)
- return err;
-
- err = do_mount_setattr(&target, &kattr);
+ if (!err) {
+ err = do_mount_setattr(&target, &kattr);
+ path_put(&target);
+ }
finish_mount_kattr(&kattr);
- path_put(&target);
return err;
}
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 7046f9bdd8dc..75c76cbb27cc 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
netfs_rreq_do_write_to_cache(rreq);
}
-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
- bool was_async)
+static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
{
- if (was_async) {
- rreq->work.func = netfs_rreq_write_to_cache_work;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
- } else {
- netfs_rreq_do_write_to_cache(rreq);
- }
+ rreq->work.func = netfs_rreq_write_to_cache_work;
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
}
/*
@@ -558,7 +553,7 @@ again:
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
- return netfs_rreq_write_to_cache(rreq, was_async);
+ return netfs_rreq_write_to_cache(rreq);
netfs_rreq_completed(rreq, was_async);
}
@@ -960,7 +955,7 @@ int netfs_readpage(struct file *file,
rreq = netfs_alloc_read_request(ops, netfs_priv, file);
if (!rreq) {
if (netfs_priv)
- ops->cleanup(netfs_priv, folio_file_mapping(folio));
+ ops->cleanup(folio_file_mapping(folio), netfs_priv);
folio_unlock(folio);
return -ENOMEM;
}
@@ -1191,7 +1186,7 @@ have_folio:
goto error;
have_folio_no_wait:
if (netfs_priv)
- ops->cleanup(netfs_priv, mapping);
+ ops->cleanup(mapping, netfs_priv);
*_folio = folio;
_leave(" = 0");
return 0;
@@ -1202,7 +1197,7 @@ error:
folio_unlock(folio);
folio_put(folio);
if (netfs_priv)
- ops->cleanup(netfs_priv, mapping);
+ ops->cleanup(mapping, netfs_priv);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 4418517f6f12..15dac36ca852 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -438,22 +438,19 @@ nfsd3_proc_link(struct svc_rqst *rqstp)
static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd3_readdirres *resp,
- int count)
+ u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
- count = min_t(u32, count, svc_max_payload(rqstp));
+ count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
buf->buflen = count - XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
- while (count > 0) {
- rqstp->rq_next_page++;
- count -= PAGE_SIZE;
- }
+ rqstp->rq_next_page += (buf->buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* This is xdr_init_encode(), but it assumes that
* the head kvec has already been consumed. */
@@ -462,7 +459,7 @@ static void nfsd3_init_dirlist_pages(struct svc_rqst *rqstp,
xdr->page_ptr = buf->pages;
xdr->iov = NULL;
xdr->p = page_address(*buf->pages);
- xdr->end = xdr->p + (PAGE_SIZE >> 2);
+ xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
xdr->rqst = NULL;
}
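The replacement for the old while loop is a round-up division: the number of pages consumed is (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT. A tiny self-contained check of that arithmetic, assuming the common 4 KiB page size purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round a byte count up to whole pages, as the hunk above now does. */
static unsigned long pages_needed(unsigned long buflen)
{
	return (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%lu\n", pages_needed(6000));    /* 2: 6000 bytes span two 4 KiB pages */
	printf("%lu\n", pages_needed(8192));    /* 2: exact multiple, no extra page */
	printf("%lu\n", pages_needed(1));       /* 1: anything non-zero needs a page */
	return 0;
}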
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 6fedc49726bf..c634483d85d2 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -2156,6 +2156,7 @@ static struct notifier_block nfsd4_cld_block = {
int
register_cld_notifier(void)
{
+ WARN_ON(!nfsd_net_id);
return rpc_pipefs_notifier_register(&nfsd4_cld_block);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index bfad94c70b84..1956d377d1a6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1207,6 +1207,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
return 0;
}
+static bool delegation_hashed(struct nfs4_delegation *dp)
+{
+ return !(list_empty(&dp->dl_perfile));
+}
+
static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
@@ -1214,7 +1219,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
lockdep_assert_held(&state_lock);
- if (list_empty(&dp->dl_perfile))
+ if (!delegation_hashed(dp))
return false;
dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
@@ -4598,7 +4603,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
* queued for a lease break. Don't queue it again.
*/
spin_lock(&state_lock);
- if (dp->dl_time == 0) {
+ if (delegation_hashed(dp) && dp->dl_time == 0) {
dp->dl_time = ktime_get_boottime_seconds();
list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
}
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index af8531c3854a..51a49e0cfe37 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1521,12 +1521,9 @@ static int __init init_nfsd(void)
int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
- retval = register_cld_notifier();
- if (retval)
- return retval;
retval = nfsd4_init_slabs();
if (retval)
- goto out_unregister_notifier;
+ return retval;
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
@@ -1545,9 +1542,14 @@ static int __init init_nfsd(void)
goto out_free_exports;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
+ goto out_free_filesystem;
+ retval = register_cld_notifier();
+ if (retval)
goto out_free_all;
return 0;
out_free_all:
+ unregister_pernet_subsys(&nfsd_net_ops);
+out_free_filesystem:
unregister_filesystem(&nfsd_fs_type);
out_free_exports:
remove_proc_entry("fs/nfs/exports", NULL);
@@ -1561,13 +1563,12 @@ out_free_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
-out_unregister_notifier:
- unregister_cld_notifier();
return retval;
}
static void __exit exit_nfsd(void)
{
+ unregister_cld_notifier();
unregister_pernet_subsys(&nfsd_net_ops);
nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
@@ -1577,7 +1578,6 @@ static void __exit exit_nfsd(void)
nfsd4_free_slabs();
nfsd4_exit_pnfs();
unregister_filesystem(&nfsd_fs_type);
- unregister_cld_notifier();
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index eea5b59b6a6c..de282f3273c5 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -556,17 +556,17 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd_readdirres *resp,
- int count)
+ u32 count)
{
struct xdr_buf *buf = &resp->dirlist;
struct xdr_stream *xdr = &resp->xdr;
- count = min_t(u32, count, PAGE_SIZE);
+ count = clamp(count, (u32)(XDR_UNIT * 2), svc_max_payload(rqstp));
memset(buf, 0, sizeof(*buf));
/* Reserve room for the NULL ptr & eof flag (-2 words) */
- buf->buflen = count - sizeof(__be32) * 2;
+ buf->buflen = count - XDR_UNIT * 2;
buf->pages = rqstp->rq_next_page;
rqstp->rq_next_page++;
@@ -577,7 +577,7 @@ static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
xdr->page_ptr = buf->pages;
xdr->iov = NULL;
xdr->p = page_address(*buf->pages);
- xdr->end = xdr->p + (PAGE_SIZE >> 2);
+ xdr->end = (void *)xdr->p + min_t(u32, buf->buflen, PAGE_SIZE);
xdr->rqst = NULL;
}
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 040e1cf90528..65ce0e72e7b9 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -35,17 +35,7 @@
void signalfd_cleanup(struct sighand_struct *sighand)
{
- wait_queue_head_t *wqh = &sighand->signalfd_wqh;
- /*
- * The lockless check can race with remove_wait_queue() in progress,
- * but in this case its caller should run under rcu_read_lock() and
- * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
- */
- if (likely(!waitqueue_active(wqh)))
- return;
-
- /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
- wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+ wake_up_pollfree(&sighand->signalfd_wqh);
}
struct signalfd_ctx {
diff --git a/fs/smbfs_common/cifs_arc4.c b/fs/smbfs_common/cifs_arc4.c
index 85ba15a60b13..043e4cb839fa 100644
--- a/fs/smbfs_common/cifs_arc4.c
+++ b/fs/smbfs_common/cifs_arc4.c
@@ -72,16 +72,3 @@ void cifs_arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int l
ctx->y = y;
}
EXPORT_SYMBOL_GPL(cifs_arc4_crypt);
-
-static int __init
-init_smbfs_common(void)
-{
- return 0;
-}
-static void __init
-exit_smbfs_common(void)
-{
-}
-
-module_init(init_smbfs_common)
-module_exit(exit_smbfs_common)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index e21459f9923a..778b57b1f020 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1765,7 +1765,10 @@ static int
xfs_remount_ro(
struct xfs_mount *mp)
{
- int error;
+ struct xfs_icwalk icw = {
+ .icw_flags = XFS_ICWALK_FLAG_SYNC,
+ };
+ int error;
/*
* Cancel background eofb scanning so it cannot race with the final
@@ -1773,8 +1776,13 @@ xfs_remount_ro(
*/
xfs_blockgc_stop(mp);
- /* Get rid of any leftover CoW reservations... */
- error = xfs_blockgc_free_space(mp, NULL);
+ /*
+ * Clear out all remaining COW staging extents and speculative post-EOF
+ * preallocations so that we don't leave inodes requiring inactivation
+ * cleanups during reclaim on a read-only mount. We must process every
+ * cached inode, so this requires a synchronous cache scan.
+ */
+ error = xfs_blockgc_free_space(mp, &icw);
if (error) {
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
return error;
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 259ee2bda492..b76dfb310ab6 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1787,5 +1787,6 @@ static void __exit zonefs_exit(void)
MODULE_AUTHOR("Damien Le Moal");
MODULE_DESCRIPTION("Zone file system for zoned block devices");
MODULE_LICENSE("GPL");
+MODULE_ALIAS_FS("zonefs");
module_init(zonefs_init);
module_exit(zonefs_exit);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index e7a163a3146b..755f38e893be 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -732,6 +732,7 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
+int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
.func = &_name##_func, \
@@ -1352,28 +1353,16 @@ extern struct mutex bpf_stats_enabled_mutex;
* kprobes, tracepoints) to prevent deadlocks on map operations as any of
* these events can happen inside a region which holds a map bucket lock
* and can deadlock on it.
- *
- * Use the preemption safe inc/dec variants on RT because migrate disable
- * is preemptible on RT and preemption in the middle of the RMW operation
- * might lead to inconsistent state. Use the raw variants for non RT
- * kernels as migrate_disable() maps to preempt_disable() so the slightly
- * more expensive save operation can be avoided.
*/
static inline void bpf_disable_instrumentation(void)
{
migrate_disable();
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_inc(bpf_prog_active);
- else
- __this_cpu_inc(bpf_prog_active);
+ this_cpu_inc(bpf_prog_active);
}
static inline void bpf_enable_instrumentation(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_RT))
- this_cpu_dec(bpf_prog_active);
- else
- __this_cpu_dec(bpf_prog_active);
+ this_cpu_dec(bpf_prog_active);
migrate_enable();
}
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 203eef993d76..0e1b6281fd8f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -245,7 +245,10 @@ struct kfunc_btf_id_set {
struct module *owner;
};
-struct kfunc_btf_id_list;
+struct kfunc_btf_id_list {
+ struct list_head list;
+ struct mutex mutex;
+};
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -254,6 +257,9 @@ void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s);
bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
struct module *owner);
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
#else
static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s)
@@ -268,13 +274,13 @@ static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
{
return false;
}
+
+static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused;
+static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused;
#endif
#define DEFINE_KFUNC_BTF_ID_SET(set, name) \
struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
THIS_MODULE }
-extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
-extern struct kfunc_btf_id_list prog_test_kfunc_list;
-
#endif
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2f909ed084c6..4ff37cb763ae 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -3,7 +3,6 @@
#define _LINUX_CACHEINFO_H
#include <linux/bitops.h>
-#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 3d5af56337bd..429dcebe2b99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -121,7 +121,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.reachable\n\t" \
".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define annotate_reachable() __annotate_reachable(__COUNTER__)
@@ -129,7 +129,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
diff --git a/include/linux/delay.h b/include/linux/delay.h
index 8eacf67eb212..039e7e0c7378 100644
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -20,6 +20,7 @@
*/
#include <linux/math.h>
+#include <linux/sched.h>
extern unsigned long loops_per_jiffy;
@@ -58,7 +59,18 @@ void calibrate_delay(void);
void __attribute__((weak)) calibration_delay_done(void);
void msleep(unsigned int msecs);
unsigned long msleep_interruptible(unsigned int msecs);
-void usleep_range(unsigned long min, unsigned long max);
+void usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state);
+
+static inline void usleep_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void usleep_idle_range(unsigned long min, unsigned long max)
+{
+ usleep_range_state(min, max, TASK_IDLE);
+}
static inline void ssleep(unsigned int seconds)
{
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index a498ebcf4993..15e7c5e15d62 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -18,6 +18,7 @@
#include <linux/klist.h>
#include <linux/pm.h>
#include <linux/device/bus.h>
+#include <linux/module.h>
/**
* enum probe_type - device driver probe type to try
diff --git a/include/linux/efi.h b/include/linux/efi.h
index dbd39b20e034..ef8dbc0a1522 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1283,4 +1283,10 @@ static inline struct efi_mokvar_table_entry *efi_mokvar_entry_find(
}
#endif
+#ifdef CONFIG_SYSFB
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+#else
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt) { }
+#endif
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 24b7ed2677af..7f1e88e3e2b5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -6,6 +6,7 @@
#define __LINUX_FILTER_H__
#include <linux/atomic.h>
+#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
@@ -26,7 +27,6 @@
#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
-#include <uapi/linux/bpf.h>
struct sk_buff;
struct sock;
@@ -640,9 +640,6 @@ static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void
* This uses migrate_disable/enable() explicitly to document that the
* invocation of a BPF program does not require reentrancy protection
* against a BPF program which is invoked from a preempting task.
- *
- * For non RT enabled kernels migrate_disable/enable() maps to
- * preempt_disable/enable(), i.e. it disables also preemption.
*/
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
const void *ctx)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b976c4177299..8fcc38467af6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -624,7 +624,7 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1);
void free_pages_exact(void *virt, size_t size);
-__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1);
+__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask), 0)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9e067f937dbc..f453be385bd4 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -840,6 +840,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
return hdev->ll_driver == driver;
}
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+ return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
#define PM_HINT_FULLON 1<<5
#define PM_HINT_NORMAL 1<<1
diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h
index fa2cd8c63dcc..24359b4a9605 100644
--- a/include/linux/instrumentation.h
+++ b/include/linux/instrumentation.h
@@ -11,7 +11,7 @@
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_begin\n\t" \
".long " __stringify(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define instrumentation_begin() __instrumentation_begin(__COUNTER__)
@@ -50,7 +50,7 @@
asm volatile(__stringify(c) ": nop\n\t" \
".pushsection .discard.instr_end\n\t" \
".long " __stringify(c) "b - .\n\t" \
- ".popsection\n\t"); \
+ ".popsection\n\t" : : "i" (c)); \
})
#define instrumentation_end() __instrumentation_end(__COUNTER__)
#else
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 20c1f968da7c..a59d25f19385 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -133,6 +133,7 @@ struct inet6_skb_parm {
__u16 dsthao;
#endif
__u16 frag_max_size;
+ __u16 srhoff;
#define IP6SKB_XFRM_TRANSFORMED 1
#define IP6SKB_FORWARDED 2
@@ -142,6 +143,7 @@ struct inet6_skb_parm {
#define IP6SKB_HOPBYHOP 32
#define IP6SKB_L3SLAVE 64
#define IP6SKB_JUMBOGRAM 128
+#define IP6SKB_SEG6 256
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8adcf1fa8096..9dc7cb239d21 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -405,8 +405,8 @@ phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
- phys_addr_t align)
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+ phys_addr_t align)
{
return memblock_phys_alloc_range(size, align, 0,
MEMBLOCK_ALLOC_ACCESSIBLE);
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 723985879035..a5cc4cdf9cc8 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -664,6 +664,19 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
/**
+ * mhi_pm_resume_force - Force resume MHI from suspended state
+ * @mhi_cntrl: MHI controller
+ *
+ * Resume the device irrespective of its MHI state. As per the MHI spec, devices
+ * have to be in M3 state during resume. But some devices seem to be in a
+ * different MHI state than M3, yet they continue working fine if allowed.
+ * This API is intended to be used for such devices.
+ *
+ * Return: 0 if the resume succeeds, a negative error code otherwise
+ */
+int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl);
+
+/**
* mhi_download_rddm_image - Download ramdump image from device for
* debugging purpose.
* @mhi_cntrl: MHI controller
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58e744b78c2c..936dc0b6c226 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -277,6 +277,7 @@ enum vmscan_throttle_state {
VMSCAN_THROTTLE_WRITEBACK,
VMSCAN_THROTTLE_ISOLATED,
VMSCAN_THROTTLE_NOPROGRESS,
+ VMSCAN_THROTTLE_CONGESTED,
NR_VMSCAN_THROTTLE,
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index be5cb3360b94..6aadcc0ecb5b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1937,7 +1937,7 @@ enum netdev_ml_priv_type {
* @udp_tunnel_nic: UDP tunnel offload state
* @xdp_state: stores info on attached XDP BPF programs
*
- * @nested_level: Used as as a parameter of spin_lock_nested() of
+ * @nested_level: Used as a parameter of spin_lock_nested() of
* dev->addr_list_lock.
* @unlink_list: As netif_addr_lock() can be called recursively,
* keep a list of interfaces to be deleted.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 605246452305..d150a9082b31 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -285,7 +285,6 @@ static inline struct inode *folio_inode(struct folio *folio)
static inline bool page_cache_add_speculative(struct page *page, int count)
{
- VM_BUG_ON_PAGE(PageTail(page), page);
return folio_ref_try_add_rcu((struct folio *)page, count);
}
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b31d3f3312ce..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -51,9 +51,9 @@
#define _LINUX_PERCPU_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/types.h>
#include <linux/gfp.h>
struct percpu_ref;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 96e43fbb2dd8..cbf03a5f9cf5 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -538,11 +538,12 @@ struct macsec_ops;
 * @mac_managed_pm: Set true if MAC driver takes care of suspending/resuming PHY
* @state: State of the PHY for management purposes
* @dev_flags: Device-specific flags used by the PHY driver.
- * Bits [15:0] are free to use by the PHY driver to communicate
- * driver specific behavior.
- * Bits [23:16] are currently reserved for future use.
- * Bits [31:24] are reserved for defining generic
- * PHY driver behavior.
+ *
+ * - Bits [15:0] are free to use by the PHY driver to communicate
+ * driver specific behavior.
+ * - Bits [23:16] are currently reserved for future use.
+ * - Bits [31:24] are reserved for defining generic
+ * PHY driver behavior.
* @irq: IRQ number of the PHY's interrupt (-1 if none)
* @phy_timer: The timer for handling the state machine
* @phylink: Pointer to phylink instance for this PHY
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 222da43b7096..eddd66d426ca 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -129,7 +129,7 @@ static inline bool pm_runtime_suspended(struct device *dev)
* pm_runtime_active - Check whether or not a device is runtime-active.
* @dev: Target device.
*
- * Return %true if runtime PM is enabled for @dev and its runtime PM status is
+ * Return %true if runtime PM is disabled for @dev or its runtime PM status is
* %RPM_ACTIVE, or %false otherwise.
*
* Note that the return value of this function can only be trusted if it is
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index bd7a73db2e66..54cf566616ae 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -499,7 +499,8 @@ struct regulator_irq_data {
* best to shut-down regulator(s) or reboot the SOC if error
* handling is repeatedly failing. If fatal_cnt is given the IRQ
* handling is aborted if it fails for fatal_cnt times and die()
- * callback (if populated) or BUG() is called to try to prevent
+ * callback (if populated) is called. If die() is not populated,
+ * a system poweroff is attempted in order to prevent any
* further damage.
* @reread_ms: The time which is waited before attempting to re-read status
* at the worker if IC reading fails. Immediate re-read is done
@@ -516,11 +517,12 @@ struct regulator_irq_data {
* @data: Driver private data pointer which will be passed as such to
* the renable, map_event and die callbacks in regulator_irq_data.
* @die: Protection callback. If IC status reading or recovery actions
- * fail fatal_cnt times this callback or BUG() is called. This
- * callback should implement a final protection attempt like
- * disabling the regulator. If protection succeeded this may
- * return 0. If anything else is returned the core assumes final
- * protection failed and calls BUG() as a last resort.
+ * fail fatal_cnt times, this callback is called or the system is
+ * powered off. This callback should implement a final protection
+ * attempt like disabling the regulator. If protection succeeded
+ * die() may return 0. If anything else is returned the core
+ * assumes final protection failed and attempts to perform a
+ * poweroff as a last resort.
* @map_event: Driver callback to map IRQ status into regulator devices with
* events / errors. NOTE: callback MUST initialize both the
* errors and notifs for all rdevs which it signals having
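A minimal sketch of a die() callback matching the behaviour described above; example_die(), struct example_drvdata and example_force_disable() are hypothetical, only the rid->data pointer and the zero-on-success contract come from this documentation:

static int example_die(struct regulator_irq_data *rid)
{
	struct example_drvdata *priv = rid->data;	/* driver private data */

	/* Final protection attempt, e.g. forcing the rail off. Returning 0
	 * tells the core that protection succeeded; anything else makes it
	 * attempt a poweroff as a last resort.
	 */
	return example_force_disable(priv);
}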
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c8cb7e697d47..4507d77d6941 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -286,6 +286,7 @@ struct nf_bridge_info {
struct tc_skb_ext {
__u32 chain;
__u16 mru;
+ __u16 zone;
bool post_ct;
};
#endif
@@ -1380,7 +1381,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
u16 *ctinfo_map, size_t mapsize,
- bool post_ct);
+ bool post_ct, u16 zone);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index a1f03461369b..cf5999626e28 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -195,7 +195,7 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
* @offset: offset of buffer in user space
* @pages: locked pages from userspace
* @num_pages: number of locked pages
- * @dmabuf: dmabuf used to for exporting to user space
+ * @refcount: reference counter
* @flags: defined by TEE_SHM_* in tee_drv.h
* @id: unique id of a shared memory object on this device, shared
* with user space
@@ -214,7 +214,7 @@ struct tee_shm {
unsigned int offset;
struct page **pages;
size_t num_pages;
- struct dma_buf *dmabuf;
+ refcount_t refcount;
u32 flags;
int id;
u64 sec_world_id;
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 04e87f4b9417..a960de68ac69 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -7,9 +7,27 @@
#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
+static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
+{
+ switch (gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ return protocol == cpu_to_be16(ETH_P_IP);
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ return protocol == cpu_to_be16(ETH_P_IPV6);
+ case VIRTIO_NET_HDR_GSO_UDP:
+ return protocol == cpu_to_be16(ETH_P_IP) ||
+ protocol == cpu_to_be16(ETH_P_IPV6);
+ default:
+ return false;
+ }
+}
+
static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
const struct virtio_net_hdr *hdr)
{
+ if (skb->protocol)
+ return 0;
+
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_UDP:
@@ -88,9 +106,12 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb->protocol) {
__be16 protocol = dev_parse_header_protocol(skb);
- virtio_net_hdr_set_proto(skb, hdr);
- if (protocol && protocol != skb->protocol)
+ if (!protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+ else if (!virtio_net_hdr_match_proto(protocol, hdr->gso_type))
return -EINVAL;
+ else
+ skb->protocol = protocol;
}
retry:
if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2d0df57c9902..851e07da2583 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define wake_up_interruptible_sync_poll_locked(x, m) \
__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ /*
+ * For performance reasons, we don't always take the queue lock here.
+ * Therefore, we might race with someone removing the last entry from
+ * the queue, and proceed while they still hold the queue lock.
+ * However, rcu_read_lock() is required to be held in such cases, so we
+ * can safely proceed with an RCU-delayed free.
+ */
+ if (waitqueue_active(wq_head))
+ __wake_up_pollfree(wq_head);
+}
+
#define ___wait_cond_timeout(condition) \
({ \
bool __cond = (condition); \
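A minimal usage sketch for the rule spelled out above, assuming a waitqueue embedded in a task-lifetime object; struct example_ctx and example_ctx_free() are hypothetical:

struct example_ctx {
	struct wait_queue_head wqh;
	struct rcu_head rcu;
};

static void example_ctx_free(struct example_ctx *ctx)
{
	/* Tell non-blocking pollers (e.g. epoll) the queue is going away... */
	wake_up_pollfree(&ctx->wqh);
	/* ...and RCU-delay the actual free, as the comment above requires. */
	kfree_rcu(ctx, rcu);
}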
diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h
index f6af76c87a6c..191c36afa1f4 100644
--- a/include/net/bond_alb.h
+++ b/include/net/bond_alb.h
@@ -126,7 +126,7 @@ struct tlb_slave_info {
struct alb_bond_info {
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
u32 unbalanced_load;
- int tx_rebalance_counter;
+ atomic_t tx_rebalance_counter;
int lp_counter;
/* -------- rlb parameters -------- */
int rlb_enabled;
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 7994455ec714..c4898fcbf923 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -136,6 +136,19 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
sk_rx_queue_update(sk, skb);
}
+/* Variant of sk_mark_napi_id() for passive flow setup,
+ * as sk->sk_napi_id and sk->sk_rx_queue_mapping content
+ * needs to be set.
+ */
+static inline void sk_mark_napi_id_set(struct sock *sk,
+ const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
+#endif
+ sk_rx_queue_set(sk, skb);
+}
+
static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cc663c68ddc4..d24b0a34c8f0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -276,14 +276,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
/* jiffies until ct expires, 0 if already expired */
static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
{
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
return timeout > 0 ? timeout : 0;
}
static inline bool nf_ct_is_expired(const struct nf_conn *ct)
{
- return (__s32)(ct->timeout - nfct_time_stamp) <= 0;
+ return (__s32)(READ_ONCE(ct->timeout) - nfct_time_stamp) <= 0;
}
/* use after obtaining a reference count */
@@ -302,7 +302,7 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
static inline void nf_ct_offload_timeout(struct nf_conn *ct)
{
if (nf_ct_expires(ct) < NF_CT_DAY / 2)
- ct->timeout = nfct_time_stamp + NF_CT_DAY;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
}
struct kernel_param;
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index bf79f3a890af..9e71691c491b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -193,4 +193,20 @@ static inline void skb_txtime_consumed(struct sk_buff *skb)
skb->tstamp = ktime_set(0, 0);
}
+struct tc_skb_cb {
+ struct qdisc_skb_cb qdisc_cb;
+
+ u16 mru;
+ bool post_ct;
+ u16 zone; /* Only valid if post_ct = true */
+};
+
+static inline struct tc_skb_cb *tc_skb_cb(const struct sk_buff *skb)
+{
+ struct tc_skb_cb *cb = (struct tc_skb_cb *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
+ return cb;
+}
+
#endif
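For illustration, a minimal sketch of a consumer of the new control block; example_get_ct_zone() is hypothetical, tc_skb_cb() and its fields are from this hunk:

/* Hypothetical reader: fetch the conntrack zone recorded by the ct action. */
static u16 example_get_ct_zone(const struct sk_buff *skb)
{
	struct tc_skb_cb *cb = tc_skb_cb(skb);

	/* zone is only valid once the packet has passed through conntrack */
	return cb->post_ct ? cb->zone : 0;
}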
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 22179b2fda72..c70e6d2b2fdd 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -447,8 +447,6 @@ struct qdisc_skb_cb {
};
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
- u16 mru;
- bool post_ct;
};
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 189fdb9db162..3ae61ce2eabd 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -105,19 +105,18 @@ extern struct percpu_counter sctp_sockets_allocated;
int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
+typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *);
void sctp_transport_walk_start(struct rhashtable_iter *iter);
void sctp_transport_walk_stop(struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_next(struct net *net,
struct rhashtable_iter *iter);
struct sctp_transport *sctp_transport_get_idx(struct net *net,
struct rhashtable_iter *iter, int pos);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
- struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p);
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p);
int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p);
int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
struct sctp_info *info);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 899c29c326ba..8dabd8800006 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1355,6 +1355,7 @@ struct sctp_endpoint {
reconf_enable:1;
__u8 strreset_enable;
+ struct rcu_head rcu;
};
/* Recover the outer endpoint structure. */
@@ -1370,7 +1371,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
void sctp_endpoint_free(struct sctp_endpoint *);
void sctp_endpoint_put(struct sctp_endpoint *);
-void sctp_endpoint_hold(struct sctp_endpoint *);
+int sctp_endpoint_hold(struct sctp_endpoint *ep);
void sctp_endpoint_add_asoc(struct sctp_endpoint *, struct sctp_association *);
struct sctp_association *sctp_endpoint_lookup_assoc(
const struct sctp_endpoint *ep,
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 9d19c15e8545..af668f17b398 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -58,9 +58,30 @@ extern int seg6_local_init(void);
extern void seg6_local_exit(void);
extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced);
+extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags);
+extern void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt);
extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh,
int proto);
extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh);
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
+
+/* If the packet which invoked an ICMP error contains an SRH, return
+ * the true destination address from within the SRH, otherwise use the
+ * destination address in the IP header.
+ */
+static inline const struct in6_addr *seg6_get_daddr(struct sk_buff *skb,
+ struct inet6_skb_parm *opt)
+{
+ struct ipv6_sr_hdr *srh;
+
+ if (opt->flags & IP6SKB_SEG6) {
+ srh = (struct ipv6_sr_hdr *)(skb->data + opt->srhoff);
+ return &srh->segments[0];
+ }
+
+ return NULL;
+}
+
+
#endif
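A minimal sketch of the intended call site, assuming an ICMPv6 error handler with opt pointing at the packet's inet6_skb_parm; example_icmp_daddr() is hypothetical:

static const struct in6_addr *example_icmp_daddr(struct sk_buff *skb,
						 struct inet6_skb_parm *opt)
{
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt);

	/* No SRH was recorded: fall back to the outer IPv6 header. */
	return daddr ? daddr : &ipv6_hdr(skb)->daddr;
}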
diff --git a/include/net/sock.h b/include/net/sock.h
index bea21ff70e74..d47e9658da28 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -431,7 +431,7 @@ struct sock {
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
#endif
- struct dst_entry *sk_rx_dst;
+ struct dst_entry __rcu *sk_rx_dst;
int sk_rx_dst_ifindex;
u32 sk_rx_dst_cookie;
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f25a6149d3ba..ca2e9009a651 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -30,12 +30,14 @@
#define _VMSCAN_THROTTLE_WRITEBACK (1 << VMSCAN_THROTTLE_WRITEBACK)
#define _VMSCAN_THROTTLE_ISOLATED (1 << VMSCAN_THROTTLE_ISOLATED)
#define _VMSCAN_THROTTLE_NOPROGRESS (1 << VMSCAN_THROTTLE_NOPROGRESS)
+#define _VMSCAN_THROTTLE_CONGESTED (1 << VMSCAN_THROTTLE_CONGESTED)
#define show_throttle_flags(flags) \
(flags) ? __print_flags(flags, "|", \
{_VMSCAN_THROTTLE_WRITEBACK, "VMSCAN_THROTTLE_WRITEBACK"}, \
{_VMSCAN_THROTTLE_ISOLATED, "VMSCAN_THROTTLE_ISOLATED"}, \
- {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"} \
+ {_VMSCAN_THROTTLE_NOPROGRESS, "VMSCAN_THROTTLE_NOPROGRESS"}, \
+ {_VMSCAN_THROTTLE_CONGESTED, "VMSCAN_THROTTLE_CONGESTED"} \
) : "VMSCAN_THROTTLE_NONE"
diff --git a/include/uapi/asm-generic/poll.h b/include/uapi/asm-generic/poll.h
index 41b509f410bf..f9c520ce4bf4 100644
--- a/include/uapi/asm-generic/poll.h
+++ b/include/uapi/asm-generic/poll.h
@@ -29,7 +29,7 @@
#define POLLRDHUP 0x2000
#endif
-#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
+#define POLLFREE (__force __poll_t)0x4000
#define POLL_BUSY_LOOP (__force __poll_t)0x8000
diff --git a/include/uapi/linux/byteorder/big_endian.h b/include/uapi/linux/byteorder/big_endian.h
index 2199adc6a6c2..80aa5c41a763 100644
--- a/include/uapi/linux/byteorder/big_endian.h
+++ b/include/uapi/linux/byteorder/big_endian.h
@@ -9,6 +9,7 @@
#define __BIG_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
index 601c904fd5cd..cd98982e7523 100644
--- a/include/uapi/linux/byteorder/little_endian.h
+++ b/include/uapi/linux/byteorder/little_endian.h
@@ -9,6 +9,7 @@
#define __LITTLE_ENDIAN_BITFIELD
#endif
+#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/swab.h>
diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h
index c8cc46f80a16..f106a3941cdf 100644
--- a/include/uapi/linux/mptcp.h
+++ b/include/uapi/linux/mptcp.h
@@ -136,19 +136,21 @@ struct mptcp_info {
* MPTCP_EVENT_REMOVED: token, rem_id
* An address has been lost by the peer.
*
- * MPTCP_EVENT_SUB_ESTABLISHED: token, family, saddr4 | saddr6,
- * daddr4 | daddr6, sport, dport, backup,
- * if_idx [, error]
+ * MPTCP_EVENT_SUB_ESTABLISHED: token, family, loc_id, rem_id,
+ * saddr4 | saddr6, daddr4 | daddr6, sport,
+ * dport, backup, if_idx [, error]
* A new subflow has been established. 'error' should not be set.
*
- * MPTCP_EVENT_SUB_CLOSED: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport, backup, if_idx [, error]
+ * MPTCP_EVENT_SUB_CLOSED: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx
+ * [, error]
* A subflow has been closed. An error (copy of sk_err) could be set if an
* error has been detected for this subflow.
*
- * MPTCP_EVENT_SUB_PRIORITY: token, family, saddr4 | saddr6, daddr4 | daddr6,
- * sport, dport, backup, if_idx [, error]
- * The priority of a subflow has changed. 'error' should not be set.
+ * MPTCP_EVENT_SUB_PRIORITY: token, family, loc_id, rem_id, saddr4 | saddr6,
+ * daddr4 | daddr6, sport, dport, backup, if_idx
+ * [, error]
+ * The priority of a subflow has changed. 'error' should not be set.
*/
enum mptcp_event_type {
MPTCP_EVENT_UNSPEC = 0,
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index f6e3c8c9c744..4fa4e979e948 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -263,7 +263,7 @@ enum nfc_sdp_attr {
#define NFC_SE_ENABLED 0x1
struct sockaddr_nfc {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
@@ -271,14 +271,14 @@ struct sockaddr_nfc {
#define NFC_LLCP_MAX_SERVICE_NAME 63
struct sockaddr_nfc_llcp {
- sa_family_t sa_family;
+ __kernel_sa_family_t sa_family;
__u32 dev_idx;
__u32 target_idx;
__u32 nfc_protocol;
__u8 dsap; /* Destination SAP, if known */
__u8 ssap; /* Source SAP to be bound to */
char service_name[NFC_LLCP_MAX_SERVICE_NAME]; /* Service name URI */;
- size_t service_name_len;
+ __kernel_size_t service_name_len;
};
/* NFC socket protocols */
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index 74ef57b38f9f..ac5d6a3031db 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -66,10 +66,17 @@ struct rlimit64 {
#define _STK_LIM (8*1024*1024)
/*
- * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
- * and other sensitive information are never written to disk.
+ * Limit the amount of locked memory by some sane default:
+ * root can always increase this limit if needed.
+ *
+ * The main use-cases are (1) preventing sensitive memory
+ * from being swapped; (2) real-time operations; (3) via
+ * IORING_REGISTER_BUFFERS.
+ *
+ * The first two don't need much. The latter will take as
+ * much as it can get. 8MB is a reasonably sane default.
*/
-#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
+#define MLOCK_LIMIT (8*1024*1024)
/*
* Due to binary compatibility, the actual resource numbers
diff --git a/include/xen/events.h b/include/xen/events.h
index c204262d9fc2..344081e71584 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -17,6 +17,7 @@ struct xenbus_device;
unsigned xen_evtchn_nr_channels(void);
int bind_evtchn_to_irq(evtchn_port_t evtchn);
+int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn);
int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags, const char *devname,
diff --git a/kernel/audit.c b/kernel/audit.c
index 121d37e700a6..4cebadb5f30d 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -718,7 +718,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
{
int rc = 0;
struct sk_buff *skb;
- static unsigned int failed = 0;
+ unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
@@ -735,32 +735,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
continue;
}
+retry:
/* grab an extra skb reference in case of error */
skb_get(skb);
rc = netlink_unicast(sk, skb, portid, 0);
if (rc < 0) {
- /* fatal failure for our queue flush attempt? */
+ /* send failed - try a few times unless fatal error */
if (++failed >= retry_limit ||
rc == -ECONNREFUSED || rc == -EPERM) {
- /* yes - error processing for the queue */
sk = NULL;
if (err_hook)
(*err_hook)(skb);
- if (!skb_hook)
- goto out;
- /* keep processing with the skb_hook */
+ if (rc == -EAGAIN)
+ rc = 0;
+ /* continue to drain the queue */
continue;
} else
- /* no - requeue to preserve ordering */
- skb_queue_head(queue, skb);
+ goto retry;
} else {
- /* it worked - drop the extra reference and continue */
+ /* skb sent - drop the extra reference and continue */
consume_skb(skb);
failed = 0;
}
}
-out:
return (rc >= 0 ? 0 : rc);
}
@@ -1609,7 +1607,8 @@ static int __net_init audit_net_init(struct net *net)
audit_panic("cannot initialize netlink socket in namespace");
return -ENOMEM;
}
- aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ /* limit the timeout in case auditd is blocked/stopped */
+ aunet->sk->sk_sndtimeo = HZ / 10;
return 0;
}
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index dbc3ad07e21b..9bdb03767db5 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6346,11 +6346,6 @@ BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
/* BTF ID set registration API for modules */
-struct kfunc_btf_id_list {
- struct list_head list;
- struct mutex mutex;
-};
-
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
@@ -6376,8 +6371,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
{
struct kfunc_btf_id_set *s;
- if (!owner)
- return false;
mutex_lock(&klist->mutex);
list_for_each_entry(s, &klist->list, list) {
if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
@@ -6389,8 +6382,6 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
return false;
}
-#endif
-
#define DEFINE_KFUNC_BTF_ID_LIST(name) \
struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \
__MUTEX_INITIALIZER(name.mutex) }; \
@@ -6398,3 +6389,5 @@ bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
+
+#endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 50efda51515b..b532f1058d35 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1366,22 +1366,28 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}
+static bool __reg32_bound_s64(s32 a)
+{
+ return a >= 0 && a <= S32_MAX;
+}
+
static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
reg->umin_value = reg->u32_min_value;
reg->umax_value = reg->u32_max_value;
- /* Attempt to pull 32-bit signed bounds into 64-bit bounds
- * but must be positive otherwise set to worse case bounds
- * and refine later from tnum.
+
+	/* Attempt to pull 32-bit signed bounds into 64-bit bounds, but they must
+	 * be positive; otherwise set to worst-case bounds and refine later
+	 * from tnum.
*/
- if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
- reg->smax_value = reg->s32_max_value;
- else
- reg->smax_value = U32_MAX;
- if (reg->s32_min_value >= 0)
+ if (__reg32_bound_s64(reg->s32_min_value) &&
+ __reg32_bound_s64(reg->s32_max_value)) {
reg->smin_value = reg->s32_min_value;
- else
+ reg->smax_value = reg->s32_max_value;
+ } else {
reg->smin_value = 0;
+ reg->smax_value = U32_MAX;
+ }
}
static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
@@ -2379,8 +2385,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
*/
if (insn->src_reg != BPF_REG_FP)
return 0;
- if (BPF_SIZE(insn->code) != BPF_DW)
- return 0;
/* dreg = *(u64 *)[fp - off] was a fill from the stack.
* that [fp - off] slot contains scalar that needs to be
@@ -2403,8 +2407,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
/* scalars can only be spilled into stack */
if (insn->dst_reg != BPF_REG_FP)
return 0;
- if (BPF_SIZE(insn->code) != BPF_DW)
- return 0;
spi = (-insn->off - 1) / BPF_REG_SIZE;
if (spi >= 64) {
verbose(env, "BUG spi %d\n", spi);
@@ -4551,9 +4553,16 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
if (insn->imm == BPF_CMPXCHG) {
/* Check comparison of R0 with memory location */
- err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+ const u32 aux_reg = BPF_REG_0;
+
+ err = check_reg_arg(env, aux_reg, SRC_OP);
if (err)
return err;
+
+ if (is_pointer_value(env, aux_reg)) {
+ verbose(env, "R%d leaks addr into mem\n", aux_reg);
+ return -EACCES;
+ }
}
if (is_pointer_value(env, insn->src_reg)) {
@@ -4588,13 +4597,19 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
load_reg = -1;
}
- /* check whether we can read the memory */
+	/* Check whether we can read the memory, with a second call for the
+	 * fetch case to simulate the register fill.
+ */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- BPF_SIZE(insn->code), BPF_READ, load_reg, true);
+ BPF_SIZE(insn->code), BPF_READ, -1, true);
+ if (!err && load_reg >= 0)
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+ BPF_SIZE(insn->code), BPF_READ, load_reg,
+ true);
if (err)
return err;
- /* check whether we can write into the same memory */
+ /* Check whether we can write into the same memory. */
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1, true);
if (err)
@@ -8308,6 +8323,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
insn->dst_reg);
}
zext_32_to_64(dst_reg);
+
+ __update_reg_bounds(dst_reg);
+ __reg_deduce_bounds(dst_reg);
+ __reg_bound_offset(dst_reg);
}
} else {
/* case: R = imm
@@ -8422,7 +8441,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
new_range = dst_reg->off;
if (range_right_open)
- new_range--;
+ new_range++;
/* Examples for register markings:
*
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index eb53f5ec62c9..256cf6db573c 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -6,6 +6,7 @@
#include <linux/buildid.h>
#include <linux/crash_core.h>
+#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
@@ -295,6 +296,16 @@ int __init parse_crashkernel_low(char *cmdline,
"crashkernel=", suffix_tbl[SUFFIX_LOW]);
}
+/*
+ * Add a dummy early_param handler to mark crashkernel= as a known command line
+ * parameter and suppress incorrect warnings in init/main.c.
+ */
+static int __init parse_crashkernel_dummy(char *arg)
+{
+ return 0;
+}
+early_param("crashkernel", parse_crashkernel_dummy);
+
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len)
{
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 0c6a48dfcecb..1f25a4d7de27 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1380,7 +1380,7 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
* - the VCPU on which owner runs is preempted
*/
if (!owner->on_cpu || need_resched() ||
- rt_mutex_waiter_is_top_waiter(lock, waiter) ||
+ !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
vcpu_is_preempted(task_cpu(owner))) {
res = false;
break;
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 76577d1642a5..eca38107b32f 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
}
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+ __wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+ /* POLLFREE must have cleared the queue. */
+ WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
/*
* Note: we use "set_current_state()" _after_ the wait-queue add,
* because we need a memory barrier there on SMP, so that any
diff --git a/kernel/signal.c b/kernel/signal.c
index a629b11bf3e0..dfcee3888b00 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -4185,6 +4185,15 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
ss_mode != 0))
return -EINVAL;
+ /*
+ * Return before taking any locks if no actual
+ * sigaltstack changes were requested.
+ */
+ if (t->sas_ss_sp == (unsigned long)ss_sp &&
+ t->sas_ss_size == ss_size &&
+ t->sas_ss_flags == ss_flags)
+ return 0;
+
sigaltstack_lock();
if (ss_mode == SS_DISABLE) {
ss_size = 0;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b348749a9fc6..dcdcb85121e4 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1306,8 +1306,7 @@ int do_settimeofday64(const struct timespec64 *ts)
timekeeping_forward_now(tk);
xt = tk_xtime(tk);
- ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
- ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
+ ts_delta = timespec64_sub(*ts, xt);
if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
ret = -EINVAL;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index e3d2c23c413d..85f1021ad459 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -2054,26 +2054,28 @@ unsigned long msleep_interruptible(unsigned int msecs)
EXPORT_SYMBOL(msleep_interruptible);
/**
- * usleep_range - Sleep for an approximate time
- * @min: Minimum time in usecs to sleep
- * @max: Maximum time in usecs to sleep
+ * usleep_range_state - Sleep for an approximate time in a given state
+ * @min: Minimum time in usecs to sleep
+ * @max: Maximum time in usecs to sleep
+ * @state: State the current task will be in while sleeping
*
* In non-atomic context where the exact wakeup time is flexible, use
- * usleep_range() instead of udelay(). The sleep improves responsiveness
+ * usleep_range_state() instead of udelay(). The sleep improves responsiveness
* by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
* power usage by allowing hrtimers to take advantage of an already-
* scheduled interrupt instead of scheduling a new one just for this sleep.
*/
-void __sched usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range_state(unsigned long min, unsigned long max,
+ unsigned int state)
{
ktime_t exp = ktime_add_us(ktime_get(), min);
u64 delta = (u64)(max - min) * NSEC_PER_USEC;
for (;;) {
- __set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(state);
/* Do not return before the requested sleep time has elapsed */
if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
break;
}
}
-EXPORT_SYMBOL(usleep_range);
+EXPORT_SYMBOL(usleep_range_state);
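For reference, the previous usleep_range() behaviour can be expressed in terms of the new helper; a minimal sketch, not part of this hunk:

static inline void example_usleep_range(unsigned long min, unsigned long max)
{
	/* Same semantics as the removed body: an uninterruptible ranged sleep. */
	usleep_range_state(min, max, TASK_UNINTERRUPTIBLE);
}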
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 4f5613dac227..7b32c356ebc5 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -264,15 +264,16 @@ void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
long inc_rlimit_ucounts(struct ucounts *ucounts, enum ucount_type type, long v)
{
struct ucounts *iter;
+ long max = LONG_MAX;
long ret = 0;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- long max = READ_ONCE(iter->ns->ucount_max[type]);
long new = atomic_long_add_return(v, &iter->ucount[type]);
if (new < 0 || new > max)
ret = LONG_MAX;
else if (iter == ucounts)
ret = new;
+ max = READ_ONCE(iter->ns->ucount_max[type]);
}
return ret;
}
@@ -312,15 +313,16 @@ long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum ucount_type type)
{
/* Caller must hold a reference to ucounts */
struct ucounts *iter;
+ long max = LONG_MAX;
long dec, ret = 0;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- long max = READ_ONCE(iter->ns->ucount_max[type]);
long new = atomic_long_add_return(1, &iter->ucount[type]);
if (new < 0 || new > max)
goto unwind;
if (iter == ucounts)
ret = new;
+ max = READ_ONCE(iter->ns->ucount_max[type]);
/*
* Grab an extra ucount reference for the caller when
* the rlimit count was previously 0.
@@ -339,15 +341,16 @@ unwind:
return 0;
}
-bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long max)
+bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsigned long rlimit)
{
struct ucounts *iter;
- if (get_ucounts_value(ucounts, type) > max)
- return true;
+ long max = rlimit;
+ if (rlimit > LONG_MAX)
+ max = LONG_MAX;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- max = READ_ONCE(iter->ns->ucount_max[type]);
if (get_ucounts_value(iter, type) > max)
return true;
+ max = READ_ONCE(iter->ns->ucount_max[type]);
}
return false;
}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5c12bde10996..5e14e32056ad 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -316,6 +316,7 @@ config DEBUG_INFO_BTF
bool "Generate BTF typeinfo"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
+ depends on BPF_SYSCALL
help
Generate deduplicated BTF type information from DWARF debug info.
Turning this on expects presence of pahole tool, which will convert
diff --git a/mm/Kconfig b/mm/Kconfig
index 28edafc820ad..356f4f2c779e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -428,7 +428,7 @@ config THP_SWAP
# UP and nommu archs use km based percpu allocator
#
config NEED_PER_CPU_KM
- depends on !SMP
+ depends on !SMP || !MMU
bool
default y
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1eead4761011..eae96dfe0261 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
wb_shutdown(&bdi->wb);
cgwb_bdi_unregister(bdi);
+ /*
+ * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+ * update the global bdi_min_ratio.
+ */
+ if (bdi->min_ratio)
+ bdi_set_min_ratio(bdi, 0);
+
if (bdi->dev) {
bdi_debug_unregister(bdi);
device_unregister(bdi->dev);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index c381b3c525d0..e92497895202 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
for (i = 0; i < nr_ids; i++) {
t = damon_new_target(ids[i]);
if (!t) {
- pr_err("Failed to alloc damon_target\n");
/* The caller should do cleanup of the ids itself */
damon_for_each_target_safe(t, next, ctx)
damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
unsigned long aggr_int, unsigned long primitive_upd_int,
unsigned long min_nr_reg, unsigned long max_nr_reg)
{
- if (min_nr_reg < 3) {
- pr_err("min_nr_regions (%lu) must be at least 3\n",
- min_nr_reg);
+ if (min_nr_reg < 3)
return -EINVAL;
- }
- if (min_nr_reg > max_nr_reg) {
- pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
- min_nr_reg, max_nr_reg);
+ if (min_nr_reg > max_nr_reg)
return -EINVAL;
- }
ctx->sample_interval = sample_int;
ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
static void kdamond_usleep(unsigned long usecs)
{
- if (usecs > 100 * 1000)
- schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+ /* See Documentation/timers/timers-howto.rst for the thresholds */
+ if (usecs > 20 * USEC_PER_MSEC)
+ schedule_timeout_idle(usecs_to_jiffies(usecs));
else
- usleep_range(usecs, usecs + 1);
+ usleep_idle_range(usecs, usecs + 1);
}
/* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
ctx->callback.after_sampling(ctx))
done = true;
- usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+ kdamond_usleep(ctx->sample_interval);
if (ctx->primitive.check_accesses)
max_nr_accesses = ctx->primitive.check_accesses(ctx);
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 9b520bb4a3e7..ad65436756af 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
&wmarks.low, &parsed);
if (ret != 18)
break;
- if (!damos_action_valid(action)) {
- pr_err("wrong action %d\n", action);
+ if (!damos_action_valid(action))
goto fail;
- }
pos += parsed;
scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
@@ -355,6 +353,7 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct damon_ctx *ctx = file->private_data;
+ struct damon_target *t, *next_t;
bool id_is_pid = true;
char *kbuf, *nrs;
unsigned long *targets;
@@ -399,8 +398,12 @@ static ssize_t dbgfs_target_ids_write(struct file *file,
goto unlock_out;
}
- /* remove targets with previously-set primitive */
- damon_set_targets(ctx, NULL, 0);
+ /* remove previously set targets */
+ damon_for_each_target_safe(t, next_t, ctx) {
+ if (targetid_is_pid(ctx))
+ put_pid((struct pid *)t->id);
+ damon_destroy_target(t);
+ }
/* Configure the context for the address space type */
if (id_is_pid)
@@ -652,10 +655,12 @@ static void dbgfs_before_terminate(struct damon_ctx *ctx)
if (!targetid_is_pid(ctx))
return;
+ mutex_lock(&ctx->kdamond_lock);
damon_for_each_target_safe(t, next, ctx) {
put_pid((struct pid *)t->id);
damon_destroy_target(t);
}
+ mutex_unlock(&ctx->kdamond_lock);
}
static struct damon_ctx *dbgfs_new_ctx(void)
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index ecfd0b2ed222..6a1b9272ea12 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
struct damon_addr_range *three_regions,
unsigned long *expected, int nr_expected)
{
- struct damon_ctx *ctx = damon_new_ctx();
struct damon_target *t;
struct damon_region *r;
int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
damon_add_region(r, t);
}
- damon_add_target(ctx, t);
damon_va_apply_three_regions(t, three_regions);
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}
-
- damon_destroy_ctx(ctx);
}
/*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
new_three_regions, expected, ARRAY_SIZE(expected));
}
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+ unsigned long start, unsigned long end, unsigned int nr_pieces)
{
- struct damon_ctx *c = damon_new_ctx();
- struct damon_target *t;
- struct damon_region *r;
- unsigned long i;
-
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
- -EINVAL);
-
- t = damon_new_target(42);
- r = damon_new_region(0, 100);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+ struct damon_target *t = damon_new_target(42);
+ struct damon_region *r = damon_new_region(start, end);
damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+ KUNIT_EXPECT_EQ(test,
+ damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
- i = 0;
damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
- KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+ KUNIT_EXPECT_EQ(test, r->ar.start, start);
+ KUNIT_EXPECT_EQ(test, r->ar.end, end);
}
+
damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+ unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+ struct damon_target *t = damon_new_target(42);
+ struct damon_region *r = damon_new_region(start, end);
+ unsigned long expected_width = (end - start) / nr_pieces;
+ unsigned long i = 0;
- t = damon_new_target(42);
- r = damon_new_region(5, 59);
damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+ KUNIT_EXPECT_EQ(test,
+ damon_va_evenly_split_region(t, r, nr_pieces), 0);
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
- i = 0;
damon_for_each_region(r, t) {
- if (i == 4)
+ if (i == nr_pieces - 1)
break;
- KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
- KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+ KUNIT_EXPECT_EQ(test,
+ r->ar.start, start + i++ * expected_width);
+ KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
}
- KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
- KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+ KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+ KUNIT_EXPECT_EQ(test, r->ar.end, end);
damon_free_target(t);
+}
- t = damon_new_target(42);
- r = damon_new_region(5, 6);
- damon_add_region(r, t);
- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+ -EINVAL);
- damon_for_each_region(r, t) {
- KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
- KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
- }
- damon_free_target(t);
- damon_destroy_ctx(c);
+ damon_test_split_evenly_fail(test, 0, 100, 0);
+ damon_test_split_evenly_succ(test, 0, 100, 10);
+ damon_test_split_evenly_succ(test, 5, 59, 5);
+ damon_test_split_evenly_fail(test, 5, 6, 2);
}
static struct kunit_case damon_test_cases[] = {
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 35fe49080ee9..20a9a9d69eb1 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -13,6 +13,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagewalk.h>
+#include <linux/sched/mm.h>
#include "prmtv-common.h"
@@ -626,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
case DAMOS_STAT:
return 0;
default:
- pr_warn("Wrong action %d\n", scheme->action);
return -EINVAL;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index daa0e23a6ee6..39c4c46c6133 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
goto skip;
if (!PageUptodate(page) || PageReadahead(page))
goto skip;
- if (PageHWPoison(page))
- goto skip;
if (!trylock_page(page))
goto skip;
if (page->mapping != mapping)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index abcd1785c629..a1baa198519a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
struct huge_bootmem_page *m = NULL; /* initialize for clang */
int nr_nodes, node;
- if (nid >= nr_online_nodes)
+ if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
return 0;
/* do node specific alloc */
if (nid != NUMA_NO_NODE) {
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 09945784df9e..a19154a8d196 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -683,6 +683,7 @@ static const struct file_operations objects_fops = {
.open = open_objects,
.read = seq_read,
.llseek = seq_lseek,
+ .release = seq_release,
};
static int __init kfence_debugfs_init(void)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6863a834ed42..2ed5f2a0879d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
rcu_read_unlock();
}
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
- struct pglist_data *pgdat,
- enum node_stat_item idx, int nr)
-{
- struct mem_cgroup *memcg;
- struct lruvec *lruvec;
-
- rcu_read_lock();
- memcg = obj_cgroup_memcg(objcg);
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
- mod_memcg_lruvec_state(lruvec, idx, nr);
- rcu_read_unlock();
-}
-
/**
* __count_memcg_events - account VM events in a cgroup
* @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
}
#endif
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
- struct memcg_stock_pcp *stock;
-
- if (likely(in_task())) {
- *pflags = 0UL;
- preempt_disable();
- stock = this_cpu_ptr(&memcg_stock);
- return &stock->task_obj;
- }
-
- local_irq_save(*pflags);
- stock = this_cpu_ptr(&memcg_stock);
- return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
- if (likely(in_task()))
- preempt_enable();
- else
- local_irq_restore(flags);
-}
-
/**
* consume_stock: Try to consume stocked charge on this cpu.
* @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
*/
#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+ struct memcg_stock_pcp *stock;
+
+ if (likely(in_task())) {
+ *pflags = 0UL;
+ preempt_disable();
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->task_obj;
+ }
+
+ local_irq_save(*pflags);
+ stock = this_cpu_ptr(&memcg_stock);
+ return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+ if (likely(in_task()))
+ preempt_enable();
+ else
+ local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+ struct pglist_data *pgdat,
+ enum node_stat_item idx, int nr)
+{
+ struct mem_cgroup *memcg;
+ struct lruvec *lruvec;
+
+ rcu_read_lock();
+ memcg = obj_cgroup_memcg(objcg);
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ mod_memcg_lruvec_state(lruvec, idx, nr);
+ rcu_read_unlock();
+}
+
int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
gfp_t gfp, bool new_page)
{
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 07c875fdeaf0..3a274468f193 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1470,17 +1470,12 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
if (!(flags & MF_COUNT_INCREASED)) {
res = get_hwpoison_page(p, flags);
if (!res) {
- /*
- * Check "filter hit" and "race with other subpage."
- */
lock_page(head);
- if (PageHWPoison(head)) {
- if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
- || (p != head && TestSetPageHWPoison(head))) {
+ if (hwpoison_filter(p)) {
+ if (TestClearPageHWPoison(head))
num_poisoned_pages_dec();
- unlock_page(head);
- return 0;
- }
+ unlock_page(head);
+ return 0;
}
unlock_page(head);
res = MF_FAILED;
@@ -2239,6 +2234,7 @@ retry:
} else if (ret == 0) {
if (soft_offline_free_page(page) && try_again) {
try_again = false;
+ flags &= ~MF_COUNT_INCREASED;
goto retry;
}
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 852041f6be41..2a9627dc784c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -35,6 +35,7 @@
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
+#include <linux/module.h>
#include <asm/tlbflush.h>
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 10e9c87260ed..f6248affaf38 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2140,8 +2140,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* memory with both reclaim and compact as well.
*/
if (!page && (gfp & __GFP_DIRECT_RECLAIM))
- page = __alloc_pages_node(hpage_node,
- gfp, order);
+ page = __alloc_pages(gfp, order, hpage_node, nmask);
goto out;
}
diff --git a/mm/slub.c b/mm/slub.c
index a8626825a829..abe7db581d68 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5081,6 +5081,7 @@ struct loc_track {
unsigned long max;
unsigned long count;
struct location *loc;
+ loff_t idx;
};
static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
static int slab_debugfs_show(struct seq_file *seq, void *v)
{
-
- struct location *l;
- unsigned int idx = *(unsigned int *)v;
struct loc_track *t = seq->private;
+ struct location *l;
+ unsigned long idx;
+ idx = (unsigned long) t->idx;
if (idx < t->count) {
l = &t->loc[idx];
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
{
struct loc_track *t = seq->private;
- v = ppos;
- ++*ppos;
+ t->idx = ++(*ppos);
if (*ppos <= t->count)
- return v;
+ return ppos;
return NULL;
}
static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
{
+ struct loc_track *t = seq->private;
+
+ t->idx = *ppos;
return ppos;
}
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 16f706c55d92..2b5531840583 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -30,6 +30,7 @@
#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fb9584641ac7..700434db5735 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1021,6 +1021,39 @@ static void handle_write_error(struct address_space *mapping,
unlock_page(page);
}
+static bool skip_throttle_noprogress(pg_data_t *pgdat)
+{
+ int reclaimable = 0, write_pending = 0;
+ int i;
+
+ /*
+ * If kswapd is disabled, reschedule if necessary but do not
+ * throttle as the system is likely near OOM.
+ */
+ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
+ return true;
+
+ /*
+ * If there are a lot of dirty/writeback pages then do not
+ * throttle as throttling will occur when the pages cycle
+ * towards the end of the LRU if still under writeback.
+ */
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ struct zone *zone = pgdat->node_zones + i;
+
+ if (!populated_zone(zone))
+ continue;
+
+ reclaimable += zone_reclaimable_pages(zone);
+ write_pending += zone_page_state_snapshot(zone,
+ NR_ZONE_WRITE_PENDING);
+ }
+ if (2 * write_pending <= reclaimable)
+ return true;
+
+ return false;
+}
+
void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
{
wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
@@ -1056,8 +1089,16 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
}
break;
+ case VMSCAN_THROTTLE_CONGESTED:
+ fallthrough;
case VMSCAN_THROTTLE_NOPROGRESS:
- timeout = HZ/2;
+ if (skip_throttle_noprogress(pgdat)) {
+ cond_resched();
+ return;
+ }
+
+ timeout = 1;
+
break;
case VMSCAN_THROTTLE_ISOLATED:
timeout = HZ/50;
@@ -3321,7 +3362,7 @@ again:
if (!current_is_kswapd() && current_may_throttle() &&
!sc->hibernation_mode &&
test_bit(LRUVEC_CONGESTED, &target_lruvec->flags))
- reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
+ reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
sc))
@@ -3386,16 +3427,16 @@ static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
}
/*
- * Do not throttle kswapd on NOPROGRESS as it will throttle on
- * VMSCAN_THROTTLE_WRITEBACK if there are too many pages under
- * writeback and marked for immediate reclaim at the tail of
- * the LRU.
+ * Do not throttle kswapd or cgroup reclaim on NOPROGRESS as it will
+ * throttle on VMSCAN_THROTTLE_WRITEBACK if there are too many pages
+ * under writeback and marked for immediate reclaim at the tail of the
+ * LRU.
*/
- if (current_is_kswapd())
+ if (current_is_kswapd() || cgroup_reclaim(sc))
return;
/* Throttle if making no progress at high prioities. */
- if (sc->priority < DEF_PRIORITY - 2)
+ if (sc->priority == 1 && !sc->nr_reclaimed)
reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
}
@@ -3415,6 +3456,7 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
unsigned long nr_soft_scanned;
gfp_t orig_mask;
pg_data_t *last_pgdat = NULL;
+ pg_data_t *first_pgdat = NULL;
/*
* If the number of buffer_heads in the machine exceeds the maximum
@@ -3478,14 +3520,19 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
/* need some check for avoid more shrink_zone() */
}
+ if (!first_pgdat)
+ first_pgdat = zone->zone_pgdat;
+
/* See comment about same check for global reclaim above */
if (zone->zone_pgdat == last_pgdat)
continue;
last_pgdat = zone->zone_pgdat;
shrink_node(zone->zone_pgdat, sc);
- consider_reclaim_throttle(zone->zone_pgdat, sc);
}
+ if (first_pgdat)
+ consider_reclaim_throttle(first_pgdat, sc);
+
/*
* Restore to original mask to avoid the impact on the caller if we
* promoted it to __GFP_HIGHMEM.
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2f34bbdde0e8..cfca99e295b8 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -85,8 +85,10 @@ static void ax25_kill_by_device(struct net_device *dev)
again:
ax25_for_each(s, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
- s->ax25_dev = NULL;
spin_unlock_bh(&ax25_list_lock);
+ lock_sock(s->sk);
+ s->ax25_dev = NULL;
+ release_sock(s->sk);
ax25_disconnect(s, ENETUNREACH);
spin_lock_bh(&ax25_list_lock);
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 433901dcf0c3..f4004cf0ff6f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: The multicast packet to check
* @orig: an originator to be set to forward the skb to
+ * @is_routable: stores whether the destination is routable
*
* Return: the forwarding mode as enum batadv_forw_mode and in case of
* BATADV_FORW_SINGLE set the orig to the single originator the skb
@@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
*/
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **orig)
+ struct batadv_orig_node **orig, int *is_routable)
{
int ret, tt_count, ip_count, unsnoop_count, total_count;
bool is_unsnoopable = false;
unsigned int mcast_fanout;
struct ethhdr *ethhdr;
- int is_routable = 0;
int rtr_count = 0;
ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
- &is_routable);
+ is_routable);
if (ret == -ENOMEM)
return BATADV_FORW_NONE;
else if (ret < 0)
@@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
unsnoop_count = !is_unsnoopable ? 0 :
atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
- rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
+ rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
total_count = tt_count + ip_count + unsnoop_count + rtr_count;
@@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
+ * @is_routable: stores whether the destination is routable
*
* Sends copies of a frame with multicast destination to any node that signaled
* interest in it, that is either via the translation table or the according
@@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
* is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
*/
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid)
+ unsigned short vid, int is_routable)
{
int ret;
@@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
return ret;
}
+ if (!is_routable)
+ goto skip_mc_router;
+
ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
if (ret != NET_XMIT_SUCCESS) {
kfree_skb(skb);
return ret;
}
+skip_mc_router:
consume_skb(skb);
return ret;
}
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 9fee5da08311..8aec818d0bf6 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -43,7 +43,8 @@ enum batadv_forw_mode {
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **mcast_single_orig);
+ struct batadv_orig_node **mcast_single_orig,
+ int *is_routable);
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid);
+ unsigned short vid, int is_routable);
void batadv_mcast_init(struct batadv_priv *bat_priv);
@@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
static inline enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **mcast_single_orig)
+ struct batadv_orig_node **mcast_single_orig,
+ int *is_routable)
{
return BATADV_FORW_ALL;
}
@@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
static inline int
batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid)
+ unsigned short vid, int is_routable)
{
kfree_skb(skb);
return NET_XMIT_DROP;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 7ee09337fc40..2dbbe6c19609 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
int gw_mode;
enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
struct batadv_orig_node *mcast_single_orig = NULL;
+ int mcast_is_routable = 0;
int network_offset = ETH_HLEN;
__be16 proto;
@@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
send:
if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
- &mcast_single_orig);
+ &mcast_single_orig,
+ &mcast_is_routable);
if (forw_mode == BATADV_FORW_NONE)
goto dropped;
@@ -359,7 +361,8 @@ send:
ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
mcast_single_orig);
} else if (forw_mode == BATADV_FORW_SOME) {
- ret = batadv_mcast_forw_send(bat_priv, skb, vid);
+ ret = batadv_mcast_forw_send(bat_priv, skb, vid,
+ mcast_is_routable);
} else {
if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
skb))
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index db4ab2c2ce18..891cfcf45644 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -337,7 +337,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
args[2] = get_bridge_ifindices(net, indices, args[2]);
- ret = copy_to_user(uarg, indices,
+ ret = copy_to_user((void __user *)args[1], indices,
array_size(args[2], sizeof(int)))
? -EFAULT : args[2];
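The br_ioctl change copies the ifindex array to the user buffer whose address was passed in args[1], not to the ioctl argument block itself. A userspace sketch of the same idea, with copy_out() standing in for copy_to_user() (a hypothetical helper, not the kernel API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for copy_to_user(): returns 0 on success. */
static int copy_out(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

int main(void)
{
    int indices[4] = { 3, 7, 0, 0 };
    int user_buf[4] = { 0 };
    /* args[0] = command, args[1] = pointer to the caller's buffer, args[2] = count */
    uintptr_t args[3] = { 0, (uintptr_t)user_buf, 2 };

    /* Copy to the buffer named by args[1], not to the args block. */
    int ret = copy_out((void *)args[1], indices, args[2] * sizeof(int));
    printf("ret=%d user_buf[0]=%d user_buf[1]=%d\n", ret, user_buf[0], user_buf[1]);
    return 0;
}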
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f3d751105343..de2409889489 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -4522,6 +4522,38 @@ int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
}
#endif
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val)
+{
+ unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+ if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
+ br_info(brmctx->br,
+ "trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
+ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
+ brmctx->multicast_query_interval = intvl_jiffies;
+}
+
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val)
+{
+ unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+ if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
+ br_info(brmctx->br,
+ "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
+ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
+ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+}
+
/**
* br_multicast_list_adjacent - Returns snooped multicast addresses
* @dev: The bridge port adjacent to which to retrieve addresses
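The new bridge helpers above clamp a configured multicast query interval to a floor (BR_MULTICAST_QUERY_INTVL_MIN, 1000 ms) and log when the requested value is raised. A minimal userspace sketch of that clamping, using milliseconds directly instead of jiffies/clock_t:

#include <stdio.h>

#define QUERY_INTVL_MIN_MS 1000UL

/* Clamp a requested interval to the minimum, warning when it is raised. */
static unsigned long set_query_interval_ms(unsigned long requested_ms)
{
    if (requested_ms < QUERY_INTVL_MIN_MS) {
        fprintf(stderr,
            "query interval %lums below minimum, using %lums\n",
            requested_ms, QUERY_INTVL_MIN_MS);
        return QUERY_INTVL_MIN_MS;
    }
    return requested_ms;
}

int main(void)
{
    printf("%lu\n", set_query_interval_ms(10));    /* clamped to 1000 */
    printf("%lu\n", set_query_interval_ms(12500)); /* kept as-is */
    return 0;
}

The same helpers are wired up below in br_netlink.c, br_sysfs_br.c and br_vlan_options.c so every configuration path goes through the clamp.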
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0c8b5f1a15bc..2ff83d84230d 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1357,7 +1357,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
- br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_query_intvl(&br->multicast_ctx, val);
}
if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -1369,7 +1369,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
- br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
}
if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c0efd697865a..e8c6ee322c71 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -28,6 +28,8 @@
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
#define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
#define BR_HWDOM_MAX BITS_PER_LONG
@@ -963,6 +965,10 @@ int br_multicast_dump_querier_state(struct sk_buff *skb,
int nest_attr);
size_t br_multicast_querier_state_size(void);
size_t br_rports_size(const struct net_bridge_mcast *brmctx);
+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val);
+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ unsigned long val);
static inline bool br_group_is_l2(const struct br_ip *group)
{
@@ -1147,9 +1153,9 @@ br_multicast_port_ctx_get_global(const struct net_bridge_mcast_port *pmctx)
static inline bool
br_multicast_ctx_vlan_global_disabled(const struct net_bridge_mcast *brmctx)
{
- return br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
- br_multicast_ctx_is_vlan(brmctx) &&
- !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
+ return br_multicast_ctx_is_vlan(brmctx) &&
+ (!br_opt_get(brmctx->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) ||
+ !(brmctx->vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED));
}
static inline bool
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index d9a89ddd0331..7b0c19772111 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -658,7 +658,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
static int set_query_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
- br->multicast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_query_intvl(&br->multicast_ctx, val);
return 0;
}
@@ -706,7 +706,7 @@ static ssize_t multicast_startup_query_interval_show(
static int set_startup_query_interval(struct net_bridge *br, unsigned long val,
struct netlink_ext_ack *extack)
{
- br->multicast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_startup_query_intvl(&br->multicast_ctx, val);
return 0;
}
diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
index 8ffd4ed2563c..a6382973b3e7 100644
--- a/net/bridge/br_vlan_options.c
+++ b/net/bridge/br_vlan_options.c
@@ -521,7 +521,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
- v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_query_intvl(&v->br_mcast_ctx, val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -535,7 +535,7 @@ static int br_vlan_process_global_one_opts(const struct net_bridge *br,
u64 val;
val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
- v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
+ br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val);
*changed = true;
}
if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
diff --git a/net/core/dev.c b/net/core/dev.c
index 2a352e668d10..c4708e2487fb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3941,8 +3941,8 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
return skb;
/* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
- qdisc_skb_cb(skb)->mru = 0;
- qdisc_skb_cb(skb)->post_ct = false;
+ tc_skb_cb(skb)->mru = 0;
+ tc_skb_cb(skb)->post_ct = false;
mini_qdisc_bstats_cpu_update(miniq, skb);
switch (tcf_classify(skb, miniq->block, miniq->filter_list, &cl_res, false)) {
@@ -5103,8 +5103,8 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
}
qdisc_skb_cb(skb)->pkt_len = skb->len;
- qdisc_skb_cb(skb)->mru = 0;
- qdisc_skb_cb(skb)->post_ct = false;
+ tc_skb_cb(skb)->mru = 0;
+ tc_skb_cb(skb)->post_ct = false;
skb->tc_at_ingress = 1;
mini_qdisc_bstats_cpu_update(miniq, skb);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 5ad72dbfcd07..c06c9ba6e8c5 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return err;
}
- if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
- info->attrs[DEVLINK_ATTR_NETNS_FD] ||
- info->attrs[DEVLINK_ATTR_NETNS_ID]) {
- dest_net = devlink_netns_get(skb, info);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
- }
-
if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
else
@@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
}
+ if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+ info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+ info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+ dest_net = devlink_netns_get(skb, info);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+ }
+
err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
if (dest_net)
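The devlink hunk moves acquisition of the destination network namespace to after the attribute validation block, so the early error returns in that block no longer leak the namespace reference. The general shape is: validate cheap, side-effect-free inputs first, take refcounted resources last. A hedged sketch with hypothetical get_ns()/put_ns() helpers:

#include <stdio.h>

/* Hypothetical refcounted resource. */
static int ns_refs;
static int *get_ns(void)    { ns_refs++; return &ns_refs; }
static void put_ns(int *ns) { (void)ns; ns_refs--; }

static int do_reload(int action_valid, int limit_valid, int want_netns)
{
    int *ns = NULL;

    /* 1. validate everything that can fail without side effects */
    if (!action_valid || !limit_valid)
        return -1;              /* nothing to release on this path */

    /* 2. only now take the reference that must be paired with a put */
    if (want_netns)
        ns = get_ns();

    /* ... perform the reload ... */

    if (ns)
        put_ns(ns);
    return 0;
}

int main(void)
{
    do_reload(0, 1, 1);  /* rejected before any reference is taken */
    do_reload(1, 1, 1);  /* reference taken and released */
    printf("leaked refs: %d\n", ns_refs);  /* 0 */
    return 0;
}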
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3255f57f5131..1b094c481f1d 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -238,7 +238,7 @@ void
skb_flow_dissect_ct(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, u16 *ctinfo_map,
- size_t mapsize, bool post_ct)
+ size_t mapsize, bool post_ct, u16 zone)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
struct flow_dissector_key_ct *key;
@@ -260,6 +260,7 @@ skb_flow_dissect_ct(const struct sk_buff *skb,
if (!ct) {
key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_INVALID;
+ key->ct_zone = zone;
return;
}
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 2820aca2173a..9ccd64e8a666 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla_entype) {
+ if (nla_len(nla_entype) < sizeof(u16)) {
+ NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
+ return -EINVAL;
+ }
encap_type = nla_get_u16(nla_entype);
if (lwtunnel_valid_encap_type(encap_type,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 72ba027c34cf..dda12fbd177b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
ASSERT_RTNL();
- n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
if (!n)
goto out;
- n->protocol = 0;
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ba2f38246f07..909db87d7383 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -832,7 +832,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
if (dev)
- printk("%sdev name=%s feat=0x%pNF\n",
+ printk("%sdev name=%s feat=%pNF\n",
level, dev->name, &dev->features);
if (sk)
printk("%ssk family=%hu type=%u proto=%u\n",
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 1ae52ac943f6..8eb671c827f9 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
+ psock_set_prog(&psock->progs.stream_parser, NULL);
+
if (!psock->saved_data_ready)
return;
@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
+ psock_set_prog(&psock->progs.stream_verdict, NULL);
+ psock_set_prog(&psock->progs.skb_verdict, NULL);
+
if (!psock->saved_data_ready)
return;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index f39ef79ced67..4ca4b11f4e5f 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
write_lock_bh(&sk->sk_callback_lock);
if (strp_stop)
sk_psock_stop_strp(sk, psock);
- else
+ if (verdict_stop)
sk_psock_stop_verdict(sk, psock);
+
+ if (psock->psock_update_sk_prot)
+ psock->psock_update_sk_prot(sk, psock, false);
write_unlock_bh(&sk->sk_callback_lock);
}
}
@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
if (msg_parser)
psock_set_prog(&psock->progs.msg_parser, msg_parser);
+ if (stream_parser)
+ psock_set_prog(&psock->progs.stream_parser, stream_parser);
+ if (stream_verdict)
+ psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+ if (skb_verdict)
+ psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
ret = sock_map_init_proto(sk, psock);
if (ret < 0)
@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
ret = sk_psock_init_strp(sk, psock);
if (ret)
goto out_unlock_drop;
- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
- psock_set_prog(&psock->progs.stream_parser, stream_parser);
sk_psock_start_strp(sk, psock);
} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
sk_psock_start_verdict(sk,psock);
} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
- psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
sk_psock_start_verdict(sk, psock);
}
write_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index de1c849a0a70..4ed74d509d6a 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -47,9 +47,13 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
void *injection;
__be32 *prefix;
u32 rew_op = 0;
+ u64 qos_class;
ocelot_xmit_get_vlan_info(skb, dp, &vlan_tci, &tag_type);
+ qos_class = netdev_get_num_tc(netdev) ?
+ netdev_get_prio_tc_map(netdev, skb->priority) : skb->priority;
+
injection = skb_push(skb, OCELOT_TAG_LEN);
prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
@@ -57,7 +61,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev,
memset(injection, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(injection, 1);
ocelot_ifh_set_src(injection, ds->num_ports);
- ocelot_ifh_set_qos_class(injection, skb->priority);
+ ocelot_ifh_set_qos_class(injection, qos_class);
ocelot_ifh_set_vlan_tci(injection, vlan_tci);
ocelot_ifh_set_tag_type(injection, tag_type);
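The tag_ocelot change stops writing the raw skb->priority into the injection header when the port has traffic classes configured; it maps the priority through the prio-to-TC table first. A small sketch of that selection (prio_tc_map[] and the masking are illustrative, not the netdev API):

#include <stdio.h>

/* Pick the QoS class for the injection header: if traffic classes are
 * configured, map the priority through the prio->TC table, otherwise
 * use the priority itself. */
static unsigned int pick_qos_class(unsigned int num_tc,
                                   const unsigned int *prio_tc_map,
                                   unsigned int priority)
{
    return num_tc ? prio_tc_map[priority & 0x7] : priority;
}

int main(void)
{
    unsigned int map[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };

    printf("%u\n", pick_qos_class(4, map, 5)); /* mapped: class 2 */
    printf("%u\n", pick_qos_class(0, map, 5)); /* no TCs configured: 5 */
    return 0;
}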
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 38b44c0291b1..96f4180aabd2 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
if (dev->dev.parent)
pm_runtime_get_sync(dev->dev.parent);
- if (!netif_device_present(dev)) {
+ if (!netif_device_present(dev) ||
+ dev->reg_state == NETREG_UNREGISTERING) {
ret = -ENODEV;
goto err;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 0189e3cd4a7d..5f70ffdae1b5 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
- dst_release(sk->sk_rx_dst);
+ dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -1994,6 +1994,10 @@ static int __init inet_init(void)
ip_init();
+ /* Initialise per-cpu ipv4 mibs */
+ if (init_ipv4_mibs())
+ panic("%s: Cannot init ipv4 mibs\n", __func__);
+
/* Setup TCP slab cache for open requests. */
tcp_init();
@@ -2024,12 +2028,6 @@ static int __init inet_init(void)
if (init_inet_pernet_ops())
pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);
- /*
- * Initialise per-cpu ipv4 mibs
- */
-
- if (init_ipv4_mibs())
- pr_crit("%s: Cannot init ipv4 mibs\n", __func__);
ipv4_proc_init();
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index fde7797b5806..92c29ab3d042 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
return nhs;
}
+static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ if (nla_len(nla) < sizeof(*gw)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
+ return -EINVAL;
+ }
+
+ *gw = nla_get_in_addr(nla);
+
+ return 0;
+}
+
/* only called when fib_nh is integrated into fib_info */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
@@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
return -EINVAL;
}
if (nla) {
- fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+ ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
+ extack);
+ if (ret)
+ goto errout;
+
if (fib_cfg.fc_gw4)
fib_cfg.fc_gw_family = AF_INET;
} else if (nlav) {
@@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
}
nla = nla_find(attrs, attrlen, RTA_FLOW);
- if (nla)
+ if (nla) {
+ if (nla_len(nla) < sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+ return -EINVAL;
+ }
fib_cfg.fc_flow = nla_get_u32(nla);
+ }
fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+ /* RTA_ENCAP_TYPE length checked in
+ * lwtunnel_valid_encap_type_attr
+ */
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
fib_cfg.fc_encap_type = nla_get_u16(nla);
@@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
+ int err;
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nlav = nla_find(attrs, attrlen, RTA_VIA);
@@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
}
if (nla) {
+ __be32 gw;
+
+ err = fib_gw_from_attr(&gw, nla, extack);
+ if (err)
+ return err;
+
if (nh->fib_nh_gw_family != AF_INET ||
- nla_get_in_addr(nla) != nh->fib_nh_gw4)
+ gw != nh->fib_nh_gw4)
return 1;
} else if (nlav) {
struct fib_config cfg2;
- int err;
err = fib_gw_from_via(&cfg2, nlav, extack);
if (err)
@@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
- if (nla && nla_get_u32(nla) != nh->nh_tclassid)
- return 1;
+ if (nla) {
+ if (nla_len(nla) < sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+ return -EINVAL;
+ }
+ if (nla_get_u32(nla) != nh->nh_tclassid)
+ return 1;
+ }
#endif
}
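Several of the lwtunnel and fib hunks add the same defensive pattern: check nla_len() against the expected payload size before calling nla_get_u32()/nla_get_u16()/nla_get_in_addr(), so a malformed nested attribute cannot cause an out-of-bounds read. A standalone sketch of that check over a simple TLV record (the attr struct here is illustrative, not struct nlattr):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative TLV: 'len' counts only the payload bytes that follow. */
struct attr {
    uint16_t type;
    uint16_t len;
    unsigned char payload[8];
};

/* Reject payloads shorter than the type we intend to read. */
static int attr_get_u32(const struct attr *a, uint32_t *out)
{
    if (a->len < sizeof(*out))
        return -1;              /* "Invalid attribute" */
    memcpy(out, a->payload, sizeof(*out));
    return 0;
}

int main(void)
{
    struct attr ok  = { .type = 1, .len = 4, .payload = { 1, 0, 0, 0 } };
    struct attr bad = { .type = 1, .len = 2, .payload = { 1, 0 } };
    uint32_t v;

    printf("ok:  %d\n", attr_get_u32(&ok, &v));   /* 0 */
    printf("bad: %d\n", attr_get_u32(&bad, &v));  /* -1, no out-of-bounds read */
    return 0;
}

The IPv6 multipath code in net/ipv6/route.c below applies the same fib6_gw_from_attr() check to RTA_GATEWAY.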
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f7fea3a7c5e6..62a67fdc344c 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c8fa6e7f7d12..581b5b2d72a5 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -261,6 +261,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->idiag_retrans = 0;
+ r->idiag_expires = 0;
if (inet_diag_msg_attrs_fill(sk, skb, r, ext,
sk_user_ns(NETLINK_CB(cb->skb).sk),
@@ -314,9 +315,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->idiag_retrans = icsk->icsk_probes_out;
r->idiag_expires =
jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
- } else {
- r->idiag_timer = 0;
- r->idiag_expires = 0;
}
if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index bbb3d39c69af..2bb28bfd83bf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3012,8 +3012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
- dst_release(sk->sk_rx_dst);
- sk->sk_rx_dst = NULL;
+ dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->segs_in = 0;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 246ab7b5e857..0ce46849ec3d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5787,7 +5787,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
trace_tcp_probe(sk, skb);
tcp_mstamp_refresh(tp);
- if (unlikely(!sk->sk_rx_dst))
+ if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
* Header prediction.
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 13d868c43284..084df223b5df 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1701,7 +1701,10 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
struct sock *rsk;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst;
+
+ dst = rcu_dereference_protected(sk->sk_rx_dst,
+ lockdep_sock_is_held(sk));
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
@@ -1709,8 +1712,8 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
!INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
dst, 0)) {
+ RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
dst_release(dst);
- sk->sk_rx_dst = NULL;
}
}
tcp_rcv_established(sk, skb);
@@ -1786,7 +1789,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
- struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
@@ -2201,7 +2204,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
- sk->sk_rx_dst = dst;
+ rcu_assign_pointer(sk->sk_rx_dst, dst);
sk->sk_rx_dst_ifindex = skb->skb_iif;
}
}
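The TCP and UDP hunks convert sk->sk_rx_dst into an RCU-managed pointer: readers use rcu_dereference(), the owner publishes with rcu_assign_pointer() or swaps with xchg(), and the old dst is released afterwards. The C11-atomics analogy below only illustrates the publish/swap shape; it does not model RCU grace periods, which are what make the kernel's deferred dst_release() safe for concurrent readers:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(int *) cached; /* analogous to sk->sk_rx_dst */

/* Publish a new cached object and drop the previous one. */
static void cache_set(int *new_obj)
{
    int *old = atomic_exchange_explicit(&cached, new_obj, memory_order_acq_rel);
    /* The kernel waits for an RCU grace period before dst_release();
     * this single-threaded sketch can free immediately. */
    free(old);
}

int main(void)
{
    int *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

    if (!a || !b)
        return 1;
    *a = 1;
    *b = 2;

    cache_set(a);
    cache_set(b);                       /* swaps b in, frees a */
    printf("%d\n", *atomic_load_explicit(&cached, memory_order_acquire));
    cache_set(NULL);                    /* frees b */
    return 0;
}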
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index cf913a66df17..7c2d3ac2363a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int ret = 0;
int state = child->sk_state;
- /* record NAPI ID of child */
- sk_mark_napi_id(child, skb);
+ /* record sk_napi_id and sk_rx_queue_mapping of child. */
+ sk_mark_napi_id_set(child, skb);
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8bcecdd6aeda..0cd6b857e7ec 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
return -EINVAL;
}
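This hunk (and the matching one in udp_v6_send_skb() later in the series) bounds the GSO send by the UDP payload length computed earlier in that branch rather than by skb->len, which also includes protocol headers. A trivial sketch of the corrected bound; the segment constant is illustrative, not the kernel's UDP_MAX_SEGMENTS:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SEGMENTS_SKETCH 64  /* illustrative segment budget */

/* Accept a GSO send only if the payload fits in the segment budget. */
static bool gso_len_ok(unsigned int total_len, unsigned int hdr_len,
                       unsigned int gso_size)
{
    unsigned int datalen = total_len - hdr_len;

    return datalen <= gso_size * MAX_SEGMENTS_SKETCH;
}

int main(void)
{
    /* 64 segments of 1400 bytes = 89600 bytes of payload allowed */
    printf("%d\n", gso_len_ok(89600 + 28, 28, 1400)); /* 1: exactly at the limit */
    printf("%d\n", gso_len_ok(89600 + 29, 28, 1400)); /* 0: one byte over */
    return 0;
}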
@@ -2250,7 +2250,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
struct dst_entry *old;
if (dst_hold_safe(dst)) {
- old = xchg(&sk->sk_rx_dst, dst);
+ old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst);
dst_release(old);
return old != dst;
}
@@ -2440,7 +2440,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct dst_entry *dst = skb_dst(skb);
int ret;
- if (unlikely(sk->sk_rx_dst != dst))
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp_sk_rx_dst_set(sk, dst);
ret = udp_unicast_rcv_skb(sk, skb, uh);
@@ -2599,7 +2599,7 @@ int udp_v4_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = READ_ONCE(sk->sk_rx_dst);
+ dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
@@ -3075,7 +3075,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
{
seq_setwidth(seq, 127);
if (v == SEQ_START_TOKEN)
- seq_puts(seq, " sl local_address rem_address st tx_queue "
+ seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops");
else {
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index a7c31ab67c5d..96c5cc0f30ce 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -57,6 +57,7 @@
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
+#include <net/seg6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
@@ -820,6 +821,7 @@ out_bh_enable:
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
+ struct inet6_skb_parm *opt = IP6CB(skb);
const struct inet6_protocol *ipprot;
int inner_offset;
__be16 frag_off;
@@ -829,6 +831,8 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
+ seg6_icmp_srh(skb, opt);
+
nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
if (ipv6_ext_hdr(nexthdr)) {
/* now skip over extension headers */
@@ -853,7 +857,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot && ipprot->err_handler)
- ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
+ ipprot->err_handler(skb, opt, type, code, inner_offset, info);
raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
return;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 527e9ead7449..5e9474bc54fc 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -808,6 +808,8 @@ vti6_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data
struct net *net = dev_net(dev);
struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ memset(&p1, 0, sizeof(p1));
+
switch (cmd) {
case SIOCGETTUNNEL:
if (dev == ip6n->fb_tnl_dev) {
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 60f1e4f5be5a..c51d5ce3711c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1020,6 +1020,9 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
struct raw6_sock *rp = raw6_sk(sk);
int val;
+ if (optlen < sizeof(val))
+ return -EINVAL;
+
if (copy_from_sockptr(&val, optval, sizeof(val)))
return -EFAULT;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 42d60c76d30a..1deb6297aab6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5224,6 +5224,19 @@ out:
return should_notify;
}
+static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ if (nla_len(nla) < sizeof(*gw)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
+ return -EINVAL;
+ }
+
+ *gw = nla_get_in6_addr(nla);
+
+ return 0;
+}
+
static int ip6_route_multipath_add(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
@@ -5264,10 +5277,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
- r_cfg.fc_gateway = nla_get_in6_addr(nla);
+ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+ extack);
+ if (err)
+ goto cleanup;
+
r_cfg.fc_flags |= RTF_GATEWAY;
}
r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+
+ /* RTA_ENCAP_TYPE length checked in
+ * lwtunnel_valid_encap_type_attr
+ */
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
@@ -5434,7 +5455,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
- nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+ extack);
+ if (err) {
+ last_err = err;
+ goto next_rtnh;
+ }
+
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
@@ -5442,6 +5469,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
if (err)
last_err = err;
+next_rtnh:
rtnh = rtnh_next(rtnh, &remaining);
}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index a8b5784afb1a..73aaabf0e966 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -75,6 +75,65 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
return true;
}
+struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags)
+{
+ struct ipv6_sr_hdr *srh;
+ int len, srhoff = 0;
+
+ if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
+ return NULL;
+
+ if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
+ return NULL;
+
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+ len = (srh->hdrlen + 1) << 3;
+
+ if (!pskb_may_pull(skb, srhoff + len))
+ return NULL;
+
+ /* note that pskb_may_pull may change pointers in header;
+ * for this reason it is necessary to reload them when needed.
+ */
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+ if (!seg6_validate_srh(srh, len, true))
+ return NULL;
+
+ return srh;
+}
+
+/* Determine if an ICMP invoking packet contains a segment routing
+ * header. If it does, extract the offset to the true destination
+ * address, which is in the first segment address.
+ */
+void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt)
+{
+ __u16 network_header = skb->network_header;
+ struct ipv6_sr_hdr *srh;
+
+ /* Update network header to point to the invoking packet
+ * inside the ICMP packet, so we can use the seg6_get_srh()
+ * helper.
+ */
+ skb_reset_network_header(skb);
+
+ srh = seg6_get_srh(skb, 0);
+ if (!srh)
+ goto out;
+
+ if (srh->type != IPV6_SRCRT_TYPE_4)
+ goto out;
+
+ opt->flags |= IP6SKB_SEG6;
+ opt->srhoff = (unsigned char *)srh - skb->data;
+
+out:
+ /* Restore the network header back to the ICMP packet */
+ skb->network_header = network_header;
+}
+
static struct genl_family seg6_genl_family;
static const struct nla_policy seg6_genl_policy[SEG6_ATTR_MAX + 1] = {
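seg6_get_srh() above is now shared code and is also used when an ICMPv6 error carries an SRv6-encapsulated invoking packet: the parser verifies that enough bytes are present before reading the fixed header, reads the real length from it, then verifies again before trusting the full header. A userspace sketch of that two-step bounds check on a flat buffer (parse_rh() and the header layout are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rh {               /* illustrative routing-header prefix */
    uint8_t nexthdr;
    uint8_t hdrlen;       /* length in 8-byte units, not counting the first 8 */
};

/* Return the full header length, or -1 if the buffer cannot hold it. */
static int parse_rh(const unsigned char *buf, size_t buflen, size_t off)
{
    struct rh h;
    size_t len;

    if (off + sizeof(h) > buflen)      /* first check: fixed part */
        return -1;
    memcpy(&h, buf + off, sizeof(h));

    len = ((size_t)h.hdrlen + 1) * 8;
    if (off + len > buflen)            /* second check: full header */
        return -1;
    return (int)len;
}

int main(void)
{
    unsigned char pkt[24] = { 0 };

    pkt[1] = 1;                        /* hdrlen=1 -> 16 bytes total */
    printf("%d\n", parse_rh(pkt, sizeof(pkt), 0));  /* 16 */
    printf("%d\n", parse_rh(pkt, 10, 0));           /* -1: truncated */
    return 0;
}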
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 3adc5d9211ad..d64855010948 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+ /* the control block has been erased, so we have to set the
+ * iif once again.
+ * We read the receiving interface index directly from the
+ * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
+ * ip_rcv_core(...)).
+ */
+ IP6CB(skb)->iif = skb->skb_iif;
}
hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 2dc40b3f373e..ef88489c71f5 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -150,40 +150,11 @@ static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
return (struct seg6_local_lwt *)lwt->data;
}
-static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb, int flags)
-{
- struct ipv6_sr_hdr *srh;
- int len, srhoff = 0;
-
- if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
- return NULL;
-
- if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
- return NULL;
-
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
- len = (srh->hdrlen + 1) << 3;
-
- if (!pskb_may_pull(skb, srhoff + len))
- return NULL;
-
- /* note that pskb_may_pull may change pointers in header;
- * for this reason it is necessary to reload them when needed.
- */
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
- if (!seg6_validate_srh(srh, len, true))
- return NULL;
-
- return srh;
-}
-
static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
{
struct ipv6_sr_hdr *srh;
- srh = get_srh(skb, IP6_FH_F_SKIP_RH);
+ srh = seg6_get_srh(skb, IP6_FH_F_SKIP_RH);
if (!srh)
return NULL;
@@ -200,7 +171,7 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
struct ipv6_sr_hdr *srh;
unsigned int off = 0;
- srh = get_srh(skb, 0);
+ srh = seg6_get_srh(skb, 0);
if (srh && srh->segments_left > 0)
return false;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1b57ee36d668..8a3618a30632 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1933,7 +1933,6 @@ static int __net_init sit_init_net(struct net *net)
return 0;
err_reg_dev:
- ipip6_dev_free(sitn->fb_tunnel_dev);
free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 551fce49841d..680e6481b967 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -107,7 +107,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
if (dst && dst_hold_safe(dst)) {
const struct rt6_info *rt = (const struct rt6_info *)dst;
- sk->sk_rx_dst = dst;
+ rcu_assign_pointer(sk->sk_rx_dst, dst);
sk->sk_rx_dst_ifindex = skb->skb_iif;
sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
}
@@ -1505,7 +1505,10 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- struct dst_entry *dst = sk->sk_rx_dst;
+ struct dst_entry *dst;
+
+ dst = rcu_dereference_protected(sk->sk_rx_dst,
+ lockdep_sock_is_held(sk));
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
@@ -1513,8 +1516,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
dst, sk->sk_rx_dst_cookie) == NULL) {
+ RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
dst_release(dst);
- sk->sk_rx_dst = NULL;
}
}
@@ -1874,7 +1877,7 @@ INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
- struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
+ struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, sk->sk_rx_dst_cookie);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e43b31d25fb6..a0871c212741 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -40,6 +40,7 @@
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
+#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
@@ -561,7 +562,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
- const struct in6_addr *daddr = &hdr->daddr;
+ const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
bool tunnel = false;
struct sock *sk;
@@ -956,7 +957,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
struct dst_entry *dst = skb_dst(skb);
int ret;
- if (unlikely(sk->sk_rx_dst != dst))
+ if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
udp6_sk_rx_dst_set(sk, dst);
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
@@ -1070,7 +1071,7 @@ INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
skb->sk = sk;
skb->destructor = sock_efree;
- dst = READ_ONCE(sk->sk_rx_dst);
+ dst = rcu_dereference(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, sk->sk_rx_dst_cookie);
@@ -1204,7 +1205,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
return -EINVAL;
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 470ff0ce3dc7..7d2925bb966e 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
/**
@@ -191,7 +191,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
sband = ieee80211_get_sband(sdata);
if (!sband)
return;
- he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type);
+ he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(&sdata->vif));
if (!he_cap)
return;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 430a58587538..74a878f213d3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2020 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -106,7 +106,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
- ieee80211_tx_skb(sdata, skb);
+ ieee80211_tx_skb_tid(sdata, skb, tid);
}
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
@@ -213,6 +213,8 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
struct ieee80211_txq *txq = sta->sta.txq[tid];
struct txq_info *txqi;
+ lockdep_assert_held(&sta->ampdu_mlme.mtx);
+
if (!txq)
return;
@@ -290,7 +292,6 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
ieee80211_assign_tid_tx(sta, tid, NULL);
ieee80211_agg_splice_finish(sta->sdata, tid);
- ieee80211_agg_start_txq(sta, tid, false);
kfree_rcu(tid_tx, rcu_head);
}
@@ -480,8 +481,7 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
- tid_tx->dialog_token,
- sta->tid_seq[tid] >> 4,
+ tid_tx->dialog_token, tid_tx->ssn,
buf_size, tid_tx->timeout);
WARN_ON(test_and_set_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state));
@@ -523,6 +523,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
params.ssn = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, &params);
+ tid_tx->ssn = params.ssn;
if (ret == IEEE80211_AMPDU_TX_START_DELAY_ADDBA) {
return;
} else if (ret == IEEE80211_AMPDU_TX_START_IMMEDIATE) {
@@ -889,6 +890,7 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
bool send_delba = false;
+ bool start_txq = false;
ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
sta->sta.addr, tid);
@@ -906,10 +908,14 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
send_delba = true;
ieee80211_remove_tid_tx(sta, tid);
+ start_txq = true;
unlock_sta:
spin_unlock_bh(&sta->lock);
+ if (start_txq)
+ ieee80211_agg_start_txq(sta, tid, false);
+
if (send_delba)
ieee80211_send_delba(sdata, sta->sta.addr, tid,
WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bd3d3195097f..2d0dd69f9753 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1264,7 +1264,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
return 0;
error:
+ mutex_lock(&local->mtx);
ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
+
return err;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index cd3731cbf6c6..c336267f4599 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1219,8 +1219,11 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
- if (local->in_reconfig)
+ /* In reconfig don't transmit now, but mark for waking later */
+ if (local->in_reconfig) {
+ set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
return;
+ }
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5666bbb8860b..482c98ede19b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -647,6 +647,26 @@ struct mesh_csa_settings {
struct cfg80211_csa_settings settings;
};
+/**
+ * struct mesh_table
+ *
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
+ * @entries: number of entries in the table
+ */
+struct mesh_table {
+ struct hlist_head known_gates;
+ spinlock_t gates_lock;
+ struct rhashtable rhead;
+ struct hlist_head walk_head;
+ spinlock_t walk_lock;
+ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
+};
+
struct ieee80211_if_mesh {
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
@@ -721,8 +741,8 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */
int meshconf_offset;
- struct mesh_table *mesh_paths;
- struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+ struct mesh_table mesh_paths;
+ struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
int mpp_paths_generation;
};
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 77080b4f87b8..b2b717a78114 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -127,26 +127,6 @@ struct mesh_path {
u32 path_change_count;
};
-/**
- * struct mesh_table
- *
- * @known_gates: list of known mesh gates and their mpaths by the station. The
- * gate's mpath may or may not be resolved and active.
- * @gates_lock: protects updates to known_gates
- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containing all mesh_path objects
- * @walk_lock: lock protecting walk_head
- * @entries: number of entries in the table
- */
-struct mesh_table {
- struct hlist_head known_gates;
- spinlock_t gates_lock;
- struct rhashtable rhead;
- struct hlist_head walk_head;
- spinlock_t walk_lock;
- atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
-};
-
/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256
@@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(struct timer_list *t);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7cab1cf09bf1..acc1c299f1ae 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
mesh_path_free_rcu(tbl, mpath);
}
-static struct mesh_table *mesh_table_alloc(void)
+static void mesh_table_init(struct mesh_table *tbl)
{
- struct mesh_table *newtbl;
+ INIT_HLIST_HEAD(&tbl->known_gates);
+ INIT_HLIST_HEAD(&tbl->walk_head);
+ atomic_set(&tbl->entries, 0);
+ spin_lock_init(&tbl->gates_lock);
+ spin_lock_init(&tbl->walk_lock);
- newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
- if (!newtbl)
- return NULL;
-
- INIT_HLIST_HEAD(&newtbl->known_gates);
- INIT_HLIST_HEAD(&newtbl->walk_head);
- atomic_set(&newtbl->entries, 0);
- spin_lock_init(&newtbl->gates_lock);
- spin_lock_init(&newtbl->walk_lock);
- if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
- kfree(newtbl);
- return NULL;
- }
-
- return newtbl;
+ /* rhashtable_init() may fail only in case of wrong
+ * mesh_rht_params
+ */
+ WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}
static void mesh_table_free(struct mesh_table *tbl)
{
rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl);
- kfree(tbl);
}
/**
@@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
+ return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
+ return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
static struct mesh_path *
@@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
+ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}
/**
@@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
+ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
/**
@@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err;
rcu_read_lock();
- tbl = mpath->sdata->u.mesh.mesh_paths;
+ tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) {
@@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (!new_mpath)
return ERR_PTR(-ENOMEM);
- tbl = sdata->u.mesh.mesh_paths;
+ tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
- tbl = sdata->u.mesh.mpp_paths;
+ tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
void mesh_plink_broken(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
- struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- table_flush_by_iface(sdata->u.mesh.mesh_paths);
- table_flush_by_iface(sdata->u.mesh.mpp_paths);
+ table_flush_by_iface(&sdata->u.mesh.mesh_paths);
+ table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}
/**
@@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
- err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+ err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++;
return err;
}
@@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *gate;
bool copy = false;
- tbl = sdata->u.mesh.mesh_paths;
+ tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath);
}
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl_path, *tbl_mpp;
- int ret;
-
- tbl_path = mesh_table_alloc();
- if (!tbl_path)
- return -ENOMEM;
-
- tbl_mpp = mesh_table_alloc();
- if (!tbl_mpp) {
- ret = -ENOMEM;
- goto free_path;
- }
-
- sdata->u.mesh.mesh_paths = tbl_path;
- sdata->u.mesh.mpp_paths = tbl_mpp;
-
- return 0;
-
-free_path:
- mesh_table_free(tbl_path);
- return ret;
+ mesh_table_init(&sdata->u.mesh.mesh_paths);
+ mesh_table_init(&sdata->u.mesh.mpp_paths);
}
static
@@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
- mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
- mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
- mesh_table_free(sdata->u.mesh.mesh_paths);
- mesh_table_free(sdata->u.mesh.mpp_paths);
+ mesh_table_free(&sdata->u.mesh.mesh_paths);
+ mesh_table_free(&sdata->u.mesh.mpp_paths);
}
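The mesh path table rework embeds struct mesh_table directly in ieee80211_if_mesh instead of allocating it with kmalloc(), which removes the allocation-failure paths and lets mesh_pathtbl_init() return void. A small sketch of the embedded-versus-allocated trade-off (the types and init function are illustrative):

#include <stdio.h>

struct table {                 /* stands in for struct mesh_table */
    int entries;
};

struct iface {
    struct table paths;        /* embedded: lives and dies with the interface */
    struct table mpp_paths;
};

/* Initialization cannot fail once the storage is part of the parent object. */
static void table_init(struct table *t)
{
    t->entries = 0;
}

static void iface_init(struct iface *ifc)
{
    table_init(&ifc->paths);
    table_init(&ifc->mpp_paths);
}

int main(void)
{
    struct iface ifc;

    iface_init(&ifc);
    printf("%d %d\n", ifc.paths.entries, ifc.mpp_paths.entries);
    return 0;
}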
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 54ab0e1ef6ca..3147ca89f608 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2452,11 +2452,18 @@ static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
u16 tx_time)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- u16 tid = ieee80211_get_tid(hdr);
- int ac = ieee80211_ac_from_tid(tid);
- struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
+ u16 tid;
+ int ac;
+ struct ieee80211_sta_tx_tspec *tx_tspec;
unsigned long now = jiffies;
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ tid = ieee80211_get_tid(hdr);
+ ac = ieee80211_ac_from_tid(tid);
+ tx_tspec = &ifmgd->tx_tspec[ac];
+
if (likely(!tx_tspec->admitted_time))
return;
@@ -5258,7 +5265,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
*/
if (new_sta) {
u32 rates = 0, basic_rates = 0;
- bool have_higher_than_11mbit;
+ bool have_higher_than_11mbit = false;
int min_rate = INT_MAX, min_rate_index = -1;
const struct cfg80211_bss_ies *ies;
int shift = ieee80211_vif_get_shift(&sdata->vif);
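The ieee80211_sta_tx_wmm_ac_notify() hunk above stops reading a TID from every frame; only QoS data frames carry a QoS control field, so the function now returns early for anything else before calling ieee80211_get_tid(). A sketch of that guard on a simplified frame descriptor (the frame struct is illustrative, not struct ieee80211_hdr):

#include <stdbool.h>
#include <stdio.h>

struct frame {                 /* illustrative, not the real header */
    bool is_qos_data;
    unsigned int tid;          /* only meaningful when is_qos_data is true */
};

/* Account tx time per access category, but only for QoS data frames. */
static void account_tx_time(const struct frame *f, unsigned int tx_time)
{
    if (!f->is_qos_data)
        return;                /* non-QoS frames have no TID to read */

    printf("tid %u: +%u us\n", f->tid, tx_time);
}

int main(void)
{
    struct frame mgmt = { .is_qos_data = false };
    struct frame data = { .is_qos_data = true, .tid = 5 };

    account_tx_time(&mgmt, 100);   /* ignored */
    account_tx_time(&data, 100);   /* accounted against TID 5 */
    return 0;
}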
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9541a4c30aca..0544563ede52 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2944,6 +2944,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!fwd_skb)
goto out;
+ fwd_skb->dev = sdata->dev;
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
info = IEEE80211_SKB_CB(fwd_skb);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 51b49f0d3ad4..537535a88990 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -644,13 +644,13 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
err = -EEXIST;
- goto out_err;
+ goto out_cleanup;
}
sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
if (!sinfo) {
err = -ENOMEM;
- goto out_err;
+ goto out_cleanup;
}
local->num_sta++;
@@ -667,6 +667,15 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
list_add_tail_rcu(&sta->list, &local->sta_list);
+ /* update channel context before notifying the driver about state
+ * change, this enables driver using the updated channel context right away.
+ */
+ if (sta->sta_state >= IEEE80211_STA_ASSOC) {
+ ieee80211_recalc_min_chandef(sta->sdata);
+ if (!sta->sta.support_p2p_ps)
+ ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
+ }
+
/* notify driver */
err = sta_info_insert_drv_state(local, sdata, sta);
if (err)
@@ -674,12 +683,6 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
set_sta_flag(sta, WLAN_STA_INSERTED);
- if (sta->sta_state >= IEEE80211_STA_ASSOC) {
- ieee80211_recalc_min_chandef(sta->sdata);
- if (!sta->sta.support_p2p_ps)
- ieee80211_recalc_p2p_go_ps_allowed(sta->sdata);
- }
-
/* accept BA sessions now */
clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
@@ -706,8 +709,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
out_drop_sta:
local->num_sta--;
synchronize_net();
+ out_cleanup:
cleanup_single_sta(sta);
- out_err:
mutex_unlock(&local->sta_mtx);
kfree(sinfo);
rcu_read_lock();
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ba2796782008..379fd367197f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -176,6 +176,7 @@ struct sta_info;
* @failed_bar_ssn: ssn of the last failed BAR tx attempt
* @bar_pending: BAR needs to be re-sent
 * @amsdu: support A-MSDU within A-MPDU
+ * @ssn: starting sequence number of the session
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -199,6 +200,7 @@ struct tid_ampdu_tx {
u8 stop_initiator;
bool tx_stop;
u16 buf_size;
+ u16 ssn;
u16 failed_bar_ssn;
bool bar_pending;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 278945e3e08a..86a54df3aabd 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1822,15 +1822,15 @@ static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
ieee80211_tx_result res = TX_CONTINUE;
+ if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
+ CALL_TXH(ieee80211_tx_h_rate_ctrl);
+
if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
__skb_queue_tail(&tx->skbs, tx->skb);
tx->skb = NULL;
goto txh_done;
}
- if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
- CALL_TXH(ieee80211_tx_h_rate_ctrl);
-
CALL_TXH(ieee80211_tx_h_michael_mic_add);
CALL_TXH(ieee80211_tx_h_sequence);
CALL_TXH(ieee80211_tx_h_fragment);
@@ -4191,11 +4191,11 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
ieee80211_aggr_check(sdata, sta, skb);
+ sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
+
if (sta) {
struct ieee80211_fast_tx *fast_tx;
- sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-
fast_tx = rcu_dereference(sta->fast_tx);
if (fast_tx &&
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 43df2f0c5db9..0e4e1956bcea 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -943,7 +943,12 @@ static void ieee80211_parse_extension_element(u32 *crc,
struct ieee802_11_elems *elems)
{
const void *data = elem->data + 1;
- u8 len = elem->datalen - 1;
+ u8 len;
+
+ if (!elem->datalen)
+ return;
+
+ len = elem->datalen - 1;
switch (elem->data[0]) {
case WLAN_EID_EXT_HE_MU_EDCA:
@@ -2063,7 +2068,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
chandef.chan = chan;
skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
- 100 + ie_len);
+ local->scan_ies_len + ie_len);
if (!skb)
return NULL;
@@ -2646,6 +2651,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
mutex_unlock(&local->sta_mtx);
}
+ /*
+ * If this is for hw restart things are still running.
+ * We may want to change that later, however.
+ */
+ if (local->open_count && (!suspended || reconfig_due_to_wowlan))
+ drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
+
if (local->in_reconfig) {
local->in_reconfig = false;
barrier();
@@ -2664,13 +2676,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
false);
- /*
- * If this is for hw restart things are still running.
- * We may want to change that later, however.
- */
- if (local->open_count && (!suspended || reconfig_due_to_wowlan))
- drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
-
if (!suspended)
return 0;
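
In the ieee80211_parse_extension_element() hunk above, elem->datalen is a u8, so the old unconditional "len = elem->datalen - 1" would wrap to 255 for an empty element and let the parser read far past it. The guard has to come before the subtraction; as a minimal sketch:

	u8 datalen = elem->datalen;
	u8 len;

	if (!datalen)			/* nothing after the extension ID */
		return;
	len = datalen - 1;		/* safe: no u8 underflow to 255 */
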
diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c
index 5cc042121493..6ad3e33bd4d4 100644
--- a/net/mctp/neigh.c
+++ b/net/mctp/neigh.c
@@ -85,8 +85,8 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev)
mutex_unlock(&net->mctp.neigh_lock);
}
-// TODO: add a "source" flag so netlink can only delete static neighbours?
-static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
+static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid,
+ enum mctp_neigh_source source)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh, *tmp;
@@ -94,7 +94,8 @@ static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
mutex_lock(&net->mctp.neigh_lock);
list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
- if (neigh->dev == mdev && neigh->eid == eid) {
+ if (neigh->dev == mdev && neigh->eid == eid &&
+ neigh->source == source) {
list_del_rcu(&neigh->list);
/* TODO: immediate RTM_DELNEIGH */
call_rcu(&neigh->rcu, __mctp_neigh_free);
@@ -202,7 +203,7 @@ static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!mdev)
return -ENODEV;
- return mctp_neigh_remove(mdev, eid);
+ return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC);
}
static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event,
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 7b96be1e9f14..f523051f5aef 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -700,6 +700,9 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
msk_owned_by_me(msk);
+ if (sk->sk_state == TCP_LISTEN)
+ return;
+
if (!rm_list->nr)
return;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c82a76d2d0bf..54613f5b7521 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1524,7 +1524,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
int ret = 0;
prev_ssk = ssk;
- mptcp_flush_join_list(msk);
+ __mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
/* First check. If the ssk has changed since
@@ -2879,7 +2879,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
*/
if (WARN_ON_ONCE(!new_mptcp_sock)) {
tcp_sk(newsk)->is_mptcp = 0;
- return newsk;
+ goto out;
}
/* acquire the 2nd reference for the owning socket */
@@ -2891,6 +2891,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
}
+out:
+ newsk->sk_kern_sock = kern;
return newsk;
}
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 0f1e661c2032..f8efd478ac97 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -525,7 +525,6 @@ static bool mptcp_supported_sockopt(int level, int optname)
case TCP_NODELAY:
case TCP_THIN_LINEAR_TIMEOUTS:
case TCP_CONGESTION:
- case TCP_ULP:
case TCP_CORK:
case TCP_KEEPIDLE:
case TCP_KEEPINTVL:
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index bb5f1650f11c..c189b4c8a182 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -112,7 +112,11 @@ static int ncsi_write_package_info(struct sk_buff *skb,
pnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR);
if (!pnest)
return -ENOMEM;
- nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+ rc = nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
+ if (rc) {
+ nla_nest_cancel(skb, pnest);
+ return rc;
+ }
if ((0x1 << np->id) == ndp->package_whitelist)
nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
cnest = nla_nest_start_noflag(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
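
The ncsi_write_package_info() hunk above starts checking the nla_put_u32() return value and rolls back the open nest with nla_nest_cancel() when the attribute does not fit, instead of carrying on with a half-built nest. The general shape of the pattern, with placeholder attribute names (MY_ATTR_*) rather than the NCSI ones:

	struct nlattr *nest;
	int rc;

	nest = nla_nest_start_noflag(skb, MY_ATTR_NESTED);
	if (!nest)
		return -ENOMEM;

	rc = nla_put_u32(skb, MY_ATTR_ID, id);
	if (rc) {
		nla_nest_cancel(skb, nest);	/* trim the message back to before the nest */
		return rc;
	}

	nla_nest_end(skb, nest);
	return 0;
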
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 770a63103c7a..4712a90a1820 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
tstamp->stop = ktime_get_real_ns();
if (timeout < 0)
@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
}
/* We want the clashing entry to go away real soon: 1 second timeout. */
- loser_ct->timeout = nfct_time_stamp + HZ;
+ WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
/* IPS_NAT_CLASH removes the entry automatically on the first
* reply. Also prevents UDP tracker from moving the entry to
@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
- ct->timeout = 0;
+ WRITE_ONCE(ct->timeout, 0);
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset, 0,
offsetof(struct nf_conn, proto) -
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c7708bde057c..ec4164c32d27 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1195,8 +1195,6 @@ restart:
}
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
hnnode) {
- if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
- continue;
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(ct)) {
if (i < ARRAY_SIZE(nf_ct_evict) &&
@@ -1208,6 +1206,9 @@ restart:
if (!net_eq(net, nf_ct_net(ct)))
continue;
+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+ continue;
+
if (cb->args[1]) {
if (ct != last)
continue;
@@ -1998,7 +1999,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
if (timeout > INT_MAX)
timeout = INT_MAX;
- ct->timeout = nfct_time_stamp + (u32)timeout;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
if (test_bit(IPS_DYING_BIT, &ct->status))
return -ETIME;
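
The conntrack hunks above wrap every lockless access to ct->timeout in READ_ONCE()/WRITE_ONCE(): the field can be rewritten from the packet path and from ctnetlink concurrently, and the annotations keep the compiler from tearing or re-reading the value while documenting the intentional data race. A sketch of the pairing (simplified, not the complete patch):

	/* writer side */
	WRITE_ONCE(ct->timeout, nfct_time_stamp + HZ);

	/* reader side */
	s32 remaining = READ_ONCE(ct->timeout) - nfct_time_stamp;
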
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 87a7388b6c89..ed37bb9b4e58 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
if (timeout < 0)
timeout = 0;
- if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
- ct->timeout = nfct_time_stamp + timeout;
+ if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c0851fec11d4..c20772822637 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4481,9 +4481,9 @@ struct nft_set_elem_catchall {
static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
struct nft_set *set)
{
- struct nft_set_elem_catchall *catchall;
+ struct nft_set_elem_catchall *next, *catchall;
- list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+ list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
list_del_rcu(&catchall->list);
nft_set_elem_destroy(set, catchall->elem, true);
kfree_rcu(catchall);
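
nft_set_catchall_destroy() above switches from list_for_each_entry_rcu() to list_for_each_entry_safe() because it unlinks and frees elements while walking the list: the RCU iterator is meant for lockless readers and does not cache the next node before the current one disappears. The idiom, with a placeholder element type:

	struct my_elem {			/* illustrative type, not an nftables struct */
		struct list_head list;
		struct rcu_head rcu;
	};
	struct my_elem *e, *next;

	list_for_each_entry_safe(e, next, &head, list) {
		list_del_rcu(&e->list);		/* readers may still be traversing */
		kfree_rcu(e, rcu);		/* free only after a grace period */
	}
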
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 691ef4cffdd9..7f83f9697fc1 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -556,7 +556,8 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
if (indev && skb->dev &&
- skb->mac_header != skb->network_header) {
+ skb_mac_header_was_set(skb) &&
+ skb_mac_header_len(skb) != 0) {
struct nfulnl_msg_packet_hw phw;
int len;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 4acc4b8e9fe5..f0b9e21a2452 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_info ctinfo = 0;
struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
char *secdata = NULL;
@@ -560,7 +560,8 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
goto nla_put_failure;
if (indev && entskb->dev &&
- skb_mac_header_was_set(entskb)) {
+ skb_mac_header_was_set(entskb) &&
+ skb_mac_header_len(entskb) != 0) {
struct nfqnl_msg_packet_hw phw;
int len;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index af4ee874a067..dbe1f2e7dd9e 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
- return;
+ goto err;
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
continue;
if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
- return;
+ goto err;
if (skb_ensure_writable(pkt->skb,
nft_thoff(pkt) + i + priv->len))
- return;
+ goto err;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
&tcphdr_len);
if (!tcph)
- return;
+ goto err;
offset = i + priv->offset;
@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
return;
}
+ return;
+err:
+ regs->verdict.code = NFT_BREAK;
}
static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index e517663e0cd1..6f4116e72958 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
- NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize);
NFT_PIPAPO_AVX2_AND(7, 2, 3);
/* Stall */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 775064cdd0ee..f1ba7dd3d253 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(unsigned int))
return -EINVAL;
- if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+ if (copy_from_sockptr(&opt, optval, sizeof(unsigned long)))
return -EFAULT;
switch (optname) {
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 334f63c9529e..f184b0db79d4 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_device_iter_exit(iter);
- kfree(iter);
+ if (iter) {
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+ }
return 0;
}
@@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_device_iter_exit(iter);
- kfree(iter);
+ if (iter) {
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+ }
return 0;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9713035b89e3..6d262d9aa10e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -34,6 +34,7 @@
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include "conntrack.h"
#include "datapath.h"
@@ -860,6 +861,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#endif
bool post_ct = false;
int res, err;
+ u16 zone = 0;
/* Extract metadata from packet. */
if (tun_info) {
@@ -898,6 +900,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
key->recirc_id = tc_ext ? tc_ext->chain : 0;
OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
post_ct = tc_ext ? tc_ext->post_ct : false;
+ zone = post_ct ? tc_ext->zone : 0;
} else {
key->recirc_id = 0;
}
@@ -906,8 +909,11 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#endif
err = key_extract(skb, key);
- if (!err)
+ if (!err) {
ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
+ if (post_ct && !skb_get_nfct(skb))
+ key->ct_zone = zone;
+ }
return err;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 46943a18a10d..76c2dca7f0a5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4492,9 +4492,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
out_free_pg_vec:
- bitmap_free(rx_owner_map);
- if (pg_vec)
+ if (pg_vec) {
+ bitmap_free(rx_owner_map);
free_pg_vec(pg_vec, order, req->tp_block_nr);
+ }
out:
return err;
}
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index a1525916885a..65d463ad8770 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -868,6 +868,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
err = pep_accept_conn(newsk, skb);
if (err) {
+ __sock_put(sk);
sock_put(newsk);
newsk = NULL;
goto drop;
@@ -946,6 +947,8 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
ret = -EBUSY;
else if (sk->sk_state == TCP_ESTABLISHED)
ret = -EISCONN;
+ else if (!pn->pn_sk.sobject)
+ ret = -EADDRNOTAVAIL;
else
ret = pep_sock_enable(sk, NULL, 0);
release_sock(sk);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index a3bc4b54d491..b4cc699c5fad 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -253,6 +253,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
* should end up here, but if it
* does, reset/destroy the connection.
*/
+ kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(-EOPNOTSUPP);
goto out;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 90866ae45573..ab3591408419 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -690,10 +690,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
u8 family, u16 zone, bool *defrag)
{
enum ip_conntrack_info ctinfo;
- struct qdisc_skb_cb cb;
struct nf_conn *ct;
int err = 0;
bool frag;
+ u16 mru;
/* Previously seen (loopback)? Ignore. */
ct = nf_ct_get(skb, &ctinfo);
@@ -708,7 +708,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
return err;
skb_get(skb);
- cb = *qdisc_skb_cb(skb);
+ mru = tc_skb_cb(skb)->mru;
if (family == NFPROTO_IPV4) {
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -722,7 +722,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
if (!err) {
*defrag = true;
- cb.mru = IPCB(skb)->frag_max_size;
+ mru = IPCB(skb)->frag_max_size;
}
} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
@@ -735,7 +735,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
if (!err) {
*defrag = true;
- cb.mru = IP6CB(skb)->frag_max_size;
+ mru = IP6CB(skb)->frag_max_size;
}
#else
err = -EOPNOTSUPP;
@@ -744,7 +744,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
}
if (err != -EINPROGRESS)
- *qdisc_skb_cb(skb) = cb;
+ tc_skb_cb(skb)->mru = mru;
skb_clear_hash(skb);
skb->ignore_df = 1;
return err;
@@ -963,7 +963,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tcf_action_update_bstats(&c->common, skb);
if (clear) {
- qdisc_skb_cb(skb)->post_ct = false;
+ tc_skb_cb(skb)->post_ct = false;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
nf_conntrack_put(&ct->ct_general);
@@ -1048,7 +1048,8 @@ do_nat:
out_push:
skb_push_rcsum(skb, nh_ofs);
- qdisc_skb_cb(skb)->post_ct = true;
+ tc_skb_cb(skb)->post_ct = true;
+ tc_skb_cb(skb)->zone = p->zone;
out_clear:
if (defrag)
qdisc_skb_cb(skb)->pkt_len = skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ef8f5a6205a..35c74bdde848 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1617,12 +1617,15 @@ int tcf_classify(struct sk_buff *skb,
/* If we missed on some chain */
if (ret == TC_ACT_UNSPEC && last_executed_chain) {
+ struct tc_skb_cb *cb = tc_skb_cb(skb);
+
ext = tc_skb_ext_alloc(skb);
if (WARN_ON_ONCE(!ext))
return TC_ACT_SHOT;
ext->chain = last_executed_chain;
- ext->mru = qdisc_skb_cb(skb)->mru;
- ext->post_ct = qdisc_skb_cb(skb)->post_ct;
+ ext->mru = cb->mru;
+ ext->post_ct = cb->post_ct;
+ ext->zone = cb->zone;
}
return ret;
@@ -3687,6 +3690,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
break;
default:
+ err = -EOPNOTSUPP;
goto err_out_locked;
}
} else if (is_tcf_skbedit_ptype(act)) {
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index aab13ba11767..ef54ed395874 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -19,6 +19,7 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
@@ -309,7 +310,8 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct cls_fl_head *head = rcu_dereference_bh(tp->root);
- bool post_ct = qdisc_skb_cb(skb)->post_ct;
+ bool post_ct = tc_skb_cb(skb)->post_ct;
+ u16 zone = tc_skb_cb(skb)->zone;
struct fl_flow_key skb_key;
struct fl_flow_mask *mask;
struct cls_fl_filter *f;
@@ -327,7 +329,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
fl_ct_info_to_flower_map,
ARRAY_SIZE(fl_ct_info_to_flower_map),
- post_ct);
+ post_ct, zone);
skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
skb_flow_dissect(skb, &mask->dissector, &skb_key,
FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 3c2300d14468..857aaebd49f4 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2736,7 +2736,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
GFP_KERNEL);
if (!q->tins)
- goto nomem;
+ return -ENOMEM;
for (i = 0; i < CAKE_MAX_TINS; i++) {
struct cake_tin_data *b = q->tins + i;
@@ -2766,10 +2766,6 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
q->min_netlen = ~0;
q->min_adjlen = ~0;
return 0;
-
-nomem:
- cake_destroy(sch);
- return -ENOMEM;
}
static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index e007fc75ef2f..d73393493553 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -666,9 +666,9 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
}
}
for (i = q->nbands; i < oldbands; i++) {
- qdisc_tree_flush_backlog(q->classes[i].qdisc);
- if (i >= q->nstrict)
+ if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
list_del(&q->classes[i].alist);
+ qdisc_tree_flush_backlog(q->classes[i].qdisc);
}
q->nstrict = nstrict;
memcpy(q->prio2band, priomap, sizeof(priomap));
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 830f3559f727..d6aba6edd16e 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
struct fq_pie_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
+ q->p_params.tupdate = 0;
del_timer_sync(&q->adapt_timer);
kvfree(q->flows);
}
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
index 8c06381391d6..5ded4c8672a6 100644
--- a/net/sched/sch_frag.c
+++ b/net/sched/sch_frag.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
#include <net/netlink.h>
#include <net/sch_generic.h>
+#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/ip6_fib.h>
@@ -137,7 +138,7 @@ err:
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
- u16 mru = qdisc_skb_cb(skb)->mru;
+ u16 mru = tc_skb_cb(skb)->mru;
int err;
if (mru && skb->len > mru + skb->dev->hard_header_len)
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 0b7f9ba28deb..d4ce58c90f9f 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
- if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
- max_classes = QFQ_MAX_AGG_CLASSES;
- else
- max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+ max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
+ QFQ_MAX_AGG_CLASSES);
/* max_cl_shift = floor(log_2(max_classes)) */
max_cl_shift = __fls(max_classes);
q->max_agg_classes = 1<<max_cl_shift;
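
The qfq_init_qdisc() hunk above clamps with min_t(u64, ...) and casts tx_queue_len before the "+ 1", so the addition cannot wrap in 32 bits and defeat the comparison. A small self-contained illustration of the difference:

	unsigned int qlen = UINT_MAX;			/* worst case */
	u32 bad  = min_t(u32, qlen + 1, 32);		/* qlen + 1 wraps to 0, result is 0 */
	u64 good = min_t(u64, (u64)qlen + 1, 32);	/* widened first, result is 32 */
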
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 760b367644c1..034e2c74497d 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -245,54 +245,49 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ 64;
}
-static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
struct sctp_association *assoc = tsp->asoc;
- struct sock *sk = tsp->asoc->base.sk;
struct sctp_comm_param *commp = p;
- struct sk_buff *in_skb = commp->skb;
+ struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *req = commp->r;
- const struct nlmsghdr *nlh = commp->nlh;
- struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *skb = commp->skb;
struct sk_buff *rep;
int err;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
if (err)
- goto out;
+ return err;
- err = -ENOMEM;
rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
if (!rep)
- goto out;
+ return -ENOMEM;
lock_sock(sk);
- if (sk != assoc->base.sk) {
- release_sock(sk);
- sk = assoc->base.sk;
- lock_sock(sk);
+ if (ep != assoc->ep) {
+ err = -EAGAIN;
+ goto out;
}
- err = inet_sctp_diag_fill(sk, assoc, rep, req,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh,
- commp->net_admin);
- release_sock(sk);
+
+ err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
+ NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
+ commp->nlh, commp->net_admin);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
- kfree_skb(rep);
goto out;
}
+ release_sock(sk);
- err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+ return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
out:
+ release_sock(sk);
+ kfree_skb(rep);
return err;
}
-static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
- struct sctp_endpoint *ep = tsp->asoc->ep;
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
struct sk_buff *skb = commp->skb;
@@ -302,6 +297,8 @@ static int sctp_sock_dump(struct sctp_transport *tsp, void *p)
int err = 0;
lock_sock(sk);
+ if (ep != tsp->asoc->ep)
+ goto release;
list_for_each_entry(assoc, &ep->asocs, asocs) {
if (cb->args[4] < cb->args[1])
goto next;
@@ -344,9 +341,8 @@ release:
return err;
}
-static int sctp_sock_filter(struct sctp_transport *tsp, void *p)
+static int sctp_sock_filter(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
- struct sctp_endpoint *ep = tsp->asoc->ep;
struct sctp_comm_param *commp = p;
struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *r = commp->r;
@@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
static int sctp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- struct sk_buff *in_skb = cb->skb;
- struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *skb = cb->skb;
+ struct net *net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
union sctp_addr laddr, paddr;
struct sctp_comm_param commp = {
- .skb = in_skb,
+ .skb = skb,
.r = req,
.nlh = nlh,
- .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
+ .net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
};
if (req->sdiag_family == AF_INET) {
@@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb,
paddr.v6.sin6_family = AF_INET6;
}
- return sctp_transport_lookup_process(sctp_tsp_dump_one,
+ return sctp_transport_lookup_process(sctp_sock_dump_one,
net, &laddr, &paddr, &commp);
}
@@ -505,8 +501,8 @@ skip:
if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
goto done;
- sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump,
- net, &pos, &commp);
+ sctp_transport_traverse_process(sctp_sock_filter, sctp_sock_dump,
+ net, &pos, &commp);
cb->args[2] = pos;
done:
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 48c9c2c7602f..efffde7f2328 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -184,6 +184,18 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
}
/* Final destructor for endpoint. */
+static void sctp_endpoint_destroy_rcu(struct rcu_head *head)
+{
+ struct sctp_endpoint *ep = container_of(head, struct sctp_endpoint, rcu);
+ struct sock *sk = ep->base.sk;
+
+ sctp_sk(sk)->ep = NULL;
+ sock_put(sk);
+
+ kfree(ep);
+ SCTP_DBG_OBJCNT_DEC(ep);
+}
+
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
struct sock *sk;
@@ -213,18 +225,13 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
if (sctp_sk(sk)->bind_hash)
sctp_put_port(sk);
- sctp_sk(sk)->ep = NULL;
- /* Give up our hold on the sock */
- sock_put(sk);
-
- kfree(ep);
- SCTP_DBG_OBJCNT_DEC(ep);
+ call_rcu(&ep->rcu, sctp_endpoint_destroy_rcu);
}
/* Hold a reference to an endpoint. */
-void sctp_endpoint_hold(struct sctp_endpoint *ep)
+int sctp_endpoint_hold(struct sctp_endpoint *ep)
{
- refcount_inc(&ep->base.refcnt);
+ return refcount_inc_not_zero(&ep->base.refcnt);
}
/* Release a reference to an endpoint and clean up if there are
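
The endpointola.c change above pairs two things: sctp_endpoint_hold() becomes refcount_inc_not_zero(), so a caller only gets a reference if the endpoint is not already being torn down, and the final kfree() moves into an RCU callback, so a lookup done under rcu_read_lock() can still safely attempt that conditional grab. A sketch of the combined pattern with placeholder helpers (lookup_thing(), use_thing(), free_thing_rcu() and the field names are not SCTP symbols):

	rcu_read_lock();
	obj = lookup_thing(key);
	if (obj && !refcount_inc_not_zero(&obj->refcnt))
		obj = NULL;				/* refcount already hit zero */
	rcu_read_unlock();

	if (obj) {
		use_thing(obj);
		if (refcount_dec_and_test(&obj->refcnt))
			call_rcu(&obj->rcu, free_thing_rcu);	/* defer the kfree past readers */
	}
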
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 33391254fa82..da08671a3f80 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5317,32 +5317,41 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
- struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p)
{
struct sctp_transport *transport;
- int err;
+ struct sctp_endpoint *ep;
+ int err = -ENOENT;
rcu_read_lock();
transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+ if (!transport) {
+ rcu_read_unlock();
+ return err;
+ }
+ ep = transport->asoc->ep;
+ if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ sctp_transport_put(transport);
+ rcu_read_unlock();
+ return err;
+ }
rcu_read_unlock();
- if (!transport)
- return -ENOENT;
- err = cb(transport, p);
+ err = cb(ep, transport, p);
+ sctp_endpoint_put(ep);
sctp_transport_put(transport);
-
return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
-int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
- int (*cb_done)(struct sctp_transport *, void *),
- struct net *net, int *pos, void *p) {
+int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done,
+ struct net *net, int *pos, void *p)
+{
struct rhashtable_iter hti;
struct sctp_transport *tsp;
+ struct sctp_endpoint *ep;
int ret;
again:
@@ -5351,26 +5360,32 @@ again:
tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
- ret = cb(tsp, p);
- if (ret)
- break;
+ ep = tsp->asoc->ep;
+ if (sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ ret = cb(ep, tsp, p);
+ if (ret)
+ break;
+ sctp_endpoint_put(ep);
+ }
(*pos)++;
sctp_transport_put(tsp);
}
sctp_transport_walk_stop(&hti);
if (ret) {
- if (cb_done && !cb_done(tsp, p)) {
+ if (cb_done && !cb_done(ep, tsp, p)) {
(*pos)++;
+ sctp_endpoint_put(ep);
sctp_transport_put(tsp);
goto again;
}
+ sctp_endpoint_put(ep);
sctp_transport_put(tsp);
}
return ret;
}
-EXPORT_SYMBOL_GPL(sctp_for_each_transport);
+EXPORT_SYMBOL_GPL(sctp_transport_traverse_process);
/* 7.2.1 Association Status (SCTP_STATUS)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 230072f9ec48..1c9289f56dc4 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -194,7 +194,9 @@ static int smc_release(struct socket *sock)
/* cleanup for a dangling non-blocking connect */
if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
tcp_abort(smc->clcsock->sk, ECONNABORTED);
- flush_work(&smc->connect_work);
+
+ if (cancel_work_sync(&smc->connect_work))
+ sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
if (sk->sk_state == SMC_LISTEN)
/* smc_close_non_accepted() is called and acquires
diff --git a/net/smc/smc.h b/net/smc/smc.h
index f4286ca1f228..1a4fc1c6c4ab 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -180,6 +180,11 @@ struct smc_connection {
u16 tx_cdc_seq; /* sequence # for CDC send */
u16 tx_cdc_seq_fin; /* sequence # - tx completed */
spinlock_t send_lock; /* protect wr_sends */
+ atomic_t cdc_pend_tx_wr; /* number of pending tx CDC wqe
+ * - inc when post wqe,
+ * - dec on polled tx cqe
+ */
+ wait_queue_head_t cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr */
struct delayed_work tx_work; /* retry of smc_cdc_msg_send */
u32 tx_off; /* base offset in peer rmb */
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 99acd337ba90..84c8a4374fdd 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -31,10 +31,6 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
struct smc_sock *smc;
int diff;
- if (!conn)
- /* already dismissed */
- return;
-
smc = container_of(conn, struct smc_sock, conn);
bh_lock_sock(&smc->sk);
if (!wc_status) {
@@ -51,6 +47,12 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
conn);
conn->tx_cdc_seq_fin = cdcpend->ctrl_seq;
}
+
+ if (atomic_dec_and_test(&conn->cdc_pend_tx_wr) &&
+ unlikely(wq_has_sleeper(&conn->cdc_pend_tx_wq)))
+ wake_up(&conn->cdc_pend_tx_wq);
+ WARN_ON(atomic_read(&conn->cdc_pend_tx_wr) < 0);
+
smc_tx_sndbuf_nonfull(smc);
bh_unlock_sock(&smc->sk);
}
@@ -107,6 +109,10 @@ int smc_cdc_msg_send(struct smc_connection *conn,
conn->tx_cdc_seq++;
conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed);
+
+ atomic_inc(&conn->cdc_pend_tx_wr);
+ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
if (!rc) {
smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);
@@ -114,6 +120,7 @@ int smc_cdc_msg_send(struct smc_connection *conn,
} else {
conn->tx_cdc_seq--;
conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+ atomic_dec(&conn->cdc_pend_tx_wr);
}
return rc;
@@ -136,7 +143,18 @@ int smcr_cdc_msg_send_validation(struct smc_connection *conn,
peer->token = htonl(local->token);
peer->prod_flags.failover_validation = 1;
+ /* We need to set pend->conn here to make sure smc_cdc_tx_handler()
+ * can handle it properly
+ */
+ smc_cdc_add_pending_send(conn, pend);
+
+ atomic_inc(&conn->cdc_pend_tx_wr);
+ smp_mb__after_atomic(); /* Make sure cdc_pend_tx_wr added before post */
+
rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+ if (unlikely(rc))
+ atomic_dec(&conn->cdc_pend_tx_wr);
+
return rc;
}
@@ -193,31 +211,9 @@ int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
return rc;
}
-static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
- unsigned long data)
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn)
{
- struct smc_connection *conn = (struct smc_connection *)data;
- struct smc_cdc_tx_pend *cdc_pend =
- (struct smc_cdc_tx_pend *)tx_pend;
-
- return cdc_pend->conn == conn;
-}
-
-static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
-{
- struct smc_cdc_tx_pend *cdc_pend =
- (struct smc_cdc_tx_pend *)tx_pend;
-
- cdc_pend->conn = NULL;
-}
-
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
-{
- struct smc_link *link = conn->lnk;
-
- smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
- smc_cdc_tx_filter, smc_cdc_tx_dismisser,
- (unsigned long)conn);
+ wait_event(conn->cdc_pend_tx_wq, !atomic_read(&conn->cdc_pend_tx_wr));
}
/* Send a SMC-D CDC header.
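
The smc_cdc.c hunks above drop the filter/dismisser machinery in favour of counting in-flight CDC work requests: the counter goes up before a WR is posted (and back down if posting fails), the completion handler decrements it and wakes any waiter at zero, and teardown simply sleeps until the counter drains. Simplified sketch with shortened names (pend_tx / pend_tx_wq / post_wr() stand in for the actual SMC fields and send helper):

	/* submit side */
	atomic_inc(&conn->pend_tx);
	smp_mb__after_atomic();			/* count is visible before the WR is posted */
	rc = post_wr(conn);
	if (rc)
		atomic_dec(&conn->pend_tx);	/* never posted, undo */

	/* completion handler */
	if (atomic_dec_and_test(&conn->pend_tx))
		wake_up(&conn->pend_tx_wq);

	/* connection teardown */
	wait_event(conn->pend_tx_wq, !atomic_read(&conn->pend_tx));
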
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
index 0a0a89abd38b..696cc11f2303 100644
--- a/net/smc/smc_cdc.h
+++ b/net/smc/smc_cdc.h
@@ -291,7 +291,7 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
struct smc_wr_buf **wr_buf,
struct smc_rdma_wr **wr_rdma_buf,
struct smc_cdc_tx_pend **pend);
-void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
+void smc_cdc_wait_pend_tx_wr(struct smc_connection *conn);
int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
struct smc_cdc_tx_pend *pend);
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 387d28b2f8dd..a6849362f4dd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -647,7 +647,7 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *lnk = &lgr->lnk[i];
- if (smc_link_usable(lnk))
+ if (smc_link_sendable(lnk))
lnk->state = SMC_LNK_INACTIVE;
}
wake_up_all(&lgr->llc_msg_waiter);
@@ -1127,7 +1127,7 @@ void smc_conn_free(struct smc_connection *conn)
smc_ism_unset_conn(conn);
tasklet_kill(&conn->rx_tsklet);
} else {
- smc_cdc_tx_dismiss_slots(conn);
+ smc_cdc_wait_pend_tx_wr(conn);
if (current_work() != &conn->abort_work)
cancel_work_sync(&conn->abort_work);
}
@@ -1204,7 +1204,7 @@ void smcr_link_clear(struct smc_link *lnk, bool log)
smc_llc_link_clear(lnk, log);
smcr_buf_unmap_lgr(lnk);
smcr_rtoken_clear_link(lnk);
- smc_ib_modify_qp_reset(lnk);
+ smc_ib_modify_qp_error(lnk);
smc_wr_free_link(lnk);
smc_ib_destroy_queue_pair(lnk);
smc_ib_dealloc_protection_domain(lnk);
@@ -1336,7 +1336,7 @@ static void smc_conn_kill(struct smc_connection *conn, bool soft)
else
tasklet_unlock_wait(&conn->rx_tsklet);
} else {
- smc_cdc_tx_dismiss_slots(conn);
+ smc_cdc_wait_pend_tx_wr(conn);
}
smc_lgr_unregister_conn(conn);
smc_close_active_abort(smc);
@@ -1459,11 +1459,16 @@ void smc_smcd_terminate_all(struct smcd_dev *smcd)
/* Called when an SMCR device is removed or the smc module is unloaded.
* If smcibdev is given, all SMCR link groups using this device are terminated.
* If smcibdev is NULL, all SMCR link groups are terminated.
+ *
+ * We must wait here for the QPs to be destroyed before we destroy the CQs,
+ * or we won't receive any CQEs and cdc_pend_tx_wr cannot reach 0, so the
+ * smc_sock cannot be released.
*/
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
struct smc_link_group *lgr, *lg;
LIST_HEAD(lgr_free_list);
+ LIST_HEAD(lgr_linkdown_list);
int i;
spin_lock_bh(&smc_lgr_list.lock);
@@ -1475,7 +1480,7 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
if (lgr->lnk[i].smcibdev == smcibdev)
- smcr_link_down_cond_sched(&lgr->lnk[i]);
+ list_move_tail(&lgr->list, &lgr_linkdown_list);
}
}
}
@@ -1487,6 +1492,16 @@ void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
__smc_lgr_terminate(lgr, false);
}
+ list_for_each_entry_safe(lgr, lg, &lgr_linkdown_list, list) {
+ for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
+ if (lgr->lnk[i].smcibdev == smcibdev) {
+ mutex_lock(&lgr->llc_conf_mutex);
+ smcr_link_down_cond(&lgr->lnk[i]);
+ mutex_unlock(&lgr->llc_conf_mutex);
+ }
+ }
+ }
+
if (smcibdev) {
if (atomic_read(&smcibdev->lnk_cnt))
wait_event(smcibdev->lnks_deleted,
@@ -1586,7 +1601,6 @@ static void smcr_link_down(struct smc_link *lnk)
if (!lgr || lnk->state == SMC_LNK_UNUSED || list_empty(&lgr->list))
return;
- smc_ib_modify_qp_reset(lnk);
to_lnk = smc_switch_conns(lgr, lnk, true);
if (!to_lnk) { /* no backup link available */
smcr_link_clear(lnk, true);
@@ -1824,6 +1838,7 @@ create:
conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
conn->urg_state = SMC_URG_READ;
+ init_waitqueue_head(&conn->cdc_pend_tx_wq);
INIT_WORK(&smc->conn.abort_work, smc_conn_abort_work);
if (ini->is_smcd) {
conn->rx_off = sizeof(struct smcd_cdc_msg);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 59cef3b830d8..d63b08274197 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -415,6 +415,12 @@ static inline bool smc_link_usable(struct smc_link *lnk)
return true;
}
+static inline bool smc_link_sendable(struct smc_link *lnk)
+{
+ return smc_link_usable(lnk) &&
+ lnk->qp_attr.cur_qp_state == IB_QPS_RTS;
+}
+
static inline bool smc_link_active(struct smc_link *lnk)
{
return lnk->state == SMC_LNK_ACTIVE;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index d93055ec17ae..fe5d5399c4e8 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -109,12 +109,12 @@ int smc_ib_modify_qp_rts(struct smc_link *lnk)
IB_QP_MAX_QP_RD_ATOMIC);
}
-int smc_ib_modify_qp_reset(struct smc_link *lnk)
+int smc_ib_modify_qp_error(struct smc_link *lnk)
{
struct ib_qp_attr qp_attr;
memset(&qp_attr, 0, sizeof(qp_attr));
- qp_attr.qp_state = IB_QPS_RESET;
+ qp_attr.qp_state = IB_QPS_ERR;
return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 07585937370e..bfa1c6bf6313 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -90,6 +90,7 @@ int smc_ib_create_queue_pair(struct smc_link *lnk);
int smc_ib_ready_link(struct smc_link *lnk);
int smc_ib_modify_qp_rts(struct smc_link *lnk);
int smc_ib_modify_qp_reset(struct smc_link *lnk);
+int smc_ib_modify_qp_error(struct smc_link *lnk);
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
struct smc_buf_desc *buf_slot, u8 link_idx);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index b102680296b8..3e9fd8a3124c 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -1630,7 +1630,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
delllc.reason = htonl(rsn);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&lgr->lnk[i]))
+ if (!smc_link_sendable(&lgr->lnk[i]))
continue;
if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
break;
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 600ab5889227..c6cfdea8b71b 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -62,13 +62,9 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
}
/* wait till all pending tx work requests on the given link are completed */
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
- if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
- SMC_WR_TX_WAIT_PENDING_TIME))
- return 0;
- else /* timeout */
- return -EPIPE;
+ wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
}
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
@@ -87,7 +83,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
struct smc_wr_tx_pend pnd_snd;
struct smc_link *link;
u32 pnd_snd_idx;
- int i;
link = wc->qp->qp_context;
@@ -128,14 +123,6 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
}
if (wc->status) {
- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
- /* clear full struct smc_wr_tx_pend including .priv */
- memset(&link->wr_tx_pends[i], 0,
- sizeof(link->wr_tx_pends[i]));
- memset(&link->wr_tx_bufs[i], 0,
- sizeof(link->wr_tx_bufs[i]));
- clear_bit(i, link->wr_tx_mask);
- }
if (link->lgr->smc_version == SMC_V2) {
memset(link->wr_tx_v2_pend, 0,
sizeof(*link->wr_tx_v2_pend));
@@ -188,7 +175,7 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
*idx = link->wr_tx_cnt;
- if (!smc_link_usable(link))
+ if (!smc_link_sendable(link))
return -ENOLINK;
for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
if (!test_and_set_bit(*idx, link->wr_tx_mask))
@@ -231,7 +218,7 @@ int smc_wr_tx_get_free_slot(struct smc_link *link,
} else {
rc = wait_event_interruptible_timeout(
link->wr_tx_wait,
- !smc_link_usable(link) ||
+ !smc_link_sendable(link) ||
lgr->terminating ||
(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
SMC_WR_TX_WAIT_FREE_SLOT_TIME);
@@ -358,18 +345,20 @@ int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
unsigned long timeout)
{
struct smc_wr_tx_pend *pend;
+ u32 pnd_idx;
int rc;
pend = container_of(priv, struct smc_wr_tx_pend, priv);
pend->compl_requested = 1;
- init_completion(&link->wr_tx_compl[pend->idx]);
+ pnd_idx = pend->idx;
+ init_completion(&link->wr_tx_compl[pnd_idx]);
rc = smc_wr_tx_send(link, priv);
if (rc)
return rc;
/* wait for completion by smc_wr_tx_process_cqe() */
rc = wait_for_completion_interruptible_timeout(
- &link->wr_tx_compl[pend->idx], timeout);
+ &link->wr_tx_compl[pnd_idx], timeout);
if (rc <= 0)
rc = -ENODATA;
if (rc > 0)
@@ -419,25 +408,6 @@ int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
return rc;
}
-void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
- smc_wr_tx_filter filter,
- smc_wr_tx_dismisser dismisser,
- unsigned long data)
-{
- struct smc_wr_tx_pend_priv *tx_pend;
- struct smc_wr_rx_hdr *wr_tx;
- int i;
-
- for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
- wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
- if (wr_tx->type != wr_tx_hdr_type)
- continue;
- tx_pend = &link->wr_tx_pends[i].priv;
- if (filter(tx_pend, data))
- dismisser(tx_pend);
- }
-}
-
/****************************** receive queue ********************************/
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
@@ -673,10 +643,7 @@ void smc_wr_free_link(struct smc_link *lnk)
smc_wr_wakeup_reg_wait(lnk);
smc_wr_wakeup_tx_wait(lnk);
- if (smc_wr_tx_wait_no_pending_sends(lnk))
- memset(lnk->wr_tx_mask, 0,
- BITS_TO_LONGS(SMC_WR_BUF_CNT) *
- sizeof(*lnk->wr_tx_mask));
+ smc_wr_tx_wait_no_pending_sends(lnk);
wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index f353311e6f84..47512ccce5ef 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -22,7 +22,6 @@
#define SMC_WR_BUF_CNT 16 /* # of ctrl buffers per link */
#define SMC_WR_TX_WAIT_FREE_SLOT_TIME (10 * HZ)
-#define SMC_WR_TX_WAIT_PENDING_TIME (5 * HZ)
#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
@@ -62,7 +61,7 @@ static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
static inline bool smc_wr_tx_link_hold(struct smc_link *link)
{
- if (!smc_link_usable(link))
+ if (!smc_link_sendable(link))
return false;
atomic_inc(&link->wr_tx_refcnt);
return true;
@@ -130,7 +129,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
smc_wr_tx_filter filter,
smc_wr_tx_dismisser dismisser,
unsigned long data);
-int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
+void smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
int smc_wr_rx_post_init(struct smc_link *link);
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index b4d9419a015b..d293614d5fc6 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -524,7 +524,7 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
return -EEXIST;
/* Allocate a new AEAD */
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
if (unlikely(!tmp))
return -ENOMEM;
@@ -1474,7 +1474,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
return -EEXIST;
/* Allocate crypto */
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c)
return -ENOMEM;
@@ -1488,7 +1488,7 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
}
/* Allocate statistic structure */
- c->stats = alloc_percpu(struct tipc_crypto_stats);
+ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
if (!c->stats) {
if (c->wq)
destroy_workqueue(c->wq);
@@ -2461,7 +2461,7 @@ static void tipc_crypto_work_tx(struct work_struct *work)
}
/* Lets duplicate it first */
- skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_KERNEL);
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
rcu_read_unlock();
/* Now, generate new key, initiate & distribute it */
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ad570c2450be..3e63c83e641c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
msg_set_syn(hdr, 1);
}
+ memset(&skaddr, 0, sizeof(skaddr));
+
/* Determine destination */
if (atype == TIPC_SERVICE_RANGE) {
return tipc_sendmcast(sock, ua, m, dlen, timeout);
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 59ee1be5a6dd..ec2c2afbf0d0 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1299,7 +1299,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
space_available = virtio_transport_space_update(sk, pkt);
/* Update CID in case it has changed after a transport reset event */
- vsk->local_addr.svm_cid = dst.svm_cid;
+ if (vsk->local_addr.svm_cid != VMADDR_CID_ANY)
+ vsk->local_addr.svm_cid = dst.svm_cid;
if (space_available)
sk->sk_write_space(sk);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index df87c7f3a049..f8f01a3e020b 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -133,6 +133,7 @@ static u32 reg_is_indoor_portid;
static void restore_regulatory_settings(bool reset_user, bool cached);
static void print_regdomain(const struct ieee80211_regdomain *rd);
+static void reg_process_hint(struct regulatory_request *reg_request);
static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
{
@@ -1098,6 +1099,8 @@ int reg_reload_regdb(void)
const struct firmware *fw;
void *db;
int err;
+ const struct ieee80211_regdomain *current_regdomain;
+ struct regulatory_request *request;
err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev);
if (err)
@@ -1118,8 +1121,26 @@ int reg_reload_regdb(void)
if (!IS_ERR_OR_NULL(regdb))
kfree(regdb);
regdb = db;
- rtnl_unlock();
+ /* reset regulatory domain */
+ current_regdomain = get_cfg80211_regdom();
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ request->wiphy_idx = WIPHY_IDX_INVALID;
+ request->alpha2[0] = current_regdomain->alpha2[0];
+ request->alpha2[1] = current_regdomain->alpha2[1];
+ request->initiator = NL80211_REGDOM_SET_BY_CORE;
+ request->user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+ reg_process_hint(request);
+
+out_unlock:
+ rtnl_unlock();
out:
release_firmware(fw);
return err;
@@ -2338,6 +2359,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
struct cfg80211_chan_def chandef = {};
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
enum nl80211_iftype iftype;
+ bool ret;
wdev_lock(wdev);
iftype = wdev->iftype;
@@ -2387,7 +2409,11 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_ADHOC:
- return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+ wiphy_lock(wiphy);
+ ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
+ wiphy_unlock(wiphy);
+
+ return ret;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index bc4ad48ea4f0..fd39bb660ebc 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -83,6 +83,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
xskb = &pool->heads[i];
xskb->pool = pool;
xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+ INIT_LIST_HEAD(&xskb->free_list_node);
if (pool->unaligned)
pool->free_heads[i] = xskb;
else
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 7d631aaa0ae1..3ccb2c70add4 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -219,7 +219,7 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "s390" && $bits == 64) {
if ($cc =~ /-DCC_USING_HOTPATCH/) {
- $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
+ $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(brcl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
$mcount_adjust = 0;
}
$alignment = 8;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 62d30c0a30c2..dde4ecc0cd18 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -611,10 +611,11 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
return 0;
}
-static int parse_sid(struct super_block *sb, const char *s, u32 *sid)
+static int parse_sid(struct super_block *sb, const char *s, u32 *sid,
+ gfp_t gfp)
{
int rc = security_context_str_to_sid(&selinux_state, s,
- sid, GFP_KERNEL);
+ sid, gfp);
if (rc)
pr_warn("SELinux: security_context_str_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -685,7 +686,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
*/
if (opts) {
if (opts->fscontext) {
- rc = parse_sid(sb, opts->fscontext, &fscontext_sid);
+ rc = parse_sid(sb, opts->fscontext, &fscontext_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid,
@@ -694,7 +696,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= FSCONTEXT_MNT;
}
if (opts->context) {
- rc = parse_sid(sb, opts->context, &context_sid);
+ rc = parse_sid(sb, opts->context, &context_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid,
@@ -703,7 +706,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= CONTEXT_MNT;
}
if (opts->rootcontext) {
- rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid);
+ rc = parse_sid(sb, opts->rootcontext, &rootcontext_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid,
@@ -712,7 +716,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
sbsec->flags |= ROOTCONTEXT_MNT;
}
if (opts->defcontext) {
- rc = parse_sid(sb, opts->defcontext, &defcontext_sid);
+ rc = parse_sid(sb, opts->defcontext, &defcontext_sid,
+ GFP_KERNEL);
if (rc)
goto out;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid,
@@ -2702,14 +2707,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
return (sbsec->flags & SE_MNTMASK) ? 1 : 0;
if (opts->fscontext) {
- rc = parse_sid(sb, opts->fscontext, &sid);
+ rc = parse_sid(sb, opts->fscontext, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
return 1;
}
if (opts->context) {
- rc = parse_sid(sb, opts->context, &sid);
+ rc = parse_sid(sb, opts->context, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2719,14 +2724,14 @@ static int selinux_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts)
struct inode_security_struct *root_isec;
root_isec = backing_inode_security(sb->s_root);
- rc = parse_sid(sb, opts->rootcontext, &sid);
+ rc = parse_sid(sb, opts->rootcontext, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
return 1;
}
if (opts->defcontext) {
- rc = parse_sid(sb, opts->defcontext, &sid);
+ rc = parse_sid(sb, opts->defcontext, &sid, GFP_NOWAIT);
if (rc)
return 1;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -2749,14 +2754,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
return 0;
if (opts->fscontext) {
- rc = parse_sid(sb, opts->fscontext, &sid);
+ rc = parse_sid(sb, opts->fscontext, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
goto out_bad_option;
}
if (opts->context) {
- rc = parse_sid(sb, opts->context, &sid);
+ rc = parse_sid(sb, opts->context, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
@@ -2765,14 +2770,14 @@ static int selinux_sb_remount(struct super_block *sb, void *mnt_opts)
if (opts->rootcontext) {
struct inode_security_struct *root_isec;
root_isec = backing_inode_security(sb->s_root);
- rc = parse_sid(sb, opts->rootcontext, &sid);
+ rc = parse_sid(sb, opts->rootcontext, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
goto out_bad_option;
}
if (opts->defcontext) {
- rc = parse_sid(sb, opts->defcontext, &sid);
+ rc = parse_sid(sb, opts->defcontext, &sid, GFP_KERNEL);
if (rc)
return rc;
if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
@@ -5780,7 +5785,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
struct sk_security_struct *sksec;
struct common_audit_data ad;
struct lsm_network_audit net = {0,};
- u8 proto;
+ u8 proto = 0;
sk = skb_to_full_sk(skb);
if (sk == NULL)
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 1da2e3722b12..6799b1122c9d 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -1051,10 +1051,11 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
return false;
if (!domain)
return true;
+ if (READ_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED]))
+ return false;
list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
srcu_read_lock_held(&tomoyo_ss)) {
u16 perm;
- u8 i;
if (ptr->is_deleted)
continue;
@@ -1065,23 +1066,23 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
*/
switch (ptr->type) {
case TOMOYO_TYPE_PATH_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_path_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_path_acl, head)->perm);
break;
case TOMOYO_TYPE_PATH2_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_path2_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_path2_acl, head)->perm);
break;
case TOMOYO_TYPE_PATH_NUMBER_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_path_number_acl, head)
+ perm = data_race(container_of(ptr, struct tomoyo_path_number_acl, head)
->perm);
break;
case TOMOYO_TYPE_MKDEV_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_mkdev_acl, head)->perm);
break;
case TOMOYO_TYPE_INET_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_inet_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_inet_acl, head)->perm);
break;
case TOMOYO_TYPE_UNIX_ACL:
- data_race(perm = container_of(ptr, struct tomoyo_unix_acl, head)->perm);
+ perm = data_race(container_of(ptr, struct tomoyo_unix_acl, head)->perm);
break;
case TOMOYO_TYPE_MANUAL_TASK_ACL:
perm = 0;
@@ -1089,21 +1090,17 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
default:
perm = 1;
}
- for (i = 0; i < 16; i++)
- if (perm & (1 << i))
- count++;
+ count += hweight16(perm);
}
if (count < tomoyo_profile(domain->ns, domain->profile)->
pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
return true;
- if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
- domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
- /* r->granted = false; */
- tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+ WRITE_ONCE(domain->flags[TOMOYO_DIF_QUOTA_WARNED], true);
+ /* r->granted = false; */
+ tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
#ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
- pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
- domain->domainname->name);
+ pr_warn("WARNING: Domain '%s' has too many ACLs to hold. Stopped learning mode.\n",
+ domain->domainname->name);
#endif
- }
return false;
}
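
For context on the tomoyo change above: hweight16() simply counts the set bits that the removed loop counted by hand. A minimal userspace sketch of the equivalence, using __builtin_popcount() as a stand-in for hweight16() (the test value is arbitrary):

/* Standalone illustration, not kernel code. */
#include <stdint.h>
#include <stdio.h>

static unsigned int count_bits_loop(uint16_t perm)
{
	unsigned int count = 0;
	int i;

	/* Mirrors the loop removed by the patch above. */
	for (i = 0; i < 16; i++)
		if (perm & (1 << i))
			count++;
	return count;
}

int main(void)
{
	uint16_t perm = 0x5089;

	printf("loop: %u, popcount: %u\n",
	       count_bits_loop(perm),
	       (unsigned int)__builtin_popcount(perm));
	return 0;
}
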
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 470dabc60aa0..edff063e088d 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -264,6 +264,7 @@ static int copy_ctl_value_to_user(void __user *userdata,
struct snd_ctl_elem_value *data,
int type, int count)
{
+ struct snd_ctl_elem_value32 __user *data32 = userdata;
int i, size;
if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
@@ -280,6 +281,8 @@ static int copy_ctl_value_to_user(void __user *userdata,
if (copy_to_user(valuep, data->value.bytes.data, size))
return -EFAULT;
}
+ if (copy_to_user(&data32->id, &data->id, sizeof(data32->id)))
+ return -EFAULT;
return 0;
}
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 32350c6aba84..537df1e98f8a 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -509,6 +509,10 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
return -ENOMEM;
jack->id = kstrdup(id, GFP_KERNEL);
+ if (jack->id == NULL) {
+ kfree(jack);
+ return -ENOMEM;
+ }
/* don't create input device for phantom jack */
if (!phantom_jack) {
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 82a818734a5f..20a0a4771b9a 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -147,7 +147,7 @@ snd_pcm_hw_param_value_min(const struct snd_pcm_hw_params *params,
*
* Return the maximum value for field PAR.
*/
-static unsigned int
+static int
snd_pcm_hw_param_value_max(const struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir)
{
@@ -682,18 +682,24 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *oss_params,
struct snd_pcm_hw_params *slave_params)
{
- size_t s;
- size_t oss_buffer_size, oss_period_size, oss_periods;
- size_t min_period_size, max_period_size;
+ ssize_t s;
+ ssize_t oss_buffer_size;
+ ssize_t oss_period_size, oss_periods;
+ ssize_t min_period_size, max_period_size;
struct snd_pcm_runtime *runtime = substream->runtime;
size_t oss_frame_size;
oss_frame_size = snd_pcm_format_physical_width(params_format(oss_params)) *
params_channels(oss_params) / 8;
+ oss_buffer_size = snd_pcm_hw_param_value_max(slave_params,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ NULL);
+ if (oss_buffer_size <= 0)
+ return -EINVAL;
oss_buffer_size = snd_pcm_plug_client_size(substream,
- snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
- if (!oss_buffer_size)
+ oss_buffer_size * oss_frame_size);
+ if (oss_buffer_size <= 0)
return -EINVAL;
oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
if (atomic_read(&substream->mmap_count)) {
@@ -730,7 +736,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
min_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
- if (min_period_size) {
+ if (min_period_size > 0) {
min_period_size *= oss_frame_size;
min_period_size = roundup_pow_of_two(min_period_size);
if (oss_period_size < min_period_size)
@@ -739,7 +745,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
max_period_size = snd_pcm_plug_client_size(substream,
snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
- if (max_period_size) {
+ if (max_period_size > 0) {
max_period_size *= oss_frame_size;
max_period_size = rounddown_pow_of_two(max_period_size);
if (oss_period_size > max_period_size)
@@ -752,7 +758,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
oss_periods = substream->oss.setup.periods;
s = snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIODS, NULL);
- if (runtime->oss.maxfrags && s > runtime->oss.maxfrags)
+ if (s > 0 && runtime->oss.maxfrags && s > runtime->oss.maxfrags)
s = runtime->oss.maxfrags;
if (oss_periods > s)
oss_periods = s;
@@ -878,8 +884,15 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
err = -EINVAL;
goto failure;
}
- choose_rate(substream, sparams, runtime->oss.rate);
- snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_CHANNELS, runtime->oss.channels, NULL);
+
+ err = choose_rate(substream, sparams, runtime->oss.rate);
+ if (err < 0)
+ goto failure;
+ err = snd_pcm_hw_param_near(substream, sparams,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ runtime->oss.channels, NULL);
+ if (err < 0)
+ goto failure;
format = snd_pcm_oss_format_from(runtime->oss.format);
@@ -1956,7 +1969,7 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
if (runtime->oss.subdivision || runtime->oss.fragshift)
return -EINVAL;
fragshift = val & 0xffff;
- if (fragshift >= 31)
+ if (fragshift >= 25) /* should be large enough */
return -EINVAL;
runtime->oss.fragshift = fragshift;
runtime->oss.maxfrags = (val >> 16) & 0xffff;
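
The pcm_oss change above switches the size variables from size_t to ssize_t so that negative error codes returned by the helpers are not reinterpreted as huge positive sizes. A minimal userspace sketch of that failure mode, with helper() standing in for the kernel helpers:

/* Standalone illustration, not kernel code. */
#include <stdio.h>

#define EXAMPLE_EINVAL 22

static long helper(int fail)
{
	/* Returns a size on success, a negative error code on failure. */
	return fail ? -EXAMPLE_EINVAL : 4096;
}

int main(void)
{
	unsigned long usize = helper(1);	/* error wraps to a huge size */
	long ssize = helper(1);			/* error stays negative */

	if (usize)				/* non-zero, error goes unnoticed */
		printf("unsigned: looks like a valid size (%lu)\n", usize);
	if (ssize <= 0)				/* signed check catches it */
		printf("signed: error detected (%ld)\n", ssize);
	return 0;
}
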
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6f30231bdb88..befa9809ff00 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -447,6 +447,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file)
err = -ENOMEM;
goto __error;
}
+ rawmidi_file->user_pversion = 0;
init_waitqueue_entry(&wait, current);
add_wait_queue(&rmidi->open_wait, &wait);
while (1) {
diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c
index e1b69c65c3c8..e2b7be67f0e3 100644
--- a/sound/drivers/opl3/opl3_midi.c
+++ b/sound/drivers/opl3/opl3_midi.c
@@ -397,7 +397,7 @@ void snd_opl3_note_on(void *p, int note, int vel, struct snd_midi_channel *chan)
}
if (instr_4op) {
vp2 = &opl3->voices[voice + 3];
- if (vp->state > 0) {
+ if (vp2->state > 0) {
opl3_reg = reg_side | (OPL3_REG_KEYON_BLOCK +
voice_offset + 3);
reg_val = vp->keyon_reg & ~OPL3_KEYON_BIT;
diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
index c0123bc31c0d..b7758dbe2371 100644
--- a/sound/hda/intel-sdw-acpi.c
+++ b/sound/hda/intel-sdw-acpi.c
@@ -132,8 +132,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
return AE_NOT_FOUND;
}
- info->handle = handle;
-
/*
* On some Intel platforms, multiple children of the HDAS
* device can be found, but only one of them is the SoundWire
@@ -144,6 +142,9 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
if (FIELD_GET(GENMASK(31, 28), adr) != SDW_LINK_TYPE)
return AE_OK; /* keep going */
+ /* found the correct SoundWire controller */
+ info->handle = handle;
+
/* device found, stop namespace walk */
return AE_CTRL_TERMINATE;
}
@@ -164,8 +165,14 @@ int sdw_intel_acpi_scan(acpi_handle *parent_handle,
acpi_status status;
info->handle = NULL;
+ /*
+	 * In the HDAS ACPI scope, 'SNDW' may be either a child or a
+	 * grandchild of 'HDAS', so walk the ACPI namespace from 'HDAS'
+	 * with a maximum depth of 2 to find the 'SNDW' device.
+ */
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
- parent_handle, 1,
+ parent_handle, 2,
sdw_intel_acpi_cb,
NULL, info, NULL);
if (ACPI_FAILURE(status) || info->handle == NULL)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 415701bd10ac..ffcde7409d2a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2947,7 +2947,8 @@ static int parse_intel_hdmi(struct hda_codec *codec)
/* Intel Haswell and onwards; audio component with eld notifier */
static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
- const int *port_map, int port_num, int dev_num)
+ const int *port_map, int port_num, int dev_num,
+ bool send_silent_stream)
{
struct hdmi_spec *spec;
int err;
@@ -2980,7 +2981,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
* Enable silent stream feature, if it is enabled via
* module param or Kconfig option
*/
- if (enable_silent_stream)
+ if (send_silent_stream)
spec->send_silent_stream = true;
return parse_intel_hdmi(codec);
@@ -2988,12 +2989,18 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid,
static int patch_i915_hsw_hdmi(struct hda_codec *codec)
{
- return intel_hsw_common_init(codec, 0x08, NULL, 0, 3);
+ return intel_hsw_common_init(codec, 0x08, NULL, 0, 3,
+ enable_silent_stream);
}
static int patch_i915_glk_hdmi(struct hda_codec *codec)
{
- return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3);
+ /*
+ * Silent stream calls audio component .get_power() from
+ * .pin_eld_notify(). On GLK this will deadlock in i915 due
+ * to the audio vs. CDCLK workaround.
+ */
+ return intel_hsw_common_init(codec, 0x0b, NULL, 0, 3, false);
}
static int patch_i915_icl_hdmi(struct hda_codec *codec)
@@ -3004,7 +3011,8 @@ static int patch_i915_icl_hdmi(struct hda_codec *codec)
*/
static const int map[] = {0x0, 0x4, 0x6, 0x8, 0xa, 0xb};
- return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3);
+ return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 3,
+ enable_silent_stream);
}
static int patch_i915_tgl_hdmi(struct hda_codec *codec)
@@ -3016,7 +3024,8 @@ static int patch_i915_tgl_hdmi(struct hda_codec *codec)
static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf};
int ret;
- ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4);
+ ret = intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map), 4,
+ enable_silent_stream);
if (!ret) {
struct hdmi_spec *spec = codec->spec;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9ce7457533c9..28255e752c4a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6503,22 +6503,26 @@ static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
/* for alc285_fixup_ideapad_s740_coef() */
#include "ideapad_s740_helper.c"
-static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *codec,
- const struct hda_fixup *fix,
- int action)
+static const struct coef_fw alc256_fixup_set_coef_defaults_coefs[] = {
+ WRITE_COEF(0x10, 0x0020), WRITE_COEF(0x24, 0x0000),
+ WRITE_COEF(0x26, 0x0000), WRITE_COEF(0x29, 0x3000),
+ WRITE_COEF(0x37, 0xfe05), WRITE_COEF(0x45, 0x5089),
+ {}
+};
+
+static void alc256_fixup_set_coef_defaults(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
{
/*
- * A certain other OS sets these coeffs to different values. On at least one TongFang
- * barebone these settings might survive even a cold reboot. So to restore a clean slate the
- * values are explicitly reset to default here. Without this, the external microphone is
- * always in a plugged-in state, while the internal microphone is always in an unplugged
- * state, breaking the ability to use the internal microphone.
- */
- alc_write_coef_idx(codec, 0x24, 0x0000);
- alc_write_coef_idx(codec, 0x26, 0x0000);
- alc_write_coef_idx(codec, 0x29, 0x3000);
- alc_write_coef_idx(codec, 0x37, 0xfe05);
- alc_write_coef_idx(codec, 0x45, 0x5089);
+ * A certain other OS sets these coeffs to different values. On at least
+ * one TongFang barebone these settings might survive even a cold
+ * reboot. So to restore a clean slate the values are explicitly reset
+ * to default here. Without this, the external microphone is always in a
+ * plugged-in state, while the internal microphone is always in an
+ * unplugged state, breaking the ability to use the internal microphone.
+ */
+ alc_process_coef_fw(codec, alc256_fixup_set_coef_defaults_coefs);
}
static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
@@ -6542,6 +6546,23 @@ static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
}
+static void alc256_fixup_mic_no_presence_and_resume(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /*
+ * The Clevo NJ51CU comes either with the ALC293 or the ALC256 codec,
+ * but uses the 0x8686 subproduct id in both cases. The ALC256 codec
+	 * needs an additional quirk so that sound keeps working after
+	 * suspend and resume.
+ */
+ if (codec->core.vendor_id == 0x10ec0256) {
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ snd_hda_codec_set_pincfg(codec, 0x19, 0x04a11120);
+ } else {
+ snd_hda_codec_set_pincfg(codec, 0x1a, 0x04a1113c);
+ }
+}
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -6759,9 +6780,10 @@ enum {
ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
ALC287_FIXUP_13S_GEN2_SPEAKERS,
- ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
+ ALC256_FIXUP_SET_COEF_DEFAULTS,
ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
ALC233_FIXUP_NO_AUDIO_JACK,
+ ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8465,9 +8487,9 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE,
},
- [ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS] = {
+ [ALC256_FIXUP_SET_COEF_DEFAULTS] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc256_fixup_tongfang_reset_persistent_settings,
+ .v.func = alc256_fixup_set_coef_defaults,
},
[ALC245_FIXUP_HP_GPIO_LED] = {
.type = HDA_FIXUP_FUNC,
@@ -8486,6 +8508,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc233_fixup_no_audio_jack,
},
+ [ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc256_fixup_mic_no_presence_and_resume,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8656,6 +8684,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
+ SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
@@ -8701,6 +8730,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x88d0, "HP Pavilion 15-eh1xxx (mainboard 88D0)", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8825,7 +8855,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
SND_PCI_QUIRK(0x1558, 0x8a20, "Clevo NH55DCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8a51, "Clevo NH70RCQ-Y", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1558, 0x8d50, "Clevo NH55RCQ-M", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -8929,7 +8959,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
- SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS),
+ SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
@@ -9119,6 +9149,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
{.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
{}
};
#define ALC225_STANDARD_PINS \
@@ -10231,6 +10262,27 @@ static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
}
}
+static void alc897_hp_automute_hook(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ struct alc_spec *spec = codec->spec;
+ int vref;
+
+ snd_hda_gen_hp_automute(codec, jack);
+ vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
+ snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ vref);
+}
+
+static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+ }
+}
+
static const struct coef_fw alc668_coefs[] = {
WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
@@ -10311,6 +10363,8 @@ enum {
ALC668_FIXUP_ASUS_NO_HEADSET_MIC,
ALC668_FIXUP_HEADSET_MIC,
ALC668_FIXUP_MIC_DET_COEF,
+ ALC897_FIXUP_LENOVO_HEADSET_MIC,
+ ALC897_FIXUP_HEADSET_MIC_PIN,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -10717,6 +10771,19 @@ static const struct hda_fixup alc662_fixups[] = {
{}
},
},
+ [ALC897_FIXUP_LENOVO_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc897_fixup_lenovo_headset_mic,
+ },
+ [ALC897_FIXUP_HEADSET_MIC_PIN] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x03a11050 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -10761,6 +10828,10 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
+ SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
diff --git a/sound/soc/amd/yc/pci-acp6x.c b/sound/soc/amd/yc/pci-acp6x.c
index 957eeb6fb8e3..7e9a9a9d8ddd 100644
--- a/sound/soc/amd/yc/pci-acp6x.c
+++ b/sound/soc/amd/yc/pci-acp6x.c
@@ -146,10 +146,11 @@ static int snd_acp6x_probe(struct pci_dev *pci,
{
struct acp6x_dev_data *adata;
struct platform_device_info pdevinfo[ACP6x_DEVS];
- int ret, index;
+ int index = 0;
int val = 0x00;
u32 addr;
unsigned int irqflags;
+ int ret;
irqflags = IRQF_SHARED;
/* Yellow Carp device check */
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 04cb747c2b12..b34a8542077d 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -929,6 +929,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
unsigned int val, count;
if (jack_insert) {
+ snd_soc_dapm_mutex_lock(dapm);
+
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_MB,
RT5682_PWR_VREF2 | RT5682_PWR_MB);
@@ -979,6 +981,8 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
snd_soc_component_update_bits(component, RT5682_MICBIAS_2,
RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK,
RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU);
+
+ snd_soc_dapm_mutex_unlock(dapm);
} else {
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -2858,6 +2862,8 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
for (i = 0; i < RT5682_DAI_NUM_CLKS; ++i) {
struct clk_init_data init = { };
+ struct clk_parent_data parent_data;
+ const struct clk_hw *parent;
dai_clk_hw = &rt5682->dai_clks_hw[i];
@@ -2865,17 +2871,17 @@ int rt5682_register_dai_clks(struct rt5682_priv *rt5682)
case RT5682_DAI_WCLK_IDX:
/* Make MCLK the parent of WCLK */
if (rt5682->mclk) {
- init.parent_data = &(struct clk_parent_data){
+ parent_data = (struct clk_parent_data){
.fw_name = "mclk",
};
+ init.parent_data = &parent_data;
init.num_parents = 1;
}
break;
case RT5682_DAI_BCLK_IDX:
/* Make WCLK the parent of BCLK */
- init.parent_hws = &(const struct clk_hw *){
- &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX]
- };
+ parent = &rt5682->dai_clks_hw[RT5682_DAI_WCLK_IDX];
+ init.parent_hws = &parent;
init.num_parents = 1;
break;
default:
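
The rt5682 change above (mirrored for rt5682s below) stops storing pointers to compound literals, whose lifetime ends with the enclosing block, and uses named locals instead. A minimal userspace sketch of the scope rule involved; the struct names are made up and only mirror the clk_init_data/clk_parent_data shape:

/* Standalone illustration, not kernel code. */
#include <stdio.h>

struct parent_data { const char *fw_name; };
struct init_data   { const struct parent_data *parent_data; };

static void register_clk(const struct init_data *init)
{
	/* init->parent_data must still be alive at this point. */
	printf("parent: %s\n", init->parent_data->fw_name);
}

int main(void)
{
	struct init_data init = { .parent_data = NULL };
	struct parent_data parent_data;		/* scope covers register_clk() */
	int have_mclk = 1;

	if (have_mclk) {
		/*
		 * Writing
		 *     init.parent_data = &(struct parent_data){ .fw_name = "mclk" };
		 * here would leave init.parent_data dangling once this
		 * if-block ends.  Copying into a named object with a
		 * wider scope, as the patch does, keeps it valid.
		 */
		parent_data = (struct parent_data){ .fw_name = "mclk" };
		init.parent_data = &parent_data;
	}

	register_clk(&init);
	return 0;
}
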
diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
index 470957fcad6b..d49a4f68566d 100644
--- a/sound/soc/codecs/rt5682s.c
+++ b/sound/soc/codecs/rt5682s.c
@@ -2693,6 +2693,8 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
for (i = 0; i < RT5682S_DAI_NUM_CLKS; ++i) {
struct clk_init_data init = { };
+ struct clk_parent_data parent_data;
+ const struct clk_hw *parent;
dai_clk_hw = &rt5682s->dai_clks_hw[i];
@@ -2700,17 +2702,17 @@ static int rt5682s_register_dai_clks(struct snd_soc_component *component)
case RT5682S_DAI_WCLK_IDX:
/* Make MCLK the parent of WCLK */
if (rt5682s->mclk) {
- init.parent_data = &(struct clk_parent_data){
+ parent_data = (struct clk_parent_data){
.fw_name = "mclk",
};
+ init.parent_data = &parent_data;
init.num_parents = 1;
}
break;
case RT5682S_DAI_BCLK_IDX:
/* Make WCLK the parent of BCLK */
- init.parent_hws = &(const struct clk_hw *){
- &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX]
- };
+ parent = &rt5682s->dai_clks_hw[RT5682S_DAI_WCLK_IDX];
+ init.parent_hws = &parent;
init.num_parents = 1;
break;
default:
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 172e79cbe0da..6549e7fef3e3 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -291,11 +291,11 @@ static int tas2770_set_samplerate(struct tas2770_priv *tas2770, int samplerate)
ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
TAS2770_TDM_CFG_REG0_31_88_2_96KHZ;
break;
- case 19200:
+ case 192000:
ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_48KHZ |
TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
break;
- case 17640:
+ case 176400:
ramp_rate_val = TAS2770_TDM_CFG_REG0_SMP_44_1KHZ |
TAS2770_TDM_CFG_REG0_31_176_4_192KHZ;
break;
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index 4f568abd59e2..e63c6b723d76 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -3256,6 +3256,9 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
int value = ucontrol->value.integer.value[0];
int sel;
+ if (wcd->comp_enabled[comp] == value)
+ return 0;
+
wcd->comp_enabled[comp] = value;
sel = value ? WCD934X_HPH_GAIN_SRC_SEL_COMPANDER :
WCD934X_HPH_GAIN_SRC_SEL_REGISTER;
@@ -3279,10 +3282,10 @@ static int wcd934x_compander_set(struct snd_kcontrol *kc,
case COMPANDER_8:
break;
default:
- break;
+ return 0;
}
- return 0;
+ return 1;
}
static int wcd934x_rx_hph_mode_get(struct snd_kcontrol *kc,
@@ -3326,6 +3329,31 @@ static int slim_rx_mux_get(struct snd_kcontrol *kc,
return 0;
}
+static int slim_rx_mux_to_dai_id(int mux)
+{
+ int aif_id;
+
+ switch (mux) {
+ case 1:
+ aif_id = AIF1_PB;
+ break;
+ case 2:
+ aif_id = AIF2_PB;
+ break;
+ case 3:
+ aif_id = AIF3_PB;
+ break;
+ case 4:
+ aif_id = AIF4_PB;
+ break;
+ default:
+ aif_id = -1;
+ break;
+ }
+
+ return aif_id;
+}
+
static int slim_rx_mux_put(struct snd_kcontrol *kc,
struct snd_ctl_elem_value *ucontrol)
{
@@ -3333,43 +3361,59 @@ static int slim_rx_mux_put(struct snd_kcontrol *kc,
struct wcd934x_codec *wcd = dev_get_drvdata(w->dapm->dev);
struct soc_enum *e = (struct soc_enum *)kc->private_value;
struct snd_soc_dapm_update *update = NULL;
+ struct wcd934x_slim_ch *ch, *c;
u32 port_id = w->shift;
+ bool found = false;
+ int mux_idx;
+ int prev_mux_idx = wcd->rx_port_value[port_id];
+ int aif_id;
- if (wcd->rx_port_value[port_id] == ucontrol->value.enumerated.item[0])
- return 0;
+ mux_idx = ucontrol->value.enumerated.item[0];
- wcd->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+ if (mux_idx == prev_mux_idx)
+ return 0;
- switch (wcd->rx_port_value[port_id]) {
+	switch (mux_idx) {
case 0:
- list_del_init(&wcd->rx_chs[port_id].list);
- break;
- case 1:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF1_PB].slim_ch_list);
- break;
- case 2:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF2_PB].slim_ch_list);
- break;
- case 3:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF3_PB].slim_ch_list);
+ aif_id = slim_rx_mux_to_dai_id(prev_mux_idx);
+ if (aif_id < 0)
+ return 0;
+
+ list_for_each_entry_safe(ch, c, &wcd->dai[aif_id].slim_ch_list, list) {
+ if (ch->port == port_id + WCD934X_RX_START) {
+ found = true;
+ list_del_init(&ch->list);
+ break;
+ }
+ }
+ if (!found)
+ return 0;
+
break;
- case 4:
- list_add_tail(&wcd->rx_chs[port_id].list,
- &wcd->dai[AIF4_PB].slim_ch_list);
+ case 1 ... 4:
+ aif_id = slim_rx_mux_to_dai_id(mux_idx);
+ if (aif_id < 0)
+ return 0;
+
+ if (list_empty(&wcd->rx_chs[port_id].list)) {
+ list_add_tail(&wcd->rx_chs[port_id].list,
+ &wcd->dai[aif_id].slim_ch_list);
+ } else {
+			dev_err(wcd->dev, "SLIM_RX%d PORT is busy\n", port_id);
+ return 0;
+ }
break;
+
default:
- dev_err(wcd->dev, "Unknown AIF %d\n",
- wcd->rx_port_value[port_id]);
+ dev_err(wcd->dev, "Unknown AIF %d\n", mux_idx);
goto err;
}
+ wcd->rx_port_value[port_id] = mux_idx;
snd_soc_dapm_mux_update_power(w->dapm, kc, wcd->rx_port_value[port_id],
e, update);
- return 0;
+ return 1;
err:
return -EINVAL;
}
@@ -3815,6 +3859,7 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
struct soc_mixer_control *mixer =
(struct soc_mixer_control *)kc->private_value;
int enable = ucontrol->value.integer.value[0];
+ struct wcd934x_slim_ch *ch, *c;
int dai_id = widget->shift;
int port_id = mixer->shift;
@@ -3822,17 +3867,32 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,
if (enable == wcd->tx_port_value[port_id])
return 0;
- wcd->tx_port_value[port_id] = enable;
-
- if (enable)
- list_add_tail(&wcd->tx_chs[port_id].list,
- &wcd->dai[dai_id].slim_ch_list);
- else
- list_del_init(&wcd->tx_chs[port_id].list);
+ if (enable) {
+ if (list_empty(&wcd->tx_chs[port_id].list)) {
+ list_add_tail(&wcd->tx_chs[port_id].list,
+ &wcd->dai[dai_id].slim_ch_list);
+ } else {
+			dev_err(wcd->dev, "SLIM_TX%d PORT is busy\n", port_id);
+ return 0;
+ }
+ } else {
+ bool found = false;
+
+ list_for_each_entry_safe(ch, c, &wcd->dai[dai_id].slim_ch_list, list) {
+ if (ch->port == port_id) {
+ found = true;
+ list_del_init(&wcd->tx_chs[port_id].list);
+ break;
+ }
+ }
+ if (!found)
+ return 0;
+ }
+ wcd->tx_port_value[port_id] = enable;
snd_soc_dapm_mixer_update_power(widget->dapm, kc, enable, update);
- return 0;
+ return 1;
}
static const struct snd_kcontrol_new aif1_slim_cap_mixer[] = {
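
Several of the control callbacks touched here (wcd934x above, wsa881x and q6routing below) now return 1 only when the value actually changed and 0 otherwise, which is what the ALSA put() convention expects so that spurious change notifications are avoided. A kernel-style sketch of that convention, not buildable on its own; struct example_priv and the hardware programming step are hypothetical:

#include <sound/control.h>

/* Hypothetical driver state, only here to have something to compare
 * the incoming control value against. */
struct example_priv {
	bool enabled;
};

static int example_switch_put(struct snd_kcontrol *kcontrol,
			      struct snd_ctl_elem_value *ucontrol)
{
	struct example_priv *priv = snd_kcontrol_chip(kcontrol);
	bool enable = !!ucontrol->value.integer.value[0];

	if (priv->enabled == enable)
		return 0;	/* nothing changed, no notification */

	priv->enabled = enable;
	/* ... program the hardware here (placeholder) ... */

	return 1;		/* value changed, emit a notification */
}
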
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index 2da4a5fa7a18..564b78f3cdd0 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -772,7 +772,8 @@ static int wsa881x_put_pa_gain(struct snd_kcontrol *kc,
usleep_range(1000, 1010);
}
- return 0;
+
+ return 1;
}
static int wsa881x_get_port(struct snd_kcontrol *kcontrol,
@@ -816,15 +817,22 @@ static int wsa881x_set_port(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
int portidx = mixer->reg;
- if (ucontrol->value.integer.value[0])
+ if (ucontrol->value.integer.value[0]) {
+ if (data->port_enable[portidx])
+ return 0;
+
data->port_enable[portidx] = true;
- else
+ } else {
+ if (!data->port_enable[portidx])
+ return 0;
+
data->port_enable[portidx] = false;
+ }
if (portidx == WSA881X_PORT_BOOST) /* Boost Switch */
wsa881x_boost_ctrl(comp, data->port_enable[portidx]);
- return 0;
+ return 1;
}
static const char * const smart_boost_lvl_text[] = {
diff --git a/sound/soc/meson/aiu-encoder-i2s.c b/sound/soc/meson/aiu-encoder-i2s.c
index 932224552146..67729de41a73 100644
--- a/sound/soc/meson/aiu-encoder-i2s.c
+++ b/sound/soc/meson/aiu-encoder-i2s.c
@@ -18,7 +18,6 @@
#define AIU_RST_SOFT_I2S_FAST BIT(0)
#define AIU_I2S_DAC_CFG_MSB_FIRST BIT(2)
-#define AIU_I2S_MISC_HOLD_EN BIT(2)
#define AIU_CLK_CTRL_I2S_DIV_EN BIT(0)
#define AIU_CLK_CTRL_I2S_DIV GENMASK(3, 2)
#define AIU_CLK_CTRL_AOCLK_INVERT BIT(6)
@@ -36,37 +35,6 @@ static void aiu_encoder_i2s_divider_enable(struct snd_soc_component *component,
enable ? AIU_CLK_CTRL_I2S_DIV_EN : 0);
}
-static void aiu_encoder_i2s_hold(struct snd_soc_component *component,
- bool enable)
-{
- snd_soc_component_update_bits(component, AIU_I2S_MISC,
- AIU_I2S_MISC_HOLD_EN,
- enable ? AIU_I2S_MISC_HOLD_EN : 0);
-}
-
-static int aiu_encoder_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- aiu_encoder_i2s_hold(component, false);
- return 0;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- aiu_encoder_i2s_hold(component, true);
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
static int aiu_encoder_i2s_setup_desc(struct snd_soc_component *component,
struct snd_pcm_hw_params *params)
{
@@ -353,7 +321,6 @@ static void aiu_encoder_i2s_shutdown(struct snd_pcm_substream *substream,
}
const struct snd_soc_dai_ops aiu_encoder_i2s_dai_ops = {
- .trigger = aiu_encoder_i2s_trigger,
.hw_params = aiu_encoder_i2s_hw_params,
.hw_free = aiu_encoder_i2s_hw_free,
.set_fmt = aiu_encoder_i2s_set_fmt,
diff --git a/sound/soc/meson/aiu-fifo-i2s.c b/sound/soc/meson/aiu-fifo-i2s.c
index 2388a2d0b3a6..57e6e7160d2f 100644
--- a/sound/soc/meson/aiu-fifo-i2s.c
+++ b/sound/soc/meson/aiu-fifo-i2s.c
@@ -20,6 +20,8 @@
#define AIU_MEM_I2S_CONTROL_MODE_16BIT BIT(6)
#define AIU_MEM_I2S_BUF_CNTL_INIT BIT(0)
#define AIU_RST_SOFT_I2S_FAST BIT(0)
+#define AIU_I2S_MISC_HOLD_EN BIT(2)
+#define AIU_I2S_MISC_FORCE_LEFT_RIGHT BIT(4)
#define AIU_FIFO_I2S_BLOCK 256
@@ -90,6 +92,10 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
unsigned int val;
int ret;
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_HOLD_EN,
+ AIU_I2S_MISC_HOLD_EN);
+
ret = aiu_fifo_hw_params(substream, params, dai);
if (ret)
return ret;
@@ -117,6 +123,19 @@ static int aiu_fifo_i2s_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(component, AIU_MEM_I2S_MASKS,
AIU_MEM_I2S_MASKS_IRQ_BLOCK, val);
+ /*
+ * Most (all?) supported SoCs have this bit set by default. The vendor
+ * driver however sets it manually (depending on the version either
+ * while un-setting AIU_I2S_MISC_HOLD_EN or right before that). Follow
+ * the same approach for consistency with the vendor driver.
+ */
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_FORCE_LEFT_RIGHT,
+ AIU_I2S_MISC_FORCE_LEFT_RIGHT);
+
+ snd_soc_component_update_bits(component, AIU_I2S_MISC,
+ AIU_I2S_MISC_HOLD_EN, 0);
+
return 0;
}
diff --git a/sound/soc/meson/aiu-fifo.c b/sound/soc/meson/aiu-fifo.c
index 4ad23267cace..d67ff4cdabd5 100644
--- a/sound/soc/meson/aiu-fifo.c
+++ b/sound/soc/meson/aiu-fifo.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
@@ -179,6 +180,11 @@ int aiu_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd,
struct snd_card *card = rtd->card->snd_card;
struct aiu_fifo *fifo = dai->playback_dma_data;
size_t size = fifo->pcm->buffer_bytes_max;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
card->dev, size, size);
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index cd74681e811e..928fd23e2c27 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -498,14 +498,16 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
struct session_data *session = &data->sessions[session_id];
if (ucontrol->value.integer.value[0]) {
+ if (session->port_id == be_id)
+ return 0;
+
session->port_id = be_id;
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
} else {
- if (session->port_id == be_id) {
- session->port_id = -1;
+ if (session->port_id == -1 || session->port_id != be_id)
return 0;
- }
+ session->port_id = -1;
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
}
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 17b9b287853a..5f9cb5c4c7f0 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -95,6 +95,7 @@ struct rk_i2s_tdm_dev {
spinlock_t lock; /* xfer lock */
bool has_playback;
bool has_capture;
+ struct snd_soc_dai_driver *dai;
};
static int to_ch_num(unsigned int val)
@@ -1310,19 +1311,14 @@ static const struct of_device_id rockchip_i2s_tdm_match[] = {
{},
};
-static struct snd_soc_dai_driver i2s_tdm_dai = {
+static const struct snd_soc_dai_driver i2s_tdm_dai = {
.probe = rockchip_i2s_tdm_dai_probe,
- .playback = {
- .stream_name = "Playback",
- },
- .capture = {
- .stream_name = "Capture",
- },
.ops = &rockchip_i2s_tdm_dai_ops,
};
-static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
+static int rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
{
+ struct snd_soc_dai_driver *dai;
struct property *dma_names;
const char *dma_name;
u64 formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |
@@ -1337,19 +1333,33 @@ static void rockchip_i2s_tdm_init_dai(struct rk_i2s_tdm_dev *i2s_tdm)
i2s_tdm->has_capture = true;
}
+ dai = devm_kmemdup(i2s_tdm->dev, &i2s_tdm_dai,
+ sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
if (i2s_tdm->has_playback) {
- i2s_tdm_dai.playback.channels_min = 2;
- i2s_tdm_dai.playback.channels_max = 8;
- i2s_tdm_dai.playback.rates = SNDRV_PCM_RATE_8000_192000;
- i2s_tdm_dai.playback.formats = formats;
+ dai->playback.stream_name = "Playback";
+ dai->playback.channels_min = 2;
+ dai->playback.channels_max = 8;
+ dai->playback.rates = SNDRV_PCM_RATE_8000_192000;
+ dai->playback.formats = formats;
}
if (i2s_tdm->has_capture) {
- i2s_tdm_dai.capture.channels_min = 2;
- i2s_tdm_dai.capture.channels_max = 8;
- i2s_tdm_dai.capture.rates = SNDRV_PCM_RATE_8000_192000;
- i2s_tdm_dai.capture.formats = formats;
+ dai->capture.stream_name = "Capture";
+ dai->capture.channels_min = 2;
+ dai->capture.channels_max = 8;
+ dai->capture.rates = SNDRV_PCM_RATE_8000_192000;
+ dai->capture.formats = formats;
}
+
+ if (i2s_tdm->clk_trcm != TRCM_TXRX)
+ dai->symmetric_rate = 1;
+
+ i2s_tdm->dai = dai;
+
+ return 0;
}
static int rockchip_i2s_tdm_path_check(struct rk_i2s_tdm_dev *i2s_tdm,
@@ -1541,8 +1551,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
spin_lock_init(&i2s_tdm->lock);
i2s_tdm->soc_data = (struct rk_i2s_soc_data *)of_id->data;
- rockchip_i2s_tdm_init_dai(i2s_tdm);
-
i2s_tdm->frame_width = 64;
i2s_tdm->clk_trcm = TRCM_TXRX;
@@ -1555,8 +1563,10 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
}
i2s_tdm->clk_trcm = TRCM_RX;
}
- if (i2s_tdm->clk_trcm != TRCM_TXRX)
- i2s_tdm_dai.symmetric_rate = 1;
+
+ ret = rockchip_i2s_tdm_init_dai(i2s_tdm);
+ if (ret)
+ return ret;
i2s_tdm->grf = syscon_regmap_lookup_by_phandle(node, "rockchip,grf");
if (IS_ERR(i2s_tdm->grf))
@@ -1678,7 +1688,7 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_component(&pdev->dev,
&rockchip_i2s_tdm_component,
- &i2s_tdm_dai, 1);
+ i2s_tdm->dai, 1);
if (ret) {
dev_err(&pdev->dev, "Could not register DAI\n");
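
The rockchip_i2s_tdm change above stops mutating the file-scope DAI template and gives every device its own copy via devm_kmemdup(). A minimal userspace sketch of why a per-instance copy matters; malloc()+memcpy() stands in for devm_kmemdup() and the struct is made up:

/* Standalone illustration, not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dai_template {
	int playback_channels;
	int capture_channels;
};

static const struct dai_template i2s_tdm_dai = { 0, 0 };

static struct dai_template *init_dai(int has_playback, int has_capture)
{
	struct dai_template *dai = malloc(sizeof(*dai));

	if (!dai)
		return NULL;
	memcpy(dai, &i2s_tdm_dai, sizeof(*dai));	/* per-instance copy */

	if (has_playback)
		dai->playback_channels = 8;
	if (has_capture)
		dai->capture_channels = 8;
	return dai;
}

int main(void)
{
	/* Two controllers with different capabilities no longer stomp on
	 * each other's settings, which is what mutating the shared static
	 * template would have done. */
	struct dai_template *a = init_dai(1, 0);
	struct dai_template *b = init_dai(0, 1);

	if (!a || !b)
		return 1;
	printf("a: %d/%d  b: %d/%d\n",
	       a->playback_channels, a->capture_channels,
	       b->playback_channels, b->capture_channels);
	free(a);
	free(b);
	return 0;
}
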
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 6744318de612..13cd96e6724a 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -22,6 +22,7 @@
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC)
#define IDISP_VID_INTEL 0x80860000
+#define CODEC_PROBE_RETRIES 3
/* load the legacy HDA codec driver */
static int request_codec_module(struct hda_codec *codec)
@@ -121,12 +122,15 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
u32 hda_cmd = (address << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
u32 resp = -1;
- int ret;
+ int ret, retry = 0;
+
+ do {
+ mutex_lock(&hbus->core.cmd_mutex);
+ snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
+ snd_hdac_bus_get_response(&hbus->core, address, &resp);
+ mutex_unlock(&hbus->core.cmd_mutex);
+ } while (resp == -1 && retry++ < CODEC_PROBE_RETRIES);
- mutex_lock(&hbus->core.cmd_mutex);
- snd_hdac_bus_send_cmd(&hbus->core, hda_cmd);
- snd_hdac_bus_get_response(&hbus->core, address, &resp);
- mutex_unlock(&hbus->core.cmd_mutex);
if (resp == -1)
return -EIO;
dev_dbg(sdev->dev, "HDA codec #%d probed OK: response: %x\n",
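
The hda-codec change above retries the probe verb a bounded number of times before declaring the codec unreachable. A minimal userspace sketch of the same bounded-retry shape; try_probe() is a made-up stand-in for the verb exchange:

/* Standalone illustration, not kernel code. */
#include <stdio.h>

#define PROBE_RETRIES 3

static int attempts;

static int try_probe(void)
{
	/* Pretend the first two attempts fail. */
	return ++attempts < 3 ? -1 : 0x10ec0256;
}

int main(void)
{
	int resp = -1;
	int retry = 0;

	do {
		resp = try_probe();
	} while (resp == -1 && retry++ < PROBE_RETRIES);

	if (resp == -1) {
		fprintf(stderr, "probe failed after %d attempts\n", attempts);
		return 1;
	}
	printf("probed OK after %d attempt(s): response 0x%x\n", attempts, resp);
	return 0;
}
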
diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c
index f2ea34df9741..fd46210f1730 100644
--- a/sound/soc/sof/intel/pci-tgl.c
+++ b/sound/soc/sof/intel/pci-tgl.c
@@ -112,8 +112,12 @@ static const struct pci_device_id sof_pci_ids[] = {
.driver_data = (unsigned long)&adls_desc},
{ PCI_DEVICE(0x8086, 0x51c8), /* ADL-P */
.driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x51cd), /* ADL-P */
+ .driver_data = (unsigned long)&adl_desc},
{ PCI_DEVICE(0x8086, 0x51cc), /* ADL-M */
.driver_data = (unsigned long)&adl_desc},
+ { PCI_DEVICE(0x8086, 0x54c8), /* ADL-N */
+ .driver_data = (unsigned long)&adl_desc},
{ 0, }
};
MODULE_DEVICE_TABLE(pci, sof_pci_ids);
diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c
index 933c4503fe50..3785cade2d9a 100644
--- a/sound/soc/tegra/tegra210_adx.c
+++ b/sound/soc/tegra/tegra210_adx.c
@@ -514,8 +514,8 @@ static int tegra210_adx_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_adx_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_adx_runtime_suspend,
tegra210_adx_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_adx_driver = {
diff --git a/sound/soc/tegra/tegra210_amx.c b/sound/soc/tegra/tegra210_amx.c
index 689576302ede..d064cc67fea6 100644
--- a/sound/soc/tegra/tegra210_amx.c
+++ b/sound/soc/tegra/tegra210_amx.c
@@ -583,8 +583,8 @@ static int tegra210_amx_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_amx_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_amx_runtime_suspend,
tegra210_amx_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_amx_driver = {
diff --git a/sound/soc/tegra/tegra210_mixer.c b/sound/soc/tegra/tegra210_mixer.c
index 51d375573cfa..16e679a95658 100644
--- a/sound/soc/tegra/tegra210_mixer.c
+++ b/sound/soc/tegra/tegra210_mixer.c
@@ -666,8 +666,8 @@ static int tegra210_mixer_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_mixer_runtime_suspend,
tegra210_mixer_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_mixer_driver = {
diff --git a/sound/soc/tegra/tegra210_mvc.c b/sound/soc/tegra/tegra210_mvc.c
index 85b155887ec2..acf59328dcb6 100644
--- a/sound/soc/tegra/tegra210_mvc.c
+++ b/sound/soc/tegra/tegra210_mvc.c
@@ -164,7 +164,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
if (err < 0)
goto end;
- return 1;
+ err = 1;
end:
pm_runtime_put(cmpnt->dev);
@@ -236,7 +236,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
TEGRA210_MVC_VOLUME_SWITCH_MASK,
TEGRA210_MVC_VOLUME_SWITCH_TRIGGER);
- return 1;
+ err = 1;
end:
pm_runtime_put(cmpnt->dev);
@@ -639,8 +639,8 @@ static int tegra210_mvc_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_mvc_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_mvc_runtime_suspend,
tegra210_mvc_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_mvc_driver = {
diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c
index 7a2227ed3df6..368f077e7bee 100644
--- a/sound/soc/tegra/tegra210_sfc.c
+++ b/sound/soc/tegra/tegra210_sfc.c
@@ -3594,8 +3594,8 @@ static int tegra210_sfc_platform_remove(struct platform_device *pdev)
static const struct dev_pm_ops tegra210_sfc_pm_ops = {
SET_RUNTIME_PM_OPS(tegra210_sfc_runtime_suspend,
tegra210_sfc_runtime_resume, NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra210_sfc_driver = {
diff --git a/sound/soc/tegra/tegra_asoc_machine.c b/sound/soc/tegra/tegra_asoc_machine.c
index b95438c3dbf7..a73404879aa1 100644
--- a/sound/soc/tegra/tegra_asoc_machine.c
+++ b/sound/soc/tegra/tegra_asoc_machine.c
@@ -116,16 +116,24 @@ static const struct snd_kcontrol_new tegra_machine_controls[] = {
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Internal Mic 1"),
SOC_DAPM_PIN_SWITCH("Internal Mic 2"),
+ SOC_DAPM_PIN_SWITCH("Headphones"),
+ SOC_DAPM_PIN_SWITCH("Mic Jack"),
};
int tegra_asoc_machine_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_card *card = rtd->card;
struct tegra_machine *machine = snd_soc_card_get_drvdata(card);
+ const char *jack_name;
int err;
if (machine->gpiod_hp_det && machine->asoc->add_hp_jack) {
- err = snd_soc_card_jack_new(card, "Headphones Jack",
+ if (machine->asoc->hp_jack_name)
+ jack_name = machine->asoc->hp_jack_name;
+ else
+ jack_name = "Headphones Jack";
+
+ err = snd_soc_card_jack_new(card, jack_name,
SND_JACK_HEADPHONE,
&tegra_machine_hp_jack,
tegra_machine_hp_jack_pins,
@@ -658,6 +666,7 @@ static struct snd_soc_card snd_soc_tegra_max98090 = {
static const struct tegra_asoc_data tegra_max98090_data = {
.mclk_rate = tegra_machine_mclk_rate_12mhz,
.card = &snd_soc_tegra_max98090,
+ .hp_jack_name = "Headphones",
.add_common_dapm_widgets = true,
.add_common_controls = true,
.add_common_snd_ops = true,
diff --git a/sound/soc/tegra/tegra_asoc_machine.h b/sound/soc/tegra/tegra_asoc_machine.h
index d6a8d1320551..6f795d7dff7c 100644
--- a/sound/soc/tegra/tegra_asoc_machine.h
+++ b/sound/soc/tegra/tegra_asoc_machine.h
@@ -14,6 +14,7 @@ struct snd_soc_pcm_runtime;
struct tegra_asoc_data {
unsigned int (*mclk_rate)(unsigned int srate);
const char *codec_dev_name;
+ const char *hp_jack_name;
struct snd_soc_card *card;
unsigned int mclk_id;
bool hp_jack_gpio_active_low;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index d489c1de3bae..823b6b8de942 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -3016,11 +3016,11 @@ static const struct snd_djm_ctl snd_djm_ctls_750mk2[] = {
static const struct snd_djm_device snd_djm_devices[] = {
- SND_DJM_DEVICE(250mk2),
- SND_DJM_DEVICE(750),
- SND_DJM_DEVICE(750mk2),
- SND_DJM_DEVICE(850),
- SND_DJM_DEVICE(900nxs2)
+ [SND_DJM_250MK2_IDX] = SND_DJM_DEVICE(250mk2),
+ [SND_DJM_750_IDX] = SND_DJM_DEVICE(750),
+ [SND_DJM_850_IDX] = SND_DJM_DEVICE(850),
+ [SND_DJM_900NXS2_IDX] = SND_DJM_DEVICE(900nxs2),
+ [SND_DJM_750MK2_IDX] = SND_DJM_DEVICE(750mk2),
};
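
The mixer_quirks change above switches snd_djm_devices[] to designated initializers so each entry stays bound to its index constant no matter how the lines are ordered. A minimal userspace sketch of that property; the enum and struct are made up:

/* Standalone illustration, not kernel code. */
#include <stdio.h>

enum { DEV_250MK2_IDX, DEV_750_IDX, DEV_850_IDX, NUM_DEVS };

struct dev_desc { const char *name; };

static const struct dev_desc devices[NUM_DEVS] = {
	[DEV_250MK2_IDX] = { "250mk2" },
	[DEV_850_IDX]    = { "850" },	/* listing order no longer matters */
	[DEV_750_IDX]    = { "750" },
};

int main(void)
{
	int i;

	for (i = 0; i < NUM_DEVS; i++)
		printf("%d: %s\n", i, devices[i].name);
	return 0;
}
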
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index a59cb0ee609c..73409e27be01 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -83,6 +83,7 @@ struct btf_id {
int cnt;
};
int addr_cnt;
+ bool is_set;
Elf64_Addr addr[ADDR_CNT];
};
@@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj)
* in symbol's size, together with 'cnt' field hence
* that - 1.
*/
- if (id)
+ if (id) {
id->cnt = sym.st_size / sizeof(int) - 1;
+ id->is_set = true;
+ }
} else {
pr_err("FAILED unsupported prefix %s\n", prefix);
return -1;
@@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id)
int *ptr = data->d_buf;
int i;
- if (!id->id) {
+ if (!id->id && !id->is_set)
pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
- }
for (i = 0; i < id->addr_cnt; i++) {
unsigned long addr = id->addr[i];
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 45a9a59828c3..ae61f464043a 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \
numa_num_possible_cpus \
libperl \
libpython \
- libpython-version \
libslang \
libslang-include-subdir \
libtraceevent \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 0a3244ad9673..1480910c792e 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -32,7 +32,6 @@ FILES= \
test-numa_num_possible_cpus.bin \
test-libperl.bin \
test-libpython.bin \
- test-libpython-version.bin \
test-libslang.bin \
test-libslang-include-subdir.bin \
test-libtraceevent.bin \
@@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin:
$(OUTPUT)test-libpython.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
-$(OUTPUT)test-libpython-version.bin:
- $(BUILD)
-
$(OUTPUT)test-libbfd.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 0b243ce842be..5ffafb967b6e 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -14,10 +14,6 @@
# include "test-libpython.c"
#undef main
-#define main main_test_libpython_version
-# include "test-libpython-version.c"
-#undef main
-
#define main main_test_libperl
# include "test-libperl.c"
#undef main
@@ -177,7 +173,6 @@
int main(int argc, char *argv[])
{
main_test_libpython();
- main_test_libpython_version();
main_test_libperl();
main_test_hello();
main_test_libelf();
diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
deleted file mode 100644
index 47714b942d4d..000000000000
--- a/tools/build/feature/test-libpython-version.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <Python.h>
-
-#if PY_VERSION_HEX >= 0x03000000
- #error
-#endif
-
-int main(void)
-{
- return 0;
-}
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
deleted file mode 100644
index 72d595ce764a..000000000000
--- a/tools/include/linux/debug_locks.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
-#define _LIBLOCKDEP_DEBUG_LOCKS_H_
-
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <asm/bug.h>
-
-#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
-
-extern bool debug_locks;
-extern bool debug_locks_silent;
-
-#endif
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
deleted file mode 100644
index b25580b6a9be..000000000000
--- a/tools/include/linux/hardirq.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
-#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
-
-#define SOFTIRQ_BITS 0UL
-#define HARDIRQ_BITS 0UL
-#define SOFTIRQ_SHIFT 0UL
-#define HARDIRQ_SHIFT 0UL
-#define hardirq_count() 0UL
-#define softirq_count() 0UL
-
-#endif
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
deleted file mode 100644
index 501262aee8ff..000000000000
--- a/tools/include/linux/irqflags.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-
-# define lockdep_hardirq_context() 0
-# define lockdep_softirq_context(p) 0
-# define lockdep_hardirqs_enabled() 0
-# define lockdep_softirqs_enabled(p) 0
-# define lockdep_hardirq_enter() do { } while (0)
-# define lockdep_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-
-# define stop_critical_timings() do { } while (0)
-# define start_critical_timings() do { } while (0)
-
-#define raw_local_irq_disable() do { } while (0)
-#define raw_local_irq_enable() do { } while (0)
-#define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) ((void)(flags))
-#define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) ((void)(flags))
-#define raw_irqs_disabled() 0
-#define raw_safe_halt()
-
-#define local_irq_enable() do { } while (0)
-#define local_irq_disable() do { } while (0)
-#define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) ((void)(flags))
-#define local_save_flags(flags) ((flags) = 0)
-#define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) ((void)(flags), 0)
-#define safe_halt() do { } while (0)
-
-#define trace_lock_release(x, y)
-#define trace_lock_acquire(a, b, c, d, e, f, g)
-
-#endif
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
deleted file mode 100644
index e56997288f2b..000000000000
--- a/tools/include/linux/lockdep.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LOCKDEP_H_
-#define _LIBLOCKDEP_LOCKDEP_H_
-
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <limits.h>
-#include <linux/utsname.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/kern_levels.h>
-#include <linux/err.h>
-#include <linux/rcu.h>
-#include <linux/list.h>
-#include <linux/hardirq.h>
-#include <unistd.h>
-
-#define MAX_LOCK_DEPTH 63UL
-
-#define asmlinkage
-#define __visible
-
-#include "../../../include/linux/lockdep.h"
-
-struct task_struct {
- u64 curr_chain_key;
- int lockdep_depth;
- unsigned int lockdep_recursion;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
- int pid;
- int state;
- char comm[17];
-};
-
-#define TASK_RUNNING 0
-
-extern struct task_struct *__curr(void);
-
-#define current (__curr())
-
-static inline int debug_locks_off(void)
-{
- return 1;
-}
-
-#define task_pid_nr(tsk) ((tsk)->pid)
-
-#define KSYM_NAME_LEN 128
-#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define pr_warn pr_err
-#define pr_cont pr_err
-
-#define list_del_rcu list_del
-
-#define atomic_t unsigned long
-#define atomic_inc(x) ((*(x))++)
-
-#define print_tainted() ""
-#define static_obj(x) 1
-
-#define debug_show_all_locks()
-extern void debug_check_no_locks_held(void);
-
-static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
-{
- return false;
-}
-
-#endif
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
deleted file mode 100644
index 8b3b03b64fda..000000000000
--- a/tools/include/linux/proc_fs.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
-#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
-
-#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index c934572d935c..622266b197d0 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
return true;
}
-#include <linux/lockdep.h>
-
#endif
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
deleted file mode 100644
index ae343ac35bfa..000000000000
--- a/tools/include/linux/stacktrace.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
-#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
-
-#include <execinfo.h>
-
-struct stack_trace {
- unsigned int nr_entries, max_entries;
- unsigned long *entries;
- int skip;
-};
-
-static inline void print_stack_trace(struct stack_trace *trace, int spaces)
-{
- backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
-}
-
-#define save_stack_trace(trace) \
- ((trace)->nr_entries = \
- backtrace((void **)(trace)->entries, (trace)->max_entries))
-
-static inline int dump_stack(void)
-{
- void *array[64];
- size_t size;
-
- size = backtrace(array, 64);
- backtrace_symbols_fd(array, size, 1);
-
- return 0;
-}
-
-#endif
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index afd144725a0b..3df74cf5651a 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -271,8 +271,6 @@ endif
FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 7bef917cc84e..15109af9d075 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -528,3 +528,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index df5261e5cfe1..ed9c5c2eafad 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -451,3 +451,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv sys_futex_waitv
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index fa0ff4ce2b74..488f6e6ba1a5 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -223,8 +223,6 @@ static unsigned int group(pthread_t *pth,
snd_ctx->out_fds[i] = fds[1];
if (!thread_mode)
close(fds[0]);
-
- free(ctx);
}
/* Now we have all the fds, fork the senders */
@@ -241,8 +239,6 @@ static unsigned int group(pthread_t *pth,
for (i = 0; i < num_fds; i++)
close(snd_ctx->out_fds[i]);
- free(snd_ctx);
-
/* Return number of children to reap */
return num_fds * 2;
}
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index bc5259db5fd9..409b721666cb 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -755,12 +755,16 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str,
return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}
+static int output_fd(struct perf_inject *inject)
+{
+ return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
+}
+
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
struct perf_session *session = inject->session;
- struct perf_data *data_out = &inject->output;
- int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
+ int fd = output_fd(inject);
u64 output_data_offset;
signal(SIGINT, sig_handler);
@@ -820,7 +824,7 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Allow space in the header for new attributes */
- output_data_offset = 4096;
+ output_data_offset = roundup(8192 + session->header.data_offset, 4096);
if (inject->strip)
strip_init(inject);
}
@@ -1015,7 +1019,7 @@ int cmd_inject(int argc, const char **argv)
}
inject.session = __perf_session__new(&data, repipe,
- perf_data__fd(&inject.output),
+ output_fd(&inject),
&inject.tool);
if (IS_ERR(inject.session)) {
ret = PTR_ERR(inject.session);
@@ -1078,7 +1082,8 @@ out_delete:
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
out_close_output:
- perf_data__close(&inject.output);
+ if (!inject.in_place_update)
+ perf_data__close(&inject.output);
free(inject.itrace_synth_opts.vm_tm_corr_args);
return ret;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 9434367af166..c82b033e8942 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2473,7 +2473,7 @@ static int process_switch_event(struct perf_tool *tool,
if (perf_event__process_switch(tool, event, sample, machine) < 0)
return -1;
- if (scripting_ops && scripting_ops->process_switch)
+ if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
scripting_ops->process_switch(event, sample, machine);
if (!script->show_switch_events)
diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py
index 1d3a189a9a54..66452a8ec358 100644
--- a/tools/perf/scripts/python/intel-pt-events.py
+++ b/tools/perf/scripts/python/intel-pt-events.py
@@ -32,8 +32,7 @@ try:
except:
broken_pipe_exception = IOError
-glb_switch_str = None
-glb_switch_printed = True
+glb_switch_str = {}
glb_insn = False
glb_disassembler = None
glb_src = False
@@ -70,6 +69,7 @@ def trace_begin():
ap = argparse.ArgumentParser(usage = "", add_help = False)
ap.add_argument("--insn-trace", action='store_true')
ap.add_argument("--src-trace", action='store_true')
+ ap.add_argument("--all-switch-events", action='store_true')
global glb_args
global glb_insn
global glb_src
@@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn):
print(start_str, src_str)
def do_process_event(param_dict):
- global glb_switch_printed
- if not glb_switch_printed:
- print(glb_switch_str)
- glb_switch_printed = True
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
@@ -274,6 +270,11 @@ def do_process_event(param_dict):
dso = get_optional(param_dict, "dso")
symbol = get_optional(param_dict, "symbol")
+ cpu = sample["cpu"]
+ if cpu in glb_switch_str:
+ print(glb_switch_str[cpu])
+ del glb_switch_str[cpu]
+
if name[0:12] == "instructions":
if glb_src:
print_srccode(comm, param_dict, sample, symbol, dso, True)
@@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
sys.exit(1)
def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
- global glb_switch_printed
- global glb_switch_str
if out:
out_str = "Switch out "
else:
@@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree
machine_str = ""
else:
machine_str = "machine PID %d" % machine_pid
- glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
+ switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \
(out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str)
- glb_switch_printed = False
+ if glb_args.all_switch_events:
+ print(switch_str);
+ else:
+ global glb_switch_str
+ glb_switch_str[cpu] = switch_str
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index c895de481fe1..d54c5371c6a6 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
- TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+ if (num_dies) // Some platforms do not have CPU die support, for example s390
+ TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
/*
* Source count returns the number of events aggregating in a leader
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index 574b7e4efd3a..07b6f4ec024f 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
struct evsel *evsel;
u64 count;
+ perf_stat__reset_shadow_stats();
evlist__for_each_entry(evlist, evsel) {
count = find_value(evsel->name, vals);
perf_stat__update_shadow_stats(evsel, count, 0, st);
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index e9bfe856a5de..b1be59b4e2a4 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -170,9 +170,11 @@ void ui__exit(bool wait_for_ok)
"Press any key...", 0);
SLtt_set_cursor_visibility(1);
- SLsmg_refresh();
- SLsmg_reset_smg();
+ if (!pthread_mutex_trylock(&ui__lock)) {
+ SLsmg_refresh();
+ SLsmg_reset_smg();
+ pthread_mutex_unlock(&ui__lock);
+ }
SLang_reset_tty();
-
perf_error__unregister(&perf_tui_eops);
}
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
deleted file mode 100644
index 186a5551ddb9..000000000000
--- a/tools/perf/util/bpf_skel/bperf.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-// Copyright (c) 2021 Facebook
-
-#ifndef __BPERF_STAT_H
-#define __BPERF_STAT_H
-
-typedef struct {
- __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
- __uint(key_size, sizeof(__u32));
- __uint(value_size, sizeof(struct bpf_perf_event_value));
- __uint(max_entries, 1);
-} reading_map;
-
-#endif /* __BPERF_STAT_H */
diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
index b8fa3cb2da23..f193998530d4 100644
--- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
@@ -1,14 +1,23 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bperf.h"
#include "bperf_u.h"
-reading_map diff_readings SEC(".maps");
-reading_map accum_readings SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} diff_readings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} accum_readings SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_HASH);
diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
index 4f70d1459e86..e2a2d4cd7779 100644
--- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2021 Facebook
-#include <linux/bpf.h>
-#include <linux/perf_event.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-#include "bperf.h"
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -13,8 +11,19 @@ struct {
__uint(map_flags, BPF_F_PRESERVE_ELEMS);
} events SEC(".maps");
-reading_map prev_readings SEC(".maps");
-reading_map diff_readings SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} prev_readings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} diff_readings SEC(".maps");
SEC("raw_tp/sched_switch")
int BPF_PROG(on_switch)
diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
index ab12b4c4ece2..97037d3b3d9f 100644
--- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
+++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 95ffed66369c..c59331eea1d9 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -44,13 +44,16 @@ struct perf_event_attr;
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
+/* number of register is bound by the number of bits in regs_dump::mask (64) */
+#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64))
+
struct regs_dump {
u64 abi;
u64 mask;
u64 *regs;
/* Cached values/mask filled by first register access. */
- u64 cache_regs[PERF_REGS_MAX];
+ u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE];
u64 cache_mask;
};
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 1d532b9fed29..666b59baeb70 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -12,6 +12,7 @@
#include "expr-bison.h"
#include "expr-flex.h"
#include "smt.h"
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <ctype.h>
@@ -65,7 +66,12 @@ static bool key_equal(const void *key1, const void *key2,
struct hashmap *ids__new(void)
{
- return hashmap__new(key_hash, key_equal, NULL);
+ struct hashmap *hash;
+
+ hash = hashmap__new(key_hash, key_equal, NULL);
+ if (IS_ERR(hash))
+ return NULL;
+ return hash;
}
void ids__free(struct hashmap *ids)
@@ -299,6 +305,10 @@ struct expr_parse_ctx *expr__ctx_new(void)
return NULL;
ctx->ids = hashmap__new(key_hash, key_equal, NULL);
+ if (IS_ERR(ctx->ids)) {
+ free(ctx);
+ return NULL;
+ }
ctx->runtime = 0;
return ctx;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 79cce216727e..e3c1a532d059 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2321,6 +2321,7 @@ out:
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
+ free(ff->ph->env.__feat_env); \
ff->ph->env.__feat_env = do_read_string(ff); \
return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}
@@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session,
struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
int type = fe->header.type;
u64 feat = fe->feat_id;
+ int ret = 0;
if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
pr_warning("invalid record type %d in pipe-mode\n", type);
@@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session,
ff.size = event->header.size - sizeof(*fe);
ff.ph = &session->header;
- if (feat_ops[feat].process(&ff, NULL))
- return -1;
+ if (feat_ops[feat].process(&ff, NULL)) {
+ ret = -1;
+ goto out;
+ }
if (!feat_ops[feat].print || !tool->show_feat_hdr)
- return 0;
+ goto out;
if (!feat_ops[feat].full_only ||
tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
@@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session,
fprintf(stdout, "# %s info available, use -I to display\n",
feat_ops[feat].name);
}
-
- return 0;
+out:
+ free_event_desc(ff.events);
+ return ret;
}
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 5f83937bf8f3..0e013c2d9eb4 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1205,61 +1205,69 @@ out_no_progress:
static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
{
+ enum intel_pt_sample_type type = decoder->state.type;
bool ret = false;
+ decoder->state.type &= ~INTEL_PT_BRANCH;
+
if (decoder->set_fup_tx_flags) {
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
- decoder->state.type = INTEL_PT_TRANSACTION;
+ decoder->state.type |= INTEL_PT_TRANSACTION;
if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX)
decoder->state.type |= INTEL_PT_BRANCH;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
- return true;
+ ret = true;
}
if (decoder->set_fup_ptw) {
decoder->set_fup_ptw = false;
- decoder->state.type = INTEL_PT_PTW;
+ decoder->state.type |= INTEL_PT_PTW;
decoder->state.flags |= INTEL_PT_FUP_IP;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
decoder->state.ptw_payload = decoder->fup_ptw_payload;
- return true;
+ ret = true;
}
if (decoder->set_fup_mwait) {
decoder->set_fup_mwait = false;
- decoder->state.type = INTEL_PT_MWAIT_OP;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
+ decoder->state.type |= INTEL_PT_MWAIT_OP;
decoder->state.mwait_payload = decoder->fup_mwait_payload;
ret = true;
}
if (decoder->set_fup_pwre) {
decoder->set_fup_pwre = false;
decoder->state.type |= INTEL_PT_PWR_ENTRY;
- decoder->state.type &= ~INTEL_PT_BRANCH;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
decoder->state.pwre_payload = decoder->fup_pwre_payload;
ret = true;
}
if (decoder->set_fup_exstop) {
decoder->set_fup_exstop = false;
decoder->state.type |= INTEL_PT_EX_STOP;
- decoder->state.type &= ~INTEL_PT_BRANCH;
decoder->state.flags |= INTEL_PT_FUP_IP;
- decoder->state.from_ip = decoder->ip;
- decoder->state.to_ip = 0;
ret = true;
}
if (decoder->set_fup_bep) {
decoder->set_fup_bep = false;
decoder->state.type |= INTEL_PT_BLK_ITEMS;
- decoder->state.type &= ~INTEL_PT_BRANCH;
+ ret = true;
+ }
+ if (decoder->overflow) {
+ decoder->overflow = false;
+ if (!ret && !decoder->pge) {
+ if (decoder->hop) {
+ decoder->state.type = 0;
+ decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
+ }
+ decoder->pge = true;
+ decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN;
+ decoder->state.from_ip = 0;
+ decoder->state.to_ip = decoder->ip;
+ return true;
+ }
+ }
+ if (ret) {
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
- ret = true;
+ } else {
+ decoder->state.type = type;
}
return ret;
}
@@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
intel_pt_clear_tx_flags(decoder);
intel_pt_set_nr(decoder);
decoder->timestamp_insn_cnt = 0;
- decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+ decoder->state.from_ip = decoder->ip;
+ decoder->ip = 0;
+ decoder->pge = false;
+ decoder->set_fup_tx_flags = false;
+ decoder->set_fup_ptw = false;
+ decoder->set_fup_mwait = false;
+ decoder->set_fup_pwre = false;
+ decoder->set_fup_exstop = false;
+ decoder->set_fup_bep = false;
decoder->overflow = true;
return -EOVERFLOW;
}
@@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder);
/* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */
static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err)
{
+ *err = 0;
+
/* Leap from PSB to PSB, getting ip from FUP within PSB+ */
if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) {
*err = intel_pt_scan_for_psb(decoder);
@@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
return HOP_IGNORE;
case INTEL_PT_TIP_PGD:
+ decoder->pge = false;
if (!decoder->packet.count) {
intel_pt_set_nr(decoder);
return HOP_IGNORE;
@@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in
if (!decoder->packet.count)
return HOP_IGNORE;
intel_pt_set_ip(decoder);
- if (intel_pt_fup_event(decoder))
- return HOP_RETURN;
- if (!decoder->branch_enable)
+ if (decoder->set_fup_mwait || decoder->set_fup_pwre)
+ *no_tip = true;
+ if (!decoder->branch_enable || !decoder->pge)
*no_tip = true;
if (*no_tip) {
decoder->state.type = INTEL_PT_INSTRUCTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
+ intel_pt_fup_event(decoder);
return HOP_RETURN;
}
+ intel_pt_fup_event(decoder);
+ decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH;
*err = intel_pt_walk_fup_tip(decoder);
- if (!*err)
+ if (!*err && decoder->state.to_ip)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
return HOP_RETURN;
@@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err)
{
struct intel_pt_psb_info data = { .fup = false };
- if (!decoder->branch_enable || !decoder->pge)
+ if (!decoder->branch_enable)
return false;
intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data);
@@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
if (err)
return err;
next:
+ err = 0;
if (decoder->cyc_threshold) {
if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC)
decoder->sample_cyc = false;
@@ -2962,6 +2986,7 @@ next:
case INTEL_PT_TIP_PGE: {
decoder->pge = true;
+ decoder->overflow = false;
intel_pt_mtc_cyc_cnt_pge(decoder);
intel_pt_set_nr(decoder);
if (decoder->packet.count == 0) {
@@ -2999,7 +3024,7 @@ next:
break;
}
intel_pt_set_last_ip(decoder);
- if (!decoder->branch_enable) {
+ if (!decoder->branch_enable || !decoder->pge) {
decoder->ip = decoder->last_ip;
if (intel_pt_fup_event(decoder))
return 0;
@@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
decoder->set_fup_pwre = false;
decoder->set_fup_exstop = false;
decoder->set_fup_bep = false;
+ decoder->overflow = false;
if (!decoder->branch_enable) {
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- decoder->overflow = false;
decoder->state.type = 0; /* Do not have a sample */
return 0;
}
@@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
decoder->pkt_state = INTEL_PT_STATE_RESAMPLE;
else
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
- decoder->overflow = false;
decoder->state.from_ip = 0;
decoder->state.to_ip = decoder->ip;
@@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
}
decoder->have_last_ip = true;
- decoder->pkt_state = INTEL_PT_STATE_NO_IP;
+ decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
err = intel_pt_walk_psb(decoder);
if (err)
@@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
if (err) {
decoder->state.err = intel_pt_ext_err(err);
- decoder->state.from_ip = decoder->ip;
+ if (err != -EOVERFLOW)
+ decoder->state.from_ip = decoder->ip;
intel_pt_update_sample_time(decoder);
decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
intel_pt_set_nr(decoder);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 556a893508da..e8613cbda331 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
ptq->sync_switch = false;
intel_pt_next_tid(pt, ptq);
}
+ ptq->timestamp = state->est_timestamp;
if (pt->synth_opts.errors) {
err = intel_ptq_synth_error(ptq, state);
if (err)
@@ -3624,6 +3625,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
*args = p;
return 0;
}
+ p += 1;
while (1) {
vmcs = strtoull(p, &p, 0);
if (errno)
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 5ee47ae1509c..06a7461ba864 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
int i, idx = 0;
u64 mask = regs->mask;
+ if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE)
+ return -EINVAL;
+
if (regs->cache_mask & (1ULL << id))
goto out;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 6ae58406f4fc..8dfbba15aeb8 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1659,6 +1659,21 @@ bool is_pmu_core(const char *name)
return !strcmp(name, "cpu") || is_arm_pmu_core(name);
}
+static bool pmu_alias_is_duplicate(struct sevent *alias_a,
+ struct sevent *alias_b)
+{
+ /* Different names -> never duplicates */
+ if (strcmp(alias_a->name, alias_b->name))
+ return false;
+
+ /* Don't remove duplicates for hybrid PMUs */
+ if (perf_pmu__is_hybrid(alias_a->pmu) &&
+ perf_pmu__is_hybrid(alias_b->pmu))
+ return false;
+
+ return true;
+}
+
void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
bool long_desc, bool details_flag, bool deprecated,
const char *pmu_name)
@@ -1744,12 +1759,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (j = 0; j < len; j++) {
/* Skip duplicates */
- if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name)) {
- if (!aliases[j].pmu || !aliases[j - 1].pmu ||
- !strcmp(aliases[j].pmu, aliases[j - 1].pmu)) {
- continue;
- }
- }
+ if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
+ continue;
if (name_only) {
printf("%s ", aliases[j].name);
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 563a9ba8954f..7f782a31bda3 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
struct tep_event *tp_format;
tp_format = trace_event__tp_format_id(evsel->core.attr.config);
- if (!tp_format)
+ if (IS_ERR_OR_NULL(tp_format))
return NULL;
evsel->tp_format = tp_format;
diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c
index 20bacd5972ad..34f1b1b1176c 100644
--- a/tools/perf/util/smt.c
+++ b/tools/perf/util/smt.c
@@ -15,7 +15,7 @@ int smt_on(void)
if (cached)
return cached_result;
- if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0)
+ if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0)
goto done;
ncpu = sysconf(_SC_NPROCESSORS_CONF);
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
index 331f6d30f472..cd7106876a5f 100644
--- a/tools/power/acpi/Makefile.config
+++ b/tools/power/acpi/Makefile.config
@@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include
ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica
CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
CFLAGS += $(WARNINGS)
+MKDIR = mkdir
ifeq ($(strip $(V)),false)
QUIET=@
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
index 2a6c170b57cd..1d7616f5d0ae 100644
--- a/tools/power/acpi/Makefile.rules
+++ b/tools/power/acpi/Makefile.rules
@@ -21,6 +21,7 @@ $(KERNEL_INCLUDE):
$(objdir)%.o: %.c $(KERNEL_INCLUDE)
$(ECHO) " CC " $(subst $(OUTPUT),,$@)
+ $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null
$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
all: $(OUTPUT)$(TOOL)
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 5d52ea2768df..df3b292a8ffe 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n)
return sum;
}
+__weak noinline struct file *bpf_testmod_return_ptr(int arg)
+{
+ static struct file f = {};
+
+ switch (arg) {
+ case 1: return (void *)EINVAL; /* user addr */
+ case 2: return (void *)0xcafe4a11; /* user addr */
+ case 3: return (void *)-EINVAL; /* canonical, but invalid */
+ case 4: return (void *)(1ull << 60); /* non-canonical and invalid */
+ case 5: return (void *)~(1ull << 30); /* trigger extable */
+ case 6: return &f; /* valid addr */
+ case 7: return (void *)((long)&f | 1); /* kernel tricks */
+ default: return NULL;
+ }
+}
+
noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr,
@@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
.off = off,
.len = len,
};
+ int i = 1;
+
+ while (bpf_testmod_return_ptr(i))
+ i++;
/* This is always true. Use the check to make sure the compiler
* doesn't remove bpf_testmod_loop_test.
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
index 762f6a9da8b5..664ffc0364f4 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
@@ -90,7 +90,7 @@ static void print_err_line(void)
static void test_conn(void)
{
- int listen_fd = -1, cli_fd = -1, err;
+ int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
socklen_t addrlen = sizeof(srv_sa6);
int srv_port;
@@ -112,6 +112,10 @@ static void test_conn(void)
if (CHECK_FAIL(cli_fd == -1))
goto done;
+ srv_fd = accept(listen_fd, NULL, NULL);
+ if (CHECK_FAIL(srv_fd == -1))
+ goto done;
+
if (CHECK(skel->bss->listen_tp_sport != srv_port ||
skel->bss->req_sk_sport != srv_port,
"Unexpected sk src port",
@@ -134,11 +138,13 @@ done:
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
}
static void test_syncookie(void)
{
- int listen_fd = -1, cli_fd = -1, err;
+ int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
socklen_t addrlen = sizeof(srv_sa6);
int srv_port;
@@ -161,6 +167,10 @@ static void test_syncookie(void)
if (CHECK_FAIL(cli_fd == -1))
goto done;
+ srv_fd = accept(listen_fd, NULL, NULL);
+ if (CHECK_FAIL(srv_fd == -1))
+ goto done;
+
if (CHECK(skel->bss->listen_tp_sport != srv_port,
"Unexpected tp src port",
"listen_tp_sport:%u expected:%u\n",
@@ -188,6 +198,8 @@ done:
close(listen_fd);
if (cli_fd != -1)
close(cli_fd);
+ if (srv_fd != -1)
+ close(srv_fd);
}
struct test {
diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c
index b36857093f71..50ce16d02da7 100644
--- a/tools/testing/selftests/bpf/progs/test_module_attach.c
+++ b/tools/testing/selftests/bpf/progs/test_module_attach.c
@@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit,
return 0;
}
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+ long buf = 0;
+
+ bpf_probe_read_kernel(&buf, 8, ret);
+ bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+ *(volatile long long *)ret;
+ *(volatile int *)&ret->f_mode;
+ return 0;
+}
+
__u32 fmod_ret_read_sz = 0;
SEC("fmod_ret/bpf_testmod_test_read")
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 465ef3f112c0..d3bf83d5c6cf 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -54,7 +54,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 21
+#define MAX_NR_MAPS 22
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
index c22dc83a41fd..b39665f33524 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c
@@ -138,6 +138,8 @@
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr into mem",
},
{
"Dest pointer in r0 - succeed",
@@ -156,4 +158,88 @@
BPF_EXIT_INSN(),
},
.result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+ "Dest pointer in r0 - succeed, check 2",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* r1 = *r0 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+ "Dest pointer in r0 - succeed, check 3",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid size of register fill",
+ .errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+ "Dest pointer in r0 - succeed, check 4",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* r1 = *r10 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "R10 partial copy of pointer",
+},
+{
+ "Dest pointer in r0 - succeed, check 5",
+ .insns = {
+ /* r0 = &val */
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+ /* val = r0; */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ /* r5 = &val */
+ BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+ /* r0 = atomic_cmpxchg(&val, r0, r5); */
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+ /* r1 = *r0 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
+ /* exit(0); */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access",
+ .errstr_unpriv = "R10 partial copy of pointer",
},
diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch.c b/tools/testing/selftests/bpf/verifier/atomic_fetch.c
index 3bc9ff7a860b..5bf03fb4fa2b 100644
--- a/tools/testing/selftests/bpf/verifier/atomic_fetch.c
+++ b/tools/testing/selftests/bpf/verifier/atomic_fetch.c
@@ -1,3 +1,97 @@
+{
+ "atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+ "atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+ "atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = REJECT,
+ .errstr = "invalid size of register fill",
+},
+{
+ "atomic w/fetch and address leakage of (map ptr & -1) via returned value",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_1, -1),
+ BPF_LD_MAP_FD(BPF_REG_8, 0),
+ BPF_LD_MAP_FD(BPF_REG_9, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+ BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 2, 4 },
+ .result = REJECT,
+ .errstr = "invalid size of register fill",
+},
#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
{ \
"atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \
diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c
index 7e50cb80873a..682519769fe3 100644
--- a/tools/testing/selftests/bpf/verifier/search_pruning.c
+++ b/tools/testing/selftests/bpf/verifier/search_pruning.c
@@ -133,6 +133,77 @@
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
+ "precision tracking for u32 spill/fill",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV32_IMM(BPF_REG_6, 32),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV32_IMM(BPF_REG_6, 4),
+ /* Additional insns to introduce a pruning point. */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ /* u32 spill/fill */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
+ /* out-of-bound map value access for r6=32 */
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 15 },
+ .result = REJECT,
+ .errstr = "R0 min value is outside of the allowed memory range",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "precision tracking for u32 spills, u64 fill",
+ .insns = {
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
+ /* Additional insns to introduce a pruning point. */
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+ /* u32 spills, u64 fill */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
+ /* if r8 != X goto pc+1 r8 known in fallthrough branch */
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 1),
+ /* if r8 == X goto pc+1 condition always true on first
+ * traversal, so starts backtracking to mark r8 as requiring
+ * precision. r7 marked as needing precision. r6 not marked
+ * since it's not tracked.
+ */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
+ /* fails if r8 correctly marked unknown after fill. */
+ BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "div by zero",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
"allocated_stack",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index 7ab3de108761..6c907144311f 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -176,6 +176,38 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "Spill u32 const scalars. Refill as u64. Offset to skb->data",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ /* r6 = 0 */
+ BPF_MOV32_IMM(BPF_REG_6, 0),
+ /* r7 = 20 */
+ BPF_MOV32_IMM(BPF_REG_7, 20),
+ /* *(u32 *)(r10 -4) = r6 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+ /* *(u32 *)(r10 -8) = r7 */
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+ /* r4 = *(u64 *)(r10 -8) */
+ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+ /* r0 = r2 */
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+ /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+ /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "invalid access to packet",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
+{
"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 2debba4e8a3a..359f3e8f8b60 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -1078,6 +1078,29 @@
.errstr_unpriv = "R0 pointer -= pointer prohibited",
},
{
+ "map access: trying to leak tainted dst reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF),
+ BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_array_48b = { 4 },
+ .result = REJECT,
+ .errstr = "math between map_value pointer and 4294967295 is not allowed",
+},
+{
"32bit pkt_ptr -= scalar",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
index bfb97383e6b5..b4ec228eb95d 100644
--- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
@@ -35,7 +35,7 @@
.prog_type = BPF_PROG_TYPE_XDP,
},
{
- "XDP pkt read, pkt_data' > pkt_end, good access",
+ "XDP pkt read, pkt_data' > pkt_end, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -88,6 +88,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_end > pkt_data', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -106,16 +141,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -143,6 +178,42 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_end > pkt_data', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end > pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data' < pkt_end, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -161,16 +232,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -198,7 +269,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end < pkt_data', good access",
+ "XDP pkt read, pkt_data' < pkt_end, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -251,6 +358,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_end < pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data' >= pkt_end, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -268,15 +410,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -304,7 +446,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end >= pkt_data', good access",
+ "XDP pkt read, pkt_data' >= pkt_end, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -359,7 +535,44 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data' <= pkt_end, good access",
+ "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
@@ -414,6 +627,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_end <= pkt_data', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
@@ -431,15 +681,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct xdp_md, data_end)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -467,7 +717,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' > pkt_data, good access",
+ "XDP pkt read, pkt_end <= pkt_data', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -520,6 +804,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data > pkt_meta', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -538,16 +857,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data > pkt_meta', bad access 1",
+ "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -575,6 +894,42 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data > pkt_meta', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_meta' < pkt_data, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -593,16 +948,16 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' < pkt_data, bad access 1",
+ "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -630,7 +985,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data < pkt_meta', good access",
+ "XDP pkt read, pkt_meta' < pkt_data, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -683,6 +1074,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_meta' >= pkt_data, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -700,15 +1126,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
+ "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -736,7 +1162,41 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data >= pkt_meta', good access",
+ "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -791,7 +1251,44 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_meta' <= pkt_data, good access",
+ "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
@@ -846,6 +1343,43 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
+ "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
"XDP pkt read, pkt_data <= pkt_meta', good access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -863,15 +1397,15 @@
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
- "XDP pkt read, pkt_data <= pkt_meta', bad access 1",
+ "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct xdp_md, data_meta)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
@@ -898,3 +1432,37 @@
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', corner case, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+ "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
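Note on the new verifier cases above: each one copies a packet pointer, advances the copy by N bytes, compares it against the end of its valid region (pkt_data, pkt_end or pkt_meta, as appropriate), and then performs an 8-byte (DW) load at offset -N. For a DW load the exact corner is N == 8; the "+1" variants use N == 9 and stay in bounds, while the "-1" variants use N == 7 and must be rejected with "R1 offset is outside of the packet". A rough C equivalent of one accepted corner case, illustrative only and not part of the patch (assumes the usual libbpf headers):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_corner_case(struct xdp_md *ctx)
	{
		void *data     = (void *)(long)ctx->data;
		void *data_end = (void *)(long)ctx->data_end;
		__u8 *p = (__u8 *)data + 8;	/* N == 8: exact corner for a DW load */

		if ((void *)p > data_end)	/* bail out if data + 8 > data_end */
			return XDP_PASS;
		/* safe: reads bytes [data, data + 8), ending exactly at data_end */
		return *(__u64 *)(p - 8) ? XDP_DROP : XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";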
diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore
new file mode 100644
index 000000000000..c6c2965a6607
--- /dev/null
+++ b/tools/testing/selftests/damon/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+huge_count_read_write
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 8a3f2cd9fec0..937d36ae9a69 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -1,7 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for damon selftests
-TEST_FILES = _chk_dependency.sh
-TEST_PROGS = debugfs_attrs.sh
+TEST_GEN_FILES += huge_count_read_write
+
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
+TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
include ../lib.mk
diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh
new file mode 100644
index 000000000000..48989d4813ae
--- /dev/null
+++ b/tools/testing/selftests/damon/_debugfs_common.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+test_write_result() {
+ file=$1
+ content=$2
+ orig_content=$3
+ expect_reason=$4
+ expected=$5
+
+ echo "$content" > "$file"
+ if [ $? -ne "$expected" ]
+ then
+ echo "writing $content to $file doesn't return $expected"
+ echo "expected because: $expect_reason"
+ echo "$orig_content" > "$file"
+ exit 1
+ fi
+}
+
+test_write_succ() {
+ test_write_result "$1" "$2" "$3" "$4" 0
+}
+
+test_write_fail() {
+ test_write_result "$1" "$2" "$3" "$4" 1
+}
+
+test_content() {
+ file=$1
+ orig_content=$2
+ expected=$3
+ expect_reason=$4
+
+ content=$(cat "$file")
+ if [ "$content" != "$expected" ]
+ then
+ echo "reading $file expected $expected but $content"
+ echo "expected because: $expect_reason"
+ echo "$orig_content" > "$file"
+ exit 1
+ fi
+}
+
+source ./_chk_dependency.sh
+
+damon_onoff="$DBGFS/monitor_on"
+if [ $(cat "$damon_onoff") = "on" ]
+then
+ echo "monitoring is on"
+ exit $ksft_skip
+fi
diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh
index 196b6640bf37..902e312bca89 100644
--- a/tools/testing/selftests/damon/debugfs_attrs.sh
+++ b/tools/testing/selftests/damon/debugfs_attrs.sh
@@ -1,48 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-test_write_result() {
- file=$1
- content=$2
- orig_content=$3
- expect_reason=$4
- expected=$5
-
- echo "$content" > "$file"
- if [ $? -ne "$expected" ]
- then
- echo "writing $content to $file doesn't return $expected"
- echo "expected because: $expect_reason"
- echo "$orig_content" > "$file"
- exit 1
- fi
-}
-
-test_write_succ() {
- test_write_result "$1" "$2" "$3" "$4" 0
-}
-
-test_write_fail() {
- test_write_result "$1" "$2" "$3" "$4" 1
-}
-
-test_content() {
- file=$1
- orig_content=$2
- expected=$3
- expect_reason=$4
-
- content=$(cat "$file")
- if [ "$content" != "$expected" ]
- then
- echo "reading $file expected $expected but $content"
- echo "expected because: $expect_reason"
- echo "$orig_content" > "$file"
- exit 1
- fi
-}
-
-source ./_chk_dependency.sh
+source _debugfs_common.sh
# Test attrs file
# ===============
@@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \
"min_nr_regions > max_nr_regions"
test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written"
echo "$orig_content" > "$file"
-
-# Test schemes file
-# =================
-
-file="$DBGFS/schemes"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
- "$orig_content" "valid input"
-test_write_fail "$file" "1 2
-3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
-test_write_succ "$file" "" "$orig_content" "disabling"
-echo "$orig_content" > "$file"
-
-# Test target_ids file
-# ====================
-
-file="$DBGFS/target_ids"
-orig_content=$(cat "$file")
-
-test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
-test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
-test_content "$file" "$orig_content" "1 2" "non-integer was there"
-test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
-test_content "$file" "$orig_content" "" "wrong input written"
-test_write_succ "$file" "" "$orig_content" "empty input"
-test_content "$file" "$orig_content" "" "empty input written"
-echo "$orig_content" > "$file"
-
-echo "PASS"
diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh
new file mode 100644
index 000000000000..87aff8083822
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_empty_targets.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test empty targets case
+# =======================
+
+orig_target_ids=$(cat "$DBGFS/target_ids")
+echo "" > "$DBGFS/target_ids"
+orig_monitor_on=$(cat "$DBGFS/monitor_on")
+test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids"
+echo "$orig_target_ids" > "$DBGFS/target_ids"
diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
new file mode 100644
index 000000000000..922cadac2950
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test huge count read write
+# ==========================
+
+dmesg -C
+
+for file in "$DBGFS/"*
+do
+ ./huge_count_read_write "$file"
+done
+
+if dmesg | grep -q WARNING
+then
+ dmesg
+ exit 1
+else
+ exit 0
+fi
diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh
new file mode 100644
index 000000000000..5b39ab44731c
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_schemes.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test schemes file
+# =================
+
+file="$DBGFS/schemes"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \
+ "$orig_content" "valid input"
+test_write_fail "$file" "1 2
+3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines"
+test_write_succ "$file" "" "$orig_content" "disabling"
+test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \
+ "$orig_content" "wrong condition ranges"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh
new file mode 100644
index 000000000000..49aeabdb0aae
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_target_ids.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test target_ids file
+# ====================
+
+file="$DBGFS/target_ids"
+orig_content=$(cat "$file")
+
+test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input"
+test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input"
+test_content "$file" "$orig_content" "1 2" "non-integer was there"
+test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input"
+test_content "$file" "$orig_content" "" "wrong input written"
+test_write_succ "$file" "" "$orig_content" "empty input"
+test_content "$file" "$orig_content" "" "empty input written"
+echo "$orig_content" > "$file"
diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c
new file mode 100644
index 000000000000..ad7a6b4cf338
--- /dev/null
+++ b/tools/testing/selftests/damon/huge_count_read_write.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+
+void write_read_with_huge_count(char *file)
+{
+ int filedesc = open(file, O_RDWR);
+ char buf[25];
+ int ret;
+
+ printf("%s %s\n", __func__, file);
+ if (filedesc < 0) {
+ fprintf(stderr, "failed opening %s\n", file);
+ exit(1);
+ }
+
+ write(filedesc, "", 0xfffffffful);
+ perror("after write: ");
+ ret = read(filedesc, buf, 0xfffffffful);
+ perror("after read: ");
+ close(filedesc);
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <file>\n", argv[0]);
+ exit(1);
+ }
+ write_read_with_huge_count(argv[1]);
+
+ return 0;
+}
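The helper above is driven by debugfs_huge_count_read_write.sh, which runs it once for every file under the DAMON debugfs directory (typically /sys/kernel/debug/damon, resolved via _chk_dependency.sh), e.g. ./huge_count_read_write /sys/kernel/debug/damon/attrs as a hypothetical invocation. The deliberately oversized count (0xffffffff) exercises the kernel-side bounds handling of the debugfs read/write handlers, and the wrapper script then checks dmesg for WARNING splats.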
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
index b513f64d9092..026a126f584d 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh
@@ -72,6 +72,35 @@ rif_mac_profile_replacement_test()
ip link set $h1.10 address $h1_10_mac
}
+rif_mac_profile_consolidation_test()
+{
+ local count=$1; shift
+ local h1_20_mac
+
+ RET=0
+
+ if [[ $count -eq 1 ]]; then
+ return
+ fi
+
+ h1_20_mac=$(mac_get $h1.20)
+
+ # Set the MAC of $h1.20 to that of $h1.10 and confirm that they are
+ # using the same MAC profile.
+ ip link set $h1.20 address 00:11:11:11:11:11
+ check_err $?
+
+ occ=$(devlink -j resource show $DEVLINK_DEV \
+ | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]')
+
+ [[ $occ -eq $((count - 1)) ]]
+ check_err $? "MAC profile occupancy did not decrease"
+
+ log_test "RIF MAC profile consolidation"
+
+ ip link set $h1.20 address $h1_20_mac
+}
+
rif_mac_profile_shared_replacement_test()
{
local count=$1; shift
@@ -104,6 +133,7 @@ rif_mac_profile_edit_test()
create_max_rif_mac_profiles $count
rif_mac_profile_replacement_test
+ rif_mac_profile_consolidation_test $count
rif_mac_profile_shared_replacement_test $count
}
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 3763105029fb..3cb5ac5da087 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -30,10 +30,12 @@
/x86_64/svm_int_ctl_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
+/x86_64/userspace_io_test
/x86_64/userspace_msr_exit_test
/x86_64/vmx_apic_access_test
/x86_64/vmx_close_while_nested_test
/x86_64/vmx_dirty_log_test
+/x86_64/vmx_invalid_nested_guest_state
/x86_64/vmx_preemption_timer_test
/x86_64/vmx_set_nested_state_test
/x86_64/vmx_tsc_adjust_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c4e34717826a..17342b575e85 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -59,10 +59,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 6a1a37f30494..2d62edc49d67 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -321,6 +321,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm);
unsigned int vm_get_page_size(struct kvm_vm *vm);
unsigned int vm_get_page_shift(struct kvm_vm *vm);
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
uint64_t vm_get_max_gfn(struct kvm_vm *vm);
int vm_get_fd(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 8f2e0bb1ef96..53d2b5d04b82 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
(1ULL << (vm->va_bits - 1)) >> vm->page_shift);
/* Limit physical addresses to PA-bits. */
- vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+ vm->max_gfn = vm_compute_max_gfn(vm);
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
@@ -2328,6 +2328,11 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm)
return vm->page_shift;
}
+unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
+{
+ return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+}
+
uint64_t vm_get_max_gfn(struct kvm_vm *vm)
{
return vm->max_gfn;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 82c39db91369..eef7b34756d5 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui
return cpuid;
}
+
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+static inline unsigned x86_family(unsigned int eax)
+{
+ unsigned int x86;
+
+ x86 = (eax >> 8) & 0xf;
+
+ if (x86 == 0xf)
+ x86 += (eax >> 20) & 0xff;
+
+ return x86;
+}
+
+unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
+{
+ const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
+ unsigned long ht_gfn, max_gfn, max_pfn;
+ uint32_t eax, ebx, ecx, edx, max_ext_leaf;
+
+ max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;
+
+ /* Avoid reserved HyperTransport region on AMD processors. */
+ eax = ecx = 0;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx ||
+ ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx ||
+ edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+ return max_gfn;
+
+ /* On parts with <40 physical address bits, the area is fully hidden */
+ if (vm->pa_bits < 40)
+ return max_gfn;
+
+ /* Before family 17h, the HyperTransport area is just below 1T. */
+ ht_gfn = (1 << 28) - num_ht_pages;
+ eax = 1;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ if (x86_family(eax) < 0x17)
+ goto done;
+
+ /*
+ * Otherwise it's at the top of the physical address space, possibly
+ * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
+ * the old conservative value if MAXPHYADDR is not enumerated.
+ */
+ eax = 0x80000000;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_ext_leaf = eax;
+ if (max_ext_leaf < 0x80000008)
+ goto done;
+
+ eax = 0x80000008;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
+ if (max_ext_leaf >= 0x8000001f) {
+ eax = 0x8000001f;
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_pfn >>= (ebx >> 6) & 0x3f;
+ }
+
+ ht_gfn = max_pfn - num_ht_pages;
+done:
+ return min(max_gfn, ht_gfn - 1);
+}
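A worked example of the computation above, using typical values not taken from the patch: with 4 KiB guest pages (page_shift == 12), num_ht_pages = 12 << (30 - 12) = 3145728 frames. On a pre-Zen AMD part the HyperTransport hole sits just below 1 TiB, so ht_gfn = (1 << 28) - 3145728 = 265289728 and the VM's usable range becomes min(max_gfn, 265289727). On family 17h and later the same 12 GiB window is instead carved out of the top of the enumerated physical address space, optionally shrunk by the SME reduction bits from CPUID 0x8000001f.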
diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
index df04f56ce859..30a81038df46 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
@@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm)
vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
/* No intercepts for real and virtual interrupts */
- vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR);
+ vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR));
/* Make a virtual interrupt VINTR_IRQ_NUMBER pending */
vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT);
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
new file mode 100644
index 000000000000..e4bef2e05686
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 1
+
+static void guest_ins_port80(uint8_t *buffer, unsigned int count)
+{
+ unsigned long end;
+
+ if (count == 2)
+ end = (unsigned long)buffer + 1;
+ else
+ end = (unsigned long)buffer + 8192;
+
+ asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
+ GUEST_ASSERT_1(count == 0, count);
+ GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
+}
+
+static void guest_code(void)
+{
+ uint8_t buffer[8192];
+ int i;
+
+ /*
+ * Special case tests. main() will adjust RCX 2 => 1 and 3 => 8192 to
+ * test that KVM doesn't explode when userspace modifies the "count" on
+ * a userspace I/O exit. KVM isn't required to play nice with the I/O
+ * itself as KVM doesn't support manipulating the count, it just needs
+ * to not explode or overflow a buffer.
+ */
+ guest_ins_port80(buffer, 2);
+ guest_ins_port80(buffer, 3);
+
+ /* Verify KVM fills the buffer correctly when not stuffing RCX. */
+ memset(buffer, 0, sizeof(buffer));
+ guest_ins_port80(buffer, 8192);
+ for (i = 0; i < 8192; i++)
+ GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_regs regs;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+ int rc;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+ run = vcpu_state(vm, VCPU_ID);
+
+ memset(&regs, 0, sizeof(regs));
+
+ while (1) {
+ rc = _vcpu_run(vm, VCPU_ID);
+
+ TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ if (get_ucall(vm, VCPU_ID, &uc))
+ break;
+
+ TEST_ASSERT(run->io.port == 0x80,
+ "Expected I/O at port 0x80, got port 0x%x\n", run->io.port);
+
+ /*
+ * Modify the rep string count in RCX: 2 => 1 and 3 => 8192.
+ * Note, this abuses KVM's batching of rep string I/O to avoid
+ * getting stuck in an infinite loop. That behavior isn't in
+ * scope from a testing perspective as it's not ABI in any way,
+ * i.e. it really is abusing internal KVM knowledge.
+ */
+ vcpu_regs_get(vm, VCPU_ID, &regs);
+ if (regs.rcx == 2)
+ regs.rcx = 1;
+ if (regs.rcx == 3)
+ regs.rcx = 8192;
+ memset((void *)run + run->io.data_offset, 0xaa, 4096);
+ vcpu_regs_set(vm, VCPU_ID, &regs);
+ }
+
+ switch (uc.cmd) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx",
+ (const char *)uc.args[0], __FILE__, uc.args[1],
+ uc.args[2], uc.args[3]);
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+
+ kvm_vm_free(vm);
+ return 0;
+}
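For context on the guest side above: "rep insb" performs RCX one-byte reads from port DX into the buffer at RDI, incrementing RDI and decrementing RCX on each iteration, so on completion the guest expects RCX == 0 and RDI advanced by however many bytes were actually transferred. That is why guest_ins_port80() checks the buffer end against 1 and 8192 bytes for the first two calls: userspace rewrites RCX on the I/O exit (2 => 1, 3 => 8192), and the guest validates the post-rewrite count, an expectation that leans on KVM's current rep-string batching behaviour as the in-code comment notes.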
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
new file mode 100644
index 000000000000..489fbed4ca6f
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "kselftest.h"
+
+#define VCPU_ID 0
+#define ARBITRARY_IO_PORT 0x2000
+
+static struct kvm_vm *vm;
+
+static void l2_guest_code(void)
+{
+ /*
+ * Generate an exit to L0 userspace, i.e. main(), via I/O to an
+ * arbitrary port.
+ */
+ asm volatile("inb %%dx, %%al"
+ : : [port] "d" (ARBITRARY_IO_PORT) : "rax");
+}
+
+static void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+
+ /* Prepare the VMCS for L2 execution. */
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ /*
+ * L2 must be run without unrestricted guest, verify that the selftests
+ * library hasn't enabled it. Because KVM selftests jump directly to
+ * 64-bit mode, unrestricted guest support isn't required.
+ */
+ GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) ||
+ !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST));
+
+ GUEST_ASSERT(!vmlaunch());
+
+ /* L2 should triple fault after main() stuffs invalid guest state. */
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t vmx_pages_gva;
+ struct kvm_sregs sregs;
+ struct kvm_run *run;
+ struct ucall uc;
+
+ nested_vmx_check_supported();
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+
+ /* Allocate VMX pages and shared descriptors (vmx_pages). */
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ vcpu_run(vm, VCPU_ID);
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ /*
+ * The first exit to L0 userspace should be an I/O access from L2.
+ * Running L1 should launch L2 without triggering an exit to userspace.
+ */
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Expected KVM_EXIT_IO, got: %u (%s)\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT,
+ "Expected IN from port %d from L2, got port %d",
+ ARBITRARY_IO_PORT, run->io.port);
+
+ /*
+ * Stuff invalid guest state for L2 by making TR unusable. The next
+ * KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support
+ * emulating invalid guest state for L2.
+ */
+ memset(&sregs, 0, sizeof(sregs));
+ vcpu_sregs_get(vm, VCPU_ID, &sregs);
+ sregs.tr.unusable = 1;
+ vcpu_sregs_set(vm, VCPU_ID, &sregs);
+
+ vcpu_run(vm, VCPU_ID);
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s", (const char *)uc.args[0]);
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
index 23051d84b907..2454a1f2ca0c 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c
@@ -110,22 +110,5 @@ int main(int argc, char *argv[])
ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT);
TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
- /* testcase 4, set capabilities when we don't have PDCM bit */
- entry_1_0->ecx &= ~X86_FEATURE_PDCM;
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
- ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
- /* testcase 5, set capabilities when we don't have PMU version bits */
- entry_1_0->ecx |= X86_FEATURE_PDCM;
- eax.split.version_id = 0;
- entry_1_0->ecx = eax.full;
- vcpu_set_cpuid(vm, VCPU_ID, cpuid);
- ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES);
- TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail.");
-
- vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0);
- ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0);
-
kvm_vm_free(vm);
}
diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
index 75528788cb95..75528788cb95 100644..100755
--- a/tools/testing/selftests/net/amt.sh
+++ b/tools/testing/selftests/net/amt.sh
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 7f5b265fcb90..ad2982b72e02 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -455,6 +455,22 @@ cleanup()
ip netns del ${NSC} >/dev/null 2>&1
}
+cleanup_vrf_dup()
+{
+ ip link del ${NSA_DEV2} >/dev/null 2>&1
+ ip netns pids ${NSC} | xargs kill 2>/dev/null
+ ip netns del ${NSC} >/dev/null 2>&1
+}
+
+setup_vrf_dup()
+{
+ # some VRF tests use ns-C which has the same config as
+ # ns-B but for a device NOT in the VRF
+ create_ns ${NSC} "-" "-"
+ connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
+ ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
+}
+
setup()
{
local with_vrf=${1}
@@ -484,12 +500,6 @@ setup()
ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV}
ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV}
-
- # some VRF tests use ns-C which has the same config as
- # ns-B but for a device NOT in the VRF
- create_ns ${NSC} "-" "-"
- connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \
- ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64
else
ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV}
ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV}
@@ -1240,7 +1250,9 @@ ipv4_tcp_vrf()
log_test_addr ${a} $? 1 "Global server, local connection"
# run MD5 tests
+ setup_vrf_dup
ipv4_tcp_md5
+ cleanup_vrf_dup
#
# enable VRF global server
@@ -1798,8 +1810,9 @@ ipv4_addr_bind_vrf()
for a in ${NSA_IP} ${VRF_IP}
do
log_start
+ show_hint "Socket not bound to VRF, but address is in VRF"
run_cmd nettest -s -R -P icmp -l ${a} -b
- log_test_addr ${a} $? 0 "Raw socket bind to local address"
+ log_test_addr ${a} $? 1 "Raw socket bind to local address"
log_start
run_cmd nettest -s -R -P icmp -l ${a} -I ${NSA_DEV} -b
@@ -2191,7 +2204,7 @@ ipv6_ping_vrf()
log_start
show_hint "Fails since VRF device does not support linklocal or multicast"
run_cmd ${ping6} -c1 -w1 ${a}
- log_test_addr ${a} $? 2 "ping out, VRF bind"
+ log_test_addr ${a} $? 1 "ping out, VRF bind"
done
for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV}
@@ -2719,7 +2732,9 @@ ipv6_tcp_vrf()
log_test_addr ${a} $? 1 "Global server, local connection"
# run MD5 tests
+ setup_vrf_dup
ipv6_tcp_md5
+ cleanup_vrf_dup
#
# enable VRF global server
@@ -3414,11 +3429,14 @@ ipv6_addr_bind_novrf()
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind"
+ # Sadly, the kernel allows binding a socket to a device and then
+ # binding to an address not on the device. So this test passes
+ # when it really should not
a=${NSA_LO_IP6}
log_start
- show_hint "Should fail with 'Cannot assign requested address'"
+ show_hint "Tecnically should fail since address is not on device but kernel allows"
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
- log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address"
+ log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address"
}
ipv6_addr_bind_vrf()
@@ -3459,10 +3477,15 @@ ipv6_addr_bind_vrf()
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind"
+ # Sadly, the kernel allows binding a socket to a device and then
+ # binding to an address not on the device. The only restriction
+ # is that the address is valid in the L3 domain. So this test
+ # passes when it really should not
a=${VRF_IP6}
log_start
+ show_hint "Tecnically should fail since address is not on device but kernel allows"
run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
- log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind"
+ log_test_addr ${a} $? 0 "TCP socket bind to VRF address with device bind"
a=${NSA_LO_IP6}
log_start
@@ -4077,3 +4100,11 @@ cleanup 2>/dev/null
printf "\nTests passed: %3d\n" ${nsuccess}
printf "Tests failed: %3d\n" ${nfail}
+
+if [ $nfail -ne 0 ]; then
+ exit 1 # KSFT_FAIL
+elif [ $nsuccess -eq 0 ]; then
+ exit $ksft_skip
+fi
+
+exit 0 # KSFT_PASS
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 5abe92d55b69..996af1ae3d3d 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -444,24 +444,63 @@ fib_rp_filter_test()
setup
set -e
+ ip netns add ns2
+ ip netns set ns2 auto
+
+ ip -netns ns2 link set dev lo up
+
+ $IP link add name veth1 type veth peer name veth2
+ $IP link set dev veth2 netns ns2
+ $IP address add 192.0.2.1/24 dev veth1
+ ip -netns ns2 address add 192.0.2.1/24 dev veth2
+ $IP link set dev veth1 up
+ ip -netns ns2 link set dev veth2 up
+
$IP link set dev lo address 52:54:00:6a:c7:5e
- $IP link set dummy0 address 52:54:00:6a:c7:5e
- $IP link add dummy1 type dummy
- $IP link set dummy1 address 52:54:00:6a:c7:5e
- $IP link set dev dummy1 up
+ $IP link set dev veth1 address 52:54:00:6a:c7:5e
+ ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e
+ ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e
+
+ # 1. (ns2) redirect lo's egress to veth2's egress
+ ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel
+ ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \
+ action mirred egress redirect dev veth2
+ ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \
+ action mirred egress redirect dev veth2
+
+ # 2. (ns1) redirect veth1's ingress to lo's ingress
+ $NS_EXEC tc qdisc add dev veth1 ingress
+ $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \
+ action mirred ingress redirect dev lo
+ $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \
+ action mirred ingress redirect dev lo
+
+ # 3. (ns1) redirect lo's egress to veth1's egress
+ $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel
+ $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \
+ action mirred egress redirect dev veth1
+ $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \
+ action mirred egress redirect dev veth1
+
+ # 4. (ns2) redirect veth2's ingress to lo's ingress
+ ip netns exec ns2 tc qdisc add dev veth2 ingress
+ ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \
+ action mirred ingress redirect dev lo
+ ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \
+ action mirred ingress redirect dev lo
+
$NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1
$NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1
$NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1
-
- $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel
- $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo
- $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1
+ ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1
set +e
- run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1"
+ run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1"
log_test $? 0 "rp_filter passes local packets"
- run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1"
+ run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1"
log_test $? 0 "rp_filter passes loopback packets"
cleanup
diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
index bf17e485684f..b0980a2efa31 100644
--- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
+++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
@@ -13,6 +13,8 @@ NETIFS[p5]=veth4
NETIFS[p6]=veth5
NETIFS[p7]=veth6
NETIFS[p8]=veth7
+NETIFS[p9]=veth8
+NETIFS[p10]=veth9
# Port that does not have a cable connected.
NETIF_NO_CABLE=eth8
diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
index ecbf57f264ed..7b9d6e31b8e7 100755
--- a/tools/testing/selftests/net/icmp_redirect.sh
+++ b/tools/testing/selftests/net/icmp_redirect.sh
@@ -311,7 +311,7 @@ check_exception()
ip -netns h1 ro get ${H1_VRF_ARG} ${H2_N2_IP} | \
grep -E -v 'mtu|redirected' | grep -q "cache"
fi
- log_test $? 0 "IPv4: ${desc}"
+ log_test $? 0 "IPv4: ${desc}" 0
# No PMTU info for test "redirect" and "mtu exception plus redirect"
if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 0faaccd21447..2b82628decb1 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -9,7 +9,6 @@ CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NETFILTER_NETLINK=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_COUNTER=m
CONFIG_NFT_COMPAT=m
CONFIG_NETFILTER_XTABLES=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 8a22db0cca49..6e468e0f42f7 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -31,6 +31,8 @@ struct tls_crypto_info_keys {
struct tls12_crypto_info_chacha20_poly1305 chacha20;
struct tls12_crypto_info_sm4_gcm sm4gcm;
struct tls12_crypto_info_sm4_ccm sm4ccm;
+ struct tls12_crypto_info_aes_ccm_128 aesccm128;
+ struct tls12_crypto_info_aes_gcm_256 aesgcm256;
};
size_t len;
};
@@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type,
tls12->sm4ccm.info.version = tls_version;
tls12->sm4ccm.info.cipher_type = cipher_type;
break;
+ case TLS_CIPHER_AES_CCM_128:
+ tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128);
+ tls12->aesccm128.info.version = tls_version;
+ tls12->aesccm128.info.cipher_type = cipher_type;
+ break;
+ case TLS_CIPHER_AES_GCM_256:
+ tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256);
+ tls12->aesgcm256.info.version = tls_version;
+ tls12->aesgcm256.info.cipher_type = cipher_type;
+ break;
default:
break;
}
@@ -261,6 +273,30 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
.cipher_type = TLS_CIPHER_SM4_CCM,
};
+FIXTURE_VARIANT_ADD(tls, 12_aes_ccm)
+{
+ .tls_version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_ccm)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_AES_CCM_128,
+};
+
+FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256)
+{
+ .tls_version = TLS_1_2_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
+FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256)
+{
+ .tls_version = TLS_1_3_VERSION,
+ .cipher_type = TLS_CIPHER_AES_GCM_256,
+};
+
FIXTURE_SETUP(tls)
{
struct tls_crypto_info_keys tls12;
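The two new cipher variants plug into the same kernel TLS setsockopt path the fixture already exercises for the other ciphers. As a standalone sketch of how the AES-GCM-256 transmit direction would be configured outside the harness (assumed flow; error handling and real key material omitted):

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <linux/tls.h>

	#ifndef SOL_TLS
	#define SOL_TLS 282
	#endif

	static int enable_tls_tx_aes_gcm_256(int sock)
	{
		struct tls12_crypto_info_aes_gcm_256 crypto_info;

		memset(&crypto_info, 0, sizeof(crypto_info));
		crypto_info.info.version = TLS_1_3_VERSION;
		crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_256;
		/* key, iv, salt and rec_seq left zeroed for this sketch */

		if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
			return -1;
		return setsockopt(sock, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
	}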
diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c
index 710ac956bdb3..c5489341cfb8 100644
--- a/tools/testing/selftests/net/toeplitz.c
+++ b/tools/testing/selftests/net/toeplitz.c
@@ -498,7 +498,7 @@ static void parse_opts(int argc, char **argv)
bool have_toeplitz = false;
int index, c;
- while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:u:v", long_options, &index)) != -1) {
+ while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) {
switch (c) {
case '4':
cfg_family = AF_INET;
diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh
index 7f26591f236b..6f05e06f6761 100755
--- a/tools/testing/selftests/net/udpgro_fwd.sh
+++ b/tools/testing/selftests/net/udpgro_fwd.sh
@@ -132,7 +132,7 @@ run_test() {
local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \
sed -e 's/\[//' -e 's/:.*//'`
if [ $rcv != $pkts ]; then
- echo " fail - received $rvs packets, expected $pkts"
+ echo " fail - received $rcv packets, expected $pkts"
ret=1
return
fi
@@ -185,6 +185,7 @@ for family in 4 6; do
IPT=iptables
SUFFIX=24
VXDEV=vxlan
+ PING=ping
if [ $family = 6 ]; then
BM_NET=$BM_NET_V6
@@ -192,6 +193,8 @@ for family in 4 6; do
SUFFIX="64 nodad"
VXDEV=vxlan6
IPT=ip6tables
+ # Use ping6 on systems where ping doesn't handle IPv6
+ ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6"
fi
echo "IPv$family"
@@ -237,7 +240,7 @@ for family in 4 6; do
# load arp cache before running the test to reduce the amount of
# stray traffic on top of the UDP tunnel
- ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null
+ ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST
cleanup
diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c
index c66da6ffd6d8..7badaf215de2 100644
--- a/tools/testing/selftests/net/udpgso.c
+++ b/tools/testing/selftests/net/udpgso.c
@@ -156,13 +156,13 @@ struct testcase testcases_v4[] = {
},
{
/* send max number of min sized segments */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+ .tlen = UDP_MAX_SEGMENTS,
.gso_len = 1,
- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4,
+ .r_num_mss = UDP_MAX_SEGMENTS,
},
{
/* send max number + 1 of min sized segments: fail */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1,
+ .tlen = UDP_MAX_SEGMENTS + 1,
.gso_len = 1,
.tfail = true,
},
@@ -259,13 +259,13 @@ struct testcase testcases_v6[] = {
},
{
/* send max number of min sized segments */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+ .tlen = UDP_MAX_SEGMENTS,
.gso_len = 1,
- .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6,
+ .r_num_mss = UDP_MAX_SEGMENTS,
},
{
/* send max number + 1 of min sized segments: fail */
- .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1,
+ .tlen = UDP_MAX_SEGMENTS + 1,
.gso_len = 1,
.tfail = true,
},
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index 17512a43885e..f1fdaa270291 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -419,6 +419,7 @@ static void usage(const char *filepath)
static void parse_opts(int argc, char **argv)
{
+ const char *bind_addr = NULL;
int max_len, hdrlen;
int c;
@@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv)
cfg_cpu = strtol(optarg, NULL, 0);
break;
case 'D':
- setup_sockaddr(cfg_family, optarg, &cfg_dst_addr);
+ bind_addr = optarg;
break;
case 'l':
cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000;
@@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv)
}
}
+ if (!bind_addr)
+ bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0";
+
+ setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr);
+
if (optind != argc)
usage(argv[0]);
diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh
index 91f3ef0f1192..8b5ea9234588 100755
--- a/tools/testing/selftests/netfilter/conntrack_vrf.sh
+++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh
@@ -150,11 +150,27 @@ EOF
# oifname is the vrf device.
test_masquerade_vrf()
{
+ local qdisc=$1
+
+ if [ "$qdisc" != "default" ]; then
+ tc -net $ns0 qdisc add dev tvrf root $qdisc
+ fi
+
ip netns exec $ns0 conntrack -F 2>/dev/null
ip netns exec $ns0 nft -f - <<EOF
flush ruleset
table ip nat {
+ chain rawout {
+ type filter hook output priority raw;
+
+ oif tvrf ct state untracked counter
+ }
+ chain postrouting2 {
+ type filter hook postrouting priority mangle;
+
+ oif tvrf ct state untracked counter
+ }
chain postrouting {
type nat hook postrouting priority 0;
# NB: masquerade should always be combined with 'oif(name) bla',
@@ -171,13 +187,18 @@ EOF
fi
# must also check that nat table was evaluated on second (lower device) iteration.
- ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' &&
+ ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]'
if [ $? -eq 0 ]; then
- echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device"
+ echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)"
else
- echo "FAIL: vrf masq rule has unexpected counter value"
+ echo "FAIL: vrf rules have unexpected counter value"
ret=1
fi
+
+ if [ "$qdisc" != "default" ]; then
+ tc -net $ns0 qdisc del dev tvrf root
+ fi
}
# add masq rule that gets evaluated w. outif set to veth device.
@@ -213,7 +234,8 @@ EOF
}
test_ct_zone_in
-test_masquerade_vrf
+test_masquerade_vrf "default"
+test_masquerade_vrf "pfifo"
test_masquerade_veth
exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index 5a4938d6dcf2..ed61f6cab60f 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout"
# Set types, defined by TYPE_ variables below
TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
- net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
- net_port_mac_proto_net"
+ net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp
+ net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
BUGS="flush_remove_add"
@@ -277,6 +277,23 @@ perf_entries 1000
perf_proto ipv4
"
+TYPE_mac_net="
+display mac,net
+type_spec ether_addr . ipv4_addr
+chain_spec ether saddr . ip saddr
+dst
+src mac addr4
+start 1
+count 5
+src_delta 2000
+tools sendip nc bash
+proto udp
+
+race_repeat 0
+
+perf_duration 0
+"
+
TYPE_net_mac_icmp="
display net,mac - ICMP
type_spec ipv4_addr . ether_addr
@@ -984,7 +1001,8 @@ format() {
fi
done
for f in ${src}; do
- __expr="${__expr} . "
+ [ "${__expr}" != "{ " ] && __expr="${__expr} . "
+
__start="$(eval format_"${f}" "${srcstart}")"
__end="$(eval format_"${f}" "${srcend}")"
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
index ac646376eb01..04633119b29a 100755
--- a/tools/testing/selftests/netfilter/nft_zones_many.sh
+++ b/tools/testing/selftests/netfilter/nft_zones_many.sh
@@ -18,11 +18,17 @@ cleanup()
ip netns del $ns
}
-ip netns add $ns
-if [ $? -ne 0 ];then
- echo "SKIP: Could not create net namespace $gw"
- exit $ksft_skip
-fi
+checktool (){
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "socat -V" "run test without socat tool"
+checktool "ip netns add $ns" "create net namespace"
trap cleanup EXIT
@@ -71,7 +77,8 @@ EOF
local start=$(date +%s%3N)
i=$((i + 10000))
j=$((j + 1))
- dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+ # nft rule in output places each packet in a different zone.
+ dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
if [ $? -ne 0 ] ;then
ret=1
break
diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config
index b71828df5a6d..a3239d5e40c7 100644
--- a/tools/testing/selftests/tc-testing/config
+++ b/tools/testing/selftests/tc-testing/config
@@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m
CONFIG_NET_SCH_FIFO=y
CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_FQ_PIE=m
+CONFIG_NETDEVSIM=m
#
## Network testing
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index a3e43189d940..ee22e3447ec7 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining):
list_test_cases(alltests)
exit(0)
+ exit_code = 0 # KSFT_PASS
if len(alltests):
req_plugins = pm.get_required_plugins(alltests)
try:
@@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining):
print('The following plugins were not found:')
print('{}'.format(pde.missing_pg))
catresults = test_runner(pm, args, alltests)
+ if catresults.count_failures() != 0:
+ exit_code = 1 # KSFT_FAIL
if args.format == 'none':
print('Test results output suppression requested\n')
else:
@@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining):
gid=int(os.getenv('SUDO_GID')))
else:
print('No tests found\n')
+ exit_code = 4 # KSFT_SKIP
+ exit(exit_code)
def main():
"""
@@ -767,8 +772,5 @@ def main():
set_operation_mode(pm, parser, args, remaining)
- exit(0)
-
-
if __name__ == "__main__":
main()
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
index 7fe38c76db44..afb0cd86fa3d 100755
--- a/tools/testing/selftests/tc-testing/tdc.sh
+++ b/tools/testing/selftests/tc-testing/tdc.sh
@@ -1,5 +1,6 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+modprobe netdevsim
./tdc.py -c actions --nobuildebpf
./tdc.py -c qdisc
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 8a09057d2f22..9354a5e0321c 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -87,7 +87,7 @@ static bool test_uffdio_minor = false;
static bool map_shared;
static int shm_fd;
-static int huge_fd;
+static int huge_fd = -1; /* only used for hugetlb_shared test */
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd = -1;
@@ -223,6 +223,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
static void hugetlb_release_pages(char *rel_area)
{
+ if (huge_fd == -1)
+ return;
+
if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
nr_pages * page_size))
@@ -235,16 +238,17 @@ static void hugetlb_allocate_area(void **alloc_area)
char **alloc_area_alias;
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- (map_shared ? MAP_SHARED : MAP_PRIVATE) |
- MAP_HUGETLB,
- huge_fd, *alloc_area == area_src ? 0 :
- nr_pages * page_size);
+ map_shared ? MAP_SHARED :
+ MAP_PRIVATE | MAP_HUGETLB |
+ (*alloc_area == area_src ? 0 : MAP_NORESERVE),
+ huge_fd,
+ *alloc_area == area_src ? 0 : nr_pages * page_size);
if (*alloc_area == MAP_FAILED)
err("mmap of hugetlbfs file failed");
if (map_shared) {
area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_HUGETLB,
+ MAP_SHARED,
huge_fd, *alloc_area == area_src ? 0 :
nr_pages * page_size);
if (area_alias == MAP_FAILED)