-rw-r--r--.mailmap6
-rw-r--r--CREDITS6
-rw-r--r--Documentation/ABI/testing/sysfs-class-power1
-rw-r--r--Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing6
-rw-r--r--Documentation/admin-guide/mm/pagemap.rst2
-rw-r--r--Documentation/arm64/silicon-errata.rst2
-rw-r--r--Documentation/core-api/dma-attributes.rst8
-rw-r--r--Documentation/cpu-freq/cpu-drivers.rst3
-rw-r--r--Documentation/dev-tools/kselftest.rst8
-rw-r--r--Documentation/dev-tools/kunit/usage.rst2
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.yaml3
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/qoriq-clock.txt1
-rw-r--r--Documentation/devicetree/bindings/gpio/sifive,gpio.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml21
-rw-r--r--Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipa.yaml6
-rw-r--r--Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml2
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-sifive.yaml1
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml5
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml2
-rw-r--r--Documentation/filesystems/netfs_library.rst16
-rw-r--r--Documentation/gpu/todo.rst24
-rw-r--r--Documentation/tools/rtla/common_hist_options.rst2
-rw-r--r--Documentation/tools/rtla/common_osnoise_description.rst2
-rw-r--r--Documentation/tools/rtla/rtla-osnoise-hist.rst2
-rw-r--r--Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst3
-rw-r--r--Documentation/virt/kvm/api.rst16
-rw-r--r--MAINTAINERS144
-rw-r--r--Makefile4
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am335x-wega.dtsi2
-rw-r--r--arch/arm/boot/dts/dra7.dtsi20
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts1
-rw-r--r--arch/arm/boot/dts/imx6qdl-udoo.dtsi5
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi2
-rw-r--r--arch/arm/boot/dts/meson.dtsi8
-rw-r--r--arch/arm/boot/dts/meson8.dtsi24
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi24
-rw-r--r--arch/arm/boot/dts/omap3-beagle-ab4.dts47
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts33
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000-common.dtsi18
-rw-r--r--arch/arm/boot/dts/omap3-devkit8000.dts33
-rw-r--r--arch/arm/boot/dts/rk322x.dtsi4
-rw-r--r--arch/arm/boot/dts/rk3288.dtsi2
-rw-r--r--arch/arm/boot/dts/spear320-hmi.dts1
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-skomer.dts4
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-big.dts15
-rw-r--r--arch/arm/boot/dts/tegra124-nyan-blaze.dts15
-rw-r--r--arch/arm/boot/dts/tegra124-venice2.dts14
-rw-r--r--arch/arm/crypto/blake2s-shash.c4
-rw-r--r--arch/arm/kernel/kgdb.c36
-rw-r--r--arch/arm/mach-omap2/display.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c4
-rw-r--r--arch/arm/mach-socfpga/Kconfig2
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm64/Kconfig17
-rw-r--r--arch/arm64/Kconfig.platforms3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts8
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts8
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/imx8ulp.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/mba8mx.dtsi2
-rw-r--r--arch/arm64/boot/dts/intel/socfpga_agilex.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/px30.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi17
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi20
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3568.dtsi6
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi4
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts14
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2.dtsi22
-rw-r--r--arch/arm64/include/asm/el2_setup.h2
-rw-r--r--arch/arm64/kernel/cpu_errata.c8
-rw-r--r--arch/arm64/kvm/arm.c51
-rw-r--r--arch/arm64/kvm/handle_exit.c8
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h23
-rw-r--r--arch/arm64/kvm/psci.c3
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio.c2
-rw-r--r--arch/arm64/tools/cpucaps5
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts15
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S2
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp.c6
-rw-r--r--arch/mips/kvm/mips.c50
-rw-r--r--arch/mips/kvm/vz.c12
-rw-r--r--arch/mips/ralink/mt7621.c36
-rw-r--r--arch/parisc/include/asm/bitops.h8
-rw-r--r--arch/parisc/include/asm/uaccess.h29
-rw-r--r--arch/parisc/kernel/unaligned.c14
-rw-r--r--arch/parisc/lib/iomap.c18
-rw-r--r--arch/parisc/mm/init.c9
-rw-r--r--arch/riscv/Makefile6
-rw-r--r--arch/riscv/boot/dts/canaan/k210.dtsi3
-rw-r--r--arch/riscv/configs/nommu_k210_sdcard_defconfig2
-rw-r--r--arch/riscv/include/asm/page.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h1
-rw-r--r--arch/riscv/kernel/Makefile2
-rw-r--r--arch/riscv/kernel/cpu-hotplug.c2
-rw-r--r--arch/riscv/kernel/entry.S10
-rw-r--r--arch/riscv/kernel/head.S11
-rw-r--r--arch/riscv/kernel/sbi.c72
-rw-r--r--arch/riscv/kernel/stacktrace.c9
-rw-r--r--arch/riscv/kernel/trace_irq.c27
-rw-r--r--arch/riscv/kernel/trace_irq.h11
-rw-r--r--arch/riscv/kvm/vcpu.c48
-rw-r--r--arch/riscv/kvm/vcpu_sbi_base.c3
-rw-r--r--arch/riscv/mm/Makefile3
-rw-r--r--arch/riscv/mm/extable.c6
-rw-r--r--arch/riscv/mm/init.c4
-rw-r--r--arch/riscv/mm/kasan_init.c8
-rw-r--r--arch/riscv/mm/physaddr.c4
-rw-r--r--arch/s390/include/asm/extable.h9
-rw-r--r--arch/s390/include/asm/ftrace.h10
-rw-r--r--arch/s390/include/asm/ptrace.h2
-rw-r--r--arch/s390/kernel/ftrace.c37
-rw-r--r--arch/s390/kernel/mcount.S9
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c2
-rw-r--r--arch/s390/lib/test_modules.c3
-rw-r--r--arch/s390/lib/test_modules.h3
-rw-r--r--arch/x86/crypto/blake2s-shash.c4
-rw-r--r--arch/x86/events/intel/core.c13
-rw-r--r--arch/x86/events/intel/pt.c5
-rw-r--r--arch/x86/include/asm/bug.h20
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/svm.h36
-rw-r--r--arch/x86/include/asm/xen/cpuid.h7
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h14
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c2
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c10
-rw-r--r--arch/x86/kernel/fpu/regset.c9
-rw-r--r--arch/x86/kernel/fpu/xstate.c5
-rw-r--r--arch/x86/kernel/kvm.c9
-rw-r--r--arch/x86/kernel/ptrace.c4
-rw-r--r--arch/x86/kernel/resource.c23
-rw-r--r--arch/x86/kvm/cpuid.c18
-rw-r--r--arch/x86/kvm/lapic.c17
-rw-r--r--arch/x86/kvm/mmu/mmu.c13
-rw-r--r--arch/x86/kvm/pmu.c7
-rw-r--r--arch/x86/kvm/svm/avic.c93
-rw-r--r--arch/x86/kvm/svm/nested.c26
-rw-r--r--arch/x86/kvm/svm/svm.c111
-rw-r--r--arch/x86/kvm/svm/svm.h15
-rw-r--r--arch/x86/kvm/vmx/nested.c11
-rw-r--r--arch/x86/kvm/vmx/vmx.c50
-rw-r--r--arch/x86/kvm/vmx/vmx.h5
-rw-r--r--arch/x86/kvm/x86.c44
-rw-r--r--arch/x86/kvm/x86.h45
-rw-r--r--arch/x86/kvm/xen.c97
-rw-r--r--arch/x86/xen/enlighten_hvm.c22
-rw-r--r--arch/x86/xen/enlighten_pv.c4
-rw-r--r--arch/x86/xen/smp_pv.c26
-rw-r--r--arch/x86/xen/vga.c16
-rw-r--r--block/bfq-iosched.c2
-rw-r--r--block/bio-integrity.c2
-rw-r--r--block/blk-core.c10
-rw-r--r--block/blk-map.c2
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/elevator.c2
-rw-r--r--block/fops.c35
-rw-r--r--block/genhd.c14
-rw-r--r--crypto/af_alg.c3
-rw-r--r--crypto/algapi.c1
-rw-r--r--crypto/api.c1
-rw-r--r--crypto/blake2s_generic.c4
-rw-r--r--drivers/accessibility/speakup/speakup_dectlk.c1
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/arm64/iort.c14
-rw-r--r--drivers/acpi/ec.c10
-rw-r--r--drivers/acpi/processor_idle.c5
-rw-r--r--drivers/acpi/sleep.c15
-rw-r--r--drivers/acpi/tables.c2
-rw-r--r--drivers/acpi/x86/s2idle.c12
-rw-r--r--drivers/ata/libata-core.c25
-rw-r--r--drivers/ata/pata_hpt37x.c18
-rw-r--r--drivers/ata/sata_fsl.c16
-rw-r--r--drivers/atm/firestream.c2
-rw-r--r--drivers/auxdisplay/lcd2s.c24
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/power/wakeup.c41
-rw-r--r--drivers/base/regmap/regmap-irq.c20
-rw-r--r--drivers/block/loop.c73
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/bus/mhi/pci_generic.c2
-rw-r--r--drivers/char/random.c39
-rw-r--r--drivers/clk/ingenic/jz4725b-cgu.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c106
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c3
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c11
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c3
-rw-r--r--drivers/dma-buf/dma-heap.c2
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/ptdma/ptdma-dev.c17
-rw-r--r--drivers/dma/sh/rcar-dmac.c9
-rw-r--r--drivers/dma/sh/shdma-base.c4
-rw-r--r--drivers/dma/stm32-dmamux.c4
-rw-r--r--drivers/edac/altera_edac.c2
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/xgene_edac.c2
-rw-r--r--drivers/firmware/arm_scmi/driver.c2
-rw-r--r--drivers/firmware/efi/libstub/riscv-stub.c17
-rw-r--r--drivers/firmware/efi/vars.c5
-rw-r--r--drivers/gpio/gpio-aggregator.c18
-rw-r--r--drivers/gpio/gpio-rockchip.c56
-rw-r--r--drivers/gpio/gpio-sifive.c2
-rw-r--r--drivers/gpio/gpio-sim.c19
-rw-r--r--drivers/gpio/gpio-tegra186.c14
-rw-r--r--drivers/gpio/gpiolib-cdev.c6
-rw-r--r--drivers/gpio/gpiolib-sysfs.c7
-rw-r--r--drivers/gpio/gpiolib.c10
-rw-r--r--drivers/gpio/gpiolib.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c64
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c9
-rw-r--r--drivers/gpu/drm/arm/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c12
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c5
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c14
-rw-r--r--drivers/gpu/drm/drm_connector.c3
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c1
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c14
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c114
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_mm.h1
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pch.h2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c169
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h1
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c4
-rw-r--r--drivers/gpu/drm/imx/dcss/Kconfig1
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c167
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c8
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c7
-rw-r--r--drivers/gpu/drm/tegra/falcon.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c37
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h4
-rw-r--r--drivers/gpu/host1x/syncpt.c35
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c76
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h4
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c4
-rw-r--r--drivers/hid/hid-apple.c16
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-elo.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c28
-rw-r--r--drivers/hv/hv_utils_transport.c2
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/hwmon/hwmon.c14
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c5
-rw-r--r--drivers/i2c/busses/Kconfig6
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c11
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c16
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c5
-rw-r--r--drivers/iio/accel/fxls8962af-core.c12
-rw-r--r--drivers/iio/accel/fxls8962af-i2c.c2
-rw-r--r--drivers/iio/accel/fxls8962af-spi.c2
-rw-r--r--drivers/iio/accel/fxls8962af.h3
-rw-r--r--drivers/iio/accel/kxcjk-1013.c5
-rw-r--r--drivers/iio/accel/mma9551.c5
-rw-r--r--drivers/iio/accel/mma9553.c5
-rw-r--r--drivers/iio/adc/ad7124.c2
-rw-r--r--drivers/iio/adc/men_z188_adc.c9
-rw-r--r--drivers/iio/adc/ti-tsc2046.c4
-rw-r--r--drivers/iio/addac/ad74413r.c17
-rw-r--r--drivers/iio/frequency/admv1013.c2
-rw-r--r--drivers/iio/gyro/bmg160_core.c5
-rw-r--r--drivers/iio/imu/adis16480.c7
-rw-r--r--drivers/iio/imu/kmx61.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c6
-rw-r--r--drivers/iio/industrialio-buffer.c14
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c5
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cma.c62
-rw-r--r--drivers/infiniband/core/ucma.c34
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c27
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c38
-rw-r--r--drivers/infiniband/hw/mlx4/main.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/siw/siw.h7
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c20
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c39
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c6
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c64
-rw-r--r--drivers/input/mouse/psmouse-smbus.c10
-rw-r--r--drivers/input/touchscreen/goodix.c34
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c12
-rw-r--r--drivers/input/touchscreen/zinitix.c12
-rw-r--r--drivers/iommu/amd/amd_iommu.h1
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd/init.c12
-rw-r--r--drivers/iommu/amd/io_pgtable.c12
-rw-r--r--drivers/iommu/amd/iommu.c10
-rw-r--r--drivers/iommu/intel/iommu.c2
-rw-r--r--drivers/iommu/intel/irq_remapping.c13
-rw-r--r--drivers/iommu/ioasid.c1
-rw-r--r--drivers/iommu/iommu.c33
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c3
-rw-r--r--drivers/irqchip/irq-sifive-plic.c1
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/misc/eeprom/ee1004.c3
-rw-r--r--drivers/misc/fastrpc.c9
-rw-r--r--drivers/mmc/core/block.c28
-rw-r--r--drivers/mmc/core/sd.c8
-rw-r--r--drivers/mmc/host/moxart-mmc.c2
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sh_mmcif.c3
-rw-r--r--drivers/mtd/devices/phram.c12
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/nand/raw/Kconfig3
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c3
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c7
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c14
-rw-r--r--drivers/mtd/parsers/qcomsmempart.c36
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/bonding/bond_3ad.c33
-rw-r--r--drivers/net/bonding/bond_main.c5
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c6
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c9
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.h8
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/bcm_sf2.c7
-rw-r--r--drivers/net/dsa/lan9303-core.c13
-rw-r--r--drivers/net/dsa/lantiq_gswip.c14
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c26
-rw-r--r--drivers/net/dsa/mt7530.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c22
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c4
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c5
-rw-r--r--drivers/net/dsa/qca/ar9331.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c47
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c243
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c202
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c19
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c159
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c42
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c13
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c20
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c25
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c16
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c40
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/ieee802154/at86rf230.c13
-rw-r--r--drivers/net/ieee802154/ca8210.c5
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c1
-rw-r--r--drivers/net/ieee802154/mcr20a.c4
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/ipa_power.c52
-rw-r--r--drivers/net/ipa/ipa_power.h7
-rw-r--r--drivers/net/ipa/ipa_uc.c5
-rw-r--r--drivers/net/macsec.c33
-rw-r--r--drivers/net/mctp/mctp-serial.c9
-rw-r--r--drivers/net/mdio/mdio-aspeed.c1
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c6
-rw-r--r--drivers/net/netdevsim/fib.c4
-rw-r--r--drivers/net/phy/at803x.c26
-rw-r--r--drivers/net/phy/marvell.c17
-rw-r--r--drivers/net/phy/mediatek-ge.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c68
-rw-r--r--drivers/net/usb/cdc_ether.c12
-rw-r--r--drivers/net/usb/cdc_mbim.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/ipheth.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/zaurus.c12
-rw-r--r--drivers/net/veth.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c6
-rw-r--r--drivers/net/wireless/intel/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c214
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c241
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c13
-rw-r--r--drivers/net/xen-netback/xenbus.c14
-rw-r--r--drivers/net/xen-netfront.c39
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c17
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h16
-rw-r--r--drivers/ntb/msi.c6
-rw-r--r--drivers/nvme/host/core.c31
-rw-r--r--drivers/nvme/host/fabrics.h1
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/rdma.c1
-rw-r--r--drivers/nvme/host/tcp.c74
-rw-r--r--drivers/nvmem/core.c2
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/unittest.c16
-rw-r--r--drivers/parisc/ccio-dma.c3
-rw-r--r--drivers/parisc/sba_iommu.c3
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c85
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c31
-rw-r--r--drivers/pci/controller/pci-hyperv.c13
-rw-r--r--drivers/pci/controller/pci-mvebu.c3
-rw-r--r--drivers/pci/controller/vmd.c14
-rw-r--r--drivers/pci/msi/msi.c3
-rw-r--r--drivers/pci/pcie/portdrv_core.c47
-rw-r--r--drivers/pci/quirks.c14
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c3
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c38
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c35
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/phy-core-mipi-dphy.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c3
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c11
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/Kconfig1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c23
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c5
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c64
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c1
-rw-r--r--drivers/pinctrl/pinctrl-k210.c4
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c3
-rw-r--r--drivers/pinctrl/pinctrl-starfive.c2
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c90
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c15
-rw-r--r--drivers/platform/surface/Kconfig1
-rw-r--r--drivers/platform/surface/surface3_power.c13
-rw-r--r--drivers/platform/x86/amd-pmc.c57
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/intel/crystal_cove_charger.c26
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c3
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c97
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c26
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c24
-rw-r--r--drivers/platform/x86/x86-android-tablets.c105
-rw-r--r--drivers/power/supply/bq256xx_charger.c3
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/ptp/ptp_ocp.c25
-rw-r--r--drivers/regulator/core.c13
-rw-r--r--drivers/regulator/da9121-regulator.c16
-rw-r--r--drivers/regulator/max20086-regulator.c3
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c21
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c14
-rw-r--r--drivers/scsi/lpfc/lpfc.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c8
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c18
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c5
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c31
-rw-r--r--drivers/scsi/qedi/qedi_fw.c6
-rw-r--r--drivers/scsi/scsi_scan.c55
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c7
-rw-r--r--drivers/soc/fsl/guts.c14
-rw-r--r--drivers/soc/fsl/qe/qe.c4
-rw-r--r--drivers/soc/fsl/qe/qe_io.c2
-rw-r--r--drivers/soc/imx/gpcv2.c3
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c15
-rw-r--r--drivers/soc/samsung/Kconfig2
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-meson-spicc.c5
-rw-r--r--drivers/spi/spi-mt65xx.c2
-rw-r--r--drivers/spi/spi-rockchip.c13
-rw-r--r--drivers/spi/spi-stm32-qspi.c47
-rw-r--r--drivers/spi/spi-stm32.c7
-rw-r--r--drivers/spi/spi-uniphier.c18
-rw-r--r--drivers/spi/spi-zynq-qspi.c3
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c2
-rw-r--r--drivers/staging/fbtft/fbtft.h5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c20
-rw-r--r--drivers/tee/optee/core.c1
-rw-r--r--drivers/tee/optee/ffa_abi.c94
-rw-r--r--drivers/tee/optee/notif.c2
-rw-r--r--drivers/tee/optee/optee_private.h5
-rw-r--r--drivers/tee/optee/smc_abi.c60
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c4
-rw-r--r--drivers/thermal/thermal_netlink.c5
-rw-r--r--drivers/tty/n_gsm.c61
-rw-r--r--drivers/tty/n_tty.c10
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c2
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c3
-rw-r--r--drivers/tty/vt/vt_ioctl.c3
-rw-r--r--drivers/usb/common/ulpi.c10
-rw-r--r--drivers/usb/core/port.c9
-rw-r--r--drivers/usb/dwc2/core.h2
-rw-r--r--drivers/usb/dwc2/drd.c6
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c21
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c2
-rw-r--r--drivers/usb/dwc3/gadget.c15
-rw-r--r--drivers/usb/gadget/composite.c3
-rw-r--r--drivers/usb/gadget/function/f_fs.c56
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/function/rndis.c17
-rw-r--r--drivers/usb/gadget/function/rndis.h1
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c6
-rw-r--r--drivers/usb/host/xhci.c28
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h3
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/typec/tipd/core.c7
-rw-r--r--drivers/vhost/vsock.c21
-rw-r--r--drivers/video/console/Kconfig20
-rw-r--r--drivers/video/fbdev/core/bitblit.c16
-rw-r--r--drivers/video/fbdev/core/fbcon.c554
-rw-r--r--drivers/video/fbdev/core/fbcon.h72
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h9
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c37
-rw-r--r--drivers/video/fbdev/core/fbmem.c2
-rw-r--r--drivers/video/fbdev/core/tileblit.c16
-rw-r--r--drivers/video/fbdev/skeletonfb.c12
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--fs/9p/fid.c9
-rw-r--r--fs/Kconfig4
-rw-r--r--fs/Makefile2
-rw-r--r--fs/binfmt_elf.c27
-rw-r--r--fs/binfmt_misc.c6
-rw-r--r--fs/btrfs/block-group.c39
-rw-r--r--fs/btrfs/ctree.h8
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/extent_map.h8
-rw-r--r--fs/btrfs/file.c97
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/btrfs/ioctl.c268
-rw-r--r--fs/btrfs/lzo.c11
-rw-r--r--fs/btrfs/qgroup.c21
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/transaction.c36
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-checker.c15
-rw-r--r--fs/btrfs/tree-log.c23
-rw-r--r--fs/cachefiles/interface.c2
-rw-r--r--fs/cachefiles/io.c59
-rw-r--r--fs/cifs/cifs_swn.c6
-rw-r--r--fs/cifs/cifsacl.c9
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/connect.c54
-rw-r--r--fs/cifs/dfs_cache.c2
-rw-r--r--fs/cifs/file.c221
-rw-r--r--fs/cifs/fs_context.c4
-rw-r--r--fs/cifs/fscache.c126
-rw-r--r--fs/cifs/fscache.h79
-rw-r--r--fs/cifs/inode.c8
-rw-r--r--fs/cifs/sess.c17
-rw-r--r--fs/cifs/smb1ops.c4
-rw-r--r--fs/cifs/transport.c5
-rw-r--r--fs/cifs/xattr.c2
-rw-r--r--fs/configfs/dir.c14
-rw-r--r--fs/erofs/data.c8
-rw-r--r--fs/erofs/internal.h2
-rw-r--r--fs/erofs/zdata.c113
-rw-r--r--fs/erofs/zmap.c7
-rw-r--r--fs/ext4/acl.c8
-rw-r--r--fs/ext4/ext4.h31
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/fast_commit.c133
-rw-r--r--fs/ext4/hash.c2
-rw-r--r--fs/ext4/indirect.c2
-rw-r--r--fs/ext4/inline.c23
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/ioctl.c4
-rw-r--r--fs/ext4/mballoc.c26
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/namei.c16
-rw-r--r--fs/ext4/orphan.c4
-rw-r--r--fs/ext4/super.c14
-rw-r--r--fs/ext4/sysfs.c8
-rw-r--r--fs/ext4/xattr.c6
-rw-r--r--fs/f2fs/dir.c10
-rw-r--r--fs/f2fs/f2fs.h2
-rw-r--r--fs/f2fs/hash.c2
-rw-r--r--fs/f2fs/namei.c4
-rw-r--r--fs/f2fs/recovery.c4
-rw-r--r--fs/f2fs/super.c10
-rw-r--r--fs/f2fs/sysfs.c10
-rw-r--r--fs/file_table.c6
-rw-r--r--fs/gfs2/file.c7
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/io_uring.c32
-rw-r--r--fs/iomap/buffered-io.c52
-rw-r--r--fs/jbd2/commit.c21
-rw-r--r--fs/jbd2/journal.c6
-rw-r--r--fs/jbd2/transaction.c53
-rw-r--r--fs/ksmbd/auth.c27
-rw-r--r--fs/ksmbd/smb2pdu.c45
-rw-r--r--fs/ksmbd/smb_common.c5
-rw-r--r--fs/ksmbd/transport_rdma.c2
-rw-r--r--fs/ksmbd/vfs.h1
-rw-r--r--fs/libfs.c10
-rw-r--r--fs/lockd/svcsubs.c18
-rw-r--r--fs/namespace.c30
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/dir.c28
-rw-r--r--fs/nfs/inode.c9
-rw-r--r--fs/nfs/nfs4proc.c6
-rw-r--r--fs/nfsd/nfs3proc.c19
-rw-r--r--fs/nfsd/nfs3xdr.c4
-rw-r--r--fs/nfsd/nfs4proc.c13
-rw-r--r--fs/nfsd/nfs4state.c4
-rw-r--r--fs/nfsd/nfs4xdr.c10
-rw-r--r--fs/nfsd/trace.h14
-rw-r--r--fs/nfsd/vfs.c57
-rw-r--r--fs/nfsd/vfs.h4
-rw-r--r--fs/notify/fanotify/fanotify_user.c6
-rw-r--r--fs/overlayfs/copy_up.c16
-rw-r--r--fs/proc/task_mmu.c49
-rw-r--r--fs/quota/dquot.c11
-rw-r--r--fs/super.c19
-rw-r--r--fs/sync.c18
-rw-r--r--fs/tracefs/inode.c5
-rw-r--r--fs/unicode/Kconfig18
-rw-r--r--fs/unicode/Makefile6
-rw-r--r--fs/userfaultfd.c6
-rw-r--r--fs/xfs/xfs_aops.c16
-rw-r--r--fs/xfs/xfs_bmap_util.c9
-rw-r--r--fs/xfs/xfs_file.c86
-rw-r--r--fs/xfs/xfs_inode.h9
-rw-r--r--fs/xfs/xfs_ioctl.c2
-rw-r--r--fs/xfs/xfs_pnfs.c42
-rw-r--r--fs/xfs/xfs_super.c13
-rw-r--r--include/crypto/internal/blake2s.h40
-rw-r--r--include/dt-bindings/clock/dra7.h14
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/bpf.h9
-rw-r--r--include/linux/ceph/libceph.h1
-rw-r--r--include/linux/ceph/messenger.h5
-rw-r--r--include/linux/compiler.h21
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/dma-mapping.h8
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/if_vlan.h6
-rw-r--r--include/linux/iomap.h2
-rw-r--r--include/linux/jbd2.h13
-rw-r--r--include/linux/kfence.h2
-rw-r--r--include/linux/kvm_host.h112
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/memcontrol.h5
-rw-r--r--include/linux/mm.h7
-rw-r--r--include/linux/mm_inline.h97
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/netfilter_netdev.h4
-rw-r--r--include/linux/netfs.h7
-rw-r--r--include/linux/nfs.h8
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nvme-tcp.h1
-rw-r--r--include/linux/nvmem-provider.h4
-rw-r--r--include/linux/page_table_check.h19
-rw-r--r--include/linux/pgtable.h1
-rw-r--r--include/linux/rfkill.h5
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/sched/task.h4
-rw-r--r--include/linux/slab.h3
-rw-r--r--include/linux/suspend.h4
-rw-r--r--include/linux/trace_events.h22
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/ax25.h12
-rw-r--r--include/net/bluetooth/bluetooth.h3
-rw-r--r--include/net/bluetooth/hci_core.h8
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/checksum.h52
-rw-r--r--include/net/dsa.h1
-rw-r--r--include/net/dst_metadata.h14
-rw-r--r--include/net/ip6_fib.h10
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/ndisc.h4
-rw-r--r--include/net/neighbour.h18
-rw-r--r--include/net/netfilter/nf_flow_table.h6
-rw-r--r--include/net/netfilter/nf_queue.h2
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/net/netfilter/nf_tables_offload.h2
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/xfrm.h6
-rw-r--r--include/soc/fsl/dpaa2-fd.h3
-rw-r--r--include/soc/fsl/qe/immap_qe.h3
-rw-r--r--include/soc/fsl/qe/qe_tdm.h4
-rw-r--r--include/soc/fsl/qe/ucc_fast.h2
-rw-r--r--include/soc/fsl/qe/ucc_slow.h2
-rw-r--r--include/sound/pcm.h15
-rw-r--r--include/uapi/linux/input-event-codes.h4
-rw-r--r--include/uapi/linux/kvm.h7
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/smc_diag.h11
-rw-r--r--include/uapi/linux/xfrm.h6
-rw-r--r--include/uapi/sound/asound.h4
-rw-r--r--include/uapi/xen/gntdev.h8
-rw-r--r--include/xen/xenbus_dev.h2
-rw-r--r--ipc/sem.c4
-rw-r--r--kernel/async.c3
-rw-r--r--kernel/audit.c62
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bpf/bpf_lsm.c2
-rw-r--r--kernel/bpf/btf.c5
-rw-r--r--kernel/bpf/helpers.c2
-rw-r--r--kernel/bpf/ringbuf.c2
-rw-r--r--kernel/bpf/syscall.c3
-rw-r--r--kernel/bpf/trampoline.c5
-rw-r--r--kernel/cgroup/cgroup-v1.c16
-rw-r--r--kernel/cgroup/cgroup.c14
-rw-r--r--kernel/cgroup/cpuset.c77
-rw-r--r--kernel/configs/debug.config2
-rw-r--r--kernel/cred.c9
-rw-r--r--kernel/dma/swiotlb.c3
-rw-r--r--kernel/events/core.c20
-rw-r--r--kernel/fork.c34
-rw-r--r--kernel/locking/lockdep.c4
-rw-r--r--kernel/module.c25
-rw-r--r--kernel/module_decompress.c2
-rw-r--r--kernel/power/main.c5
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/suspend.c2
-rw-r--r--kernel/printk/sysctl.c2
-rw-r--r--kernel/sched/core.c23
-rw-r--r--kernel/seccomp.c10
-rw-r--r--kernel/signal.c5
-rw-r--r--kernel/stackleak.c5
-rw-r--r--kernel/sys.c39
-rw-r--r--kernel/trace/blktrace.c26
-rw-r--r--kernel/trace/ftrace.c1
-rw-r--r--kernel/trace/trace.c18
-rw-r--r--kernel/trace/trace.h1
-rw-r--r--kernel/trace/trace_eprobe.c16
-rw-r--r--kernel/trace/trace_events_hist.c6
-rw-r--r--kernel/trace/trace_events_trigger.c73
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_osnoise.c53
-rw-r--r--kernel/trace/trace_probe.c10
-rw-r--r--kernel/trace/trace_probe.h1
-rw-r--r--kernel/trace/trace_selftest.c6
-rw-r--r--kernel/ucount.c3
-rw-r--r--kernel/user_namespace.c14
-rw-r--r--lib/Kconfig1
-rw-r--r--lib/crypto/blake2s.c4
-rw-r--r--lib/iov_iter.c2
-rw-r--r--lib/test_kasan.c5
-rw-r--r--mm/debug_vm_pgtable.c2
-rw-r--r--mm/gup.c35
-rw-r--r--mm/hugetlb.c11
-rw-r--r--mm/kfence/core.c3
-rw-r--r--mm/kfence/kfence_test.c8
-rw-r--r--mm/khugepaged.c37
-rw-r--r--mm/kmemleak.c13
-rw-r--r--mm/madvise.c92
-rw-r--r--mm/memblock.c10
-rw-r--r--mm/memcontrol.c10
-rw-r--r--mm/memfd.c40
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmap.c13
-rw-r--r--mm/mprotect.c4
-rw-r--r--mm/page_isolation.c2
-rw-r--r--mm/page_table_check.c55
-rw-r--r--mm/util.c4
-rw-r--r--mm/vmscan.c4
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_dev.c15
-rw-r--r--net/8021q/vlan_netlink.c7
-rw-r--r--net/ax25/af_ax25.c28
-rw-r--r--net/ax25/ax25_dev.c28
-rw-r--r--net/ax25/ax25_route.c13
-rw-r--r--net/batman-adv/hard-interface.c29
-rw-r--r--net/bluetooth/hci_core.c1
-rw-r--r--net/bluetooth/hci_sync.c30
-rw-r--r--net/bluetooth/mgmt.c99
-rw-r--r--net/bluetooth/mgmt_util.c3
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c8
-rw-r--r--net/can/isotp.c29
-rw-r--r--net/can/j1939/transport.c2
-rw-r--r--net/ceph/ceph_common.c7
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/ceph/messenger_v1.c54
-rw-r--r--net/ceph/messenger_v2.c250
-rw-r--r--net/core/drop_monitor.c11
-rw-r--r--net/core/filter.c3
-rw-r--r--net/core/neighbour.c18
-rw-r--r--net/core/net-sysfs.c2
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/skbuff.c10
-rw-r--r--net/core/skmsg.c2
-rw-r--r--net/core/sock.c4
-rw-r--r--net/dcb/dcbnl.c44
-rw-r--r--net/dsa/dsa.c1
-rw-r--r--net/dsa/dsa2.c27
-rw-r--r--net/dsa/dsa_priv.h1
-rw-r--r--net/dsa/master.c7
-rw-r--r--net/dsa/port.c29
-rw-r--r--net/dsa/tag_lan9303.c21
-rw-r--r--net/ieee802154/nl802154.c8
-rw-r--r--net/ipv4/af_inet.c5
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_frontend.c3
-rw-r--r--net/ipv4/fib_lookup.h7
-rw-r--r--net/ipv4/fib_semantics.c6
-rw-r--r--net/ipv4/fib_trie.c22
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/Kconfig4
-rw-r--r--net/ipv4/ping.c10
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c40
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/udp_tunnel_nic.c2
-rw-r--r--net/ipv6/addrconf.c14
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/ip6_flowlabel.c4
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_output.c13
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/mcast.c34
-rw-r--r--net/ipv6/netfilter/Kconfig4
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c0
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/mac80211/agg-tx.c10
-rw-r--r--net/mac80211/ieee80211_i.h2
-rw-r--r--net/mac80211/mlme.c45
-rw-r--r--net/mac80211/rx.c14
-rw-r--r--net/mctp/route.c11
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/mptcp/mib.c2
-rw-r--r--net/mptcp/mib.h2
-rw-r--r--net/mptcp/pm.c8
-rw-r--r--net/mptcp/pm_netlink.c37
-rw-r--r--net/mptcp/protocol.c18
-rw-r--r--net/netfilter/core.c5
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c59
-rw-r--r--net/netfilter/nf_flow_table_offload.c6
-rw-r--r--net/netfilter/nf_queue.c36
-rw-r--r--net/netfilter/nf_tables_api.c21
-rw-r--r--net/netfilter/nf_tables_offload.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c12
-rw-r--r--net/netfilter/nft_byteorder.c12
-rw-r--r--net/netfilter/nft_ct.c5
-rw-r--r--net/netfilter/nft_dup_netdev.c6
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/nft_fwd_netdev.c6
-rw-r--r--net/netfilter/nft_immediate.c12
-rw-r--r--net/netfilter/nft_limit.c18
-rw-r--r--net/netfilter/nft_payload.c9
-rw-r--r--net/netfilter/nft_synproxy.c4
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/openvswitch/actions.c46
-rw-r--r--net/packet/af_packet.c8
-rw-r--r--net/sched/act_api.c15
-rw-r--r--net/sched/act_ct.c18
-rw-r--r--net/sched/cls_api.c17
-rw-r--r--net/sched/sch_api.c22
-rw-r--r--net/sched/sch_generic.c29
-rw-r--r--net/smc/af_smc.c157
-rw-r--r--net/smc/smc.h20
-rw-r--r--net/smc/smc_core.c5
-rw-r--r--net/smc/smc_diag.c2
-rw-r--r--net/smc/smc_pnet.c50
-rw-r--r--net/smc/smc_pnet.h2
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/sysfs.c5
-rw-r--r--net/sunrpc/xprtrdma/verbs.c3
-rw-r--r--net/sunrpc/xprtsock.c7
-rw-r--r--net/tipc/crypto.c2
-rw-r--r--net/tipc/link.c9
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/name_table.c2
-rw-r--r--net/tipc/node.c13
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/vmw_vsock/af_vsock.c1
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/nl80211.c15
-rw-r--r--net/xfrm/xfrm_device.c6
-rw-r--r--net/xfrm/xfrm_interface.c2
-rw-r--r--net/xfrm/xfrm_policy.c14
-rw-r--r--net/xfrm/xfrm_state.c29
-rw-r--r--net/xfrm/xfrm_user.c6
-rw-r--r--samples/seccomp/dropper.c9
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/kconfig/confdata.c25
-rw-r--r--scripts/kconfig/preprocess.c2
-rw-r--r--security/integrity/digsig_asymmetric.c15
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_policy.c8
-rw-r--r--security/integrity/ima/ima_template.c10
-rw-r--r--security/integrity/integrity_audit.c2
-rw-r--r--security/selinux/ima.c4
-rw-r--r--security/selinux/ss/conditional.c3
-rw-r--r--sound/core/memalloc.c15
-rw-r--r--sound/core/pcm_native.c13
-rw-r--r--sound/hda/intel-sdw-acpi.c7
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_codec.c4
-rw-r--r--sound/pci/hda/hda_generic.c17
-rw-r--r--sound/pci/hda/hda_generic.h3
-rw-r--r--sound/pci/hda/hda_intel.c6
-rw-r--r--sound/pci/hda/patch_realtek.c79
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c4
-rw-r--r--sound/soc/amd/acp/acp-mach.h1
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c4
-rw-r--r--sound/soc/codecs/cpcap.c2
-rw-r--r--sound/soc/codecs/cs4265.c3
-rw-r--r--sound/soc/codecs/hdmi-codec.c2
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c8
-rw-r--r--sound/soc/codecs/max9759.c3
-rw-r--r--sound/soc/codecs/rt5668.c12
-rw-r--r--sound/soc/codecs/rt5682-i2c.c15
-rw-r--r--sound/soc/codecs/rt5682.c34
-rw-r--r--sound/soc/codecs/rt5682.h2
-rw-r--r--sound/soc/codecs/rt5682s.c12
-rw-r--r--sound/soc/codecs/tas2770.c7
-rw-r--r--sound/soc/codecs/wcd938x.c31
-rw-r--r--sound/soc/codecs/wm_adsp.c3
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c11
-rw-r--r--sound/soc/generic/simple-card.c26
-rw-r--r--sound/soc/intel/skylake/skl.c1
-rw-r--r--sound/soc/mediatek/Kconfig2
-rw-r--r--sound/soc/qcom/lpass-platform.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c7
-rw-r--r--sound/soc/soc-acpi.c7
-rw-r--r--sound/soc/soc-ops.c70
-rw-r--r--sound/soc/soc-pcm.c18
-rw-r--r--sound/soc/sof/intel/hda.c1
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c27
-rw-r--r--sound/usb/implicit.c4
-rw-r--r--sound/usb/mixer.c13
-rw-r--r--sound/usb/quirks-table.h2
-rw-r--r--sound/x86/intel_hdmi_audio.c2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/msr-index.h1
-rw-r--r--tools/bpf/resolve_btfids/Makefile6
-rw-r--r--tools/cgroup/memcg_slabinfo.py30
-rw-r--r--tools/include/uapi/linux/kvm.h7
-rw-r--r--tools/include/uapi/linux/lirc.h229
-rw-r--r--tools/include/uapi/linux/perf_event.h8
-rw-r--r--tools/include/uapi/linux/prctl.h3
-rw-r--r--tools/include/uapi/sound/asound.h11
-rw-r--r--tools/lib/perf/include/internal/cpumap.h6
-rw-r--r--tools/lib/perf/include/perf/cpumap.h5
-rw-r--r--tools/lib/perf/libperf.map1
-rw-r--r--tools/lib/perf/mmap.c98
-rw-r--r--tools/lib/perf/tests/test-cpumap.c11
-rw-r--r--tools/lib/perf/tests/test-evlist.c5
-rw-r--r--tools/lib/perf/tests/test-evsel.c5
-rw-r--r--tools/lib/subcmd/subcmd-util.h11
-rw-r--r--tools/objtool/check.c2
-rw-r--r--tools/perf/builtin-ftrace.c45
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-trace.c23
-rw-r--r--tools/perf/tests/attr/README2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default-aarch649
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp2
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp-aarch649
-rw-r--r--tools/perf/tests/sigtrap.c15
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh2
-rw-r--r--tools/perf/util/annotate.c1
-rw-r--r--tools/perf/util/bpf-loader.c3
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c2
-rw-r--r--tools/perf/util/cs-etm.c16
-rw-r--r--tools/perf/util/data.c7
-rw-r--r--tools/perf/util/evlist-hybrid.c4
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/map_symbol.h1
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c2
-rw-r--r--tools/perf/util/session.c3
-rw-r--r--tools/perf/util/sort.c4
-rw-r--r--tools/perf/util/stat-display.c19
-rw-r--r--tools/perf/util/synthetic-events.c19
-rw-r--r--tools/scripts/Makefile.include2
-rw-r--r--tools/testing/kunit/kunit_kernel.py1
-rwxr-xr-xtools/testing/kunit/run_checks.py2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_crash.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h26
-rw-r--r--tools/testing/selftests/bpf/progs/timer_crash.c54
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_user.c1
-rw-r--r--tools/testing/selftests/clone3/clone3.c2
-rwxr-xr-xtools/testing/selftests/cpufreq/main.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh2
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh3
-rw-r--r--tools/testing/selftests/exec/Makefile6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc2
-rw-r--r--tools/testing/selftests/futex/Makefile4
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c10
-rw-r--r--tools/testing/selftests/kselftest_harness.h4
-rw-r--r--tools/testing/selftests/kvm/Makefile1
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c7
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_irq.c4
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/vgic.c4
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c1
-rw-r--r--tools/testing/selftests/mincore/mincore_selftest.c20
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c4
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh44
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh4
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh33
-rw-r--r--tools/testing/selftests/netfilter/.gitignore1
-rw-r--r--tools/testing/selftests/netfilter/Makefile4
-rw-r--r--tools/testing/selftests/netfilter/connect_close.c136
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh74
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh1
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh152
-rwxr-xr-xtools/testing/selftests/netfilter/nft_queue.sh19
-rwxr-xr-xtools/testing/selftests/netfilter/nft_synproxy.sh117
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh12
-rw-r--r--tools/testing/selftests/openat2/Makefile2
-rw-r--r--tools/testing/selftests/openat2/helpers.h12
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c12
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c17
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h13
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c22
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c6
-rw-r--r--tools/testing/selftests/pidfd/pidfd_wait.c5
-rw-r--r--tools/testing/selftests/rtc/settings2
-rw-r--r--tools/testing/selftests/seccomp/Makefile2
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_abi.c135
-rw-r--r--tools/testing/selftests/vm/hugepage-mremap.c26
-rw-r--r--tools/testing/selftests/vm/map_fixed_noreplace.c49
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh3
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c10
-rwxr-xr-xtools/testing/selftests/zram/zram.sh15
-rwxr-xr-xtools/testing/selftests/zram/zram01.sh33
-rwxr-xr-xtools/testing/selftests/zram/zram02.sh1
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh134
-rw-r--r--tools/tracing/rtla/Makefile4
-rw-r--r--tools/tracing/rtla/src/osnoise.c5
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c17
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c8
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c17
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c7
-rw-r--r--tools/tracing/rtla/src/trace.c8
-rw-r--r--tools/tracing/rtla/src/utils.c4
-rw-r--r--virt/kvm/kvm_main.c4
1254 files changed, 12874 insertions, 6807 deletions
diff --git a/.mailmap b/.mailmap
index b76e520809d0..10ee1103c823 100644
--- a/.mailmap
+++ b/.mailmap
@@ -80,6 +80,9 @@ Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
+Christian Brauner <brauner@kernel.org> <christian@brauner.io>
+Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
+Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -330,6 +333,9 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
+Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
+Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
diff --git a/CREDITS b/CREDITS
index b97256d5bc24..7e85a53b6a88 100644
--- a/CREDITS
+++ b/CREDITS
@@ -895,6 +895,12 @@ S: 3000 FORE Drive
S: Warrendale, Pennsylvania 15086
S: USA
+N: Ludovic Desroches
+E: ludovic.desroches@microchip.com
+D: Maintainer for ARM/Microchip (AT91) SoC support
+D: Author of ADC, pinctrl, XDMA and SDHCI drivers for this platform
+S: France
+
N: Martin Devera
E: devik@cdi.cz
W: http://luxik.cdi.cz/~devik/qos/
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index fde21d900420..859501366777 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -468,6 +468,7 @@ Description:
auto: Charge normally, respect thresholds
inhibit-charge: Do not charge while AC is attached
force-discharge: Force discharge while AC is attached
+ ================ ====================================
What: /sys/class/power_supply/<supply_name>/technology
Date: May 2007
diff --git a/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing b/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
index b363827da437..910df0e5815a 100644
--- a/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
+++ b/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
@@ -1,4 +1,4 @@
-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/uart*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/uart\*
Date: September 2021
Contact: Oskar Senft <osk@google.com>
Chia-Wei Wang <chiawei_wang@aspeedtech.com>
@@ -9,7 +9,7 @@ Description: Selects the RX source of the UARTx device.
depends on the selected file.
e.g.
- cat /sys/bus/platform/drivers/aspeed-uart-routing/*.uart_routing/uart1
+ cat /sys/bus/platform/drivers/aspeed-uart-routing/\*.uart_routing/uart1
[io1] io2 io3 io4 uart2 uart3 uart4 io6
In this case, UART1 gets its input from IO1 (physical serial port 1).
@@ -17,7 +17,7 @@ Description: Selects the RX source of the UARTx device.
Users: OpenBMC. Proposed changes should be mailed to
openbmc@lists.ozlabs.org
-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/io*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/io\*
Date: September 2021
Contact: Oskar Senft <osk@google.com>
Chia-Wei Wang <chiawei_wang@aspeedtech.com>
diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst
index bfc28704856c..6e2e416af783 100644
--- a/Documentation/admin-guide/mm/pagemap.rst
+++ b/Documentation/admin-guide/mm/pagemap.rst
@@ -23,7 +23,7 @@ There are four components to pagemap:
* Bit 56 page exclusively mapped (since 4.2)
* Bit 57 pte is uffd-wp write-protected (since 5.13) (see
:ref:`Documentation/admin-guide/mm/userfaultfd.rst <userfaultfd>`)
- * Bits 57-60 zero
+ * Bits 58-60 zero
* Bit 61 page is file-page or shared-anon (since 3.5)
* Bit 62 page swapped
* Bit 63 page present
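
For illustration only (not part of the patch above): a minimal userspace sketch
that reads a single pagemap entry for a mapped address and tests the bit
positions listed in the hunk. Error handling is deliberately minimal::

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t entry;
            unsigned long vaddr = (unsigned long)&entry;  /* any mapped address */
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* one 64-bit entry per virtual page, indexed by virtual page number */
            if (pread(fd, &entry, sizeof(entry),
                      (off_t)(vaddr / sysconf(_SC_PAGESIZE)) * sizeof(entry)) !=
                sizeof(entry))
                    return 1;
            printf("present=%u swapped=%u file/shared-anon=%u uffd-wp=%u\n",
                   (unsigned int)((entry >> 63) & 1),
                   (unsigned int)((entry >> 62) & 1),
                   (unsigned int)((entry >> 61) & 1),
                   (unsigned int)((entry >> 57) & 1));
            close(fd);
            return 0;
    }
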
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 0ec7b7f1524b..ea281dd75517 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -100,6 +100,8 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2077057 | ARM64_ERRATUM_2077057 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
diff --git a/Documentation/core-api/dma-attributes.rst b/Documentation/core-api/dma-attributes.rst
index 1887d92e8e92..17706dc91ec9 100644
--- a/Documentation/core-api/dma-attributes.rst
+++ b/Documentation/core-api/dma-attributes.rst
@@ -130,3 +130,11 @@ accesses to DMA buffers in both privileged "supervisor" and unprivileged
subsystem that the buffer is fully accessible at the elevated privilege
level (and ideally inaccessible or at least read-only at the
lesser-privileged levels).
+
+DMA_ATTR_OVERWRITE
+------------------
+
+This is a hint to the DMA-mapping subsystem that the device is expected to
+overwrite the entire mapped size, thus the caller does not require any of the
+previous buffer contents to be preserved. This allows bounce-buffering
+implementations to optimise DMA_FROM_DEVICE transfers.
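
For illustration only (not part of the patch above): a hedged sketch of a driver
mapping a receive buffer with this attribute. The device pointer, buffer and
length are assumed to come from the calling driver::

    #include <linux/dma-mapping.h>

    /* Illustrative only: "dev", "rx_buf" and "rx_len" are assumed to be
     * provided by the calling driver.  The old buffer contents are not
     * needed, so a bounce-buffer implementation may skip the copy-in. */
    static dma_addr_t example_map_rx(struct device *dev, void *rx_buf,
                                     size_t rx_len)
    {
            return dma_map_single_attrs(dev, rx_buf, rx_len, DMA_FROM_DEVICE,
                                        DMA_ATTR_OVERWRITE);
    }

The caller would still check the returned handle with dma_mapping_error() as
usual; the attribute only relaxes what the mapping layer must preserve.
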
diff --git a/Documentation/cpu-freq/cpu-drivers.rst b/Documentation/cpu-freq/cpu-drivers.rst
index 3b32336a7803..d84ededb66f9 100644
--- a/Documentation/cpu-freq/cpu-drivers.rst
+++ b/Documentation/cpu-freq/cpu-drivers.rst
@@ -75,6 +75,9 @@ And optionally
.resume - A pointer to a per-policy resume function which is called
with interrupts disabled and _before_ the governor is started again.
+ .ready - A pointer to a per-policy ready function which is called after
+ the policy is fully initialized.
+
.attr - A pointer to a NULL-terminated list of "struct freq_attr" which
allow to export values to sysfs.
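
As a sketch only (the driver and callback names below are invented for
illustration), the optional ``.ready`` callback is wired up like any other
member of ``struct cpufreq_driver``::

    #include <linux/cpufreq.h>

    /* Example only: names are made up; a real driver also fills in
     * .init, .verify and .target_index (or .setpolicy). */
    static void foo_cpufreq_ready(struct cpufreq_policy *policy)
    {
            /* Runs once the policy is fully initialized, e.g. to register
             * a cooling device for the CPUs covered by this policy. */
    }

    static struct cpufreq_driver foo_cpufreq_driver = {
            .name  = "foo-cpufreq",
            .ready = foo_cpufreq_ready,
    };
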
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index dcefee707ccd..a833ecf12fbc 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -7,6 +7,14 @@ directory. These are intended to be small tests to exercise individual code
paths in the kernel. Tests are intended to be run after building, installing
and booting a kernel.
+Kselftest from mainline can be run on older stable kernels. Running tests
+from mainline offers the best coverage. Several test rings run the mainline
+kselftest suite on stable releases. The reason is that when a new test
+gets added to regression-test a bug in existing code, we should be able
+to run that test on an older kernel. Hence, it is important to keep code
+that can still test an older kernel, and to make sure the test skips
+gracefully on newer releases.
+
You can find additional information on Kselftest framework, how to
write new tests using the framework on Kselftest wiki:
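
For example, a test built from mainline can probe at runtime for the interface
it needs and report a skip rather than a failure on kernels that lack it. A
minimal sketch using the kselftest helpers (the probed sysfs path is only a
placeholder, and the relative include assumes the usual
tools/testing/selftests layout)::

    #include <unistd.h>

    #include "../kselftest.h"

    int main(void)
    {
            ksft_print_header();
            ksft_set_plan(1);

            /* Placeholder probe: substitute whatever interface the test
             * actually depends on. */
            if (access("/sys/kernel/new_feature", F_OK) != 0)
                    ksft_exit_skip("feature not present, skipping\n");

            ksft_test_result_pass("feature present\n");
            ksft_exit_pass();
    }
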
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 76af931a332c..1c83e7d60a8a 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -242,7 +242,7 @@ example:
int rectangle_area(struct shape *this)
{
- struct rectangle *self = container_of(this, struct shape, parent);
+ struct rectangle *self = container_of(this, struct rectangle, parent);
return self->length * self->width;
};
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.yaml b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
index c612e1f48dba..ff91df04f9f4 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.yaml
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.yaml
@@ -8,7 +8,8 @@ title: Atmel AT91 device tree bindings.
maintainers:
- Alexandre Belloni <alexandre.belloni@bootlin.com>
- - Ludovic Desroches <ludovic.desroches@microchip.com>
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
description: |
Boards with a SoC of the Atmel AT91 or SMART family shall have the following
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt
index b5cb374dc47d..10a91cc8b997 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt
@@ -8,7 +8,7 @@ Required properties:
- compatible: Should contain a chip-specific compatible string,
Chip-specific strings are of the form "fsl,<chip>-dcfg",
The following <chip>s are known to be supported:
- ls1012a, ls1021a, ls1043a, ls1046a, ls2080a.
+ ls1012a, ls1021a, ls1043a, ls1046a, ls2080a, lx2160a
- reg : should contain base address and length of DCFG memory-mapped registers
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index e77635c5422c..fa8b31660cad 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -119,6 +119,9 @@ Boards (incomplete list of examples):
- OMAP3 BeagleBoard : Low cost community board
compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk
+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index f7d48f23da44..10119d9ef4b1 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -44,6 +44,7 @@ Required properties:
* "fsl,ls1046a-clockgen"
* "fsl,ls1088a-clockgen"
* "fsl,ls2080a-clockgen"
+ * "fsl,lx2160a-clockgen"
Chassis-version clock strings include:
* "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
* "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
diff --git a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
index e04349567eeb..427c5873f96a 100644
--- a/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
+++ b/Documentation/devicetree/bindings/gpio/sifive,gpio.yaml
@@ -7,7 +7,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive GPIO controller
maintainers:
- - Yash Shah <yash.shah@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
properties:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 0dfa6b26e099..27092c6a86c4 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -35,6 +35,10 @@ description:
contains a specific memory layout, which is documented in chapter 8 of the
SiFive U5 Coreplex Series Manual <https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf>.
+ The thead,c900-plic is different from sifive,plic-1.0.0: in OpenSBI, the
+ T-HEAD PLIC implementation requires setting a delegation bit to allow access
+ from S-mode. So add thead,c900-plic to distinguish them.
+
maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
@@ -42,12 +46,17 @@ maintainers:
properties:
compatible:
- items:
- - enum:
- - sifive,fu540-c000-plic
- - starfive,jh7100-plic
- - canaan,k210-plic
- - const: sifive,plic-1.0.0
+ oneOf:
+ - items:
+ - enum:
+ - sifive,fu540-c000-plic
+ - starfive,jh7100-plic
+ - canaan,k210-plic
+ - const: sifive,plic-1.0.0
+ - items:
+ - enum:
+ - allwinner,sun20i-d1-plic
+ - const: thead,c900-plic
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
index 272832e9f8f2..fa86691ebf16 100644
--- a/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
+++ b/Documentation/devicetree/bindings/mfd/ti,j721e-system-controller.yaml
@@ -20,7 +20,7 @@ description: |
maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
- - Roger Quadros <rogerq@ti.com
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index b86edf67ce62..58ecc62adfaa 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -107,6 +107,10 @@ properties:
- const: imem
- const: config
+ qcom,qmp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to the AOSS side-channel message RAM
+
qcom,smem-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: State bits used in by the AP to signal the modem.
@@ -222,6 +226,8 @@ examples:
"imem",
"config";
+ qcom,qmp = <&aoss_qmp>;
+
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;
qcom,smem-state-names = "ipa-clock-enabled-valid",
diff --git a/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml b/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
index cbbf5e8b1197..f78d3246fbdc 100644
--- a/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml
@@ -8,7 +8,7 @@ title: OMAP USB2 PHY
maintainers:
- Kishon Vijay Abraham I <kishon@ti.com>
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml b/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
index 84e66913d042..db41cd7bf150 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm-sifive.yaml
@@ -8,7 +8,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: SiFive PWM controller
maintainers:
- - Yash Shah <yash.shah@sifive.com>
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
index 2b1f91603897..e2d330bd4608 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
@@ -9,7 +9,6 @@ title: SiFive L2 Cache Controller
maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- - Yash Shah <yash.shah@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
description:
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
index 77adbebed824..c3e9f3485449 100644
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -8,6 +8,7 @@ title: Audio codec controlled by ChromeOS EC
maintainers:
- Cheng-Yi Chiang <cychiang@chromium.org>
+ - Tzung-Bi Shih <tzungbi@google.com>
description: |
Google's ChromeOS EC codec is a digital mic codec provided by the
diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
index 5dd209206e88..3ec2d7b83775 100644
--- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
@@ -23,8 +23,9 @@ properties:
minItems: 1
maxItems: 256
items:
- minimum: 0
- maximum: 256
+ items:
+ - minimum: 0
+ maximum: 256
description:
Chip select used by the device.
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index f00867ebc147..481aaa09f3f2 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -53,6 +53,7 @@ properties:
- const: st,stm32mp15-hsotg
- const: snps,dwc2
- const: samsung,s3c6400-hsotg
+ - const: intel,socfpga-agilex-hsotg
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
index a634774c537c..eedde385d299 100644
--- a/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Bindings for the TI wrapper module for the Cadence USBSS-DRD controller
maintainers:
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
index f6e91a5fd8fe..4f7a212fddd3 100644
--- a/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,keystone-dwc3.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI Keystone Soc USB Controller
maintainers:
- - Roger Quadros <rogerq@ti.com>
+ - Roger Quadros <rogerq@kernel.org>
properties:
compatible:
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 136f8da3d0e2..4f373a8ec47b 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -462,6 +462,10 @@ operation table looks like the following::
struct iov_iter *iter,
netfs_io_terminated_t term_func,
void *term_func_priv);
+
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
With a termination handler function pointer::
@@ -536,6 +540,18 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
+ * ``query_occupancy()``
+
+ [Required] Called to find out where the next piece of data is within a
+ particular region of the cache. The start and length of the region to be
+ queried are passed in, along with the granularity to which the answer needs
+ to be aligned. The function passes back the start and length of the data,
+ if any, available within that region. Note that there may be a hole at the
+ front.
+
+ It returns 0 if some data was found, -ENODATA if there was no usable data
+ within the region or -ENOBUFS if there is no caching on this file.
+
Note that these methods are passed a pointer to the cache resource structure,
not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
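
To make the new method concrete, here is a deliberately trivial implementation
sketch, not taken from any real cache backend, that reports the whole queried
region as populated, aligned to the requested granularity. A real cache would
consult its own metadata, may find a hole at the front, and returns -ENOBUFS
when the file is not cached at all::

    #include <linux/errno.h>
    #include <linux/math.h>
    #include <linux/netfs.h>

    /* Illustrative stub only. */
    static int example_query_occupancy(struct netfs_cache_resources *cres,
                                       loff_t start, size_t len,
                                       size_t granularity,
                                       loff_t *_data_start, size_t *_data_len)
    {
            loff_t first = roundup(start, (loff_t)granularity);
            loff_t end = rounddown(start + (loff_t)len, (loff_t)granularity);

            if (end <= first)
                    return -ENODATA; /* nothing usable at this granularity */
            *_data_start = first;
            *_data_len = end - first;
            return 0;
    }
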
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index da138dd39883..a1212b5b3026 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -300,30 +300,6 @@ Contact: Daniel Vetter, Noralf Tronnes
Level: Advanced
-Garbage collect fbdev scrolling acceleration
---------------------------------------------
-
-Scroll acceleration has been disabled in fbcon. Now it works as the old
-SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was
-removed from fbcon_ops.
-Remaining tasks:
-
-- a bunch of the hooks in fbcon_ops could be removed or simplified by calling
- directly instead of the function table (with a switch on p->rotate)
-
-- fb_copyarea is unused after this, and can be deleted from all drivers
-
-- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
- well as cfb_copyarea
-
-Note that not all acceleration code can be deleted, since clearing and cursor
-support is still accelerated, which might be good candidates for further
-deletion projects.
-
-Contact: Daniel Vetter
-
-Level: Intermediate
-
idr_init_base()
---------------
diff --git a/Documentation/tools/rtla/common_hist_options.rst b/Documentation/tools/rtla/common_hist_options.rst
index 0266cd08a6c9..df53ff835bfb 100644
--- a/Documentation/tools/rtla/common_hist_options.rst
+++ b/Documentation/tools/rtla/common_hist_options.rst
@@ -2,7 +2,7 @@
Set the histogram bucket size (default *1*).
-**-e**, **--entries** *N*
+**-E**, **--entries** *N*
Set the number of entries of the histogram (default 256).
diff --git a/Documentation/tools/rtla/common_osnoise_description.rst b/Documentation/tools/rtla/common_osnoise_description.rst
index 8973c5df888f..d5d61615b967 100644
--- a/Documentation/tools/rtla/common_osnoise_description.rst
+++ b/Documentation/tools/rtla/common_osnoise_description.rst
@@ -1,7 +1,7 @@
The **rtla osnoise** tool is an interface for the *osnoise* tracer. The
*osnoise* tracer dispatches a kernel thread per-cpu. These threads read the
time in a loop while with preemption, softirq and IRQs enabled, thus
-allowing all the sources of operating systme noise during its execution.
+allowing all the sources of operating system noise during its execution.
The *osnoise*'s tracer threads take note of the delta between each time
read, along with an interference counter of all sources of interference.
At the end of each period, the *osnoise* tracer displays a summary of
diff --git a/Documentation/tools/rtla/rtla-osnoise-hist.rst b/Documentation/tools/rtla/rtla-osnoise-hist.rst
index 52298ddd8701..f2e79d22c4c4 100644
--- a/Documentation/tools/rtla/rtla-osnoise-hist.rst
+++ b/Documentation/tools/rtla/rtla-osnoise-hist.rst
@@ -36,7 +36,7 @@ default). The reason for reducing the runtime is to avoid starving the
**rtla** tool. The tool is also set to run for *one minute*. The output
histogram is set to group outputs in buckets of *10us* and *25* entries::
- [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -e 25
+ [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -E 25
# RTLA osnoise histogram
# Time unit is microseconds (us)
# Duration: 0 00:01:00
diff --git a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
index 87a36044f828..2ca92042767b 100644
--- a/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
+++ b/Documentation/translations/zh_CN/cpu-freq/cpu-drivers.rst
@@ -84,6 +84,8 @@ CPUfreq核心层注册一个cpufreq_driver结构体。
.resume - 一个指向per-policy恢复函数的指针,该函数在关中断且在调节器再一次启动前被
调用。
+ .ready - 一个指向per-policy准备函数的指针,该函数在策略完全初始化之后被调用。
+
.attr - 一个指向NULL结尾的"struct freq_attr"列表的指针,该列表允许导出值到
sysfs。
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 687efcf245c1..e6fce2cbd99e 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -115,6 +115,7 @@ Code Seq# Include File Comments
'B' 00-1F linux/cciss_ioctl.h conflict!
'B' 00-0F include/linux/pmu.h conflict!
'B' C0-FF advanced bbus <mailto:maassen@uni-freiburg.de>
+'B' 00-0F xen/xenbus_dev.h conflict!
'C' all linux/soundcard.h conflict!
'C' 01-2F linux/capi.h conflict!
'C' F0-FF drivers/net/wan/cosa.h conflict!
@@ -134,6 +135,7 @@ Code Seq# Include File Comments
'F' 80-8F linux/arcfb.h conflict!
'F' DD video/sstfb.h conflict!
'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
+'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict!
'H' 00-7F linux/hiddev.h conflict!
'H' 00-0F linux/hidraw.h conflict!
'H' 01 linux/mei.h conflict!
@@ -176,6 +178,7 @@ Code Seq# Include File Comments
'P' 60-6F sound/sscape_ioctl.h conflict!
'P' 00-0F drivers/usb/class/usblp.c conflict!
'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
+'P' 00-0F xen/privcmd.h conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index a4267104db50..9f3172376ec3 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -1394,7 +1394,7 @@ documentation when it pops into existence).
-------------------
:Capability: KVM_CAP_ENABLE_CAP
-:Architectures: mips, ppc, s390
+:Architectures: mips, ppc, s390, x86
:Type: vcpu ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error
@@ -6997,6 +6997,20 @@ indicated by the fd to the VM this is called on.
This is intended to support intra-host migration of VMs between userspace VMMs,
upgrading the VMM process without interrupting the guest.
+7.30 KVM_CAP_PPC_AIL_MODE_3
+-------------------------------
+
+:Capability: KVM_CAP_PPC_AIL_MODE_3
+:Architectures: ppc
+:Type: vm
+
+This capability indicates that the kernel supports the mode 3 setting for the
+"Address Translation Mode on Interrupt" aka "Alternate Interrupt Location"
+resource that is controlled with the H_SET_MODE hypercall.
+
+This capability allows a guest kernel to use a better-performance mode for
+handling interrupts and system calls.
+
8. Other capabilities.
======================
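
From userspace, a VMM would simply probe for the capability before relying on
it. A minimal sketch, assuming a <linux/kvm.h> recent enough to define
KVM_CAP_PPC_AIL_MODE_3 and an already-created VM file descriptor::

    #include <sys/ioctl.h>

    #include <linux/kvm.h>

    /* Returns non-zero when the kernel advertises AIL mode 3 support for
     * the VM behind "vm_fd" (assumed to be a valid KVM VM descriptor). */
    static int have_ppc_ail_mode_3(int vm_fd)
    {
            return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_AIL_MODE_3) > 0;
    }
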
diff --git a/MAINTAINERS b/MAINTAINERS
index f41088418aae..05fd080b82f3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1620,6 +1620,7 @@ M: Olof Johansson <olof@lixom.net>
M: soc@kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/boot/dts/Makefile
F: arch/arm64/boot/dts/Makefile
@@ -1627,6 +1628,7 @@ F: arch/arm64/boot/dts/Makefile
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/mach-*/
F: arch/arm/plat-*/
@@ -1780,6 +1782,7 @@ F: drivers/irqchip/irq-apple-aic.c
F: drivers/mailbox/apple-mailbox.c
F: drivers/pinctrl/pinctrl-apple-gpio.c
F: drivers/soc/apple/*
+F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
F: include/linux/apple-mailbox.h
@@ -2251,7 +2254,7 @@ F: drivers/phy/mediatek/
ARM/Microchip (AT91) SoC support
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
-M: Ludovic Desroches <ludovic.desroches@microchip.com>
+M: Claudiu Beznea <claudiu.beznea@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
W: http://www.linux4sam.org
@@ -2570,10 +2573,13 @@ N: rockchip
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/power/pd-samsung.yaml
@@ -3133,11 +3139,9 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath6kl/
ATI_REMOTE2 DRIVER
@@ -4157,9 +4161,8 @@ N: csky
K: csky
CA8210 IEEE-802.15.4 RADIO DRIVER
-M: Harry Morris <h.morris@cascoda.com>
L: linux-wpan@vger.kernel.org
-S: Maintained
+S: Orphan
W: https://github.com/Cascoda/ca8210-linux.git
F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
F: drivers/net/ieee802154/ca8210.c
@@ -4544,6 +4547,7 @@ F: drivers/platform/chrome/
CHROMEOS EC CODEC DRIVER
M: Cheng-Yi Chiang <cychiang@chromium.org>
+M: Tzung-Bi Shih <tzungbi@google.com>
R: Guenter Roeck <groeck@chromium.org>
S: Maintained
F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -4909,7 +4913,8 @@ F: kernel/cgroup/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
-M: Vladimir Davydov <vdavydov.dev@gmail.com>
+M: Roman Gushchin <roman.gushchin@linux.dev>
+M: Shakeel Butt <shakeelb@google.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -5773,7 +5778,7 @@ F: tools/testing/selftests/dma/
DMA-BUF HEAPS FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
-R: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
@@ -6503,7 +6508,7 @@ F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/rockchip/
DRM DRIVERS FOR STI
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6512,8 +6517,8 @@ F: drivers/gpu/drm/sti
DRM DRIVERS FOR STM
M: Yannick Fertre <yannick.fertre@foss.st.com>
+M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7007,12 +7012,6 @@ L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
-EDAC-SIFIVE
-M: Yash Shah <yash.shah@sifive.com>
-L: linux-edac@vger.kernel.org
-S: Supported
-F: drivers/edac/sifive_edac.c
-
EDAC-SKYLAKE
M: Tony Luck <tony.luck@intel.com>
L: linux-edac@vger.kernel.org
@@ -7183,7 +7182,7 @@ F: drivers/net/can/usb/etas_es58x/
ETHERNET BRIDGE
M: Roopa Prabhu <roopa@nvidia.com>
-M: Nikolay Aleksandrov <nikolay@nvidia.com>
+M: Nikolay Aleksandrov <razor@blackwall.org>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
@@ -7576,6 +7575,12 @@ S: Maintained
W: http://floatingpoint.sourceforge.net/emulator/index.html
F: arch/x86/math-emu/
+FRAMEBUFFER CORE
+M: Daniel Vetter <daniel@ffwll.ch>
+F: drivers/video/fbdev/core/
+S: Odd Fixes
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
FRAMEBUFFER LAYER
M: Helge Deller <deller@gmx.de>
L: linux-fbdev@vger.kernel.org
@@ -7739,8 +7744,7 @@ M: Qiang Zhao <qiang.zhao@nxp.com>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/soc/fsl/qe/
-F: include/soc/fsl/*qe*.h
-F: include/soc/fsl/*ucc*.h
+F: include/soc/fsl/qe/
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
M: Li Yang <leoyang.li@nxp.com>
@@ -7771,6 +7775,7 @@ F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
F: include/linux/fsl/
+F: include/soc/fsl/
FREESCALE SOC FS_ENET DRIVER
M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
@@ -9253,6 +9258,15 @@ S: Maintained
W: https://github.com/o2genum/ideapad-slidebar
F: drivers/input/misc/ideapad_slidebar.c
+IDMAPPED MOUNTS
+M: Christian Brauner <brauner@kernel.org>
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
+F: Documentation/filesystems/idmappings.rst
+F: tools/testing/selftests/mount_setattr/
+F: include/linux/mnt_idmapping.h
+
IDT VersaClock 5 CLOCK DRIVER
M: Luca Ceresoli <luca@lucaceresoli.net>
S: Maintained
@@ -10880,6 +10894,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: drivers/ata/pata_arasan_cf.c
F: include/linux/pata_arasan_cf_data.h
+LIBATA PATA DRIVERS
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+L: linux-ide@vger.kernel.org
+F: drivers/ata/ata_*.c
+F: drivers/ata/pata_*.c
+
LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-ide@vger.kernel.org
@@ -12400,7 +12420,7 @@ F: include/uapi/linux/membarrier.h
F: kernel/sched/membarrier.c
MEMBLOCK
-M: Mike Rapoport <rppt@linux.ibm.com>
+M: Mike Rapoport <rppt@kernel.org>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/core-api/boot-time-mm.rst
@@ -13298,8 +13318,8 @@ W: http://www.iptables.org/
W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
C: irc://irc.libera.chat/netfilter
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next.git
F: include/linux/netfilter*
F: include/linux/netfilter/
F: include/net/netfilter/
@@ -13566,7 +13586,7 @@ F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@hammerspace.com>
-M: Anna Schumaker <anna.schumaker@netapp.com>
+M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
W: http://client.linux-nfs.org
@@ -13679,7 +13699,7 @@ F: scripts/nsdeps
NTB AMD DRIVER
M: Sanjay R Mehta <sanju.mehta@amd.com>
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
-L: linux-ntb@googlegroups.com
+L: ntb@lists.linux.dev
S: Supported
F: drivers/ntb/hw/amd/
@@ -13687,7 +13707,7 @@ NTB DRIVER CORE
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
M: Allen Hubbe <allenbh@gmail.com>
-L: linux-ntb@googlegroups.com
+L: ntb@lists.linux.dev
S: Supported
W: https://github.com/jonmason/ntb/wiki
T: git git://github.com/jonmason/ntb.git
@@ -13699,13 +13719,13 @@ F: tools/testing/selftests/ntb/
NTB IDT DRIVER
M: Serge Semin <fancer.lancer@gmail.com>
-L: linux-ntb@googlegroups.com
+L: ntb@lists.linux.dev
S: Supported
F: drivers/ntb/hw/idt/
NTB INTEL DRIVER
M: Dave Jiang <dave.jiang@intel.com>
-L: linux-ntb@googlegroups.com
+L: ntb@lists.linux.dev
S: Supported
W: https://github.com/davejiang/linux/wiki
T: git https://github.com/davejiang/linux.git
@@ -14384,6 +14404,7 @@ M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/devicetree
W: http://www.devicetree.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/ABI/testing/sysfs-firmware-ofw
@@ -14395,6 +14416,7 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/devicetree
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/devicetree/
@@ -15128,7 +15150,7 @@ M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Mark Rutland <mark.rutland@arm.com>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-R: Jiri Olsa <jolsa@redhat.com>
+R: Jiri Olsa <jolsa@kernel.org>
R: Namhyung Kim <namhyung@kernel.org>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
@@ -15285,9 +15307,11 @@ PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -15544,6 +15568,7 @@ M: Iurii Zaikin <yzaikin@google.com>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux.git sysctl-next
F: fs/proc/proc_sysctl.c
F: include/linux/sysctl.h
F: kernel/sysctl-test.c
@@ -15891,6 +15916,7 @@ S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
@@ -15898,11 +15924,12 @@ L: ath11k@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath11k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M: ath9k-devel@qca.qualcomm.com
+M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
@@ -15977,14 +16004,6 @@ F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
F: drivers/misc/fastrpc.c
F: include/uapi/misc/fastrpc.h
-QUALCOMM GENERIC INTERFACE I2C DRIVER
-M: Akash Asthana <akashast@codeaurora.org>
-M: Mukesh Savaliya <msavaliy@codeaurora.org>
-L: linux-i2c@vger.kernel.org
-L: linux-arm-msm@vger.kernel.org
-S: Supported
-F: drivers/i2c/busses/i2c-qcom-geni.c
-
QUALCOMM HEXAGON ARCHITECTURE
M: Brian Cain <bcain@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -16056,8 +16075,8 @@ F: Documentation/devicetree/bindings/mtd/qcom,nandc.yaml
F: drivers/mtd/nand/raw/qcom_nandc.c
QUALCOMM RMNET DRIVER
-M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
-M: Sean Tranchetti <stranche@codeaurora.org>
+M: Subash Abhinov Kasiviswanathan <quic_subashab@quicinc.com>
+M: Sean Tranchetti <quic_stranche@quicinc.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
@@ -16083,11 +16102,10 @@ F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
+M: Loic Poulain <loic.poulain@linaro.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
-T: git git://github.com/KrasnikovEugene/wcn36xx.git
F: drivers/net/wireless/ath/wcn36xx/
QUANTENNA QTNFMAC WIRELESS DRIVER
@@ -16350,6 +16368,7 @@ F: drivers/watchdog/realtek_otto_wdt.c
REALTEK RTL83xx SMI DSA ROUTER CHIPS
M: Linus Walleij <linus.walleij@linaro.org>
+M: Alvin Šipraga <alsi@bang-olufsen.dk>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F: drivers/net/dsa/realtek-smi*
@@ -16469,6 +16488,14 @@ F: Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
+RENESAS R-CAR SATA DRIVER
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+S: Supported
+L: linux-ide@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
+F: drivers/ata/sata_rcar.c
+
RENESAS R-CAR THERMAL DRIVERS
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
L: linux-renesas-soc@vger.kernel.org
@@ -16804,8 +16831,8 @@ F: drivers/video/fbdev/savage/
S390
M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
-M: Christian Borntraeger <borntraeger@linux.ibm.com>
-R: Alexander Gordeev <agordeev@linux.ibm.com>
+M: Alexander Gordeev <agordeev@linux.ibm.com>
+R: Christian Borntraeger <borntraeger@linux.ibm.com>
R: Sven Schnelle <svens@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
@@ -17076,6 +17103,7 @@ SAMSUNG SOC CLOCK DRIVERS
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-samsung-soc@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
@@ -17712,6 +17740,21 @@ S: Maintained
W: http://www.winischhofer.at/linuxsisusbvga.shtml
F: drivers/usb/misc/sisusbvga/
+SL28 CPLD MFD DRIVER
+M: Michael Walle <michael@walle.cc>
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/kontron,sl28cpld-gpio.yaml
+F: Documentation/devicetree/bindings/hwmon/kontron,sl28cpld-hwmon.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/kontron,sl28cpld-intc.yaml
+F: Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
+F: Documentation/devicetree/bindings/pwm/kontron,sl28cpld-pwm.yaml
+F: Documentation/devicetree/bindings/watchdog/kontron,sl28cpld-wdt.yaml
+F: drivers/gpio/gpio-sl28cpld.c
+F: drivers/hwmon/sl28cpld-hwmon.c
+F: drivers/irqchip/irq-sl28cpld.c
+F: drivers/pwm/pwm-sl28cpld.c
+F: drivers/watchdog/sl28cpld_wdt.c
+
SLAB ALLOCATOR
M: Christoph Lameter <cl@linux.com>
M: Pekka Enberg <penberg@kernel.org>
@@ -17719,8 +17762,10 @@ M: David Rientjes <rientjes@google.com>
M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
+R: Roman Gushchin <roman.gushchin@linux.dev>
L: linux-mm@kvack.org
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab.git
F: include/linux/sl?b*.h
F: mm/sl?b*
@@ -18428,7 +18473,7 @@ F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
F: sound/soc/sti/
STI CEC DRIVER
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Alain Volmat <alain.volmat@foss.st.com>
S: Maintained
F: Documentation/devicetree/bindings/media/stih-cec.txt
F: drivers/media/cec/platform/sti/
@@ -19582,6 +19627,14 @@ F: Documentation/trace/timerlat-tracer.rst
F: Documentation/trace/hwlat_detector.rst
F: arch/*/kernel/trace.c
+Real-time Linux Analysis (RTLA) tools
+M: Daniel Bristot de Oliveira <bristot@kernel.org>
+M: Steven Rostedt <rostedt@goodmis.org>
+L: linux-trace-devel@vger.kernel.org
+S: Maintained
+F: Documentation/tools/rtla/
+F: tools/tracing/rtla/
+
TRADITIONAL CHINESE DOCUMENTATION
M: Hu Haowen <src.res@email.cn>
L: linux-doc-tw-discuss@lists.sourceforge.net
@@ -21415,7 +21468,6 @@ THE REST
M: Linus Torvalds <torvalds@linux-foundation.org>
L: linux-kernel@vger.kernel.org
S: Buried alive in reporters
-Q: http://patchwork.kernel.org/project/LKML/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
F: *
F: */
diff --git a/Makefile b/Makefile
index 1fc3491096cb..daeb5c88b50b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = Gobble Gobble
+EXTRAVERSION = -rc6
+NAME = Superb Owl
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 235ad559acb2..e41eca79c950 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -806,6 +806,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
logicpd-som-lv-37xx-devkit.dtb \
omap3430-sdp.dtb \
omap3-beagle.dtb \
+ omap3-beagle-ab4.dtb \
omap3-beagle-xm.dtb \
omap3-beagle-xm-ab.dtb \
omap3-cm-t3517.dtb \
diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi
index 673159d93a6a..f957fea8208e 100644
--- a/arch/arm/boot/dts/am335x-wega.dtsi
+++ b/arch/arm/boot/dts/am335x-wega.dtsi
@@ -55,7 +55,7 @@
2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */
>;
tx-num-evt = <16>;
- rt-num-evt = <16>;
+ rx-num-evt = <16>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 6b485cbed8d5..42bff117656c 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -160,7 +160,7 @@
target-module@48210000 {
compatible = "ti,sysc-omap4-simple", "ti,sysc";
power-domains = <&prm_mpu>;
- clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>;
+ clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
@@ -875,10 +875,10 @@
<0x58000014 4>;
reg-names = "rev", "syss";
ti,syss-mask = <1>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 0>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 11>;
clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
#address-cells = <1>;
#size-cells = <1>;
@@ -912,7 +912,7 @@
SYSC_OMAP2_SOFTRESET |
SYSC_OMAP2_AUTOIDLE)>;
ti,syss-mask = <1>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
@@ -939,8 +939,8 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
clock-names = "fck", "dss_clk";
#address-cells = <1>;
#size-cells = <1>;
@@ -979,7 +979,7 @@
compatible = "vivante,gc";
reg = <0x0 0x700>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&dss_clkctrl DRA7_BB2D_CLKCTRL 0>;
+ clocks = <&dss_clkctrl DRA7_DSS_BB2D_CLKCTRL 0>;
clock-names = "core";
};
};
@@ -1333,7 +1333,7 @@
ti,no-reset-on-init;
ti,no-idle;
timer@0 {
- assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>;
+ assigned-clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER1_CLKCTRL 24>;
assigned-clock-parents = <&sys_32k_ck>;
};
};
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 8cbaf1c81174..3b609d987d88 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -79,7 +79,6 @@
MX23_PAD_LCD_RESET__GPIO_1_18
MX23_PAD_PWM3__GPIO_1_29
MX23_PAD_PWM4__GPIO_1_30
- MX23_PAD_SSP1_DETECT__SSP1_DETECT
>;
fsl,drive-strength = <MXS_DRIVE_4mA>;
fsl,voltage = <MXS_VOLTAGE_HIGH>;
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index d07d8f83456d..ccfa8e320be6 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -5,6 +5,8 @@
* Author: Fabio Estevam <fabio.estevam@freescale.com>
*/
+#include <dt-bindings/gpio/gpio.h>
+
/ {
aliases {
backlight = &backlight;
@@ -226,6 +228,7 @@
MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0
>;
};
@@ -304,7 +307,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- non-removable;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index b7ea37ad4e55..bcec98b96411 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -259,7 +259,7 @@
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
- assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
+ assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
timeout-sec = <40>;
};
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 3be7cba603d5..26eaba3fa96f 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -59,7 +59,7 @@
};
uart_A: serial@84c0 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x84c0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
fifo-size = <128>;
@@ -67,7 +67,7 @@
};
uart_B: serial@84dc {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x84dc 0x18>;
interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
@@ -105,7 +105,7 @@
};
uart_C: serial@8700 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x8700 0x18>;
interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
@@ -228,7 +228,7 @@
};
uart_AO: serial@4c0 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart";
reg = <0x4c0 0x18>;
interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index f80ddc98d3a2..9997a5d0333a 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -736,27 +736,27 @@
};
&uart_AO {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart";
+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_A {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_B {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_C {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&usb0 {
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index b49b7cbaed4e..94f1c03decce 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -724,27 +724,27 @@
};
&uart_AO {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart";
+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_A {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_B {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_C {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&usb0 {
diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts
new file mode 100644
index 000000000000..990ff2d84686
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-beagle-ab4.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+
+#include "omap3-beagle.dts"
+
+/ {
+ model = "TI OMAP3 BeagleBoard A to B4";
+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
+};
+
+/*
+ * Workaround for capacitor C70 issue, see "Boards revision A and < B5"
+ * section at https://elinux.org/BeagleBoard_Community
+ */
+
+/* Unusable as clocksource because of unreliable oscillator */
+&counter32k {
+ status = "disabled";
+};
+
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+ /delete-property/ti,no-reset-on-init;
+ /delete-property/ti,no-idle;
+ timer@0 {
+ /delete-property/ti,timer-alwon;
+ };
+};
+
+/* Preferred always-on timer for clocksource */
+&timer12_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ /* Always clocked by secure_32k_fck */
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt2_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index f9f34b8458e9..0548b391334f 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -304,39 +304,6 @@
phys = <0 &hsusb2_phy>;
};
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
- status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
- /delete-property/ti,no-reset-on-init;
- /delete-property/ti,no-idle;
- timer@0 {
- /delete-property/ti,timer-alwon;
- };
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- /* Always clocked by secure_32k_fck */
- };
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- assigned-clocks = <&gpt2_fck>;
- assigned-clock-parents = <&sys_ck>;
- };
-};
-
&twl_gpio {
ti,use-leds;
/* pullups: BIT(1) */
diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
index 5e55198e4576..54cd37336be7 100644
--- a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
@@ -158,6 +158,24 @@
status = "disabled";
};
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+ /delete-property/ti,no-reset-on-init;
+ /delete-property/ti,no-idle;
+ timer@0 {
+ /delete-property/ti,timer-alwon;
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer12_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ /* Always clocked by secure_32k_fck */
+ };
+};
+
&twl_gpio {
ti,use-leds;
/*
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index c2995a280729..162d0726b008 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -14,36 +14,3 @@
display2 = &tv0;
};
};
-
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
- status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
- /delete-property/ti,no-reset-on-init;
- /delete-property/ti,no-idle;
- timer@0 {
- /delete-property/ti,timer-alwon;
- };
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- /* Always clocked by secure_32k_fck */
- };
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- assigned-clocks = <&gpt2_fck>;
- assigned-clock-parents = <&sys_ck>;
- };
-};
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index 8eed9e3a92e9..5868eb512f69 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -718,8 +718,8 @@
interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
assigned-clocks = <&cru SCLK_HDMI_PHY>;
assigned-clock-parents = <&hdmi_phy>;
- clocks = <&cru SCLK_HDMI_HDCP>, <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_CEC>;
- clock-names = "isfr", "iahb", "cec";
+ clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
+ clock-names = "iahb", "isfr", "cec";
pinctrl-names = "default";
pinctrl-0 = <&hdmii2c_xfer &hdmi_hpd &hdmi_cec>;
resets = <&cru SRST_HDMI_P>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index aaaa61875701..45a9d9b908d2 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -971,7 +971,7 @@
status = "disabled";
};
- crypto: cypto-controller@ff8a0000 {
+ crypto: crypto@ff8a0000 {
compatible = "rockchip,rk3288-crypto";
reg = <0x0 0xff8a0000 0x0 0x4000>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
index 367ba48aac3e..b587e4ec11e5 100644
--- a/arch/arm/boot/dts/spear320-hmi.dts
+++ b/arch/arm/boot/dts/spear320-hmi.dts
@@ -235,7 +235,6 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x41>;
- irq-over-gpio;
irq-gpios = <&gpiopinctrl 29 0x4>;
id = <0>;
blocks = <0x5>;
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
index 580ca497f312..f8c5899fbdba 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
@@ -185,10 +185,6 @@
cap-sd-highspeed;
cap-mmc-highspeed;
/* All direction control is used */
- st,sig-dir-cmd;
- st,sig-dir-dat0;
- st,sig-dir-dat2;
- st,sig-dir-dat31;
st,sig-pin-fbclk;
full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts
index 1d2aac2cb6d0..fdc1d64dfff9 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts
@@ -13,12 +13,15 @@
"google,nyan-big-rev1", "google,nyan-big-rev0",
"google,nyan-big", "google,nyan", "nvidia,tegra124";
- panel: panel {
- compatible = "auo,b133xtn01";
-
- power-supply = <&vdd_3v3_panel>;
- backlight = <&backlight>;
- ddc-i2c-bus = <&dpaux>;
+ host1x@50000000 {
+ dpaux@545c0000 {
+ aux-bus {
+ panel: panel {
+ compatible = "auo,b133xtn01";
+ backlight = <&backlight>;
+ };
+ };
+ };
};
mmc@700b0400 { /* SD Card on this bus */
diff --git a/arch/arm/boot/dts/tegra124-nyan-blaze.dts b/arch/arm/boot/dts/tegra124-nyan-blaze.dts
index 677babde6460..abdf4456826f 100644
--- a/arch/arm/boot/dts/tegra124-nyan-blaze.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-blaze.dts
@@ -15,12 +15,15 @@
"google,nyan-blaze-rev0", "google,nyan-blaze",
"google,nyan", "nvidia,tegra124";
- panel: panel {
- compatible = "samsung,ltn140at29-301";
-
- power-supply = <&vdd_3v3_panel>;
- backlight = <&backlight>;
- ddc-i2c-bus = <&dpaux>;
+ host1x@50000000 {
+ dpaux@545c0000 {
+ aux-bus {
+ panel: panel {
+ compatible = "samsung,ltn140at29-301";
+ backlight = <&backlight>;
+ };
+ };
+ };
};
sound {
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 232c90604df9..6a9592ceb5f2 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -48,6 +48,13 @@
dpaux@545c0000 {
vdd-supply = <&vdd_3v3_panel>;
status = "okay";
+
+ aux-bus {
+ panel: panel {
+ compatible = "lg,lp129qe";
+ backlight = <&backlight>;
+ };
+ };
};
};
@@ -1080,13 +1087,6 @@
};
};
- panel: panel {
- compatible = "lg,lp129qe";
- power-supply = <&vdd_3v3_panel>;
- backlight = <&backlight>;
- ddc-i2c-bus = <&dpaux>;
- };
-
vdd_mux: regulator-mux {
compatible = "regulator-fixed";
regulator-name = "+VDD_MUX";
diff --git a/arch/arm/crypto/blake2s-shash.c b/arch/arm/crypto/blake2s-shash.c
index 17c1c3bfe2f5..763c73beea2d 100644
--- a/arch/arm/crypto/blake2s-shash.c
+++ b/arch/arm/crypto/blake2s-shash.c
@@ -13,12 +13,12 @@
static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+ return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress);
+ return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 7bd30c0a4280..22f937e6f3ff 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -154,22 +154,38 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
return 0;
}
-static struct undef_hook kgdb_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_arm_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_BREAKINST,
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kgdb_brk_fn
};
-static struct undef_hook kgdb_compiled_brkpt_hook = {
+static struct undef_hook kgdb_brkpt_thumb_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KGDB_BREAKINST & 0xffff,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
+ .cpsr_val = PSR_T_BIT | SVC_MODE,
+ .fn = kgdb_brk_fn
+};
+
+static struct undef_hook kgdb_compiled_brkpt_arm_hook = {
.instr_mask = 0xffffffff,
.instr_val = KGDB_COMPILED_BREAK,
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
.cpsr_val = SVC_MODE,
.fn = kgdb_compiled_brk_fn
};
+static struct undef_hook kgdb_compiled_brkpt_thumb_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KGDB_COMPILED_BREAK & 0xffff,
+ .cpsr_mask = PSR_T_BIT | MODE_MASK,
+ .cpsr_val = PSR_T_BIT | SVC_MODE,
+ .fn = kgdb_compiled_brk_fn
+};
+
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;
@@ -210,8 +226,10 @@ int kgdb_arch_init(void)
if (ret != 0)
return ret;
- register_undef_hook(&kgdb_brkpt_hook);
- register_undef_hook(&kgdb_compiled_brkpt_hook);
+ register_undef_hook(&kgdb_brkpt_arm_hook);
+ register_undef_hook(&kgdb_brkpt_thumb_hook);
+ register_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+ register_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
return 0;
}
@@ -224,8 +242,10 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
- unregister_undef_hook(&kgdb_brkpt_hook);
- unregister_undef_hook(&kgdb_compiled_brkpt_hook);
+ unregister_undef_hook(&kgdb_brkpt_arm_hook);
+ unregister_undef_hook(&kgdb_brkpt_thumb_hook);
+ unregister_undef_hook(&kgdb_compiled_brkpt_arm_hook);
+ unregister_undef_hook(&kgdb_compiled_brkpt_thumb_hook);
unregister_die_notifier(&kgdb_notifier);
}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6daaa645ae5d..21413a9b7b6c 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
}
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ put_device(&pdev->dev);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
- put_device(&pdev->dev);
return r;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index ccb0e3732c0d..31d1a21f6041 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void)
for_each_matching_node(np, ti_clkctrl_match_table) {
ret = _setup_clkctrl_provider(np);
- if (ret)
+ if (ret) {
+ of_node_put(np);
break;
+ }
}
return ret;
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 43ddec677c0b..594edf9bbea4 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -2,6 +2,7 @@
menuconfig ARCH_INTEL_SOCFPGA
bool "Altera SOCFPGA family"
depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
@@ -18,6 +19,7 @@ menuconfig ARCH_INTEL_SOCFPGA
select PL310_ERRATA_727915
select PL310_ERRATA_753970 if PL310
select PL310_ERRATA_769419
+ select RESET_CONTROLLER
if ARCH_INTEL_SOCFPGA
config SOCFPGA_SUSPEND
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 274e4f73fd33..5e2be37a198e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -212,12 +212,14 @@ early_param("ecc", early_ecc);
static int __init early_cachepolicy(char *p)
{
pr_warn("cachepolicy kernel parameter not supported without cp15\n");
+ return 0;
}
early_param("cachepolicy", early_cachepolicy);
static int __init noalign_setup(char *__unused)
{
pr_warn("noalign kernel parameter not supported without cp15\n");
+ return 1;
}
__setup("noalign", noalign_setup);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f2b5a4abef21..09b885cc4db5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -672,6 +672,7 @@ config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
config ARM64_ERRATUM_2051678
bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+ default y
help
This option adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
Affected Cortex-A510 might not respect the ordering rules for
@@ -680,6 +681,22 @@ config ARM64_ERRATUM_2051678
If unsure, say Y.
+config ARM64_ERRATUM_2077057
+ bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2077057.
+	  Affected Cortex-A510 may corrupt SPSR_EL2 when a step exception is
+ expected, but a Pointer Authentication trap is taken instead. The
+ erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
+ EL1 to cause a return to EL2 with a guest controlled ELR_EL2.
+
+ This can only happen when EL2 is stepping EL1.
+
+ When these conditions occur, the SPSR_EL2 value is unchanged from the
+ previous guest entry, and can be restored from the in-memory copy.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_2119858
bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
default y
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 7d5d58800170..21697449d762 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -309,9 +309,6 @@ config ARCH_VISCONTI
help
This enables support for Toshiba Visconti SoCs Family.
-config ARCH_VULCAN
- def_bool n
-
config ARCH_XGENE
bool "AppliedMicro X-Gene SOC Family"
help
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 517519e6e87f..f84d4b489e0b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -107,6 +107,12 @@
no-map;
};
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index d8838dde0f0f..4fb31c2ba31c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -157,14 +157,6 @@
regulator-always-on;
};
- reserved-memory {
- /* TEE Reserved Memory */
- bl32_reserved: bl32@5000000 {
- reg = <0x0 0x05300000 0x0 0x2000000>;
- no-map;
- };
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
index 3e968b244191..fd3fa82e4c33 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
@@ -17,7 +17,7 @@
rtc1 = &vrtc;
};
- dioo2133: audio-amplifier-0 {
+ dio2133: audio-amplifier-0 {
compatible = "simple-audio-amplifier";
enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
VCC-supply = <&vcc_5v>;
@@ -219,7 +219,7 @@
audio-widgets = "Line", "Lineout";
audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>,
<&tdmin_b>, <&tdmin_c>, <&tdmin_lb>,
- <&dioo2133>;
+ <&dio2133>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 6b457b2c30a4..aa14ea017a61 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -49,6 +49,12 @@
no-map;
};
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
index 212c6aa5a3b8..5751c48620ed 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
@@ -123,7 +123,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
index 0bd1e98a0eef..ddb1b345397f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -48,7 +48,7 @@
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_5v>;
- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
enable-active-high;
regulator-always-on;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index 427475846fc7..a5d79f2f7c19 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -203,14 +203,6 @@
regulator-always-on;
};
- reserved-memory {
- /* TEE Reserved Memory */
- bl32_reserved: bl32@5000000 {
- reg = <0x0 0x05300000 0x0 0x2000000>;
- no-map;
- };
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 6288e104a089..a2635b14da30 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -543,8 +543,7 @@
<0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
<0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
/* Standard AXI Translation entries as programmed by EDK2 */
- dma-ranges = <0x02000000 0x0 0x2c1c0000 0x0 0x2c1c0000 0x0 0x00040000>,
- <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>,
+ dma-ranges = <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>,
<0x43000000 0x8 0x00000000 0x8 0x00000000 0x2 0x00000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
index d74e738e4070..c03f4e183389 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -157,6 +157,10 @@
};
};
+&ftm_alarm0 {
+ status = "okay";
+};
+
&gpio1 {
gpio-line-names =
"", "", "", "", "", "", "", "",
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index f77f90ed416f..0c7a72c51a31 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -707,7 +707,6 @@
clocks = <&clk IMX8MM_CLK_VPU_DEC_ROOT>;
assigned-clocks = <&clk IMX8MM_CLK_VPU_BUS>;
assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_800M>;
- resets = <&src IMX8MQ_RESET_VPU_RESET>;
};
pgc_vpu_g1: power-domain@7 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index f3e3418f7edc..2d4a472af6a9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -1115,8 +1115,8 @@
status = "okay";
ports {
- port@1 {
- reg = <1>;
+ port@0 {
+ reg = <0>;
mipi1_sensor_ep: endpoint {
remote-endpoint = <&camera1_ep>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 2df2510d0118..e92ebb6147e6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -554,7 +554,7 @@
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
status = "disabled";
- port@0 {
+ port {
lcdif_mipi_dsi: endpoint {
remote-endpoint = <&mipi_dsi_lcdif_in>;
};
@@ -1151,8 +1151,8 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
+ port@1 {
+ reg = <1>;
csi1_mipi_ep: endpoint {
remote-endpoint = <&csi1_ep>;
@@ -1203,8 +1203,8 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
+ port@1 {
+ reg = <1>;
csi2_mipi_ep: endpoint {
remote-endpoint = <&csi2_ep>;
diff --git a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
index a987ff7156bd..09f7364dd1d0 100644
--- a/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8ulp.dtsi
@@ -132,7 +132,7 @@
scmi_sensor: protocol@15 {
reg = <0x15>;
- #thermal-sensor-cells = <0>;
+ #thermal-sensor-cells = <1>;
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
index f27e3c8de916..ce6d5bdba0a8 100644
--- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi
+++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
@@ -91,7 +91,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "tqm-tlv320aic32";
+ model = "imx-audio-tlv320aic32x4";
ssi-controller = <&sai3>;
audio-codec = <&tlv320aic3x04>;
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index 0dd2d2ee765a..f4270cf18996 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -502,7 +502,7 @@
};
usb0: usb@ffb00000 {
- compatible = "snps,dwc2";
+ compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2";
reg = <0xffb00000 0x40000>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0>;
@@ -515,7 +515,7 @@
};
usb1: usb@ffb40000 {
- compatible = "snps,dwc2";
+ compatible = "intel,socfpga-agilex-hsotg", "snps,dwc2";
reg = <0xffb40000 0x40000>;
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0>;
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index f972704dfe7a..56dfbb2e2fa6 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -711,7 +711,7 @@
clock-names = "pclk", "timer";
};
- dmac: dmac@ff240000 {
+ dmac: dma-controller@ff240000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xff240000 0x0 0x4000>;
interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 39db0b85b4da..b822533dc7f1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -489,7 +489,7 @@
status = "disabled";
};
- dmac: dmac@ff1f0000 {
+ dmac: dma-controller@ff1f0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xff1f0000 0x0 0x4000>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 45a5ae5d2027..162f08bca0d4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -286,7 +286,7 @@
sound: sound {
compatible = "rockchip,rk3399-gru-sound";
- rockchip,cpu = <&i2s0 &i2s2>;
+ rockchip,cpu = <&i2s0 &spdif>;
};
};
@@ -437,10 +437,6 @@ ap_i2c_audio: &i2c8 {
status = "okay";
};
-&i2s2 {
- status = "okay";
-};
-
&io_domains {
status = "okay";
@@ -537,6 +533,17 @@ ap_i2c_audio: &i2c8 {
vqmmc-supply = <&ppvar_sd_card_io>;
};
+&spdif {
+ status = "okay";
+
+ /*
+ * SPDIF is routed internally to DP; we either don't use these pins, or
+ * mux them to something else.
+ */
+ /delete-property/ pinctrl-0;
+ /delete-property/ pinctrl-names;
+};
+
&spi1 {
status = "okay";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 292bb7e80cf3..3ae5d727e367 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -232,6 +232,7 @@
&usbdrd_dwc3_0 {
dr_mode = "otg";
+ extcon = <&extcon_usb3>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index fb67db4619ea..08fa00364b42 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -25,6 +25,13 @@
};
};
+ extcon_usb3: extcon-usb3 {
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb3_id>;
+ };
+
clkin_gmac: external-gmac-clock {
compatible = "fixed-clock";
clock-frequency = <125000000>;
@@ -422,9 +429,22 @@
<4 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;
};
};
+
+ usb3 {
+ usb3_id: usb3-id {
+ rockchip,pins =
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
};
&sdhci {
+ /*
+ * Signal integrity isn't great at 200MHz but 100MHz has proven stable
+ * enough.
+ */
+ max-frequency = <100000000>;
+
bus-width = <8>;
mmc-hs400-1_8v;
mmc-hs400-enhanced-strobe;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index d3cdf6f42a30..080457a68e3c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1881,10 +1881,10 @@
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru PCLK_HDMI_CTRL>,
<&cru SCLK_HDMI_SFR>,
- <&cru PLL_VPLL>,
+ <&cru SCLK_HDMI_CEC>,
<&cru PCLK_VIO_GRF>,
- <&cru SCLK_HDMI_CEC>;
- clock-names = "iahb", "isfr", "vpll", "grf", "cec";
+ <&cru PLL_VPLL>;
+ clock-names = "iahb", "isfr", "cec", "grf", "vpll";
power-domains = <&power RK3399_PD_HDCP>;
reg-io-width = <4>;
rockchip,grf = <&grf>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
index 166399b7f13f..d9eb92d59099 100644
--- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
@@ -285,8 +285,6 @@
vcc_ddr: DCDC_REG3 {
regulator-always-on;
regulator-boot-on;
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1100000>;
regulator-initial-mode = <0x2>;
regulator-name = "vcc_ddr";
regulator-state-mem {
diff --git a/arch/arm64/boot/dts/rockchip/rk3568.dtsi b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
index 2fd313a295f8..d91df1cde736 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3568.dtsi
@@ -32,13 +32,11 @@
clocks = <&cru SCLK_GMAC0>, <&cru SCLK_GMAC0_RX_TX>,
<&cru SCLK_GMAC0_RX_TX>, <&cru CLK_MAC0_REFOUT>,
<&cru ACLK_GMAC0>, <&cru PCLK_GMAC0>,
- <&cru SCLK_GMAC0_RX_TX>, <&cru CLK_GMAC0_PTP_REF>,
- <&cru PCLK_XPCS>;
+ <&cru SCLK_GMAC0_RX_TX>, <&cru CLK_GMAC0_PTP_REF>;
clock-names = "stmmaceth", "mac_clk_rx",
"mac_clk_tx", "clk_mac_refout",
"aclk_mac", "pclk_mac",
- "clk_mac_speed", "ptp_ref",
- "pclk_xpcs";
+ "clk_mac_speed", "ptp_ref";
resets = <&cru SRST_A_GMAC0>;
reset-names = "stmmaceth";
rockchip,grf = <&grf>;
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index a68033a23975..8ccce54ee8e7 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -651,7 +651,7 @@
status = "disabled";
};
- dmac0: dmac@fe530000 {
+ dmac0: dma-controller@fe530000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xfe530000 0x0 0x4000>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
@@ -662,7 +662,7 @@
#dma-cells = <1>;
};
- dmac1: dmac@fe550000 {
+ dmac1: dma-controller@fe550000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x0 0xfe550000 0x0 0x4000>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
index a5a24f9f46c5..b210cc07c539 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
@@ -15,8 +15,18 @@
model = "Texas Instruments J721S2 EVM";
chosen {
- stdout-path = "serial10:115200n8";
- bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000";
+ stdout-path = "serial2:115200n8";
+ bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,2880000";
+ };
+
+ aliases {
+ serial1 = &mcu_uart0;
+ serial2 = &main_uart8;
+ mmc0 = &main_sdhci0;
+ mmc1 = &main_sdhci1;
+ can0 = &main_mcan16;
+ can1 = &mcu_mcan0;
+ can2 = &mcu_mcan1;
};
evm_12v0: fixedregulator-evm12v0 {
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
index 80d3cae03e88..fe5234c40f6c 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
@@ -21,28 +21,6 @@
#address-cells = <2>;
#size-cells = <2>;
- aliases {
- serial0 = &wkup_uart0;
- serial1 = &mcu_uart0;
- serial2 = &main_uart0;
- serial3 = &main_uart1;
- serial4 = &main_uart2;
- serial5 = &main_uart3;
- serial6 = &main_uart4;
- serial7 = &main_uart5;
- serial8 = &main_uart6;
- serial9 = &main_uart7;
- serial10 = &main_uart8;
- serial11 = &main_uart9;
- mmc0 = &main_sdhci0;
- mmc1 = &main_sdhci1;
- can0 = &main_mcan16;
- can1 = &mcu_mcan0;
- can2 = &mcu_mcan1;
- can3 = &main_mcan3;
- can4 = &main_mcan5;
- };
-
chosen { };
cpus {
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 3198acb2aad8..7f3c87f7a0ce 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -106,7 +106,7 @@
msr_s SYS_ICC_SRE_EL2, x0
isb // Make sure SRE is now set
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
- tbz x0, #0, 1f // and check that it sticks
+ tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 066098198c24..b217941713a8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -600,6 +600,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2077057
+ {
+ .desc = "ARM erratum 2077057",
+ .capability = ARM64_WORKAROUND_2077057,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
{
.desc = "ARM erratum 2064142",
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a4a0063df456..ecc5958e27fe 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -797,6 +797,24 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
xfer_to_guest_mode_work_pending();
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -881,9 +899,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* Enter the guest
*/
trace_kvm_entry(*vcpu_pc(vcpu));
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+ ret = kvm_arm_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -918,26 +936,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_ctxsync_fp(vcpu);
/*
- * We may have taken a host interrupt in HYP mode (ie
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
*
- * We're now back in SVC mode, with interrupts
- * disabled. Enabling the interrupts now will have
- * the effect of taking the interrupt again, in SVC
- * mode this time.
+ * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
+ * context synchronization event) is necessary to ensure that
+ * pending interrupts are taken.
*/
local_irq_enable();
+ isb();
+ local_irq_disable();
+
+ guest_timing_exit_irqoff();
+
+ local_irq_enable();
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest we
- * account that tick as being spent in the guest. We enable
- * preemption after calling guest_exit() so that if we get
- * preempted we make sure ticks after that is not counted as
- * guest time.
- */
- guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/* Exit types that need handling before we can be preempted */
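The hunk above establishes an ordering that the later MIPS and RISC-V hunks repeat: guest-time accounting brackets the whole run, the RCU extended quiescent state only brackets the world switch itself, and interrupts are transiently unmasked before timing exit so pending ticks are charged to the guest. A condensed, illustrative outline of that ordering (not part of the patch; arch_world_switch() is a placeholder for the architecture-specific switch, and in the real patches the EQS section lives in a separate noinstr helper):

static int run_vcpu_once(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_timing_enter_irqoff();	/* IRQs already masked; start guest-time accounting */

	guest_state_enter_irqoff();	/* enter the RCU EQS; this span must stay noinstr */
	ret = arch_world_switch(vcpu);	/* placeholder for __kvm_vcpu_run() and friends */
	guest_state_exit_irqoff();	/* leave the RCU EQS */

	local_irq_enable();		/* take pending IRQs while still in guest time */
	isb();				/* arm64: context synchronization so they are recognised */
	local_irq_disable();

	guest_timing_exit_irqoff();	/* stop guest-time accounting */
	local_irq_enable();
	return ret;
}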
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fd2dd26caf91..e3140abd2e2e 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -228,6 +228,14 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
struct kvm_run *run = vcpu->run;
+ if (ARM_SERROR_PENDING(exception_index)) {
+ /*
+ * The SError is handled by handle_exit_early(). If the guest
+ * survives it will re-execute the original instruction.
+ */
+ return 1;
+ }
+
exception_index = ARM_EXCEPTION_CODE(exception_index);
switch (exception_index) {
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 58e14f8ead23..701cfb964905 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -402,6 +402,24 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
+static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ /*
+ * Check for the conditions of Cortex-A510's #2077057. When these occur
+ * SPSR_EL2 can't be trusted, but isn't needed either as it is
+ * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
+ * Are we single-stepping the guest, and took a PAC exception from the
+ * active-not-pending state?
+ */
+ if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
+ vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
+ *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
+ ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+ vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -413,7 +431,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
- vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+ synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
@@ -424,7 +442,8 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
- if (ARM_SERROR_PENDING(*exit_code)) {
+ if (ARM_SERROR_PENDING(*exit_code) &&
+ ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
/*
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 3eae32876897..2ce60fecd861 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -46,8 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
* specification (ARM DEN 0022A). This means all suspend states
* for KVM will preserve the register state.
*/
- kvm_vcpu_halt(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ kvm_vcpu_wfi(vcpu);
return PSCI_RET_SUCCESS;
}
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
index 7068da080799..49837d3a3ef5 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
IRQCHIP_STATE_PENDING,
&val);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ } else if (vgic_irq_is_mapped_level(irq)) {
+ val = vgic_get_phys_line_level(irq);
} else {
val = irq_is_pending(irq);
}
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index e7719e8f18de..9c65b1e25a96 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -55,9 +55,10 @@ WORKAROUND_1418040
WORKAROUND_1463225
WORKAROUND_1508412
WORKAROUND_1542419
-WORKAROUND_2064142
-WORKAROUND_2038923
WORKAROUND_1902691
+WORKAROUND_2038923
+WORKAROUND_2064142
+WORKAROUND_2077057
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 3e336b3dbb10..ab6e3dc0bc1d 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -83,6 +83,8 @@
label = "HDMI OUT";
type = "a";
+ ddc-en-gpios = <&gpa 25 GPIO_ACTIVE_HIGH>;
+
port {
hdmi_con: endpoint {
remote-endpoint = <&dw_hdmi_out>;
@@ -114,17 +116,6 @@
gpio = <&gpf 14 GPIO_ACTIVE_LOW>;
enable-active-high;
};
-
- hdmi_power: fixedregulator@3 {
- compatible = "regulator-fixed";
-
- regulator-name = "hdmi_power";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
-
- gpio = <&gpa 25 0>;
- enable-active-high;
- };
};
&ext {
@@ -576,8 +567,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pins_hdmi_ddc>;
- hdmi-5v-supply = <&hdmi_power>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 0a515cde1c18..25860fba6218 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -74,7 +74,7 @@
#define EXC(inst_reg,addr,handler) \
9: inst_reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
/*
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index f979adfd4fc2..ef73ba1e0ec1 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -803,7 +803,7 @@ early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
- dma_default_coherent = true;
+ dma_default_coherent = false;
pr_info("Software DMA cache coherency (command line)\n");
return 0;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d542fb7af3ba..1986d1309410 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -351,6 +351,9 @@ asmlinkage void start_secondary(void)
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
@@ -362,9 +365,6 @@ asmlinkage void start_secondary(void)
/* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
- set_cpu_sibling_map(cpu);
- set_cpu_core_map(cpu);
-
calculate_cpu_foreign_map();
/*
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index e59cb6246f76..a25e0b73ee70 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -414,6 +414,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_mips_callbacks->vcpu_run(vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
@@ -434,7 +452,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
lose_fpu(1);
local_irq_disable();
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
trace_kvm_enter(vcpu);
/*
@@ -445,10 +463,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(vcpu);
+ r = kvm_mips_vcpu_enter_exit(vcpu);
+
+ /*
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
+ *
+ * TODO: is there a barrier which ensures that pending interrupts are
+ * recognised? Currently this just hopes that the CPU takes any pending
+ * interrupts between the enable and disable.
+ */
+ local_irq_enable();
+ local_irq_disable();
trace_kvm_out(vcpu);
- guest_exit_irqoff();
+ guest_timing_exit_irqoff();
local_irq_enable();
out:
@@ -1168,7 +1199,7 @@ static void kvm_mips_set_c0_status(void)
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
@@ -1357,6 +1388,17 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
return ret;
}
+int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_exit_irqoff();
+ ret = __kvm_mips_handle_exit(vcpu);
+ guest_state_enter_irqoff();
+
+ return ret;
+}
+
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 4adca5abbc72..c706f5890a05 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -458,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
/**
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
- * @compare: Pointer to write compare value to.
- * @cause: Pointer to write cause value to.
+ * @out_compare: Pointer to write compare value to.
+ * @out_cause: Pointer to write cause value to.
*
* Save VZ guest timer state and switch to software emulation of guest CP0
* timer. The hard timer must already be in use, so preemption should be
@@ -1541,11 +1541,14 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
}
/**
- * kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor.
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
@@ -1592,6 +1595,9 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index d6efffd4dd20..fb0565bc34fd 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -22,7 +22,9 @@
#include "common.h"
-static void *detect_magic __initdata = detect_memory_region;
+#define MT7621_MEM_TEST_PATTERN 0xaa5555aa
+
+static u32 detect_magic __initdata;
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
@@ -58,24 +60,32 @@ phys_addr_t mips_cpc_default_phys_base(void)
panic("Cannot detect cpc address");
}
+static bool __init mt7621_addr_wraparound_test(phys_addr_t size)
+{
+ void *dm = (void *)KSEG1ADDR(&detect_magic);
+
+ if (CPHYSADDR(dm + size) >= MT7621_LOWMEM_MAX_SIZE)
+ return true;
+ __raw_writel(MT7621_MEM_TEST_PATTERN, dm);
+ if (__raw_readl(dm) != __raw_readl(dm + size))
+ return false;
+ __raw_writel(~MT7621_MEM_TEST_PATTERN, dm);
+ return __raw_readl(dm) == __raw_readl(dm + size);
+}
+
static void __init mt7621_memory_detect(void)
{
- void *dm = &detect_magic;
phys_addr_t size;
- for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) {
- if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic)))
- break;
+ for (size = 32 * SZ_1M; size <= 256 * SZ_1M; size <<= 1) {
+ if (mt7621_addr_wraparound_test(size)) {
+ memblock_add(MT7621_LOWMEM_BASE, size);
+ return;
+ }
}
- if ((size == 256 * SZ_1M) &&
- (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) &&
- __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) {
- memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE);
- memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE);
- } else {
- memblock_add(MT7621_LOWMEM_BASE, size);
- }
+ memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE);
+ memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE);
}
void __init ralink_of_remap(void)
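The detection above no longer compares uninitialised memory with __builtin_memcmp(); it explicitly probes for address aliasing. A stripped-down sketch of the same idea (illustrative only, not part of the patch): if physical RAM is exactly `size` bytes, an access `size` bytes past the probe address wraps back onto it, so a marker written at the probe address shows up at both locations, and writing the complement afterwards rules out a coincidental match.

/* Illustrative sketch of the wraparound probe used above. */
static bool __init wraps_at(volatile u32 *probe, size_t size_in_bytes)
{
	volatile u32 *alias = probe + size_in_bytes / sizeof(u32);

	*probe = MT7621_MEM_TEST_PATTERN;
	if (*alias != MT7621_MEM_TEST_PATTERN)
		return false;				/* distinct memory behind the alias */
	*probe = ~MT7621_MEM_TEST_PATTERN;
	return *alias == ~MT7621_MEM_TEST_PATTERN;	/* both writes visible: addresses alias */
}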
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index 0ec9cfc5131f..56ffd260c669 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -12,6 +12,14 @@
#include <asm/barrier.h>
#include <linux/atomic.h>
+/* compiler build environment sanity checks: */
+#if !defined(CONFIG_64BIT) && defined(__LP64__)
+#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+#endif
+#if defined(CONFIG_64BIT) && !defined(__LP64__)
+#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+#endif
+
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index ebf8a845b017..123d5f16cd9d 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -89,8 +89,8 @@ struct exception_table_entry {
__asm__("1: " ldx " 0(" sr "%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=r"(__gu_val), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
@@ -123,8 +123,8 @@ struct exception_table_entry {
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=&r"(__gu_tmp.l), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = __gu_tmp.t; \
}
@@ -135,13 +135,12 @@ struct exception_table_entry {
#define __put_user_internal(sr, x, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__pu_err); \
- __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
- case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
- case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
- case 8: STD_USER(sr, __x, ptr); break; \
+ case 1: __put_user_asm(sr, "stb", x, ptr); break; \
+ case 2: __put_user_asm(sr, "sth", x, ptr); break; \
+ case 4: __put_user_asm(sr, "stw", x, ptr); break; \
+ case 8: STD_USER(sr, x, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -150,7 +149,9 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
({ \
- __put_user_internal("%%sr3,", x, ptr); \
+ __typeof__(&*(ptr)) __ptr = ptr; \
+ __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \
+ __put_user_internal("%%sr3,", __x, __ptr); \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -180,8 +181,8 @@ struct exception_table_entry {
"1: " stx " %2,0(" sr "%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err))
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(x))
#if !defined(CONFIG_64BIT)
@@ -193,8 +194,8 @@ struct exception_table_entry {
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err)); \
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(__val)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 237d20dd5622..286cec4d86d7 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -340,7 +340,7 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop)
: "r" (val), "r" (regs->ior), "r" (regs->isr)
: "r19", "r20", "r21", "r22", "r1", FIXUP_BRANCH_CLOBBER );
- return 0;
+ return ret;
}
static int emulate_std(struct pt_regs *regs, int frreg, int flop)
{
@@ -397,7 +397,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
__asm__ __volatile__ (
" mtsp %4, %%sr1\n"
" zdep %2, 29, 2, %%r19\n"
-" dep %%r0, 31, 2, %2\n"
+" dep %%r0, 31, 2, %3\n"
" mtsar %%r19\n"
" zvdepi -2, 32, %%r19\n"
"1: ldw 0(%%sr1,%3),%%r20\n"
@@ -409,7 +409,7 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop)
" andcm %%r21, %%r19, %%r21\n"
" or %1, %%r20, %1\n"
" or %2, %%r21, %2\n"
-"3: stw %1,0(%%sr1,%1)\n"
+"3: stw %1,0(%%sr1,%3)\n"
"4: stw %%r1,4(%%sr1,%3)\n"
"5: stw %2,8(%%sr1,%3)\n"
" copy %%r0, %0\n"
@@ -596,7 +596,6 @@ void handle_unaligned(struct pt_regs *regs)
ret = ERR_NOTHANDLED; /* "undefined", but lets kill them. */
break;
}
-#ifdef CONFIG_PA20
switch (regs->iir & OPCODE2_MASK)
{
case OPCODE_FLDD_L:
@@ -607,22 +606,23 @@ void handle_unaligned(struct pt_regs *regs)
flop=1;
ret = emulate_std(regs, R2(regs->iir),1);
break;
+#ifdef CONFIG_PA20
case OPCODE_LDD_L:
ret = emulate_ldd(regs, R2(regs->iir),0);
break;
case OPCODE_STD_L:
ret = emulate_std(regs, R2(regs->iir),0);
break;
- }
#endif
+ }
switch (regs->iir & OPCODE3_MASK)
{
case OPCODE_FLDW_L:
flop=1;
- ret = emulate_ldw(regs, R2(regs->iir),0);
+ ret = emulate_ldw(regs, R2(regs->iir), 1);
break;
case OPCODE_LDW_M:
- ret = emulate_ldw(regs, R2(regs->iir),1);
+ ret = emulate_ldw(regs, R2(regs->iir), 0);
break;
case OPCODE_FSTW_L:
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 367f6397bda7..860385058085 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
return *((u64 *)addr);
}
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+
u64 ioread64_hi_lo(const void __iomem *addr)
{
u32 low, high;
@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
}
}
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
iowrite32(val >> 32, addr + sizeof(u32));
@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
EXPORT_SYMBOL(ioread64);
EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
EXPORT_SYMBOL(ioread64_hi_lo);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
EXPORT_SYMBOL(iowrite64);
EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
EXPORT_SYMBOL(iowrite64_hi_lo);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
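The missing _lo_hi accessors are added alongside the existing _hi_lo ones; the two variants differ only in which 32-bit half of the 64-bit register is accessed first, which matters for hardware that latches on one specific half. A small usage sketch (illustrative, not part of the patch; the register offset is made up):

/* Read a 64-bit counter whose hardware latches on the low word. */
static u64 read_cycle_counter(void __iomem *regs)
{
	return ioread64_lo_hi(regs + 0x40);	/* low 32 bits first, then high */
}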
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1ae31db9988f..1dc2e88e7b04 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -337,9 +337,9 @@ static void __init setup_bootmem(void)
static bool kernel_set_to_readonly;
-static void __init map_pages(unsigned long start_vaddr,
- unsigned long start_paddr, unsigned long size,
- pgprot_t pgprot, int force)
+static void __ref map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
{
pmd_t *pmd;
pte_t *pg_table;
@@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enable_read_write)
flush_tlb_all();
}
-void __ref free_initmem(void)
+void free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
@@ -463,7 +463,6 @@ void __ref free_initmem(void)
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
*
- * This is tricky, because map_pages is in the init section.
* Do a dummy remap of the data section first (the data
* section is already PAGE_KERNEL) to pull in the TLB entries
* for map_kernel */
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 8a107ed18b0d..7d81102cffd4 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -50,6 +50,12 @@ riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
+
+# Newer binutils versions default to ISA spec version 20191213 which moves some
+# instructions from the I extension to the Zicsr and Zifencei extensions.
+toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
+riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
index 56f57118c633..44d338514761 100644
--- a/arch/riscv/boot/dts/canaan/k210.dtsi
+++ b/arch/riscv/boot/dts/canaan/k210.dtsi
@@ -113,7 +113,8 @@
compatible = "canaan,k210-plic", "sifive,plic-1.0.0";
reg = <0xC000000 0x4000000>;
interrupt-controller;
- interrupts-extended = <&cpu0_intc 11>, <&cpu1_intc 11>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
+ <&cpu1_intc 11>, <&cpu1_intc 9>;
riscv,ndev = <65>;
};
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 2a82a3b2992b..af64b95e88cc 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -23,7 +23,7 @@ CONFIG_SLOB=y
CONFIG_SOC_CANAAN=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
+CONFIG_CMDLINE="earlycon console=ttySIF0 root=/dev/mmcblk0p1 rootwait ro"
CONFIG_CMDLINE_FORCE=y
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 160e3a1e8f8b..004372f8da54 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -119,7 +119,7 @@ extern phys_addr_t phys_ram_base;
((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
#define is_linear_mapping(x) \
- ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < kernel_map.virt_addr))
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
#define kernel_mapping_pa_to_va(y) ({ \
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7e949f25c933..e3549e50de95 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -13,6 +13,7 @@
#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET
+#define KERN_VIRT_SIZE (UL(-1))
#else
#define ADDRESS_SPACE_END (UL(-1))
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 612556faa527..ffc87e76b1dd 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -51,6 +51,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
+obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
+
obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c
index be7f05b542bb..f7a832e3a1d1 100644
--- a/arch/riscv/kernel/cpu-hotplug.c
+++ b/arch/riscv/kernel/cpu-hotplug.c
@@ -12,6 +12,7 @@
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/cpu_ops.h>
+#include <asm/numa.h>
#include <asm/sbi.h>
bool cpu_has_hotplug(unsigned int cpu)
@@ -40,6 +41,7 @@ int __cpu_disable(void)
return ret;
remove_cpu_topology(cpu);
+ numa_remove_cpu(cpu);
set_cpu_online(cpu, false);
irq_migrate_all_off_this_cpu();
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index ed29e9c8f660..d6a46ed0bf05 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -108,7 +108,7 @@ _save_context:
.option pop
#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_off
+ call __trace_hardirqs_off
#endif
#ifdef CONFIG_CONTEXT_TRACKING
@@ -143,7 +143,7 @@ skip_context_tracking:
li t0, EXC_BREAKPOINT
beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_on
+ call __trace_hardirqs_on
#endif
csrs CSR_STATUS, SR_IE
@@ -234,7 +234,7 @@ ret_from_exception:
REG_L s0, PT_STATUS(sp)
csrc CSR_STATUS, SR_IE
#ifdef CONFIG_TRACE_IRQFLAGS
- call trace_hardirqs_off
+ call __trace_hardirqs_off
#endif
#ifdef CONFIG_RISCV_M_MODE
/* the MPP value is too large to be used as an immediate arg for addi */
@@ -270,10 +270,10 @@ restore_all:
REG_L s1, PT_STATUS(sp)
andi t0, s1, SR_PIE
beqz t0, 1f
- call trace_hardirqs_on
+ call __trace_hardirqs_on
j 2f
1:
- call trace_hardirqs_off
+ call __trace_hardirqs_off
2:
#endif
REG_L a0, PT_STATUS(sp)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 2363b43312fc..ec07f991866a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -22,14 +22,13 @@
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
- li t0, XIP_OFFSET_MASK
- and t1, t1, t0
- li t1, XIP_OFFSET
- sub t0, t0, t1
- sub \reg, \reg, t0
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index f72527fcb347..775d3322b422 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -5,6 +5,7 @@
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/reboot.h>
@@ -85,7 +86,7 @@ static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mas
pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
break;
}
- hmask |= 1 << hartid;
+ hmask |= BIT(hartid);
}
return hmask;
@@ -160,7 +161,7 @@ static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
unsigned long hart_mask;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
@@ -176,7 +177,7 @@ static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
int result = 0;
unsigned long hart_mask;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
@@ -249,26 +250,37 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
{
- unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+ unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
struct sbiret ret = {0};
int result;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
for_each_cpu(cpuid, cpu_mask) {
hartid = cpuid_to_hartid_map(cpuid);
- if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
- ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
- hmask, hbase, 0, 0, 0, 0);
- if (ret.error)
- goto ecall_failed;
- hmask = 0;
- hbase = 0;
+ if (hmask) {
+ if (hartid + BITS_PER_LONG <= htop ||
+ hbase + BITS_PER_LONG <= hartid) {
+ ret = sbi_ecall(SBI_EXT_IPI,
+ SBI_EXT_IPI_SEND_IPI, hmask,
+ hbase, 0, 0, 0, 0);
+ if (ret.error)
+ goto ecall_failed;
+ hmask = 0;
+ } else if (hartid < hbase) {
+ /* shift the mask to fit lower hartid */
+ hmask <<= hbase - hartid;
+ hbase = hartid;
+ }
}
- if (!hmask)
+ if (!hmask) {
hbase = hartid;
- hmask |= 1UL << (hartid - hbase);
+ htop = hartid;
+ } else if (hartid > htop) {
+ htop = hartid;
+ }
+ hmask |= BIT(hartid - hbase);
}
if (hmask) {
@@ -344,25 +356,35 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5)
{
- unsigned long hartid, cpuid, hmask = 0, hbase = 0;
+ unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
int result;
- if (!cpu_mask)
+ if (!cpu_mask || cpumask_empty(cpu_mask))
cpu_mask = cpu_online_mask;
for_each_cpu(cpuid, cpu_mask) {
hartid = cpuid_to_hartid_map(cpuid);
- if (hmask && ((hbase + BITS_PER_LONG) <= hartid)) {
- result = __sbi_rfence_v02_call(fid, hmask, hbase,
- start, size, arg4, arg5);
- if (result)
- return result;
- hmask = 0;
- hbase = 0;
+ if (hmask) {
+ if (hartid + BITS_PER_LONG <= htop ||
+ hbase + BITS_PER_LONG <= hartid) {
+ result = __sbi_rfence_v02_call(fid, hmask,
+ hbase, start, size, arg4, arg5);
+ if (result)
+ return result;
+ hmask = 0;
+ } else if (hartid < hbase) {
+ /* shift the mask to fit lower hartid */
+ hmask <<= hbase - hartid;
+ hbase = hartid;
+ }
}
- if (!hmask)
+ if (!hmask) {
hbase = hartid;
- hmask |= 1UL << (hartid - hbase);
+ htop = hartid;
+ } else if (hartid > htop) {
+ htop = hartid;
+ }
+ hmask |= BIT(hartid - hbase);
}
if (hmask) {
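Both the IPI and the remote-fence paths now share the same batching scheme: hart IDs are accumulated into a BITS_PER_LONG-wide window anchored at the lowest ID seen so far (hbase) and tracked up to the highest (htop); an ID that cannot fit forces the accumulated mask to be flushed with an SBI call before a new window starts, and an ID below the current base slides the window down by shifting the mask. A distilled sketch of that loop (illustrative only, not part of the patch; flush_window() stands in for the sbi_ecall()/__sbi_rfence_v02_call() invocations):

static void batch_hart_ids(const unsigned long *ids, unsigned int n)
{
	unsigned long hmask = 0, hbase = 0, htop = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned long id = ids[i];

		if (hmask) {
			if (id + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= id) {
				flush_window(hmask, hbase);	/* hypothetical helper */
				hmask = 0;
			} else if (id < hbase) {
				hmask <<= hbase - id;		/* slide the window down */
				hbase = id;
			}
		}
		if (!hmask) {
			hbase = id;
			htop = id;
		} else if (id > htop) {
			htop = id;
		}
		hmask |= BIT(id - hbase);
	}
	if (hmask)
		flush_window(hmask, hbase);	/* flush the final window */
}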
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 201ee206fb57..14d2b53ec322 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -22,15 +22,16 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int level = 0;
if (regs) {
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(1);
- sp = (unsigned long)__builtin_frame_address(0);
- pc = (unsigned long)__builtin_return_address(0);
+ fp = (unsigned long)__builtin_frame_address(0);
+ sp = sp_in_global;
+ pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -42,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
break;
/* Validate frame pointer */
diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
new file mode 100644
index 000000000000..095ac976d7da
--- /dev/null
+++ b/arch/riscv/kernel/trace_irq.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+
+#include <linux/irqflags.h>
+#include <linux/kprobes.h>
+#include "trace_irq.h"
+
+/*
+ * trace_hardirqs_on/off require the caller to set up the frame pointer
+ * properly. Otherwise, CALLER_ADDR1 might trigger a paging exception in
+ * the kernel. Here we add one extra call level so they can be safely
+ * called by low-level entry code in which $fp is used for other purposes.
+ */
+
+void __trace_hardirqs_on(void)
+{
+ trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_on);
+
+void __trace_hardirqs_off(void)
+{
+ trace_hardirqs_off();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_off);
diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
new file mode 100644
index 000000000000..99fe67377e5e
--- /dev/null
+++ b/arch/riscv/kernel/trace_irq.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+#ifndef __TRACE_IRQ_H
+#define __TRACE_IRQ_H
+
+void __trace_hardirqs_on(void);
+void __trace_hardirqs_off(void);
+
+#endif /* __TRACE_IRQ_H */
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 0c5239e05721..624166004e36 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -90,6 +90,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cntx;
+ struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
@@ -106,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
cntx->hstatus |= HSTATUS_SPVP;
cntx->hstatus |= HSTATUS_SPV;
+ /* By default, make CY, TM, and IR counters accessible in VU mode */
+ reset_csr->scounteren = 0x7;
+
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
@@ -699,6 +703,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
csr_write(CSR_HVIP, csr->hvip);
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ guest_state_enter_irqoff();
+ __kvm_riscv_switch_to(&vcpu->arch);
+ guest_state_exit_irqoff();
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int ret;
@@ -790,9 +808,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
continue;
}
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
- __kvm_riscv_switch_to(&vcpu->arch);
+ kvm_riscv_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -812,25 +830,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_sync_interrupts(vcpu);
/*
- * We may have taken a host interrupt in VS/VU-mode (i.e.
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
*
- * We're now back in HS-mode with interrupts disabled
- * so enabling the interrupts now will have the effect
- * of taking the interrupt again, in HS-mode this time.
+ * There's no barrier which ensures that pending interrupts are
+ * recognised, so we just hope that the CPU takes any pending
+ * interrupts between the enable and disable.
*/
local_irq_enable();
+ local_irq_disable();
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest
- * we account that tick as being spent in the guest. We
- * enable preemption after calling guest_exit() so that if
- * we get preempted we make sure ticks after that is not
- * counted as guest time.
- */
- guest_exit();
+ guest_timing_exit_irqoff();
+
+ local_irq_enable();
preempt_enable();
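The reordered run loop above is easier to follow as a plain call sequence. The user-space mock below only demonstrates the ordering the patch establishes (timing accounting opened before the noinstr guest-state transitions, interrupts transiently unmasked so pending ticks land in guest time); every function is a printf stub, not the real KVM or context-tracking helper.

#include <stdio.h>

static void step(const char *what)
{
	printf("%s\n", what);
}

/* printf stubs standing in for the real helpers */
static void guest_timing_enter_irqoff(void) { step("guest_timing_enter_irqoff"); }
static void guest_state_enter_irqoff(void)  { step("guest_state_enter_irqoff (RCU EQS begins)"); }
static void switch_to_guest(void)           { step("__kvm_riscv_switch_to"); }
static void guest_state_exit_irqoff(void)   { step("guest_state_exit_irqoff (RCU EQS ends)"); }
static void guest_timing_exit_irqoff(void)  { step("guest_timing_exit_irqoff"); }
static void local_irq_enable(void)          { step("local_irq_enable"); }
static void local_irq_disable(void)         { step("local_irq_disable"); }

int main(void)
{
	guest_timing_enter_irqoff();
	/* noinstr region: no instrumentation while RCU is in the EQS */
	guest_state_enter_irqoff();
	switch_to_guest();
	guest_state_exit_irqoff();
	/* transiently unmask so pending host IRQs are accounted as guest time */
	local_irq_enable();
	local_irq_disable();
	guest_timing_exit_irqoff();
	local_irq_enable();
	return 0;
}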
diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c
index 4ecf377f483b..48f431091cdb 100644
--- a/arch/riscv/kvm/vcpu_sbi_base.c
+++ b/arch/riscv/kvm/vcpu_sbi_base.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/version.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
@@ -32,7 +33,7 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
*out_val = KVM_SBI_IMPID;
break;
case SBI_EXT_BASE_GET_IMP_VERSION:
- *out_val = 0;
+ *out_val = LINUX_VERSION_CODE;
break;
case SBI_EXT_BASE_PROBE_EXT:
if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&
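Since the hunk above returns LINUX_VERSION_CODE for SBI_EXT_BASE_GET_IMP_VERSION, a guest can unpack it with the usual KERNEL_VERSION() packing. A user-space sketch; the 5.17.0 value is made up for illustration and the macro is redefined locally rather than taken from kernel headers.

#include <stdio.h>

/* mirrors the kernel's KERNEL_VERSION() packing (sublevel clamped to 255) */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))

int main(void)
{
	/* pretend this came back from SBI_EXT_BASE_GET_IMP_VERSION */
	unsigned long imp_version = KERNEL_VERSION(5, 17, 0);

	printf("SBI implementation version: %lu.%lu.%lu\n",
	       (imp_version >> 16) & 0xffff,
	       (imp_version >> 8) & 0xff,
	       imp_version & 0xff);
	return 0;
}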
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 7ebaef10ea1b..ac7a25298a04 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -24,6 +24,9 @@ obj-$(CONFIG_KASAN) += kasan_init.o
ifdef CONFIG_KASAN
KASAN_SANITIZE_kasan_init.o := n
KASAN_SANITIZE_init.o := n
+ifdef CONFIG_DEBUG_VIRTUAL
+KASAN_SANITIZE_physaddr.o := n
+endif
endif
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 05978f78579f..35484d830fd6 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -33,7 +33,7 @@ static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
if (unlikely(offset > MAX_REG_OFFSET))
return;
- if (!offset)
+ if (offset)
*(unsigned long *)((unsigned long)regs + offset) = val;
}
@@ -43,8 +43,8 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
- regs_set_gpr(regs, reg_err, -EFAULT);
- regs_set_gpr(regs, reg_zero, 0);
+ regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+ regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
regs->epc = get_ex_fixup(ex);
return true;
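The two fixes above go together: the write must be skipped only when the offset is zero (register zero), and the register numbers pulled out of the extable data must be scaled to byte offsets before being handed to regs_set_gpr(). A stand-alone sketch of the same arithmetic, using a fake register file rather than the kernel's pt_regs layout:

#include <stdio.h>
#include <stddef.h>

/* slot[0] stands in for register zero: a fixup must never write it */
struct fake_pt_regs {
	unsigned long slot[32];
};

#define MAX_REG_OFFSET offsetof(struct fake_pt_regs, slot[31])

static void regs_set_gpr(struct fake_pt_regs *regs, unsigned int offset,
			 unsigned long val)
{
	if (offset > MAX_REG_OFFSET)
		return;
	if (offset)	/* offset 0 == register zero: skip the write */
		*(unsigned long *)((char *)regs + offset) = val;
}

int main(void)
{
	struct fake_pt_regs regs = { { 0 } };
	unsigned int reg_err = 10;	/* register number from the extable entry */

	/* scale the register number into a byte offset, as the fix does */
	regs_set_gpr(&regs, reg_err * sizeof(unsigned long), (unsigned long)-14);
	printf("slot[10] = %ld\n", (long)regs.slot[10]);
	return 0;
}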
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index cf4d018b7d66..0d588032d6e6 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -125,7 +125,6 @@ void __init mem_init(void)
else
swiotlb_force = SWIOTLB_NO_FORCE;
#endif
- high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
memblock_free_all();
print_vm_layout();
@@ -195,6 +194,7 @@ static void __init setup_bootmem(void)
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
+ high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -232,6 +232,7 @@ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAG
#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
+#define riscv_pfn_base (*(unsigned long *)XIP_FIXUP(&riscv_pfn_base))
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
@@ -522,6 +523,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
}
#ifdef CONFIG_XIP_KERNEL
+#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
extern char _xiprom[], _exiprom[], __data_loc;
/* called from head.S with MMU off */
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index f61f7ca6fe0f..cd1a145257b7 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -113,8 +113,11 @@ static void __init kasan_populate_pud(pgd_t *pgd,
base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
} else {
base_pud = (pud_t *)pgd_page_vaddr(*pgd);
- if (base_pud == lm_alias(kasan_early_shadow_pud))
+ if (base_pud == lm_alias(kasan_early_shadow_pud)) {
base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+ memcpy(base_pud, (void *)kasan_early_shadow_pud,
+ sizeof(pud_t) * PTRS_PER_PUD);
+ }
}
pudp = base_pud + pud_index(vaddr);
@@ -202,8 +205,7 @@ asmlinkage void __init kasan_early_init(void)
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_early_shadow_pte + i,
- mk_pte(virt_to_page(kasan_early_shadow_page),
- PAGE_KERNEL));
+ pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
for (i = 0; i < PTRS_PER_PMD; ++i)
set_pmd(kasan_early_shadow_pmd + i,
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
index e7fd0c253c7b..19cf25a74ee2 100644
--- a/arch/riscv/mm/physaddr.c
+++ b/arch/riscv/mm/physaddr.c
@@ -8,12 +8,10 @@
phys_addr_t __virt_to_phys(unsigned long x)
{
- phys_addr_t y = x - PAGE_OFFSET;
-
/*
* Boundary checking aginst the kernel linear mapping space.
*/
- WARN(y >= KERN_VIRT_SIZE,
+ WARN(!is_linear_mapping(x) && !is_kernel_mapping(x),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x, (void *)x);
diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h
index 16dc57dd90b3..8511f0e59290 100644
--- a/arch/s390/include/asm/extable.h
+++ b/arch/s390/include/asm/extable.h
@@ -69,8 +69,13 @@ static inline void swap_ex_entry_fixup(struct exception_table_entry *a,
{
a->fixup = b->fixup + delta;
b->fixup = tmp.fixup - delta;
- a->handler = b->handler + delta;
- b->handler = tmp.handler - delta;
+ a->handler = b->handler;
+ if (a->handler)
+ a->handler += delta;
+ b->handler = tmp.handler;
+ if (b->handler)
+ b->handler -= delta;
}
+#define swap_ex_entry_fixup swap_ex_entry_fixup
#endif
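The added checks above preserve the convention that a zero handler field means "no handler" rather than a relative offset of zero; unconditionally rebasing it by delta would turn "none" into a bogus displacement. A tiny model of the idea with made-up types:

#include <stdio.h>

struct fake_ex_entry {
	int fixup;	/* relative offset, always meaningful */
	int handler;	/* relative offset, 0 means "no handler" */
};

static void rebase(struct fake_ex_entry *e, int delta)
{
	e->fixup += delta;
	if (e->handler)		/* keep the "no handler" encoding intact */
		e->handler += delta;
}

int main(void)
{
	struct fake_ex_entry e = { .fixup = 100, .handler = 0 };

	rebase(&e, 16);
	printf("fixup=%d handler=%d\n", e.fixup, e.handler);
	return 0;
}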
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 267f70f4393f..6f80ec9c04be 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -47,15 +47,17 @@ struct ftrace_regs {
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
{
- return &fregs->regs;
+ struct pt_regs *regs = &fregs->regs;
+
+ if (test_pt_regs_flag(regs, PIF_FTRACE_FULL_REGS))
+ return regs;
+ return NULL;
}
static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
unsigned long ip)
{
- struct pt_regs *regs = arch_ftrace_get_regs(fregs);
-
- regs->psw.addr = ip;
+ fregs->regs.psw.addr = ip;
}
/*
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 4ffa8e7f0ed3..ddb70fb13fbc 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -15,11 +15,13 @@
#define PIF_EXECVE_PGSTE_RESTART 1 /* restart execve for PGSTE binaries */
#define PIF_SYSCALL_RET_SET 2 /* return value was set via ptrace */
#define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
+#define PIF_FTRACE_FULL_REGS 4 /* all register contents valid (ftrace) */
#define _PIF_SYSCALL BIT(PIF_SYSCALL)
#define _PIF_EXECVE_PGSTE_RESTART BIT(PIF_EXECVE_PGSTE_RESTART)
#define _PIF_SYSCALL_RET_SET BIT(PIF_SYSCALL_RET_SET)
#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
+#define _PIF_FTRACE_FULL_REGS BIT(PIF_FTRACE_FULL_REGS)
#ifndef __ASSEMBLY__
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 21d62d8b6b9a..89c0870d5679 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -159,9 +159,38 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
return 0;
}
+static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
+{
+ struct ftrace_hotpatch_trampoline *trampoline;
+ struct ftrace_insn insn;
+ s64 disp;
+ u16 opc;
+
+ if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
+ return ERR_PTR(-EFAULT);
+ disp = (s64)insn.disp * 2;
+ trampoline = (void *)(rec->ip + disp);
+ if (get_kernel_nofault(opc, &trampoline->brasl_opc))
+ return ERR_PTR(-EFAULT);
+ if (opc != 0xc015)
+ return ERR_PTR(-EINVAL);
+ return trampoline;
+}
+
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
+ struct ftrace_hotpatch_trampoline *trampoline;
+ u64 old;
+
+ trampoline = ftrace_get_trampoline(rec);
+ if (IS_ERR(trampoline))
+ return PTR_ERR(trampoline);
+ if (get_kernel_nofault(old, &trampoline->interceptor))
+ return -EFAULT;
+ if (old != old_addr)
+ return -EINVAL;
+ s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
return 0;
}
@@ -188,6 +217,12 @@ static void brcl_enable(void *brcl)
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
+ struct ftrace_hotpatch_trampoline *trampoline;
+
+ trampoline = ftrace_get_trampoline(rec);
+ if (IS_ERR(trampoline))
+ return PTR_ERR(trampoline);
+ s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
brcl_enable((void *)rec->ip);
return 0;
}
@@ -291,7 +326,7 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
regs = ftrace_get_regs(fregs);
p = get_kprobe((kprobe_opcode_t *)ip);
- if (unlikely(!p) || kprobe_disabled(p))
+ if (!regs || unlikely(!p) || kprobe_disabled(p))
goto out;
if (kprobe_running()) {
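ftrace_get_trampoline() above recovers the trampoline address from the patched call site: s390 relative-branch displacements count halfwords, hence the multiplication by 2, and the first halfword at the resulting address is then sanity-checked against 0xc015 (brasl) before it is treated as a trampoline. A user-space sketch of the displacement arithmetic only, with a fake instruction layout and made-up values:

#include <stdio.h>
#include <stdint.h>

/* rough stand-in for s390's struct ftrace_insn: a 16-bit opcode followed
 * by a signed 32-bit displacement counted in halfwords */
struct fake_insn {
	uint16_t opc;
	int32_t  disp;
};

int main(void)
{
	uint64_t rec_ip = 0x1000000;	/* made-up patched call site */
	struct fake_insn insn = { .opc = 0xc0f4, .disp = 0x200 };	/* values illustrative */
	int64_t disp_bytes = (int64_t)insn.disp * 2;	/* halfwords to bytes */
	uint64_t candidate = rec_ip + (uint64_t)disp_bytes;

	/* the kernel additionally verifies that the halfword at 'candidate'
	 * is 0xc015 (brasl) before using it as a trampoline */
	printf("trampoline candidate at %#llx\n", (unsigned long long)candidate);
	return 0;
}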
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 39bcc0e39a10..a24177dcd12a 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,6 +27,7 @@ ENDPROC(ftrace_stub)
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2 (STACK_PTREGS + __PT_ORIG_GPR2)
+#define STACK_PTREGS_FLAGS (STACK_PTREGS + __PT_FLAGS)
#ifdef __PACK_STACK
/* allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE 24
@@ -57,6 +58,14 @@ ENDPROC(ftrace_stub)
.if \allregs == 1
stg %r14,(STACK_PTREGS_PSW)(%r15)
stosm (STACK_PTREGS_PSW)(%r15),0
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
+#else
+ lghi %r14,_PIF_FTRACE_FULL_REGS
+ stg %r14,STACK_PTREGS_FLAGS(%r15)
+#endif
+ .else
+ xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
.endif
lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f2c25d113e7b..05327be3a982 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -800,6 +800,8 @@ static void __init check_initrd(void)
static void __init reserve_kernel(void)
{
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
+ memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
+ memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
memblock_reserve(__amode31_base, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 577f1ead6a51..2296b1ff1e02 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4667,6 +4667,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
return -EINVAL;
if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
return -E2BIG;
+ if (!kvm_s390_pv_cpu_is_protected(vcpu))
+ return -EINVAL;
switch (mop->op) {
case KVM_S390_MEMOP_SIDA_READ:
diff --git a/arch/s390/lib/test_modules.c b/arch/s390/lib/test_modules.c
index d056baa8fbb0..9894009fc1f2 100644
--- a/arch/s390/lib/test_modules.c
+++ b/arch/s390/lib/test_modules.c
@@ -5,9 +5,6 @@
#include "test_modules.h"
-#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
-REPEAT_10000(DECLARE_RETURN);
-
/*
* Test that modules with many relocations are loaded properly.
*/
diff --git a/arch/s390/lib/test_modules.h b/arch/s390/lib/test_modules.h
index 43b5e4b4af3e..6371fcf17684 100644
--- a/arch/s390/lib/test_modules.h
+++ b/arch/s390/lib/test_modules.h
@@ -47,4 +47,7 @@
__REPEAT_10000_1(f, 8); \
__REPEAT_10000_1(f, 9)
+#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
+REPEAT_10000(DECLARE_RETURN);
+
#endif
diff --git a/arch/x86/crypto/blake2s-shash.c b/arch/x86/crypto/blake2s-shash.c
index f9e2fecdb761..59ae28abe35c 100644
--- a/arch/x86/crypto/blake2s-shash.c
+++ b/arch/x86/crypto/blake2s-shash.c
@@ -18,12 +18,12 @@
static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+ return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress);
+ return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index c91434056c29..a3c7ca876aeb 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4703,6 +4703,19 @@ static __initconst const struct x86_pmu intel_pmu = {
.lbr_read = intel_pmu_lbr_read_64,
.lbr_save = intel_pmu_lbr_save,
.lbr_restore = intel_pmu_lbr_restore,
+
+ /*
+ * SMM has access to all 4 rings and while traditionally SMM code only
+ * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
+ *
+ * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
+ * between SMM or not, this results in what should be pure userspace
+ * counters including SMM data.
+ *
+ * This is a clear privilege issue, therefore globally disable
+ * counting SMM by default.
+ */
+ .attr_freeze_on_smi = 1,
};
static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 7f406c14715f..2d33bba9a144 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt)
* means we are already losing data; need to let the decoder
* know.
*/
- if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
- buf->output_off == pt_buffer_region_size(buf)) {
+ if (!buf->single &&
+ (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
+ buf->output_off == pt_buffer_region_size(buf))) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
advance++;
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 84b87538a15d..bab883c0b6fe 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -22,7 +22,7 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, extra) \
do { \
asm_inline volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
@@ -31,7 +31,8 @@ do { \
"\t.word %c1" "\t# bug_entry::line\n" \
"\t.word %c2" "\t# bug_entry::flags\n" \
"\t.org 2b+%c3\n" \
- ".popsection" \
+ ".popsection\n" \
+ extra \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
@@ -39,14 +40,15 @@ do { \
#else /* !CONFIG_DEBUG_BUGVERBOSE */
-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, extra) \
do { \
asm_inline volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t.word %c0" "\t# bug_entry::flags\n" \
"\t.org 2b+%c1\n" \
- ".popsection" \
+ ".popsection\n" \
+ extra \
: : "i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
@@ -55,7 +57,7 @@ do { \
#else
-#define _BUG_FLAGS(ins, flags) asm volatile(ins)
+#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins)
#endif /* CONFIG_GENERIC_BUG */
@@ -63,8 +65,8 @@ do { \
#define BUG() \
do { \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, 0); \
- unreachable(); \
+ _BUG_FLAGS(ASM_UD2, 0, ""); \
+ __builtin_unreachable(); \
} while (0)
/*
@@ -75,9 +77,9 @@ do { \
*/
#define __WARN_FLAGS(flags) \
do { \
+ __auto_type f = BUGFLAG_WARNING|(flags); \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
- annotate_reachable(); \
+ _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
instrumentation_end(); \
} while (0)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 631d5040b31e..d39e0de06be2 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -82,7 +82,7 @@ KVM_X86_OP_NULL(guest_apic_has_interrupt)
KVM_X86_OP(load_eoi_exitmap)
KVM_X86_OP(set_virtual_apic_mode)
KVM_X86_OP_NULL(set_apic_access_page_addr)
-KVM_X86_OP(deliver_posted_interrupt)
+KVM_X86_OP(deliver_interrupt)
KVM_X86_OP_NULL(sync_pir_to_irr)
KVM_X86_OP(set_tss_addr)
KVM_X86_OP(set_identity_map_addr)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6e7c545bc7ee..ec9830d2aabf 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -703,7 +703,6 @@ struct kvm_vcpu_arch {
struct fpu_guest guest_fpu;
u64 xcr0;
- u64 guest_supported_xcr0;
struct kvm_pio_request pio;
void *pio_data;
@@ -1410,7 +1409,8 @@ struct kvm_x86_ops {
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
- int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3faf0f97edb1..a4a39c3e0f19 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -476,6 +476,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index b00dbc5fac2b..bb2fb78523ce 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -220,6 +220,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
+
+/* AVIC */
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
+#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
+
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
+#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
+
+#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
+#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
+
+enum avic_ipi_failure_cause {
+ AVIC_IPI_FAILURE_INVALID_INT_TYPE,
+ AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ AVIC_IPI_FAILURE_INVALID_TARGET,
+ AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+};
+
+
+/*
+ * 0xff is broadcast, so the max index allowed for physical APIC ID
+ * table is 0xfe. APIC IDs above 0xff are reserved.
+ */
+#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff
+
+#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
+#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
+
struct vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index a9630104f1c4..78e667a31d6c 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -100,6 +100,13 @@
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
+/*
+ * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
+ * used to store high bits for the Destination ID. This expands the Destination
+ * ID field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted.
+ */
+#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
/*
* Leaf 6 (0x40000x05)
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 1bf2ad34188a..16f548a661cf 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -43,20 +43,6 @@ static inline uint32_t xen_cpuid_base(void)
return hypervisor_cpuid_base("XenVMMXenVMM", 2);
}
-#ifdef CONFIG_XEN
-extern bool __init xen_hvm_need_lapic(void);
-
-static inline bool __init xen_x2apic_para_available(void)
-{
- return xen_hvm_need_lapic();
-}
-#else
-static inline bool __init xen_x2apic_para_available(void)
-{
- return (xen_cpuid_base() != 0);
-}
-#endif
-
struct pci_dev;
#ifdef CONFIG_XEN_PV_DOM0
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 001808e3901c..48afe96ae0f0 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -410,6 +410,8 @@ void sgx_encl_release(struct kref *ref)
}
kfree(entry);
+ /* Invoke scheduler to prevent soft lockups. */
+ cond_resched();
}
xa_destroy(&encl->page_array);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 4b41efc9e367..8e4bc6453d26 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -344,10 +344,8 @@ static void sgx_reclaim_pages(void)
{
struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
struct sgx_backing backing[SGX_NR_TO_SCAN];
- struct sgx_epc_section *section;
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
- struct sgx_numa_node *node;
pgoff_t page_index;
int cnt = 0;
int ret;
@@ -418,13 +416,7 @@ skip:
kref_put(&encl_page->encl->refcount, sgx_encl_release);
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
- section = &sgx_epc_sections[epc_page->section];
- node = section->node;
-
- spin_lock(&node->lock);
- list_add_tail(&epc_page->list, &node->free_page_list);
- spin_unlock(&node->lock);
- atomic_long_inc(&sgx_nr_free_pages);
+ sgx_free_epc_page(epc_page);
}
}
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 437d7c930c0b..75ffaef8c299 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -91,11 +91,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
- struct user32_fxsr_struct newstate;
+ struct fxregs_state newstate;
int ret;
- BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
-
if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
@@ -116,9 +114,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
/* Copy the state */
memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));
- /* Clear xmm8..15 */
+ /* Clear xmm8..15 for 32-bit callers */
BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
- memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16);
+ if (in_ia32_syscall())
+ memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);
/* Mark FP and SSE as in use when XSAVE is enabled */
if (use_xsave())
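The index change above is easy to misread: fxregs_state.xmm_space is an array of u32, so XMM register i starts at element 4*i, and clearing XMM8..XMM15 therefore starts at element 8*4 and spans 8 * 16 bytes. A stand-alone sketch of that layout with a fake struct but the same arithmetic:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fake_fxregs_state {
	uint32_t xmm_space[64];		/* 16 XMM registers, 16 bytes each */
};

int main(void)
{
	struct fake_fxregs_state fx;

	memset(&fx, 0xff, sizeof(fx));

	/* clear XMM8..XMM15: register i starts at element 4 * i */
	memset(&fx.xmm_space[8 * 4], 0, 8 * 16);

	printf("last dword of xmm7 = %#x, first dword of xmm8 = %#x\n",
	       fx.xmm_space[7 * 4 + 3], fx.xmm_space[8 * 4]);
	return 0;
}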
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 02b3ddaf4f75..7c7824ae7862 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1558,7 +1558,10 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
fpregs_restore_userregs();
newfps->xfeatures = curfps->xfeatures | xfeatures;
- newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+
+ if (!guest_fpu)
+ newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
+
newfps->xfd = curfps->xfd & ~xfeatures;
/* Do the final updates within the locked region */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a438217cbfac..f734e3b0cfec 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -462,19 +462,22 @@ static bool pv_tlb_flush_supported(void)
{
return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+ (num_possible_cpus() != 1));
}
static bool pv_ipi_supported(void)
{
- return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+ return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
+ (num_possible_cpus() != 1));
}
static bool pv_sched_yield_supported(void)
{
return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
+ (num_possible_cpus() != 1));
}
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 6d2244c94799..8d2f2f995539 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
- .n = sizeof(struct user_i387_struct) / sizeof(long),
+ .n = sizeof(struct fxregs_state) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
},
[REGSET_XFP] = {
.core_note_type = NT_PRXFPREG,
- .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
+ .n = sizeof(struct fxregs_state) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c
index 9ae64f9af956..9b9fb7882c20 100644
--- a/arch/x86/kernel/resource.c
+++ b/arch/x86/kernel/resource.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dmi.h>
#include <linux/ioport.h>
#include <asm/e820/api.h>
@@ -24,31 +23,11 @@ static void resource_clip(struct resource *res, resource_size_t start,
res->start = end + 1;
}
-/*
- * Some BIOS-es contain a bug where they add addresses which map to
- * system RAM in the PCI host bridge window returned by the ACPI _CRS
- * method, see commit 4dc2287c1805 ("x86: avoid E820 regions when
- * allocating address space"). To avoid this Linux by default excludes
- * E820 reservations when allocating addresses since 2010.
- * In 2019 some systems have shown-up with E820 reservations which cover
- * the entire _CRS returned PCI host bridge window, causing all attempts
- * to assign memory to PCI BARs to fail if Linux uses E820 reservations.
- *
- * Ideally Linux would fully stop using E820 reservations, but then
- * the old systems this was added for will regress.
- * Instead keep the old behavior for old systems, while ignoring the
- * E820 reservations for any systems from now on.
- */
static void remove_e820_regions(struct resource *avail)
{
- int i, year = dmi_get_bios_year();
+ int i;
struct e820_entry *entry;
- if (year >= 2018)
- return;
-
- pr_info_once("PCI: Removing E820 reservations from host bridge windows\n");
-
for (i = 0; i < e820_table->nr_entries; i++) {
entry = &e820_table->entries[i];
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 28be02adc669..b8f8d268d058 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -282,6 +282,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
+ u64 guest_supported_xcr0;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
if (best && apic) {
@@ -293,9 +294,11 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu);
}
- vcpu->arch.guest_supported_xcr0 =
+ guest_supported_xcr0 =
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
+ vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+
kvm_update_pv_runtime(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
@@ -554,12 +557,13 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_mask(CPUID_7_0_EBX,
- F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
- F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
- F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
- F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
- );
+ F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
+ F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
+ F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
+ F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
+ F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
+ F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
+ F(AVX512VL));
kvm_cpu_cap_mask(CPUID_7_ECX,
F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4662469240bc..9322e6340a74 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1096,14 +1096,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
- kvm_lapic_set_irr(vector, apic);
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
- } else {
- trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
- trig_mode, vector);
- }
+ static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
+ trig_mode, vector);
break;
case APIC_DM_REMRD:
@@ -2312,7 +2306,12 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
apic->irr_pending = true;
apic->isr_count = 1;
} else {
- apic->irr_pending = (apic_search_irr(apic) != -1);
+ /*
+ * Don't clear irr_pending; searching the IRR can race with
+ * updates from the CPU as APICv is still active from hardware's
+ * perspective. The flag will be cleared as appropriate when
+ * KVM injects the interrupt.
+ */
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
}
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 593093b52395..8e24f73bf60b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3889,12 +3889,23 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
walk_shadow_page_lockless_end(vcpu);
}
+static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
+{
+ /* make sure the token value is not 0 */
+ u32 id = vcpu->arch.apf.id;
+
+ if (id << 12 == 0)
+ vcpu->arch.apf.id = 1;
+
+ return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+}
+
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
gfn_t gfn)
{
struct kvm_arch_async_pf arch;
- arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+ arch.token = alloc_apf_token(vcpu);
arch.gfn = gfn;
arch.direct_map = vcpu->arch.mmu->direct_map;
arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
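alloc_apf_token() above guarantees a non-zero token (zero means "no async page fault") by resetting the per-vCPU id whenever the shifted value would wrap to zero; the token layout is (apf_id << 12) | vcpu_id. A user-space sketch of the same allocation with a fake vCPU struct:

#include <stdio.h>
#include <stdint.h>

struct fake_vcpu {
	uint32_t vcpu_id;
	uint32_t apf_id;
};

static uint32_t alloc_apf_token(struct fake_vcpu *vcpu)
{
	/* make sure the token value is never 0 */
	if ((vcpu->apf_id << 12) == 0)
		vcpu->apf_id = 1;

	return (vcpu->apf_id++ << 12) | vcpu->vcpu_id;
}

int main(void)
{
	struct fake_vcpu vcpu = { .vcpu_id = 3, .apf_id = 0xfff00000 };
	int i;

	/* exercise the wrap: id << 12 overflows to 0 and is reset to 1 */
	for (i = 0; i < 3; i++)
		printf("token=%#x\n", alloc_apf_token(&vcpu));
	return 0;
}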
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index f614f95acc6b..b1a02993782b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
}
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
- unsigned config, bool exclude_user,
+ u64 config, bool exclude_user,
bool exclude_kernel, bool intr,
bool in_tx, bool in_tx_cp)
{
@@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
- unsigned config, type = PERF_TYPE_RAW;
+ u64 config;
+ u32 type = PERF_TYPE_RAW;
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
bool allow_event = true;
@@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
}
if (type == PERF_TYPE_RAW)
- config = eventsel & X86_RAW_EVENT_MASK;
+ config = eventsel & AMD64_RAW_EVENT_MASK;
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
return;
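The widening above matters because AMD's extended event-select bits live above bit 31, so masking the eventsel with the 32-bit-oriented mask and storing it in an unsigned int silently dropped them. A quick demonstration of the truncation; the bit layout used here is only illustrative, not copied from the perf headers.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* an event select with (illustrative) extended event bits above bit 31 */
	uint64_t eventsel = (0xfULL << 32) | 0xd0;

	unsigned int config32 = (unsigned int)eventsel;	/* old behaviour: truncated */
	uint64_t config64 = eventsel;			/* new behaviour: preserved */

	printf("32-bit config: %#x\n", config32);
	printf("64-bit config: %#llx\n", (unsigned long long)config64);
	return 0;
}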
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 90364d02f22a..fb3e20791338 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -27,20 +27,6 @@
#include "irq.h"
#include "svm.h"
-#define SVM_AVIC_DOORBELL 0xc001011b
-
-#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
-
-/*
- * 0xff is broadcast, so the max index allowed for physical APIC ID
- * table is 0xfe. APIC IDs above 0xff are reserved.
- */
-#define AVIC_MAX_PHYSICAL_ID_COUNT 255
-
-#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
-#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
-#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
-
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
@@ -73,12 +59,6 @@ struct amd_svm_iommu_ir {
void *data; /* Storing pointer to struct amd_ir_data */
};
-enum avic_ipi_failure_cause {
- AVIC_IPI_FAILURE_INVALID_INT_TYPE,
- AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
- AVIC_IPI_FAILURE_INVALID_TARGET,
- AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
-};
/* Note:
* This function is called from IOMMU driver to notify
@@ -289,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point,
+ * which could result in signalling the wrong/previous pCPU. But if
+ * that happens the vCPU is guaranteed to do a VMRUN (after being
+ * migrated) and thus will process pending interrupts, i.e. a doorbell
+ * is not needed (and the spurious one is harmless).
+ */
+ int cpu = READ_ONCE(vcpu->cpu);
+
+ if (cpu != get_cpu())
+ wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+ put_cpu();
+}
+
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
u32 icrl, u32 icrh)
{
@@ -304,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
kvm_for_each_vcpu(i, vcpu, kvm) {
if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
GET_APIC_DEST_FIELD(icrh),
- icrl & APIC_DEST_MASK))
- kvm_vcpu_wake_up(vcpu);
+ icrl & APIC_DEST_MASK)) {
+ vcpu->arch.apic->irr_pending = true;
+ svm_complete_interrupt_delivery(vcpu,
+ icrl & APIC_MODE_MASK,
+ icrl & APIC_INT_LEVELTRIG,
+ icrl & APIC_VECTOR_MASK);
+ }
}
}
@@ -345,8 +346,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
break;
case AVIC_IPI_FAILURE_INVALID_TARGET:
- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
- index, vcpu->vcpu_id, icrh, icrl);
break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
@@ -669,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
- if (!vcpu->arch.apicv_active)
- return -1;
-
- kvm_lapic_set_irr(vec, vcpu->arch.apic);
-
- /*
- * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
- * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
- * the read of guest_mode, which guarantees that either VMRUN will see
- * and process the new vIRR entry, or that the below code will signal
- * the doorbell if the vCPU is already running in the guest.
- */
- smp_mb__after_atomic();
-
- /*
- * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
- * is in the guest. If the vCPU is not in the guest, hardware will
- * automatically process AVIC interrupts at VMRUN.
- */
- if (vcpu->mode == IN_GUEST_MODE) {
- int cpu = READ_ONCE(vcpu->cpu);
-
- /*
- * Note, the vCPU could get migrated to a different pCPU at any
- * point, which could result in signalling the wrong/previous
- * pCPU. But if that happens the vCPU is guaranteed to do a
- * VMRUN (after being migrated) and thus will process pending
- * interrupts, i.e. a doorbell is not needed (and the spurious
- * one is harmless).
- */
- if (cpu != get_cpu())
- wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
- put_cpu();
- } else {
- /*
- * Wake the vCPU if it was blocking. KVM will then detect the
- * pending IRQ when checking if the vCPU has a wake event.
- */
- kvm_vcpu_wake_up(vcpu);
- }
-
- return 0;
-}
-
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1218b5a342fc..39d280e7e80e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1457,18 +1457,6 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
!__nested_vmcb_check_save(vcpu, &save_cached))
goto out_free;
- /*
- * While the nested guest CR3 is already checked and set by
- * KVM_SET_SREGS, it was set when nested state was yet loaded,
- * thus MMU might not be initialized correctly.
- * Set it again to fix this.
- */
-
- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
- nested_npt_enabled(svm), false);
- if (WARN_ON_ONCE(ret))
- goto out_free;
-
/*
* All checks done, we can enter guest mode. Userspace provides
@@ -1494,6 +1482,20 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm_switch_vmcb(svm, &svm->nested.vmcb02);
nested_vmcb02_prepare_control(svm);
+
+ /*
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set before the nested state was loaded,
+ * so the MMU might not be initialized correctly.
+ * Set it again to fix this.
+ */
+
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
+
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 6d97629655e3..fd3a00c892c7 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1585,6 +1585,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 hcr0 = cr0;
+ bool old_paging = is_paging(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1601,8 +1602,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
#endif
vcpu->arch.cr0 = cr0;
- if (!npt_enabled)
+ if (!npt_enabled) {
hcr0 |= X86_CR0_PG | X86_CR0_WP;
+ if (old_paging != is_paging(vcpu))
+ svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
/*
* re-enable caching here because the QEMU bios
@@ -1646,8 +1650,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
svm_flush_tlb(vcpu);
vcpu->arch.cr4 = cr4;
- if (!npt_enabled)
+ if (!npt_enabled) {
cr4 |= X86_CR4_PAE;
+
+ if (!is_paging(vcpu))
+ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+ }
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -2685,8 +2693,23 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
u64 data = msr->data;
switch (ecx) {
case MSR_AMD64_TSC_RATIO:
- if (!msr->host_initiated && !svm->tsc_scaling_enabled)
- return 1;
+
+ if (!svm->tsc_scaling_enabled) {
+
+ if (!msr->host_initiated)
+ return 1;
+ /*
+ * When TSC scaling is not enabled, always
+ * leave this MSR at its default value.
+ *
+ * Due to a bug in qemu 6.2.0, it tries to set
+ * this MSR to 0 if TSC scaling is not enabled.
+ * Ignore that value as well.
+ */
+ if (data != 0 && data != svm->tsc_ratio_msr)
+ return 1;
+ break;
+ }
if (data & TSC_RATIO_RSVD)
return 1;
@@ -3291,6 +3314,55 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vector)
+{
+ /*
+ * vcpu->arch.apicv_active must be read after vcpu->mode.
+ * Pairs with smp_store_release in vcpu_enter_guest.
+ */
+ bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
+
+ if (!READ_ONCE(vcpu->arch.apicv_active)) {
+ /* Process the interrupt via inject_pending_event */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ return;
+ }
+
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+ if (in_guest_mode) {
+ /*
+ * Signal the doorbell to tell hardware to inject the IRQ. If
+ * the vCPU exits the guest before the doorbell chimes, hardware
+ * will automatically process AVIC interrupts at the next VMRUN.
+ */
+ avic_ring_doorbell(vcpu);
+ } else {
+ /*
+ * Wake the vCPU if it was blocking. KVM will then detect the
+ * pending IRQ when checking if the vCPU has a wake event.
+ */
+ kvm_vcpu_wake_up(vcpu);
+ }
+}
+
+static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ kvm_lapic_set_irr(vector, apic);
+
+ /*
+ * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
+ * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+ * the read of guest_mode. This guarantees that either VMRUN will see
+ * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+ * will signal the doorbell if the CPU has already entered the guest.
+ */
+ smp_mb__after_atomic();
+ svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
+
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3338,11 +3410,13 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_nmi_blocked(vcpu))
+ return 0;
+
/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return -EBUSY;
-
- return !svm_nmi_blocked(vcpu);
+ return 1;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -3394,9 +3468,13 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
struct vcpu_svm *svm = to_svm(vcpu);
+
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_interrupt_blocked(vcpu))
+ return 0;
+
/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
@@ -3404,7 +3482,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
return -EBUSY;
- return !svm_interrupt_blocked(vcpu);
+ return 1;
}
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -3615,7 +3693,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
- kvm_guest_enter_irqoff();
+ guest_state_enter_irqoff();
if (sev_es_guest(vcpu->kvm)) {
__svm_sev_es_vcpu_run(vmcb_pa);
@@ -3635,7 +3713,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
vmload(__sme_page_pa(sd->save_area));
}
- kvm_guest_exit_irqoff();
+ guest_state_exit_irqoff();
}
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -4135,11 +4213,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_smi_blocked(vcpu))
+ return 0;
+
/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
return -EBUSY;
- return !svm_smi_blocked(vcpu);
+ return 1;
}
static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -4233,11 +4314,18 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
* Enter the nested guest now
*/
+ vmcb_mark_all_dirty(svm->vmcb01.ptr);
+
vmcb12 = map.hva;
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+ if (ret)
+ goto unmap_save;
+
+ svm->nested.nested_run_pending = 1;
+
unmap_save:
kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
@@ -4545,7 +4633,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.pmu_ops = &amd_pmu_ops,
.nested_ops = &svm_nested_ops,
- .deliver_posted_interrupt = svm_deliver_avic_intr,
+ .deliver_interrupt = svm_deliver_interrupt,
.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
@@ -4622,6 +4710,7 @@ static __init void svm_set_cpu_caps(void)
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
if (nested) {
kvm_cpu_cap_set(X86_FEATURE_SVM);
+ kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
if (nrips)
kvm_cpu_cap_set(X86_FEATURE_NRIPS);
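svm_deliver_interrupt()/svm_complete_interrupt_delivery() above lean on an acquire/release pairing with vcpu_enter_guest(): the vCPU side publishes apicv_active before storing vcpu->mode with release semantics, and the sender reads vcpu->mode with acquire semantics before looking at apicv_active. A stripped-down C11 model of that pairing; the names, the sequential main() and the two-way decision are illustrative simplifications of the real three-way logic.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int vcpu_mode;		/* 0 = OUTSIDE_GUEST_MODE, 1 = IN_GUEST_MODE */
static atomic_bool apicv_active;

/* runs on the vCPU thread (vcpu_enter_guest() in the patch) */
static void enter_guest(void)
{
	atomic_store_explicit(&apicv_active, true, memory_order_relaxed);
	/* "store apicv_active before vcpu->mode": the release publishes both */
	atomic_store_explicit(&vcpu_mode, 1, memory_order_release);
}

/* runs on the sending CPU (svm_complete_interrupt_delivery() in the patch) */
static void complete_delivery(void)
{
	/* the acquire load of vcpu->mode orders the apicv_active read after it */
	bool in_guest = atomic_load_explicit(&vcpu_mode, memory_order_acquire) == 1;

	if (atomic_load_explicit(&apicv_active, memory_order_relaxed) && in_guest)
		puts("ring the AVIC doorbell");
	else
		puts("kick or wake the vCPU instead");
}

int main(void)
{
	enter_guest();		/* in the kernel these run on different CPUs */
	complete_delivery();
	return 0;
}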
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 73525353e424..fa98d6844728 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -489,6 +489,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vec);
/* nested.c */
@@ -556,17 +558,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
@@ -583,12 +574,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
/* sev.c */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ba34e94049c7..dc822a1d403d 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -246,8 +246,7 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
src = &prev->host_state;
dest = &vmx->loaded_vmcs->host_state;
- vmx_set_vmcs_host_state(dest, src->cr3, src->fs_sel, src->gs_sel,
- src->fs_base, src->gs_base);
+ vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
dest->ds_sel = src->ds_sel;
@@ -3056,7 +3055,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr4;
+ unsigned long cr3, cr4;
bool vm_fail;
if (!nested_early_check)
@@ -3079,6 +3078,12 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
*/
vmcs_writel(GUEST_RFLAGS, 0);
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmcs_writel(HOST_CR4, cr4);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index aca3ae2a02f3..b730d799c26e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1080,14 +1080,9 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}
-void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
- u16 fs_sel, u16 gs_sel,
- unsigned long fs_base, unsigned long gs_base)
+void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
+ unsigned long fs_base, unsigned long gs_base)
{
- if (unlikely(cr3 != host->cr3)) {
- vmcs_writel(HOST_CR3, cr3);
- host->cr3 = cr3;
- }
if (unlikely(fs_sel != host->fs_sel)) {
if (!(fs_sel & 7))
vmcs_write16(HOST_FS_SELECTOR, fs_sel);
@@ -1182,9 +1177,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
gs_base = segment_base(gs_sel);
#endif
- vmx_set_vmcs_host_state(host_state, __get_current_cr3_fast(),
- fs_sel, gs_sel, fs_base, gs_base);
-
+ vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
vmx->guest_state_loaded = true;
}
@@ -4041,6 +4034,21 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
return 0;
}
+static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ struct kvm_vcpu *vcpu = apic->vcpu;
+
+ if (vmx_deliver_posted_interrupt(vcpu, vector)) {
+ kvm_lapic_set_irr(vector, apic);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ } else {
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
+ trig_mode, vector);
+ }
+}
+
/*
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
* will not change in the lifetime of the guest.
@@ -6754,7 +6762,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx)
{
- kvm_guest_enter_irqoff();
+ guest_state_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6770,13 +6778,13 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vcpu->arch.cr2 = native_read_cr2();
- kvm_guest_exit_irqoff();
+ guest_state_exit_irqoff();
}
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr4;
+ unsigned long cr3, cr4;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
@@ -6819,6 +6827,19 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
vcpu->arch.regs_dirty = 0;
+ /*
+ * Refresh vmcs.HOST_CR3 if necessary. This must be done immediately
+ * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
+ * it switches back to the current->mm, which can occur in KVM context
+ * when switching to a temporary mm to patch kernel code, e.g. if KVM
+ * toggles a static key while handling a VM-Exit.
+ */
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmcs_writel(HOST_CR4, cr4);
@@ -7644,6 +7665,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (ret)
return ret;
+ vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
return 0;
@@ -7768,7 +7790,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.hwapic_isr_update = vmx_hwapic_isr_update,
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .deliver_interrupt = vmx_deliver_interrupt,
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
.set_tss_addr = vmx_set_tss_addr,
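The HOST_CR3 handling added above (and the matching HOST_CR4 code) is a cached-write pattern: read the current value cheaply every run, and only pay for the VMWRITE when it differs from what was last written. A generic sketch of the pattern; vmwrite_host_cr3() is a printf stand-in, not a VMX intrinsic.

#include <stdio.h>

static unsigned long cached_host_cr3;

/* stand-in for vmcs_writel(HOST_CR3, ...) */
static void vmwrite_host_cr3(unsigned long cr3)
{
	printf("VMWRITE HOST_CR3 = %#lx\n", cr3);
}

static void refresh_host_cr3(unsigned long current_cr3)
{
	/* only issue the costly write when the value actually changed */
	if (current_cr3 != cached_host_cr3) {
		vmwrite_host_cr3(current_cr3);
		cached_host_cr3 = current_cr3;
	}
}

int main(void)
{
	refresh_host_cr3(0x1000);	/* first run: written */
	refresh_host_cr3(0x1000);	/* unchanged: skipped */
	refresh_host_cr3(0x2000);	/* new PCID/ASID: written again */
	return 0;
}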
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 7f2c82e7f38f..9c6bfcd84008 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -374,9 +374,8 @@ int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
-void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
- u16 fs_sel, u16 gs_sel,
- unsigned long fs_base, unsigned long gs_base);
+void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
+ unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 74b53a16f38a..82a9dcd8c67f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -90,6 +90,8 @@
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
+#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
+
#define emul_to_vcpu(ctxt) \
((struct kvm_vcpu *)(ctxt)->vcpu)
@@ -982,6 +984,18 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
+static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
+}
+
+#ifdef CONFIG_X86_64
+static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
+{
+ return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+}
+#endif
+
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0 = xcr;
@@ -1001,7 +1015,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
* saving. However, xcr0 bit 0 is always set, even if the
* emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
*/
- valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
+ valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
if (xcr0 & ~valid_bits)
return 1;
@@ -2349,10 +2363,12 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
return tsc;
}
+#ifdef CONFIG_X86_64
static inline int gtod_is_based_on_tsc(int mode)
{
return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
}
+#endif
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
@@ -3704,8 +3720,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_cpuid_has(vcpu, X86_FEATURE_XFD))
return 1;
- if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
- vcpu->arch.guest_supported_xcr0))
+ if (data & ~kvm_guest_supported_xfd(vcpu))
return 1;
fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
@@ -3715,8 +3730,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_cpuid_has(vcpu, X86_FEATURE_XFD))
return 1;
- if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
- vcpu->arch.guest_supported_xcr0))
+ if (data & ~kvm_guest_supported_xfd(vcpu))
return 1;
vcpu->arch.guest_fpu.xfd_err = data;
@@ -4231,6 +4245,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES:
case KVM_CAP_SYS_ATTRIBUTES:
+ case KVM_CAP_ENABLE_CAP:
r = 1;
break;
case KVM_CAP_EXIT_HYPERCALL:
@@ -4340,7 +4355,7 @@ static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
void __user *uaddr = (void __user*)(unsigned long)attr->addr;
if ((u64)(unsigned long)uaddr != attr->addr)
- return ERR_PTR(-EFAULT);
+ return ERR_PTR_USR(-EFAULT);
return uaddr;
}
@@ -8940,6 +8955,13 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
return -KVM_EOPNOTSUPP;
+ /*
+	 * When tsc is in permanent catchup mode, guests won't be able to use
+	 * the pvclock_read_retry loop to get a consistent view of pvclock
+ */
+ if (vcpu->arch.tsc_always_catchup)
+ return -KVM_EOPNOTSUPP;
+
if (!kvm_get_walltime_and_clockread(&ts, &cycle))
return -KVM_EOPNOTSUPP;
@@ -9981,7 +10003,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* result in virtual interrupt delivery.
*/
local_irq_disable();
- vcpu->mode = IN_GUEST_MODE;
+
+ /* Store vcpu->apicv_active before vcpu->mode. */
+ smp_store_release(&vcpu->mode, IN_GUEST_MODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -10041,6 +10065,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(0, 7);
}
+ guest_timing_enter_irqoff();
+
for (;;) {
/*
* Assert that vCPU vs. VM APICv state is consistent. An APICv
@@ -10125,7 +10151,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* of accounting via context tracking, but the loss of accuracy is
* acceptable for all known use cases.
*/
- vtime_account_guest_exit();
+ guest_timing_exit_irqoff();
if (lapic_in_kernel(vcpu)) {
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
@@ -11639,8 +11665,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
kvm_free_pit(kvm);
}
-#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
-
/**
* __x86_set_memory_region: Setup KVM internal memory slot
*
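
The vcpu_enter_guest() hunk above replaces a plain store to vcpu->mode with
smp_store_release() so that the earlier write to vcpu->apicv_active is visible
before the mode change is published. As a rough sketch only (not KVM code; the
names are invented and C11 atomics stand in for the kernel barriers), the
generic release/acquire pairing looks like this:

/* Illustrative sketch, not part of the patch. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int data;                 /* plays the role of apicv_active */
static atomic_int mode;          /* plays the role of vcpu->mode   */

static void *writer(void *arg)
{
	data = 42;                                          /* ordinary store     */
	atomic_store_explicit(&mode, 1,
			      memory_order_release);        /* publish after data */
	return NULL;
}

static void *reader(void *arg)
{
	while (atomic_load_explicit(&mode, memory_order_acquire) != 1)
		;                                           /* spin until published */
	printf("data = %d\n", data);                        /* guaranteed to be 42  */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
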
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 635b75f9e145..767ec7f99516 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -10,51 +10,6 @@
void kvm_spurious_fault(void);
-static __always_inline void kvm_guest_enter_irqoff(void)
-{
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * This ensures that e.g. latency analysis on the host observes
- * guest mode as interrupt enabled.
- *
- * guest_enter_irqoff() informs context tracking about the
- * transition to guest mode and if enabled adjusts RCU state
- * accordingly.
- */
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
-
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-static __always_inline void kvm_guest_exit_irqoff(void)
-{
- /*
- * VMEXIT disables interrupts (host state), but tracing and lockdep
- * have them in state 'on' as recorded before entering guest mode.
- * Same as enter_from_user_mode().
- *
- * context_tracking_guest_exit() restores host context and reinstates
- * RCU if enabled and required.
- *
- * This needs to be done immediately after VM-Exit, before any code
- * that might contain tracepoints or call out to the greater world,
- * e.g. before x86_spec_ctrl_restore_host().
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- context_tracking_guest_exit();
-
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
({ \
bool failed = (consistency_check); \
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index bad57535fad0..74be1fda58e3 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -133,32 +133,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
struct kvm_vcpu_xen *vx = &v->arch.xen;
+ struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ bool atomic = (state == RUNSTATE_runnable);
uint64_t state_entry_time;
- unsigned int offset;
+ int __user *user_state;
+ uint64_t __user *user_times;
kvm_xen_update_runstate(v, state);
if (!vx->runstate_set)
return;
- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
+ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
+ return;
+
+ /* We made sure it fits in a single page */
+ BUG_ON(!ghc->memslot);
+
+ if (atomic)
+ pagefault_disable();
- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
-#ifdef CONFIG_X86_64
/*
- * The only difference is alignment of uint64_t in 32-bit.
- * So the first field 'state' is accessed directly using
- * offsetof() (where its offset happens to be zero), while the
- * remaining fields which are all uint64_t, start at 'offset'
- * which we tweak here by adding 4.
+ * The only difference between 32-bit and 64-bit versions of the
+	 * runstate struct is the alignment of uint64_t in 32-bit, which
+ * means that the 64-bit version has an additional 4 bytes of
+ * padding after the first field 'state'.
+ *
+ * So we use 'int __user *user_state' to point to the state field,
+ * and 'uint64_t __user *user_times' for runstate_entry_time. So
+ * the actual array of time[] in each state starts at user_times[1].
*/
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
+ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
+ user_state = (int __user *)ghc->hva;
+
+ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct compat_vcpu_runstate_info,
+ state_entry_time));
+#ifdef CONFIG_X86_64
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
offsetof(struct compat_vcpu_runstate_info, time) + 4);
if (v->kvm->arch.xen.long_mode)
- offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct vcpu_runstate_info,
+ state_entry_time));
#endif
/*
* First write the updated state_entry_time at the appropriate
@@ -172,10 +197,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
sizeof(state_entry_time));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ if (__put_user(state_entry_time, user_times))
+ goto out;
smp_wmb();
/*
@@ -189,11 +212,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
sizeof(vx->current_runstate));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->current_runstate,
- offsetof(struct vcpu_runstate_info, state),
- sizeof(vx->current_runstate)))
- return;
+ if (__put_user(vx->current_runstate, user_state))
+ goto out;
/*
* Write the actual runstate times immediately after the
@@ -208,24 +228,23 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
sizeof(vx->runstate_times));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->runstate_times[0],
- offset + sizeof(u64),
- sizeof(vx->runstate_times)))
- return;
-
+ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
+ goto out;
smp_wmb();
/*
* Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
* runstate_entry_time field.
*/
-
state_entry_time &= ~XEN_RUNSTATE_UPDATE;
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ __put_user(state_entry_time, user_times);
+ smp_wmb();
+
+ out:
+ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+ if (atomic)
+ pagefault_enable();
}
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
@@ -443,6 +462,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_info_cache,
data->u.gpa,
@@ -460,6 +485,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_time_info_cache,
data->u.gpa,
@@ -481,6 +512,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.runstate_cache,
data->u.gpa,
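
The kvm_xen_update_runstate_guest() rework above leans on the layout
difference spelled out in its comment: on 32-bit x86, uint64_t is 4-byte
aligned, so the compat runstate struct has no padding after 'state'. A small
stand-alone sketch (simplified stand-ins, not the real Xen ABI headers) that
reproduces the offsets and the 0x2c size checked by the BUILD_BUG_ON:

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct native_runstate {                 /* 64-bit natural alignment */
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
};

struct compat_runstate {                 /* i386 layout: uint64_t aligned to 4 */
	int state;
	uint64_t state_entry_time;
	uint64_t time[4];
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("native state_entry_time at %zu\n",
	       offsetof(struct native_runstate, state_entry_time));   /* 8    */
	printf("compat state_entry_time at %zu\n",
	       offsetof(struct compat_runstate, state_entry_time));   /* 4    */
	printf("compat struct size 0x%zx\n",
	       sizeof(struct compat_runstate));                       /* 0x2c */
	return 0;
}
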
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 42300941ec29..517a9d8d8f94 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -9,6 +9,7 @@
#include <xen/events.h>
#include <xen/interface/memory.h>
+#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/io_apic.h>
@@ -184,8 +185,7 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
xen_teardown_timer(cpu);
-
- return 0;
+ return 0;
}
static bool no_vector_callback __initdata;
@@ -242,15 +242,14 @@ static __init int xen_parse_no_vector_callback(char *arg)
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
-bool __init xen_hvm_need_lapic(void)
+static __init bool xen_x2apic_available(void)
{
- if (xen_pv_domain())
- return false;
- if (!xen_hvm_domain())
- return false;
- if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
- return false;
- return true;
+ return x2apic_supported();
+}
+
+static bool __init msi_ext_dest_id(void)
+{
+ return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID;
}
static __init void xen_hvm_guest_late_init(void)
@@ -312,9 +311,10 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
.detect = xen_platform_hvm,
.type = X86_HYPER_XEN_HVM,
.init.init_platform = xen_hvm_guest_init,
- .init.x2apic_available = xen_x2apic_para_available,
+ .init.x2apic_available = xen_x2apic_available,
.init.init_mem_mapping = xen_hvm_init_mem_mapping,
.init.guest_late_init = xen_hvm_guest_late_init,
+ .init.msi_ext_dest_id = msi_ext_dest_id,
.runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
};
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5004feb16783..d47c3d176ae4 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1341,10 +1341,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_acpi_sleep_register();
- /* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
xen_boot_params_init_edd();
#ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 6a8f3b53ab83..4a6019238ee7 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
return rc;
}
-static void __init xen_fill_possible_map(void)
-{
- int i, rc;
-
- if (xen_initial_domain())
- return;
-
- for (i = 0; i < nr_cpu_ids; i++) {
- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
- if (rc >= 0) {
- num_processors++;
- set_cpu_possible(i, true);
- }
- }
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
{
int i, rc;
unsigned int subtract = 0;
- if (!xen_initial_domain())
+ if (early)
return;
num_processors = 0;
@@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
* sure the old memory can be recycled. */
make_lowmem_page_readwrite(xen_initial_gdt);
- xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
/*
@@ -476,5 +459,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
- xen_fill_possible_map();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = _get_smp_config;
}
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
index 31b1e3477cb6..14ea32e734d5 100644
--- a/arch/x86/xen/vga.c
+++ b/arch/x86/xen/vga.c
@@ -57,6 +57,14 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.ext_lfb_base)
+ + sizeof(info->u.vesa_lfb.ext_lfb_base)
+ && info->u.vesa_lfb.ext_lfb_base) {
+ screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
+ screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ }
+
if (info->video_type == XEN_VGATYPE_EFI_LFB) {
screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
break;
@@ -66,14 +74,6 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
u.vesa_lfb.mode_attrs)
+ sizeof(info->u.vesa_lfb.mode_attrs))
screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
-
- if (size >= offsetof(struct dom0_vga_console_info,
- u.vesa_lfb.ext_lfb_base)
- + sizeof(info->u.vesa_lfb.ext_lfb_base)
- && info->u.vesa_lfb.ext_lfb_base) {
- screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
- screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- }
break;
}
}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0c612a911696..36a66e97e3c2 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -7018,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
spin_unlock_irq(&bfqd->lock);
#endif
+ wbt_enable_default(bfqd->queue);
+
kfree(bfqd);
}
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index d25114715459..0827b19820c5 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
- bip->bip_iter.bi_sector += bytes_done >> 9;
+ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
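
The bio_integrity_advance() fix above advances the protection-information
index in integrity intervals rather than raw 512-byte sectors; the two only
coincide when the interval is 512 bytes. A minimal sketch of the unit
conversion, with interval_shift standing in for bi->interval_exp (an
assumption for illustration, not the driver code):

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

static unsigned int integrity_intervals(unsigned int interval_shift,
					unsigned int sectors)
{
	/* sectors are 512B units; one interval covers 2^(shift - 9) of them */
	return sectors >> (interval_shift - 9);
}

int main(void)
{
	unsigned int done_sectors = 4096 >> 9;   /* 4 KiB of data = 8 sectors */

	printf("512B intervals : advance %u\n",
	       integrity_intervals(9, done_sectors));    /* 8 */
	printf("4KiB intervals : advance %u\n",
	       integrity_intervals(12, done_sectors));   /* 1 */
	return 0;
}
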
diff --git a/block/blk-core.c b/block/blk-core.c
index d93e3bb9a769..1039515c99d6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
-void blk_set_queue_dying(struct request_queue *q)
-{
- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- blk_queue_start_drain(q);
-}
-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q)
WARN_ON_ONCE(blk_queue_registered(q));
/* mark @q DYING, no new request or merges will be allowed afterwards */
- blk_set_queue_dying(q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+ blk_queue_start_drain(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
diff --git a/block/blk-map.c b/block/blk-map.c
index 4526adde0156..c7f71d83eff1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;
- page = alloc_page(GFP_NOIO | gfp_mask);
+ page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
if (!page)
goto cleanup;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1adfe4824ef5..d69ca91fbc8b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -736,6 +736,10 @@ static void blk_complete_request(struct request *req)
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ bio->bi_iter.bi_sector = req->__sector;
+
if (!is_flush)
bio_endio(bio);
bio = next;
diff --git a/block/elevator.c b/block/elevator.c
index ec98aed39c4f..482df2a350fc 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -525,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_del(&e->kobj);
e->registered = 0;
- /* Re-enable throttling in case elevator disabled it */
- wbt_enable_default(q);
}
}
diff --git a/block/fops.c b/block/fops.c
index 26bf15c770d2..a18e7fbd97b8 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -289,6 +289,8 @@ static void blkdev_bio_end_io_async(struct bio *bio)
struct kiocb *iocb = dio->iocb;
ssize_t ret;
+ WRITE_ONCE(iocb->private, NULL);
+
if (likely(!bio->bi_status)) {
ret = dio->size;
iocb->ki_pos += ret;
@@ -566,34 +568,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct block_device *bdev = iocb->ki_filp->private_data;
loff_t size = bdev_nr_bytes(bdev);
- size_t count = iov_iter_count(to);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
ssize_t ret = 0;
+ size_t count;
- if (unlikely(pos + count > size)) {
+ if (unlikely(pos + iov_iter_count(to) > size)) {
if (pos >= size)
return 0;
size -= pos;
- if (count > size) {
- shorted = count - size;
- iov_iter_truncate(to, size);
- }
+ shorted = iov_iter_count(to) - size;
+ iov_iter_truncate(to, size);
}
+ count = iov_iter_count(to);
+ if (!count)
+ goto reexpand; /* skip atime */
+
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
- iocb->ki_pos + count - 1))
- return -EAGAIN;
+ if (filemap_range_needs_writeback(mapping, pos,
+ pos + count - 1)) {
+ ret = -EAGAIN;
+ goto reexpand;
+ }
} else {
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
+ ret = filemap_write_and_wait_range(mapping, pos,
+ pos + count - 1);
if (ret < 0)
- return ret;
+ goto reexpand;
}
file_accessed(iocb->ki_filp);
@@ -603,12 +608,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
iocb->ki_pos += ret;
count -= ret;
}
+ iov_iter_revert(to, count - iov_iter_count(to));
if (ret < 0 || !count)
- return ret;
+ goto reexpand;
}
ret = filemap_read(iocb, to, ret);
+reexpand:
if (unlikely(shorted))
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
diff --git a/block/genhd.c b/block/genhd.c
index 626c8406f21a..9eca1f7d35c9 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -549,6 +549,20 @@ out_free_ext_minor:
EXPORT_SYMBOL(device_add_disk);
/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+ set_bit(GD_DEAD, &disk->state);
+ blk_queue_start_drain(disk->queue);
+}
+EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
+
+/**
* del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove
*
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index e1ea18536a5f..c8289b7a85ba 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -25,12 +25,9 @@ struct alg_type_list {
struct list_head list;
};
-static atomic_long_t alg_memory_allocated;
-
static struct proto alg_proto = {
.name = "ALG",
.owner = THIS_MODULE,
- .memory_allocated = &alg_memory_allocated,
.obj_size = sizeof(struct alg_sock),
};
diff --git a/crypto/algapi.c b/crypto/algapi.c
index a366cb3e8aa1..76fdaa16bd4a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -1324,3 +1324,4 @@ module_exit(crypto_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
+MODULE_SOFTDEP("pre: cryptomgr");
diff --git a/crypto/api.c b/crypto/api.c
index cf0869dd130b..7ddfe946dd56 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -643,4 +643,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done);
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: cryptomgr");
diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
index 72fe480f9bd6..5f96a21f8788 100644
--- a/crypto/blake2s_generic.c
+++ b/crypto/blake2s_generic.c
@@ -15,12 +15,12 @@
static int crypto_blake2s_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
+ return crypto_blake2s_update(desc, in, inlen, true);
}
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress_generic);
+ return crypto_blake2s_final(desc, out, true);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index 580ec796816b..78ca4987e619 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -44,6 +44,7 @@ static struct var_t vars[] = {
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
+ { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
{ INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ba45541b1f1f..273741dedfd2 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -11,6 +11,7 @@ menuconfig ACPI
depends on ARCH_SUPPORTS_ACPI
select PNP
select NLS
+ select CRC32
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 3b23fb775ac4..f2f8f05662de 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM;
- res[1].start = pmcg->page1_base_address;
- res[1].end = pmcg->page1_base_address + SZ_4K - 1;
- res[1].flags = IORESOURCE_MEM;
+ /*
+ * The initial version in DEN0049C lacked a way to describe register
+ * page 1, which makes it broken for most PMCG implementations; in
+ * that case, just let the driver fail gracefully if it expects to
+ * find a second memory resource.
+ */
+ if (node->revision > 0) {
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+ }
if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0077d2c85df8..46710380a402 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2066,6 +2066,16 @@ bool acpi_ec_dispatch_gpe(void)
return true;
/*
+ * Cancel the SCI wakeup and process all pending events in case there
+ * are any wakeup ones in there.
+ *
+ * Note that if any non-EC GPEs are active at this point, the SCI will
+ * retrigger after the rearming in acpi_s2idle_wake(), so no events
+ * should be missed by canceling the wakeup here.
+ */
+ pm_system_cancel_wakeup();
+
+ /*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 86560a28751b..f8e9fa82cb9b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
+ /* T40 can not handle C3 idle state */
+ { set_max_cstate, "IBM ThinkPad T40", {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
+ (void *)2},
{},
};
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a60ff5dfed3a..d4fbea91ab6b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void)
return true;
}
- /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ /*
+ * Check non-EC GPE wakeups and if there are none, cancel the
+ * SCI-related wakeup and dispatch the EC GPE.
+ */
if (acpi_ec_dispatch_gpe()) {
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
}
- /*
- * Cancel the SCI wakeup and process all pending events in case
- * there are any wakeup ones in there.
- *
- * Note that if any non-EC GPEs are active at this point, the
- * SCI will retrigger after the rearming below, so no events
- * should be missed by canceling the wakeup here.
- */
- pm_system_cancel_wakeup();
acpi_os_wait_events_complete();
/*
@@ -764,6 +758,7 @@ bool acpi_s2idle_wake(void)
return true;
}
+ pm_wakeup_clear(acpi_sci_irq);
rearm_wake_irq(acpi_sci_irq);
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 0741a4933f62..34600b5b9d8e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -400,7 +400,7 @@ int __init_or_acpilib acpi_table_parse_entries_array(
acpi_get_table(id, instance, &table_header);
if (!table_header) {
- pr_warn("%4.4s not present\n", id);
+ pr_debug("%4.4s not present\n", id);
return -ENODEV;
}
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index abc06e7f89d8..ed889f827f53 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
- * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
- *
- * Only enable on !AMD as enabling this universally causes problems for a number
- * of AMD based systems.
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
*/
- if (!acpi_s2idle_vendor_amd())
- acpi_ec_mark_gpe_for_wake();
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 67f88027680a..0c854aebfe0b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2007,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log)
{
struct ata_port *ap = dev->link->ap;
+ if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ return false;
+
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
return false;
return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
@@ -2445,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev)
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
- if (!ata_identify_page_supported(dev,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ if (ata_id_major_version(dev->id) < 11 ||
+ !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
goto out;
/*
- * Read IDENTIFY DEVICE data log, page 0x47
- * (concurrent positioning ranges). We can have at most 255 32B range
- * descriptors plus a 64B header.
+ * Read the concurrent positioning ranges log (0x47). We can have at
+ * most 255 32B range descriptors plus a 64B header.
*/
buf_len = (64 + 255 * 32 + 511) & ~511;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
- err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES,
- buf, buf_len >> 9);
+ err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ 0, buf, buf_len >> 9);
if (err_mask)
goto out;
@@ -4028,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4073,6 +4075,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ /*
+ * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
+ * log page is accessed. Ensure we never ask for this log page with
+ * these devices.
+ */
+ { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+
/* End Marker */
{ }
};
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 7abc7e04f656..6fa4a2faf49c 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -920,6 +920,20 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_write_config_byte(dev, 0x5a, irqmask);
/*
+ * HPT371 chips physically have only one channel, the secondary one,
+ * but the primary channel registers do exist! Go figure...
+ * So, we manually disable the non-existing channel here
+ * (if the BIOS hasn't done this already).
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371) {
+ u8 mcr1;
+
+ pci_read_config_byte(dev, 0x50, &mcr1);
+ mcr1 &= ~0x04;
+ pci_write_config_byte(dev, 0x50, mcr1);
+ }
+
+ /*
* default to pci clock. make sure MA15/16 are set to output
* to prevent drives having problems with 40-pin cables. Needed
* for some drives such as IBM-DTLA which will not enter ready
@@ -950,14 +964,14 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if ((freq >> 12) != 0xABCDE) {
int i;
- u8 sr;
+ u16 sr;
u32 total = 0;
dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
- pci_read_config_byte(dev, 0x78, &sr);
+ pci_read_config_word(dev, 0x78, &sr);
total += sr & 0x1FF;
udelay(15);
}
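
The hpt37x calibration fix above widens 'sr' from u8 to u16 because the field
masked with 0x1FF is 9 bits wide; reading it into an 8-bit variable silently
drops the top bit before the mask is even applied. A tiny demonstration with
made-up register values:

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t reg = 0x1F3;            /* 9-bit hardware value: 499      */

	uint8_t  sr8  = (uint8_t)reg;    /* old code: truncates to 0xF3    */
	uint16_t sr16 = reg;             /* fixed code: keeps all 9 bits   */

	printf("u8  path: %u\n", sr8  & 0x1FF);   /* 243 - wrong   */
	printf("u16 path: %u\n", sr16 & 0x1FF);   /* 499 - correct */
	return 0;
}
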
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index da0152116d9f..556034a15430 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -322,7 +322,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%d %d\n",
+ return sysfs_emit(buf, "%u %u\n",
intr_coalescing_count, intr_coalescing_ticks);
}
@@ -332,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
{
unsigned int coalescing_count, coalescing_ticks;
- if (sscanf(buf, "%d%d",
- &coalescing_count,
- &coalescing_ticks) != 2) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
@@ -359,7 +357,7 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
rx_watermark &= 0x1f;
spin_unlock_irqrestore(&host->lock, flags);
- return sysfs_emit(buf, "%d\n", rx_watermark);
+ return sysfs_emit(buf, "%u\n", rx_watermark);
}
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
@@ -373,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
void __iomem *csr_base = host_priv->csr_base;
u32 temp;
- if (sscanf(buf, "%d", &rx_watermark) != 1) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (kstrtouint(buf, 10, &rx_watermark) < 0) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
@@ -382,8 +380,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
temp = ioread32(csr_base + TRANSCFG);
temp &= 0xffffffe0;
iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
-
spin_unlock_irqrestore(&host->lock, flags);
+
return strlen(buf);
}
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 3bc3c314a467..4f67404fe64c 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1676,6 +1676,8 @@ static int fs_init(struct fs_dev *dev)
dev->hw_base = pci_resource_start(pci_dev, 0);
dev->base = ioremap(dev->hw_base, 0x1000);
+ if (!dev->base)
+ return 1;
reset_chip (dev);
diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c
index 38ba08628ccb..2578b2d45439 100644
--- a/drivers/auxdisplay/lcd2s.c
+++ b/drivers/auxdisplay/lcd2s.c
@@ -238,7 +238,7 @@ static int lcd2s_redefine_char(struct charlcd *lcd, char *esc)
if (buf[1] > 7)
return 1;
- i = 0;
+ i = 2;
shift = 0;
value = 0;
while (*esc && i < LCD2S_CHARACTER_SIZE + 2) {
@@ -298,6 +298,10 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
I2C_FUNC_SMBUS_WRITE_BLOCK_DATA))
return -EIO;
+ lcd2s = devm_kzalloc(&i2c->dev, sizeof(*lcd2s), GFP_KERNEL);
+ if (!lcd2s)
+ return -ENOMEM;
+
/* Test, if the display is responding */
err = lcd2s_i2c_smbus_write_byte(i2c, LCD2S_CMD_DISPLAY_OFF);
if (err < 0)
@@ -307,12 +311,6 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
if (!lcd)
return -ENOMEM;
- lcd2s = kzalloc(sizeof(struct lcd2s_data), GFP_KERNEL);
- if (!lcd2s) {
- err = -ENOMEM;
- goto fail1;
- }
-
lcd->drvdata = lcd2s;
lcd2s->i2c = i2c;
lcd2s->charlcd = lcd;
@@ -321,26 +319,24 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c,
err = device_property_read_u32(&i2c->dev, "display-height-chars",
&lcd->height);
if (err)
- goto fail2;
+ goto fail1;
err = device_property_read_u32(&i2c->dev, "display-width-chars",
&lcd->width);
if (err)
- goto fail2;
+ goto fail1;
lcd->ops = &lcd2s_ops;
err = charlcd_register(lcd2s->charlcd);
if (err)
- goto fail2;
+ goto fail1;
i2c_set_clientdata(i2c, lcd2s);
return 0;
-fail2:
- kfree(lcd2s);
fail1:
- kfree(lcd);
+ charlcd_free(lcd2s->charlcd);
return err;
}
@@ -349,7 +345,7 @@ static int lcd2s_i2c_remove(struct i2c_client *i2c)
struct lcd2s_data *lcd2s = i2c_get_clientdata(i2c);
charlcd_unregister(lcd2s->charlcd);
- kfree(lcd2s->charlcd);
+ charlcd_free(lcd2s->charlcd);
return 0;
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9eaaff2f556c..f47cab21430f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -629,6 +629,9 @@ re_probe:
drv->remove(dev);
devres_release_all(dev);
+ arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -1209,6 +1212,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
devres_release_all(dev);
arch_teardown_dma_ops(dev);
+ kfree(dev->dma_range_map);
+ dev->dma_range_map = NULL;
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 99bda0da23a8..8666590201c9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state;
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
@@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void)
atomic_dec_if_positive(&pm_abort_suspend);
}
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
{
- pm_wakeup_irq = 0;
- if (reset)
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
- if (pm_wakeup_irq == 0) {
- pm_wakeup_irq = irq_number;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
pm_system_wakeup();
- }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
}
/**
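
The wakeup.c rework above records the first two wakeup IRQs reported in a
cycle and lets pm_wakeup_clear() promote the second slot when the first one is
consumed, so a later, unrelated wakeup is not lost. A user-space sketch of
that bookkeeping (illustration only; locking and the abort-suspend accounting
are omitted):

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

static unsigned int wakeup_irq[2];

static void record_wakeup(unsigned int irq)
{
	if (wakeup_irq[0] == 0)
		wakeup_irq[0] = irq;
	else if (wakeup_irq[1] == 0)
		wakeup_irq[1] = irq;
	/* further IRQs in the same cycle are ignored */
}

static void clear_wakeup(unsigned int irq)
{
	/* If the cleared IRQ sits in slot 0, promote slot 1. */
	if (irq && wakeup_irq[0] == irq)
		wakeup_irq[0] = wakeup_irq[1];
	else
		wakeup_irq[0] = 0;
	wakeup_irq[1] = 0;
}

int main(void)
{
	record_wakeup(9);        /* ACPI SCI fires        */
	record_wakeup(23);       /* another wakeup source */
	clear_wakeup(9);         /* SCI wakeup handled    */
	printf("pending wakeup irq: %u\n", wakeup_irq[0]);  /* 23 */
	return 0;
}
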
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index d2656581a608..4a446259a184 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -189,11 +189,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
ret = regmap_write(map, reg, d->mask_buf[i]);
if (d->chip->clear_ack) {
if (d->chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- d->mask_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~d->mask_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
@@ -556,11 +554,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
data->status_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- data->status_buf[i]);
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~data->status_buf[i]);
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0)
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -817,13 +813,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
d->status_buf[i] & d->mask_buf[i]);
if (chip->clear_ack) {
if (chip->ack_invert && !ret)
- ret = regmap_write(map, reg,
- (d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, UINT_MAX);
else if (!ret)
- ret = regmap_write(map, reg,
- ~(d->status_buf[i] &
- d->mask_buf[i]));
+ ret = regmap_write(map, reg, 0);
}
if (ret != 0) {
dev_err(map->dev, "Failed to ack 0x%x: %d\n",
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 01cbbfc4e9e2..19fe19eaa50e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -79,6 +79,7 @@
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
+#include <linux/statfs.h>
#include "loop.h"
@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo)
granularity = 0;
} else {
+ struct kstatfs sbuf;
+
max_discard_sectors = UINT_MAX >> 9;
- granularity = inode->i_sb->s_blocksize;
+ if (!vfs_statfs(&file->f_path, &sbuf))
+ granularity = sbuf.f_bsize;
+ else
+ max_discard_sectors = 0;
}
if (max_discard_sectors) {
@@ -1082,7 +1088,7 @@ out_putf:
return error;
}
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
@@ -1144,6 +1150,8 @@ static void __loop_clr_fd(struct loop_device *lo)
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1151,52 +1159,44 @@ static void __loop_clr_fd(struct loop_device *lo)
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;
- mutex_lock(&lo->lo_disk->open_mutex);
+ /*
+	 * open_mutex has already been held in the release path, so don't
+	 * acquire it again if this function is called from there.
+	 *
+	 * If the partition reread isn't from the release path, lo_refcnt
+ * must be at least one and it can only become zero when the
+ * current holder is released.
+ */
+ if (!release)
+ mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
- mutex_unlock(&lo->lo_disk->open_mutex);
+ if (!release)
+ mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err);
/* Device is gone, no point in returning error */
}
+ /*
+	 * lo->lo_state is set to Lo_unbound here after the above partscan has
+ * finished. There cannot be anybody else entering __loop_clr_fd() as
+ * Lo_rundown state protects us from all the other places trying to
+ * change the 'lo' device.
+ */
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
- fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
- module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
- struct loop_device *lo = container_of(work, struct loop_device,
- rundown_work);
- struct block_device *bdev = lo->lo_device;
- struct gendisk *disk = lo->lo_disk;
-
- __loop_clr_fd(lo);
- kobject_put(&bdev->bd_device.kobj);
- module_put(disk->fops->owner);
- loop_rundown_completed(lo);
-}
-
-static void loop_schedule_rundown(struct loop_device *lo)
-{
- struct block_device *bdev = lo->lo_device;
- struct gendisk *disk = lo->lo_disk;
- __module_get(disk->fops->owner);
- kobject_get(&bdev->bd_device.kobj);
- INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
- queue_work(system_long_wq, &lo->rundown_work);
+ /*
+ * Need not hold lo_mutex to fput backing file. Calling fput holding
+	 * lo_mutex triggers a possible circular lock dependency warning as
+ * fput can take open_mutex which is usually taken before lo_mutex.
+ */
+ fput(filp);
}
static int loop_clr_fd(struct loop_device *lo)
@@ -1228,8 +1228,7 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
- __loop_clr_fd(lo);
- loop_rundown_completed(lo);
+ __loop_clr_fd(lo, false);
return 0;
}
@@ -1754,7 +1753,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- loop_schedule_rundown(lo);
+ __loop_clr_fd(lo, true);
return;
} else if (lo->lo_state == Lo_bound) {
/*
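
The loop_config_discard() hunk earlier in this file's diff derives the discard
granularity from the backing filesystem's block size via vfs_statfs(), and
disables discard if that lookup fails. A rough user-space analogue using
statfs(2); the path is made up and error handling is reduced to what the
example needs:

/* Illustrative sketch, not part of the patch. */
#include <sys/statfs.h>
#include <stdio.h>

int main(void)
{
	struct statfs sbuf;

	if (statfs("/tmp/backing.img", &sbuf) == 0)
		printf("discard granularity: %ld bytes\n", (long)sbuf.f_bsize);
	else
		perror("statfs");   /* the driver would disable discard here */
	return 0;
}
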
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 918a7a2dc025..082d4b6bfc6a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -56,7 +56,6 @@ struct loop_device {
struct gendisk *lo_disk;
struct mutex lo_mutex;
bool idr_visible;
- struct work_struct rundown_work;
};
struct loop_cmd {
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index e6005c232328..2b588b62cbbb 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
- blk_set_queue_dying(dd->queue);
+ blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4203cdab8abf..b844432bad20 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
* IO to complete/fail.
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
- blk_set_queue_dying(rbd_dev->disk->queue);
+ blk_mark_disk_dead(rbd_dev->disk);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index ccd0dd0c6b83..ca71a0585333 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- blk_set_queue_dying(info->rq);
+ blk_mark_disk_dead(info->gd);
set_capacity(info->gd, 0);
for_each_rinfo(info, rinfo, i) {
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 3a258a677df8..b79895810c52 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -401,6 +402,7 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
.config = &modem_mv31_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
};
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 68613f0b6887..3404a91edf29 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -762,7 +762,7 @@ static bool crng_init_try_arch(struct crng_state *crng)
return arch_init;
}
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+static bool __init crng_init_try_arch_early(void)
{
int i;
bool arch_init = true;
@@ -774,7 +774,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
rv = random_get_entropy();
arch_init = false;
}
- crng->state[i] ^= rv;
+ primary_crng.state[i] ^= rv;
}
return arch_init;
@@ -788,22 +788,20 @@ static void crng_initialize_secondary(struct crng_state *crng)
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-static void __init crng_initialize_primary(struct crng_state *crng)
+static void __init crng_initialize_primary(void)
{
- _extract_entropy(&crng->state[4], sizeof(u32) * 12);
- if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
+ _extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
+ if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-static void crng_finalize_init(struct crng_state *crng)
+static void crng_finalize_init(void)
{
- if (crng != &primary_crng || crng_init >= 2)
- return;
if (!system_wq) {
/* We can't call numa_crng_init until we have workqueues,
* so mark this for processing later. */
@@ -814,6 +812,7 @@ static void crng_finalize_init(struct crng_state *crng)
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
+ crng_need_final_init = false;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -980,7 +979,8 @@ static void crng_reseed(struct crng_state *crng, bool use_input_pool)
memzero_explicit(&buf, sizeof(buf));
WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- crng_finalize_init(crng);
+ if (crng == &primary_crng && crng_init < 2)
+ crng_finalize_init();
}
static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
@@ -1697,8 +1697,8 @@ int __init rand_initialize(void)
{
init_std_data();
if (crng_need_final_init)
- crng_finalize_init(&primary_crng);
- crng_initialize_primary(&primary_crng);
+ crng_finalize_init();
+ crng_initialize_primary();
crng_global_init_time = jiffies;
if (ratelimit_disable) {
urandom_warning.interval = 0;
@@ -1856,7 +1856,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- input_pool.entropy_count = 0;
+ if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
@@ -2205,13 +2208,15 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
return;
}
- /* Suspend writing if we're above the trickle threshold.
+ /* Throttle writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
+ * when the calling thread is about to terminate, or once
+	 * CRNG_RESEED_INTERVAL has elapsed.
*/
- wait_event_interruptible(random_write_wait,
+ wait_event_interruptible_timeout(random_write_wait,
!system_wq || kthread_should_stop() ||
- POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
+ POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
+ CRNG_RESEED_INTERVAL);
mix_pool_bytes(buffer, count);
credit_entropy_bits(entropy);
}
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 744d136b721b..15d61793f53b 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -139,11 +139,10 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = {
},
[JZ4725B_CLK_I2S] = {
- "i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
+ "i2s", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4725B_CLK_EXT, JZ4725B_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
.div = { CGU_REG_I2SCDR, 0, 1, 9, -1, -1, -1 },
- .gate = { CGU_REG_CLKGR, 6 },
},
[JZ4725B_CLK_SPI] = {
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 71aa630fa4bd..f09499999eb3 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -108,42 +108,6 @@ static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
{ .hw = &gpll4.clkr.hw },
};
-static struct clk_rcg2 system_noc_clk_src = {
- .cmd_rcgr = 0x0120,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "system_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 config_noc_clk_src = {
- .cmd_rcgr = 0x0150,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "config_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
-static struct clk_rcg2 periph_noc_clk_src = {
- .cmd_rcgr = 0x0190,
- .hid_width = 5,
- .parent_map = gcc_xo_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "periph_noc_clk_src",
- .parent_data = gcc_xo_gpll0,
- .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
@@ -1150,8 +1114,6 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.enable_mask = BIT(17),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1435,8 +1397,6 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.enable_mask = BIT(15),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1764,8 +1724,6 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_lpass_q6_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1778,8 +1736,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1807,9 +1763,6 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1822,9 +1775,6 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1854,9 +1804,6 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1884,9 +1831,6 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1899,9 +1843,6 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1930,9 +1871,6 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1960,8 +1898,6 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1989,9 +1925,6 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2004,9 +1937,6 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2034,9 +1964,6 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2064,9 +1991,6 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2124,8 +2048,6 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2153,8 +2075,6 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2198,8 +2118,6 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2213,8 +2131,6 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2243,8 +2159,6 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2258,8 +2172,6 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_1_clk",
- .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2364,8 +2276,6 @@ static struct clk_branch gcc_usb_hs_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2488,8 +2398,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
.enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_boot_rom_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2503,8 +2411,6 @@ static struct clk_branch gcc_prng_ahb_clk = {
.enable_mask = BIT(13),
.hw.init = &(struct clk_init_data){
.name = "gcc_prng_ahb_clk",
- .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
- .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2547,9 +2453,6 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL4_EARLY] = &gpll4_early.clkr,
[GPLL4] = &gpll4.clkr,
- [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
- [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
- [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -2696,6 +2599,15 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
[GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
[GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+
+ /*
+	 * The following clocks should NOT be managed by this driver, but they were once
+	 * mistakenly added. They are kept here only to indicate that they are intentionally
+	 * not defined, even though the names stay in the header file (for ABI sanity).
+ */
+ [CONFIG_NOC_CLK_SRC] = NULL,
+ [PERIPH_NOC_CLK_SRC] = NULL,
+ [SYSTEM_NOC_CLK_SRC] = NULL,
};
static struct gdsc *gcc_msm8994_gdscs[] = {
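For context, the NULL placeholders above work because the common Qualcomm clock registration path simply skips empty table slots. A minimal sketch of that kind of loop, with illustrative names rather than the exact helpers this driver uses:

	/* Holes in the clock table are skipped, so leaving the NOC entries
	 * as NULL is safe and keeps the binding indices stable. */
	for (i = 0; i < num_clks; i++) {
		if (!rclks[i])
			continue;
		ret = devm_clk_register_regmap(dev, rclks[i]);
		if (ret)
			return ret;
	}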
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index b6f97960d8ee..1fccb457fcc5 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -241,8 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void)
bool quirk_unreliable_oscillator = false;
/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
- if (of_machine_is_compatible("ti,omap3-beagle") ||
- of_machine_is_compatible("timll,omap3-devkit8000")) {
+ if (of_machine_is_compatible("ti,omap3-beagle-ab4")) {
quirk_unreliable_oscillator = true;
counter_32k = -ENODEV;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b8d95536ee22..80f535cc8a75 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1518,6 +1518,10 @@ static int cpufreq_online(unsigned int cpu)
kobject_uevent(&policy->kobj, KOBJ_ADD);
+	/* Callback for driver-specific handling once the policy is fully initialized */
+ if (cpufreq_driver->ready)
+ cpufreq_driver->ready(policy);
+
if (cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 05f3d7876e44..effbb680b453 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -388,7 +388,7 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
- IRQF_ONESHOT, data->irq_name, data);
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
if (ret) {
dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
return 0;
@@ -542,6 +542,14 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
+static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
+{
+ struct qcom_cpufreq_data *data = policy->driver_data;
+
+ if (data->throttle_irq >= 0)
+ enable_irq(data->throttle_irq);
+}
+
static struct freq_attr *qcom_cpufreq_hw_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_scaling_boost_freqs,
@@ -561,6 +569,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
+ .ready = qcom_cpufreq_ready,
};
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
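The IRQF_NO_AUTOEN flag used above leaves the interrupt disabled after request_threaded_irq(), so the new ->ready() callback can enable it only once the policy is fully set up. The general pattern, sketched with placeholder names:

	/* Request the IRQ without enabling it ... */
	ret = request_threaded_irq(irq, NULL, my_handler,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, "my-irq", data);

	/* ... and turn it on later, once everything the handler touches is ready. */
	enable_irq(irq);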
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 4c8ebdf671ca..1b4d425bbf0e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1753,7 +1753,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
char engs_info[2 * OTX2_CPT_NAME_LENGTH];
struct otx2_cpt_eng_grp_info *grp;
struct otx2_cpt_engs_rsvd *engs;
- u32 mask[4];
int i, j;
pr_debug("Engine groups global info");
@@ -1785,6 +1784,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
engs = &grp->engs[j];
if (engs->type) {
+ u32 mask[5] = { };
+
get_engs_info(grp, engs_info,
2 * OTX2_CPT_NAME_LENGTH, j);
pr_debug("Slot%d: %s", j, engs_info);
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 56bf5ad01ad5..8f5848aa144f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -14,6 +14,7 @@
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
@@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
return -EINVAL;
+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
/* Get the kernel ioctl cmd that matches */
kcmd = dma_heap_ioctl_cmds[nr];
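array_index_nospec() clamps an already bounds-checked, user-controlled index so it cannot be used out of range under speculative execution (Spectre v1). The generic pattern, with placeholder names:

	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;
	/* Clamp idx so the following load is safe even under speculation. */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	val = table[idx];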
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a1da2b4b6d73..1476156af74b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1681,8 +1681,10 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
__func__, atchan->irq_status);
if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
- !(atchan->irq_status & error_mask))
+ !(atchan->irq_status & error_mask)) {
+ spin_unlock_irq(&atchan->lock);
return;
+ }
if (atchan->irq_status & error_mask)
at_xdmac_handle_error(atchan);
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 8a6bf291a73f..daafea5bc35d 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
- goto e_dma_alloc;
+ goto e_destroy_pool;
}
cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
/* Request an irq */
ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
- if (ret)
- goto e_pool;
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_free_dma;
+ }
/* Update the device registers with queue information. */
cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
/* Register the DMA engine support */
ret = pt_dmaengine_register(pt);
if (ret)
- goto e_dmaengine;
+ goto e_free_irq;
/* Set up debugfs entries */
ptdma_debugfs_setup(pt);
return 0;
-e_dmaengine:
+e_free_irq:
free_irq(pt->pt_irq, pt);
-e_dma_alloc:
+e_free_dma:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
-e_pool:
- dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
dma_pool_destroy(pt->cmd_q.dma_pool);
return ret;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 481f45c77ce1..13d12d660cc2 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
if (ret < 0)
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 158e5e7defae..b26ed690f03c 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
- if (ret < 0)
+ if (ret < 0) {
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+ pm_runtime_put(schan->dev);
+ }
pm_runtime_barrier(schan->dev);
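For context on the fix above: pm_runtime_get() bumps the device usage count even when the asynchronous resume request fails, so an error path must drop that reference, which is what the added pm_runtime_put() does. A minimal sketch with a generic device pointer:

	ret = pm_runtime_get(dev);
	if (ret < 0) {
		dev_err(dev, "runtime resume request failed: %d\n", ret);
		pm_runtime_put(dev);	/* balance the usage count taken above */
	}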
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index a42164389ebc..d5d55732adba 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
if (ret)
- goto err_clk;
+ goto pm_disable;
return 0;
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_clk:
clk_disable_unprepare(stm32_dmamux->clk);
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 3a6d2416cb0f..5dd29789f97d 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
if (irq < 0) {
edac_printk(KERN_ERR, EDAC_MC,
"No irq %d in DT\n", irq);
- return -ENODEV;
+ return irq;
}
/* Arria10 has a 2nd IRQ */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9d9aabdec96b..f5677d81bd2d 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
else
return (char *)ptr;
- r = (unsigned long)p % align;
+ r = (unsigned long)ptr % align;
if (r == 0)
return (char *)ptr;
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 2ccd1db5e98f..7197f9fa0245 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
- rc = -EINVAL;
+ rc = irq;
goto out_err;
}
rc = devm_request_irq(&pdev->dev, irq,
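Both EDAC fixes above follow the same rule: when platform_get_irq*() fails, propagate its negative errno instead of replacing it with a generic code, so values such as -EPROBE_DEFER are not lost. Sketched with placeholder names:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* keep -EPROBE_DEFER, -ENXIO, ... intact */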
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index b406b3f78f46..d76bab3aaac4 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -2112,7 +2112,7 @@ static void __exit scmi_driver_exit(void)
}
module_exit(scmi_driver_exit);
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/libstub/riscv-stub.c b/drivers/firmware/efi/libstub/riscv-stub.c
index 380e4e251399..9c460843442f 100644
--- a/drivers/firmware/efi/libstub/riscv-stub.c
+++ b/drivers/firmware/efi/libstub/riscv-stub.c
@@ -25,7 +25,7 @@ typedef void __noreturn (*jump_kernel_func)(unsigned int, unsigned long);
static u32 hartid;
-static u32 get_boot_hartid_from_fdt(void)
+static int get_boot_hartid_from_fdt(void)
{
const void *fdt;
int chosen_node, len;
@@ -33,23 +33,26 @@ static u32 get_boot_hartid_from_fdt(void)
fdt = get_efi_config_table(DEVICE_TREE_GUID);
if (!fdt)
- return U32_MAX;
+ return -EINVAL;
chosen_node = fdt_path_offset(fdt, "/chosen");
if (chosen_node < 0)
- return U32_MAX;
+ return -EINVAL;
prop = fdt_getprop((void *)fdt, chosen_node, "boot-hartid", &len);
if (!prop || len != sizeof(u32))
- return U32_MAX;
+ return -EINVAL;
- return fdt32_to_cpu(*prop);
+ hartid = fdt32_to_cpu(*prop);
+ return 0;
}
efi_status_t check_platform_features(void)
{
- hartid = get_boot_hartid_from_fdt();
- if (hartid == U32_MAX) {
+ int ret;
+
+ ret = get_boot_hartid_from_fdt();
+ if (ret) {
efi_err("/chosen/boot-hartid missing or invalid!\n");
return EFI_UNSUPPORTED;
}
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index abdc8a6a3963..cae590bd08f2 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -742,6 +742,7 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
{
const struct efivar_operations *ops;
efi_status_t status;
+ unsigned long varsize;
if (!__efivars)
return -EINVAL;
@@ -764,15 +765,17 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
return efivar_entry_set_nonblocking(name, vendor, attributes,
size, data);
+ varsize = size + ucs2_strsize(name, 1024);
if (!block) {
if (down_trylock(&efivars_lock))
return -EBUSY;
+ status = check_var_size_nonblocking(attributes, varsize);
} else {
if (down_interruptible(&efivars_lock))
return -EINTR;
+ status = check_var_size(attributes, varsize);
}
- status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
up(&efivars_lock);
return -ENOSPC;
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 869dc952cf45..0cb2664085cf 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- return gpiod_get_value(fwd->descs[offset]);
+ return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
+ : gpiod_get_value(fwd->descs[offset]);
}
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
for_each_set_bit(i, mask, fwd->chip.ngpio)
descs[j++] = fwd->descs[i];
- error = gpiod_get_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
+ else
+ error = gpiod_get_array_value(j, descs, NULL, values);
if (error)
return error;
@@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- gpiod_set_value(fwd->descs[offset], value);
+ if (chip->can_sleep)
+ gpiod_set_value_cansleep(fwd->descs[offset], value);
+ else
+ gpiod_set_value(fwd->descs[offset], value);
}
static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
descs[j++] = fwd->descs[i];
}
- gpiod_set_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ else
+ gpiod_set_array_value(j, descs, NULL, values);
}
static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index a4c4e4584f5b..099e358d2491 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -410,10 +410,8 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
level = rockchip_gpio_readl(bank, bank->gpio_regs->int_type);
polarity = rockchip_gpio_readl(bank, bank->gpio_regs->int_polarity);
- switch (type) {
- case IRQ_TYPE_EDGE_BOTH:
+ if (type == IRQ_TYPE_EDGE_BOTH) {
if (bank->gpio_type == GPIO_TYPE_V2) {
- bank->toggle_edge_mode &= ~mask;
rockchip_gpio_writel_bit(bank, d->hwirq, 1,
bank->gpio_regs->int_bothedge);
goto out;
@@ -431,30 +429,34 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
else
polarity |= mask;
}
- break;
- case IRQ_TYPE_EDGE_RISING:
- bank->toggle_edge_mode &= ~mask;
- level |= mask;
- polarity |= mask;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- bank->toggle_edge_mode &= ~mask;
- level |= mask;
- polarity &= ~mask;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- bank->toggle_edge_mode &= ~mask;
- level &= ~mask;
- polarity |= mask;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- bank->toggle_edge_mode &= ~mask;
- level &= ~mask;
- polarity &= ~mask;
- break;
- default:
- ret = -EINVAL;
- goto out;
+ } else {
+ if (bank->gpio_type == GPIO_TYPE_V2) {
+ rockchip_gpio_writel_bit(bank, d->hwirq, 0,
+ bank->gpio_regs->int_bothedge);
+ } else {
+ bank->toggle_edge_mode &= ~mask;
+ }
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ level |= mask;
+ polarity |= mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ level |= mask;
+ polarity &= ~mask;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ level &= ~mask;
+ polarity |= mask;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ level &= ~mask;
+ polarity &= ~mask;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
}
rockchip_gpio_writel(bank, level, bank->gpio_regs->int_type);
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 403f9e833d6a..7d82388b4ab7 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
NULL,
chip->base + SIFIVE_GPIO_OUTPUT_EN,
chip->base + SIFIVE_GPIO_INPUT_EN,
- 0);
+ BGPIOF_READ_OUTPUT_REG_SET);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 04b137eca8da..153fe79e1bf3 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item)
return container_of(group, struct gpio_sim_bank, group);
}
+static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank)
+{
+ return bank->label && *bank->label;
+}
+
static struct gpio_sim_device *
gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
{
@@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
* point the device doesn't exist yet and so dev_name()
* is not available.
*/
- hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%s", dev->id,
- fwnode_get_name(bank->swnode));
+ if (gpio_sim_bank_has_label(bank))
+ hog->chip_label = kstrdup(bank->label,
+ GFP_KERNEL);
+ else
+ hog->chip_label = kasprintf(GFP_KERNEL,
+ "gpio-sim.%u-%s",
+ dev->id,
+ fwnode_get_name(
+ bank->swnode));
if (!hog->chip_label) {
gpio_sim_remove_hogs(dev);
return -ENOMEM;
@@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
- if (bank->label && (strlen(bank->label) > 0))
+ if (gpio_sim_bank_has_label(bank))
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 34b36a8c035f..8d298beffd86 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -343,9 +343,12 @@ static int tegra186_gpio_of_xlate(struct gpio_chip *chip,
return offset + pin;
}
+#define to_tegra_gpio(x) container_of((x), struct tegra_gpio, gpio)
+
static void tegra186_irq_ack(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
base = tegra186_gpio_get_base(gpio, data->hwirq);
@@ -357,7 +360,8 @@ static void tegra186_irq_ack(struct irq_data *data)
static void tegra186_irq_mask(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -372,7 +376,8 @@ static void tegra186_irq_mask(struct irq_data *data)
static void tegra186_irq_unmask(struct irq_data *data)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
@@ -387,7 +392,8 @@ static void tegra186_irq_unmask(struct irq_data *data)
static int tegra186_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct tegra_gpio *gpio = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct tegra_gpio *gpio = to_tegra_gpio(gc);
void __iomem *base;
u32 value;
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index c7b5446d01fd..ffa0256cad5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
goto out_free_lh;
}
- ret = gpiod_request(desc, lh->label);
+ ret = gpiod_request_user(desc, lh->label);
if (ret)
goto out_free_lh;
lh->descs[i] = desc;
@@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
}
- ret = gpiod_request(desc, lr->label);
+ ret = gpiod_request_user(desc, lr->label);
if (ret)
goto out_free_linereq;
@@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- ret = gpiod_request(desc, le->label);
+ ret = gpiod_request_user(desc, le->label);
if (ret)
goto out_free_le;
le->desc = desc;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 4098bc7f88b7..44c1ad51b3fe 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class,
* they may be undone on its behalf too.
*/
- status = gpiod_request(desc, "sysfs");
- if (status) {
- if (status == -EPROBE_DEFER)
- status = -ENODEV;
+ status = gpiod_request_user(desc, "sysfs");
+ if (status)
goto done;
- }
status = gpiod_set_transitory(desc, false);
if (!status) {
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3859911b61e9..a3d14277f17c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3147,6 +3147,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)
return retirq;
}
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ if (gc->irq.chip) {
+ /*
+		 * Avoid a race with other code that tries to look up
+ * an IRQ before the irqchip has been properly registered,
+ * i.e. while gpiochip is still being brought up.
+ */
+ return -EPROBE_DEFER;
+ }
+#endif
return -ENXIO;
}
EXPORT_SYMBOL_GPL(gpiod_to_irq);
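Returning -EPROBE_DEFER here gives consumers a chance to retry once the gpiochip's irqchip is fully registered; a typical caller simply forwards the error (illustrative sketch):

	irq = gpiod_to_irq(desc);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER while the gpiochip is still probing */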
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 30bc3f80f83e..c31f4626915d 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -135,6 +135,18 @@ struct gpio_desc {
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
+
+static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
+{
+ int ret;
+
+ ret = gpiod_request(desc, label);
+ if (ret == -EPROBE_DEFER)
+ ret = -ENODEV;
+
+ return ret;
+}
+
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
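The helper above is what the cdev and sysfs paths switch to; its only difference from gpiod_request() is mapping -EPROBE_DEFER to -ENODEV so userspace never sees a transient, retry-style error code. Usage is a drop-in replacement (label taken from the sysfs call site above):

	ret = gpiod_request_user(desc, "sysfs");
	if (ret)
		goto done;	/* -ENODEV rather than -EPROBE_DEFER on deferral */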
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d8b854fcbffa..9a53a4de2bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
enum amdgpu_ss ss_state) { return 0; }
#endif
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
+
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4811b0faafd9..0e12315fa0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
}
}
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+ return !(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
/**
* amdgpu_acpi_is_s0ix_active
*
@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- if (adev->flags & AMD_IS_APU)
- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+ if (!(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ return false;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
}
-#endif
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
+#else
+ return true;
+#endif /* CONFIG_AMD_PMC */
}
+
+#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 82011e75ed85..c4387b38229c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1141,7 +1141,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
- if (!dev->mode_config.allow_fb_modifiers) {
+ if (!dev->mode_config.allow_fb_modifiers && !adev->enable_virtual_display) {
drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
"GFX9+ requires FB check based on format modifier\n");
ret = check_tiling_flags_gfx6(rfb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 4c83f1db8a24..0ead08ba58c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2011,6 +2011,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+ amdgpu_aspm = 0;
+
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -2246,13 +2249,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
if (amdgpu_device_supports_boco(drm_dev))
- return pm_runtime_suspended(dev) &&
- pm_suspend_via_firmware();
+ return pm_runtime_suspended(dev);
+
+	/* If neither S3 nor s2idle will be supported for the device,
+	 * then skip suspend.
+ */
+ if (!amdgpu_acpi_is_s0ix_active(adev) &&
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5c3f24069f2a..4655702a5e00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1904,7 +1904,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->sched.ready) {
+ if (!direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index d99c8779b51e..5224d9a39737 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -391,7 +391,6 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
int index)
{
struct drm_plane *plane;
- uint64_t modifiers[] = {DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID};
int ret;
plane = kzalloc(sizeof(*plane), GFP_KERNEL);
@@ -402,7 +401,7 @@ static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
&amdgpu_vkms_plane_funcs,
amdgpu_vkms_formats,
ARRAY_SIZE(amdgpu_vkms_formats),
- modifiers, type, NULL);
+ NULL, type, NULL);
if (ret) {
kfree(plane);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b37fc7d7d2c7..418341a67517 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -768,11 +768,17 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
- * True if eviction list is empty.
+ * True if the VM is not evicting and its eviction list is empty.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- return list_empty(&vm->evicted);
+ bool ret;
+
+ amdgpu_vm_eviction_lock(vm);
+ ret = !vm->evicting;
+ amdgpu_vm_eviction_unlock(vm);
+
+ return ret && list_empty(&vm->evicted);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index b4eddf6e98a6..ff738e9725ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
disabled_sa = tmp;
WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 38bb42727715..a2f8ed0e6a64 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1140,6 +1140,9 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
+ return;
+
adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e8e4749e9c79..f0638db57111 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU saves SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_fini(adev);
}
@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU restores SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0fc1747e4a70..12f80fdc1fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -619,8 +619,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
- !(adev->apu_flags & AMD_APU_IS_RAVEN2))
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1114,8 +1114,11 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_VCN_MGCG;
+ /*
+ * MMHUB PG needs to be disabled for Picasso for
+ * stability reasons.
+ */
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
- AMD_PG_SUPPORT_MMHUB |
AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7f9773f8dab6..075429bea427 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3653,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
@@ -4256,6 +4256,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
#endif
+ /* Disable vblank IRQs aggressively for power-saving. */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4301,19 +4304,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
update_connector_ext_caps(aconnector);
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
+
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
}
}
- /*
- * Disable vblank IRQs aggressively for power-saving.
- *
- * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
- * is also supported.
- */
- adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
-
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index f977f29907df..10c7be40dfb0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -473,8 +473,10 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
/* Refresh bounding box */
+ DC_FP_START();
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
+ DC_FP_END();
}
static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 48005def1164..bc4ddc36fe58 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 7.95,
- .sr_enter_plus_exit_time_us = 9,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4162ce40089b..9d17c5a5ae01 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = {
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 6.09,
- .sr_enter_plus_exit_time_us = 7.14,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
}
@@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct(
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
dcn31_bw_params.wm_table = lpddr5_wm_table;
} else {
- dcn31_bw_params.wm_table = ddr4_wm_table;
+ dcn31_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index a1011f3273f3..de3f4643eeef 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Result_Failed) {
- ASSERT(0);
+ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ param == TABLE_WATERMARKS)
+ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else
+ ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
return -1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6f5528d34093..ba1aa994db4b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -985,10 +985,13 @@ static bool dc_construct(struct dc *dc,
goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
-#endif
- if (dc->res_pool->funcs->update_bw_bounding_box)
+ if (dc->res_pool->funcs->update_bw_bounding_box) {
+ DC_FP_START();
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ DC_FP_END();
+ }
+#endif
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
@@ -1220,6 +1223,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 4c3ab2575e4b..61b8f29a0c30 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -5597,6 +5597,26 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+ uint8_t fwrev_mbp_2018[] = { 7, 4 };
+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+ /* We also check for the firmware revision as 16,1 models have an
+ * identical device id and are incorrectly quirked otherwise.
+ */
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+ sizeof(str_mbp_2018)) &&
+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+ sizeof(fwrev_mbp_2018)) ||
+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+ sizeof(fwrev_mbp_2018_vega)))) {
+ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+ }
+ }
+
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b3912ff9dc91..18757c158523 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1964,10 +1964,6 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool,
del_pipe->stream_res.stream_enc,
false);
- /* Release link encoder from stream in new dc_state. */
- if (dc->res_pool->funcs->link_enc_unassign)
- dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 288e7b01f561..b51864890621 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -202,6 +202,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ uint32_t max_otg_num;
};
struct dc_bug_wa {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index f3ff141b706a..eb2755bdb30e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1608,11 +1608,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
- if (dc_is_embedded_signal(pipe_ctx->stream->signal) &&
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
- pipe_ctx->stream_res.stream_enc);
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@@ -1839,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
break;
}
}
- // We are trying to enable eDP, don't power down VDD
- if (can_apply_edp_fast_boot)
+
+ /*
+	 * TODO: So far the logic below only addresses the single-eDP case.
+	 * For the dual-eDP case, a few things need to be implemented first:
+	 *
+	 * 1. Change the fastboot logic above so that eDP link[0 or 1]'s
+	 * stream[0 or 1] are all checked.
+	 *
+	 * 2. Change keep_edp_vdd_on to an array and maintain keep_edp_vdd_on
+	 * for each eDP.
+	 *
+	 * Once the above two items are done, the logic below can be changed
+	 * correspondingly so that the dual-eDP case is fully covered.
+ */
+
+	// We are trying to enable eDP, so don't power down VDD if an eDP stream already exists
+ if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
keep_edp_vdd_on = true;
+ DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+ } else {
+ DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+ }
}
// Check seamless boot support
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index bf4436d7aaab..b0c08ee6bc2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-
- /* set DIG_START to 0x1 to reset FIFO */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
- udelay(100);
-
- /* write 0 to take the FIFO out of reset */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
-}
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a146a41f68e9..687d7e4bf7ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2bc93df023ad..2a72517e2b28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 8a70f92795c2..aab25ca8343a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 8daa12730bc1..a04ca4a98392 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 90c73a1cb986..5e3bcaf12cac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
ret_val = wm_ns * refclk_mhz;
ret_val /= 1000;
- if (ret_val > clamp_value)
+ if (ret_val > clamp_value) {
+		/* Clamping watermarks is abnormal and unexpected and may lead to underflow */
+ ASSERT(0);
ret_val = clamp_value;
+ }
return ret_val;
}
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->d.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
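For reference, every hunk above tightens the clamp passed to convert_and_clamp() from 0x1fffff to 0xffff, i.e. the programmed watermark is now capped at a 16-bit maximum rather than a 21-bit one. A minimal standalone sketch of the underlying conversion, nanoseconds scaled by the reference clock and clamped to the destination field width, is below; the helper name and exact rounding are illustrative assumptions, not the DC implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Watermark values are specified in nanoseconds but programmed in
 * reference-clock cycles: cycles = ns * refclk_mhz / 1000. The result is
 * clamped to the maximum value the destination register field can hold
 * (0xffff for a 16-bit field, 0x1fffff for a 21-bit one).
 */
static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
                                       uint32_t max_field_value)
{
	uint64_t cycles = (uint64_t)wm_ns * refclk_mhz / 1000;

	return cycles > max_field_value ? max_field_value : (uint32_t)cycles;
}

int main(void)
{
	/* 10000 ns at a 100 MHz refclk is 1000 cycles, well below 0xffff. */
	printf("%u\n", wm_ns_to_refclk_cycles(10000, 100, 0xffff));
	return 0;
}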
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 073f8b667eff..c88e113b94d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -164,10 +164,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e2cae97f4ff1..48cc009d9bdf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 777f717c37ae..5488a0edb942 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -421,6 +421,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
return 0;
}
+static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t *board_reserved;
+ uint16_t *freq_table_gfx;
+ uint32_t i;
+
+	/* Fix some OEM SKU-specific stability issues */
+ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0XC3) &&
+ (adev->pdev->subsystem_device == 0x16C2) &&
+ (adev->pdev->subsystem_vendor == 0x1043))
+ board_reserved[0] = 1387;
+
+ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0XC3) &&
+ ((adev->pdev->subsystem_device == 0x16C2) ||
+ (adev->pdev->subsystem_device == 0x133C)) &&
+ (adev->pdev->subsystem_vendor == 0x1043)) {
+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
+ if (freq_table_gfx[i] > 2500)
+ freq_table_gfx[i] = 2500;
+ }
+ }
+
+ return 0;
+}
+
static int sienna_cichlid_setup_pptable(struct smu_context *smu)
{
int ret = 0;
@@ -441,7 +471,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
if (ret)
return ret;
- return ret;
+ return sienna_cichlid_patch_pptable_quirk(smu);
}
static int sienna_cichlid_tables_init(struct smu_context *smu)
@@ -1238,21 +1268,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
- if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
- pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
- if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
- pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_BEIGE_GOBY:
+ pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -3696,14 +3742,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
+ uint16_t *mgpu_fan_boost_limit_rpm;
+ GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
/*
* Skip the MGpuFanBoost setting for those ASICs
* which do not support it
*/
- if (!smc_pptable->MGpuFanBoostLimitRpm)
+ if (*mgpu_fan_boost_limit_rpm == 0)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
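The quirk added above matches specific PCI device/revision/subsystem IDs and then caps every GfxclkFreq DPM entry at 2500 MHz on the affected boards. A standalone sketch of that clamp step follows; the table size and values are made up for illustration and do not come from the real PPTable.

#include <stdint.h>
#include <stdio.h>

#define NUM_LEVELS 16	/* stand-in for NUM_GFXCLK_DPM_LEVELS */

/* Cap every DPM level at max_mhz, as the quirk does for the matched SKUs. */
static void clamp_gfx_freq_table(uint16_t *freq_table_mhz, unsigned int levels,
                                 uint16_t max_mhz)
{
	unsigned int i;

	for (i = 0; i < levels; i++)
		if (freq_table_mhz[i] > max_mhz)
			freq_table_mhz[i] = max_mhz;
}

int main(void)
{
	uint16_t table[NUM_LEVELS] = { 500, 1800, 2600, 2750 };

	clamp_gfx_freq_table(table, NUM_LEVELS, 2500);
	printf("%d %d %d %d\n", table[0], table[1], table[2], table[3]);
	return 0;
}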
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 38cd0ece24f6..42f705c7a36f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -33,6 +33,14 @@ typedef enum {
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index caf1775d48ef..0bc84b709a93 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
- int ret = 0, index = 0;
-
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index == -EACCES ? 0 : index;
+ int ret = 0;
- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
if (ret)
dev_err(smu->adev->dev, "Failed to mode reset!\n");
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index 58a242871b28..6e3f1d600541 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -6,6 +6,7 @@ config DRM_HDLCD
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
help
Choose this option if you have an ARM High Definition Colour LCD
controller.
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index a7389a0facfb..af07eeb47ca0 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
@@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
/*
* ui2bc - UI time periods to byte clock cycles
*/
-static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+static u32 ui2bc(unsigned int ui)
{
- u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
-
- return DIV64_U64_ROUND_UP(ui * dsi->lanes,
- dsi->mode.clock * 1000 * bpp);
+ return DIV_ROUND_UP(ui, BITS_PER_BYTE);
}
/*
@@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi)
}
/* values in byte clock cycles */
- cycles = ui2bc(dsi, cfg->clk_pre);
+ cycles = ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
- cycles += ui2bc(dsi, cfg->clk_pre);
+ cycles += ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
cycles = ps2bc(dsi, cfg->hs_exit);
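The simplified ui2bc() above relies on the DSI byte clock being the HS bit clock divided by eight, so a duration expressed in UI (unit intervals, i.e. HS bit periods) converts to byte-clock cycles with a round-up division by BITS_PER_BYTE. A standalone sketch of that conversion, with an illustrative function name:

#include <stdio.h>

#define BITS_PER_BYTE 8

/*
 * One byte-clock cycle spans eight UI, so divide by eight and round up so
 * the programmed value never falls short of the required duration.
 */
static unsigned int ui_to_byte_clk(unsigned int ui)
{
	return (ui + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
}

int main(void)
{
	printf("%u %u %u\n", ui_to_byte_clk(8), ui_to_byte_clk(9), ui_to_byte_clk(16));
	return 0;
}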
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index dab8f76618f3..68d8415e6c28 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1802,6 +1802,7 @@ static inline void ti_sn_gpio_unregister(void) {}
static void ti_sn65dsi86_runtime_disable(void *data)
{
+ pm_runtime_dont_use_autosuspend(data);
pm_runtime_disable(data);
}
@@ -1861,11 +1862,11 @@ static int ti_sn65dsi86_probe(struct i2c_client *client,
"failed to get reference clock\n");
pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(pdata->dev, 500);
+ pm_runtime_use_autosuspend(pdata->dev);
ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(pdata->dev, 500);
- pm_runtime_use_autosuspend(pdata->dev);
ti_sn65dsi86_debugfs_init(pdata);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 9781722519c3..54d62fdb4ef9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;
if (mode) {
+ struct drm_property_blob *blob;
+
drm_mode_convert_to_umode(&umode, mode);
- state->mode_blob =
- drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
- if (IS_ERR(state->mode_blob))
- return PTR_ERR(state->mode_blob);
+ blob = drm_property_create_blob(crtc->dev,
+ sizeof(umode), &umode);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
+
+ state->mode_blob = blob;
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index a50c82bc2b2f..76a8c707c34b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -2330,6 +2330,9 @@ EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable)
{
+ if (!connector->vrr_capable_property)
+ return;
+
drm_object_property_set_value(&connector->base,
connector->vrr_capable_property,
capable);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 12893e7be89b..f5f5de362ff2 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5345,6 +5345,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return quirks;
+ info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, edid);
/*
@@ -5393,7 +5394,6 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
connector->name, info->bpc);
- info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index cefd0cbf9deb..dc275c466c9c 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_DONTEXPAND;
if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index beaf99e9120a..b688841c18e4 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state);
*
* The notifier is called with no locks held. The new hw_state and sw_state
* can be retrieved using the drm_privacy_screen_get_state() function.
- * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
* argument of the notifier_block's notifier_call.
*
* The notifier will NOT be called when changes are made through
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 12571ac45540..c04264f70ad1 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -678,7 +678,6 @@ static int decon_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct decon_context *ctx;
struct device_node *i80_if_timings;
- struct resource *res;
int ret;
if (!dev->of_node)
@@ -728,16 +727,11 @@ static int decon_probe(struct platform_device *pdev)
goto err_iounmap;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- ctx->i80_if ? "lcd_sys" : "vsync");
- if (!res) {
- dev_err(dev, "irq request failed.\n");
- ret = -ENXIO;
+ ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+ if (ret < 0)
goto err_iounmap;
- }
- ret = devm_request_irq(dev, res->start, decon_irq_handler,
- 0, "drm_decon", ctx);
+ ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
goto err_iounmap;
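This probe cleanup, repeated in the fimc, fimd, gsc and mixer patches below, replaces a manual IORESOURCE_IRQ lookup with platform_get_irq*(), which returns either the Linux IRQ number or a negative errno that can be propagated directly. A minimal probe sketch under that assumption; the driver name and handler are placeholders, not part of the exynos code.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq;

	/* A negative return is already an errno; no resource table lookup needed. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
				dev_name(&pdev->dev), pdev);
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = { .name = "demo-irq" },
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");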
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 32a36572b894..d13f5e3a030d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1334,8 +1334,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi,
int ret;
int te_gpio_irq;
- dsi->te_gpio = devm_gpiod_get_optional(dsi->dev, "te", GPIOD_IN);
- if (IS_ERR(dsi->te_gpio)) {
+ dsi->te_gpio = gpiod_get_optional(panel, "te", GPIOD_IN);
+ if (!dsi->te_gpio) {
+ return 0;
+ } else if (IS_ERR(dsi->te_gpio)) {
dev_err(dsi->dev, "gpio request failed with %ld\n",
PTR_ERR(dsi->te_gpio));
return PTR_ERR(dsi->te_gpio);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 023f54ee61a8..0ee32e4b1e43 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1267,7 +1267,6 @@ static int fimc_probe(struct platform_device *pdev)
struct exynos_drm_ipp_formats *formats;
struct device *dev = &pdev->dev;
struct fimc_context *ctx;
- struct resource *res;
int ret;
int i, j, num_limits, num_formats;
@@ -1330,14 +1329,12 @@ static int fimc_probe(struct platform_device *pdev)
return PTR_ERR(ctx->regs);
/* resource irq */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to request irq resource.\n");
- return -ENOENT;
- }
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(dev, res->start, fimc_irq_handler,
- 0, dev_name(dev), ctx);
+ ret = devm_request_irq(dev, ret, fimc_irq_handler,
+ 0, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "failed to request irq.\n");
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index c735e53939d8..7d5a483a54de 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1133,7 +1133,6 @@ static int fimd_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct device_node *i80_if_timings;
- struct resource *res;
int ret;
if (!dev->of_node)
@@ -1206,15 +1205,11 @@ static int fimd_probe(struct platform_device *pdev)
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- ctx->i80_if ? "lcd_sys" : "vsync");
- if (!res) {
- dev_err(dev, "irq request failed.\n");
- return -ENXIO;
- }
+ ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(dev, res->start, fimd_irq_handler,
- 0, "drm_fimd", ctx);
+ ret = devm_request_irq(dev, ret, fimd_irq_handler, 0, "drm_fimd", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 166a80262896..964dceb28c1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1220,7 +1220,6 @@ static int gsc_probe(struct platform_device *pdev)
struct gsc_driverdata *driver_data;
struct exynos_drm_ipp_formats *formats;
struct gsc_context *ctx;
- struct resource *res;
int num_formats, ret, i, j;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -1275,13 +1274,10 @@ static int gsc_probe(struct platform_device *pdev)
return PTR_ERR(ctx->regs);
/* resource irq */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "failed to request irq resource.\n");
- return -ENOENT;
- }
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0)
+ return ctx->irq;
- ctx->irq = res->start;
ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0,
dev_name(dev), ctx);
if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 41c54f1f60bc..e5204be86093 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -809,19 +809,17 @@ static int mixer_resources_init(struct mixer_context *mixer_ctx)
return -ENXIO;
}
- res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(dev, "get interrupt resource failed.\n");
- return -ENXIO;
- }
+ ret = platform_get_irq(mixer_ctx->pdev, 0);
+ if (ret < 0)
+ return ret;
+ mixer_ctx->irq = ret;
- ret = devm_request_irq(dev, res->start, mixer_irq_handler,
- 0, "drm_mixer", mixer_ctx);
+ ret = devm_request_irq(dev, mixer_ctx->irq, mixer_irq_handler,
+ 0, "drm_mixer", mixer_ctx);
if (ret) {
dev_err(dev, "request interrupt failed.\n");
return ret;
}
- mixer_ctx->irq = res->start;
return 0;
}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a4c94dc2e216..cfd932514da2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on X86
depends on 64BIT
default n
help
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 2da4aacc956b..8ac196e814d5 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -825,6 +825,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int max_bw_point = 0, max_bw = 0;
unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+ bool changed = false;
u32 mask = 0;
/* FIXME earlier gens need some checks too */
@@ -868,6 +869,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state->data_rate[crtc->pipe] = new_data_rate;
new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
+ changed = true;
+
drm_dbg_kms(&dev_priv->drm,
"pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -875,7 +878,19 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
new_bw_state->num_active_planes[crtc->pipe]);
}
- if (!new_bw_state)
+ old_bw_state = intel_atomic_get_old_bw_state(state);
+ new_bw_state = intel_atomic_get_new_bw_state(state);
+
+ if (new_bw_state &&
+ intel_can_enable_sagv(dev_priv, old_bw_state) !=
+ intel_can_enable_sagv(dev_priv, new_bw_state))
+ changed = true;
+
+ /*
+ * If none of our inputs (data rates, number of active
+ * planes, SAGV yes/no) changed then nothing to do here.
+ */
+ if (!changed)
return 0;
ret = intel_atomic_lock_global_state(&new_bw_state->base);
@@ -961,7 +976,6 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
*/
new_bw_state->qgv_points_mask = ~allowed_points & mask;
- old_bw_state = intel_atomic_get_old_bw_state(state);
/*
* If the actual mask had changed we need to make sure that
* the commits are serialized(in case this is a nomodeset, nonblocking)
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 46c6eecbd917..0ceaed1c9656 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -30,19 +30,19 @@ struct intel_bw_state {
*/
u8 pipe_sagv_reject;
+ /* bitmask of active pipes */
+ u8 active_pipes;
+
/*
* Current QGV points mask, which restricts
* some particular SAGV states, not to confuse
* with pipe_sagv_mask.
*/
- u8 qgv_points_mask;
+ u16 qgv_points_mask;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
- /* bitmask of active pipes */
- u8 active_pipes;
-
int min_cdclk;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index bf7ce684dd8e..bb4a85445fc6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
vlv_wm_sanitize(dev_priv);
} else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
+ skl_wm_sanitize(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index c1439fcb5a95..3ff149df4a77 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
struct drm_display_mode *fixed_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = connector->encoder;
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
@@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
return NULL;
}
+ if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+ encoder->port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS only supported on eDP port A\n");
+ return NULL;
+ }
+
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 160fd2bdafe5..957feeccff3f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
if (DISPLAY_VER(i915) >= 11 &&
- (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+ (plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
return false;
}
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 0065111593a6..4a2662838cd8 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
port++;
}
+ /*
+ * The port numbering and mapping here is bizarre. The now-obsolete
+ * swsci spec supports ports numbered [0..4]. Port E is handled as a
+ * special case, but port F and beyond are not. The functionality is
+ * supposed to be obsolete for new platforms. Just bail out if the port
+ * number is out of bounds after mapping.
+ */
+ if (port > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+ intel_encoder->base.base.id, intel_encoder->base.name,
+ port_name(intel_encoder->port), port);
+ return -EINVAL;
+ }
+
if (!enable)
parm |= 4 << 8;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 1a376e9a1ff3..d610e48cab94 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
const struct intel_crtc_state *pipe_config =
overlay->crtc->config;
+ if (rec->dst_height == 0 || rec->dst_width == 0)
+ return -EINVAL;
+
if (rec->dst_x < pipe_config->pipe_src_w &&
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
rec->dst_y < pipe_config->pipe_src_h &&
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 09f405e4d363..92ff654f54f5 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -34,7 +34,7 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv)
if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n",
- phy);
+ phy_name(phy));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 40faa18947c9..7784c30fe893 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
@@ -690,6 +691,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
+ intel_wakeref_t tc_cold_wref;
+ enum intel_display_power_domain domain;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
@@ -701,12 +704,11 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- if (active_links) {
- enum intel_display_power_domain domain;
- intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ tc_cold_wref = tc_cold_block(dig_port, &domain);
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (active_links) {
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
@@ -715,10 +717,23 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
&dig_port->tc_lock_power_domain);
-
- tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ } else {
+ /*
+	 * TBT-alt is the default mode whenever PHY ownership is not held
+	 * (regardless of the sink's connected live state), so switching from
+	 * it to disconnected mode here does not warrant a log message.
+ */
+ if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+ icl_tc_phy_disconnect(dig_port);
}
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3a5b247be738..1736efa43339 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
timeout) < 0) {
i915_request_put(rq);
- tl = intel_context_timeline_lock(ce);
+ /*
+ * Error path, cannot use intel_context_timeline_lock as
+	 * that is user interruptible and this cleanup step
+ * must be done.
+ */
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- intel_context_timeline_unlock(tl);
+ mutex_unlock(&ce->timeline->mutex);
if (nonblock)
return -EWOULDBLOCK;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index de3fe79b665a..1f880c8c66e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -842,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
- bo->priority = I915_TTM_PRIO_NO_PAGES;
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index ee9612a3ee5e..e130c820ae4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
if (!IS_ERR(fence))
goto out;
- } else if (move_deps) {
- int err = i915_deps_sync(move_deps, ctx);
+ } else {
+ int err = PTR_ERR(fence);
+
+ if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+ return fence;
- if (err)
- return ERR_PTR(err);
+ if (move_deps) {
+ err = i915_deps_sync(move_deps, ctx);
+ if (err)
+ return ERR_PTR(err);
+ }
}
/* Error intercept failed or no accelerated migration to start with */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index f9240d4baa69..3aabe164c329 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -206,6 +206,11 @@ struct intel_guc {
* context usage for overflows.
*/
struct delayed_work work;
+
+ /**
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 13b27b8ff74e..ba21ace973da 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -110,7 +110,7 @@ static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
{
u32 request[] = {
GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
- SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2),
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
id,
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e7517206af82..154ad726e266 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
if (new_start == lower_32_bits(*prev_start))
return;
+ /*
+ * When gt is unparked, we update the gt timestamp and start the ping
+ * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
+ * is unparked, all switched in contexts will have a start time that is
+ * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
+ *
+ * If neither gt_stamp nor new_start has rolled over, then the
+ * gt_stamp_hi does not need to be adjusted; however, if one of them has
+ * rolled over, we need to adjust gt_stamp_hi accordingly.
+ *
+ * The below conditions address the cases of new_start rollover and
+ * gt_stamp_last rollover respectively.
+ */
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
@@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
-static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+/*
+ * GuC updates shared memory and KMD reads it. Since this is not synchronized,
+ * we run into a race where the value read is inconsistent. Sometimes the
+ * inconsistency is in reading the upper MSB bytes of the last_in value when
+ * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper
+ * 24 bits are zero. Since these are non-zero values, it is non-trivial to
+ * determine validity of these values. Instead we read the values multiple times
+ * until they are consistent. In test runs, 3 attempts result in consistent
+ * values. The upper bound is set to 6 attempts and may need to be tuned as per
+ * any new occurrences.
+ */
+static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ u32 *last_in, u32 *id, u32 *total)
{
struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ int i = 0;
+
+ do {
+ *last_in = READ_ONCE(rec->last_switch_in_stamp);
+ *id = READ_ONCE(rec->current_context_index);
+ *total = READ_ONCE(rec->total_runtime);
+
+ if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
+ READ_ONCE(rec->current_context_index) == *id &&
+ READ_ONCE(rec->total_runtime) == *total)
+ break;
+ } while (++i < 6);
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 last_switch = rec->last_switch_in_stamp;
- u32 ctx_id = rec->current_context_index;
- u32 total = rec->total_runtime;
+ u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
+ __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
+
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
@@ -1149,23 +1190,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
}
}
-static void guc_update_pm_timestamp(struct intel_guc *guc,
- struct intel_engine_cs *engine,
- ktime_t *now)
+static u32 gpm_timestamp_shift(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+
+ shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
+
+ return 3 - shift;
+}
+
+static u64 gpm_timestamp(struct intel_gt *gt)
+{
+ u32 lo, hi, old_hi, loop = 0;
+
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ do {
+ lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
+ old_hi = hi;
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ } while (old_hi != hi && loop++ < 2);
+
+ return ((u64)hi << 32) | lo;
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
- u32 gt_stamp_now, gt_stamp_hi;
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 gt_stamp_lo, gt_stamp_hi;
+ u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
- gt_stamp_now = intel_uncore_read(engine->uncore,
- RING_TIMESTAMP(engine->mmio_base));
+ gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
+ gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
- if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
- guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
@@ -1208,8 +1277,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
+ /*
+ * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
+ * start_gt_clk' calculation below for active engines.
+ */
guc_update_engine_gt_clks(engine);
- guc_update_pm_timestamp(guc, engine, now);
+ guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
@@ -1241,8 +1314,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
@@ -1259,10 +1332,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
ktime_t unused;
spin_lock_irqsave(&guc->timestamp.lock, flags);
- for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
- }
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
@@ -1335,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = &gt->uc.guc;
+ unsigned long flags;
+ ktime_t unused;
if (!guc_submission_initialized(guc))
return;
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
guc->timestamp.ping_delay);
}
@@ -1783,6 +1862,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+ guc->timestamp.shift = gpm_timestamp_shift(gt);
return 0;
}
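Two lockless-read patterns appear in the GuC busyness rework above: re-reading a shared usage record until two consecutive snapshots agree, and sampling a split 64-bit counter as hi/lo/hi so a carry between the two 32-bit reads is caught. A standalone sketch of the latter, with plain variables standing in for the MISC_STATUS0/1 registers:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the low and high halves of a free-running 64-bit counter. */
static volatile uint32_t counter_lo, counter_hi;

/*
 * Read hi, then lo, then hi again; if the high word changed in between, the
 * low word may belong to either epoch, so retry (bounded to a few attempts,
 * as in the patch).
 */
static uint64_t read_split_counter(void)
{
	uint32_t lo, hi, old_hi;
	int loop = 0;

	hi = counter_hi;
	do {
		lo = counter_lo;
		old_hi = hi;
		hi = counter_hi;
	} while (old_hi != hi && loop++ < 2);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	counter_hi = 1;
	counter_lo = 0x10;
	printf("0x%llx\n", (unsigned long long)read_split_counter());
	return 0;
}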
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 99d1781fa5f0..af79b39048f7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/**
+/*
* Check if can do 2M page
* @vgpu: target vgpu
* @entry: target pfn's gtt entry
@@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
}
/**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
* @vgpu: a vGPU
* @off: register offset
* @p_data: data will be returned to guest
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5ae812d60abe..0633888a411e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
if (!ee)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
index 76f1d53bdf34..3ad22bbe80eb 100644
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -6,6 +6,7 @@
#ifndef __I915_MM_H__
#define __I915_MM_H__
+#include <linux/bug.h>
#include <linux/types.h>
struct vm_area_struct;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c32420cb8ed5..c2bb33febb68 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define GUCPMTIMESTAMP _MMIO(0xC3E8)
+#define MISC_STATUS0 _MMIO(0xA500)
+#define MISC_STATUS1 _MMIO(0xA504)
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index da8f82c2342f..fc8a68f3a2ed 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -108,6 +108,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
+ case INTEL_PCH_ICP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
return PCH_ICP;
@@ -123,7 +124,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
!IS_GEN9_BC(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
- case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
drm_WARN_ON(&dev_priv->drm, !IS_JSL_EHL(dev_priv));
return PCH_JSP;
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h
index 6bff77521094..4ba0f1967cca 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/intel_pch.h
@@ -50,11 +50,11 @@ enum intel_pch {
#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680
#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
+#define INTEL_PCH_ICP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
#define INTEL_PCH_TGP2_DEVICE_ID_TYPE 0x4380
#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80
-#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880
#define INTEL_PCH_ADP_DEVICE_ID_TYPE 0x7A80
#define INTEL_PCH_ADP2_DEVICE_ID_TYPE 0x5180
#define INTEL_PCH_ADP3_DEVICE_ID_TYPE 0x7A00
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 434b1f8b7fe3..fae4f7818d28 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4029,6 +4029,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
return ret;
}
+ if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+ intel_can_enable_sagv(dev_priv, old_bw_state)) {
+ ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
for_each_new_intel_crtc_in_state(state, crtc,
new_crtc_state, i) {
struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
@@ -4044,17 +4055,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
intel_can_enable_sagv(dev_priv, new_bw_state);
}
- if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
- intel_can_enable_sagv(dev_priv, old_bw_state)) {
- ret = intel_atomic_serialize_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- } else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
};
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
@@ -4732,6 +4736,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
.join_mbus = true,
},
{
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
@@ -4835,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes,
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].join_mbus;
}
@@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes)
return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
const struct dbuf_slice_conf_entry *dbuf_slices)
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
return dbuf_slices[i].dbuf_mask[pipe];
}
return 0;
@@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
* returns correspondent DBuf slice mask as stated in BSpec for particular
* platform.
*/
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
/*
* FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
* still here - we will need it once those additional constraints
* pop up.
*/
- return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
}
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
}
-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
}
-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes);
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes);
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes);
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes);
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice yet.
* Should be extended for other platforms.
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
+ if (IS_ALDERLAKE_P(dev_priv))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
continue;
@@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
- if (IS_ALDERLAKE_P(dev_priv))
- new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
@@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
enum pipe pipe = crtc->pipe;
unsigned int mbus_offset;
enum plane_id plane_id;
+ u8 slices;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
}
- dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
/*
* Used for checking overlaps, so we need absolute
* offsets instead of MBUS relative offsets.
*/
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(dev_priv, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
@@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition between the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
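The check_mbus_joined()/compute_dbuf_slices() fix above replaces a loop bound that indexed the table by its own contents (i < dbuf_slices[i].active_pipes) with a walk that stops at the zero sentinel entry. A small standalone sketch of that sentinel-terminated lookup; the struct here is a simplified stand-in, not the i915 definition.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dbuf_conf {
	uint8_t active_pipes;	/* 0 terminates the table */
	bool join_mbus;
};

static const struct dbuf_conf allowed[] = {
	{ .active_pipes = 0x1, .join_mbus = true },
	{ .active_pipes = 0x3, .join_mbus = false },
	{ 0 }			/* sentinel */
};

/* Walk until the sentinel entry; return the match, or a default if none. */
static bool mbus_joined(uint8_t active_pipes, const struct dbuf_conf *table)
{
	int i;

	for (i = 0; table[i].active_pipes != 0; i++)
		if (table[i].active_pipes == active_pipes)
			return table[i].join_mbus;

	return false;
}

int main(void)
{
	printf("%d %d %d\n", mbus_joined(0x1, allowed), mbus_joined(0x3, allowed),
	       mbus_joined(0x7, allowed));
	return 0;
}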
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 990cdcaf85ce..d2243653a893 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53f1ccb78849..64c2708efc9e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
-
- if (rpm->available)
- stack_depot_init();
+ stack_depot_init();
}
static noinline depot_stack_handle_t
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 7374f1952762..5c2b2277afbf 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,6 +2,7 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 00404ba4126d..2735b8eb3537 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
case LAYER_1:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
break;
- case LAYER_2:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE;
- break;
- case LAYER_3:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE;
- break;
}
kmb->plane_status[plane_id].disable = true;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5d90d2eb0019..bced4c7d668e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
mtk_dsi_poweroff(dsi);
}
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to encoder init to drm\n");
+ return ret;
+ }
+
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+
+ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ goto err_cleanup_encoder;
+
+ dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+ if (IS_ERR(dsi->connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ ret = PTR_ERR(dsi->connector);
+ goto err_cleanup_encoder;
+ }
+ drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+ return 0;
+
+err_cleanup_encoder:
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+}
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+ struct drm_device *drm = data;
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
+
+ return device_reset_optional(dev);
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+ .bind = mtk_dsi_bind,
+ .unbind = mtk_dsi_unbind,
+};
+
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
+ struct device *dev = host->dev;
+ int ret;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
+ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->next_bridge))
+ return PTR_ERR(dsi->next_bridge);
+
+ drm_bridge_add(&dsi->bridge);
+
+ ret = component_add(host->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ DRM_ERROR("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
+ }
return 0;
}
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+
+ component_del(host->dev, &mtk_dsi_component_ops);
+ drm_bridge_remove(&dsi->bridge);
+ return 0;
+}
+
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
int ret;
@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
+ .detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};
-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
-{
- int ret;
-
- ret = drm_simple_encoder_init(drm, &dsi->encoder,
- DRM_MODE_ENCODER_DSI);
- if (ret) {
- DRM_ERROR("Failed to encoder init to drm\n");
- return ret;
- }
-
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
-
- ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
- if (ret)
- goto err_cleanup_encoder;
-
- dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
- if (IS_ERR(dsi->connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- ret = PTR_ERR(dsi->connector);
- goto err_cleanup_encoder;
- }
- drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
-
- return 0;
-
-err_cleanup_encoder:
- drm_encoder_cleanup(&dsi->encoder);
- return ret;
-}
-
-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
-{
- int ret;
- struct drm_device *drm = data;
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- ret = mtk_dsi_encoder_init(drm, dsi);
- if (ret)
- return ret;
-
- return device_reset_optional(dev);
-}
-
-static void mtk_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- drm_encoder_cleanup(&dsi->encoder);
-}
-
-static const struct component_ops mtk_dsi_component_ops = {
- .bind = mtk_dsi_bind,
- .unbind = mtk_dsi_unbind,
-};
-
static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct drm_panel *panel;
struct resource *regs;
int irq_num;
int ret;
@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->next_bridge);
- if (ret)
- goto err_unregister_host;
-
- if (panel) {
- dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(dsi->next_bridge)) {
- ret = PTR_ERR(dsi->next_bridge);
- goto err_unregister_host;
- }
- }
-
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
- drm_bridge_add(&dsi->bridge);
-
- ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to add component: %d\n", ret);
- goto err_unregister_host;
- }
-
return 0;
err_unregister_host:
@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
- drm_bridge_remove(&dsi->bridge);
- component_del(&pdev->dev, &mtk_dsi_component_ops);
mipi_dsi_host_unregister(&dsi->host);
return 0;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 0655582ae8ed..4cfb6c001679 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -361,7 +361,11 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
bridge_state =
drm_atomic_get_new_bridge_state(state,
mxsfb->bridge);
- bus_format = bridge_state->input_bus_cfg.format;
+ if (!bridge_state)
+ bus_format = MEDIA_BUS_FMT_FIXED;
+ else
+ bus_format = bridge_state->input_bus_cfg.format;
+
if (bus_format == MEDIA_BUS_FMT_FIXED) {
dev_warn_once(drm->dev,
"Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index d0f52d59fc2f..64e423dddd9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
*addr += bios->imaged_addr;
}
- if (unlikely(*addr + size >= bios->size)) {
+ if (unlikely(*addr + size > bios->size)) {
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
return false;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e46db5e359c..3c08f9827acf 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
err = panel_dpi_probe(dev, panel);
if (err)
goto free_ddc;
+ desc = panel->desc;
} else {
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 0fce73b9a646..70bd84b7ef2b 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
* so don't register a backlight device
*/
if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
- (rdev->pdev->device == 0x6741))
+ (rdev->pdev->device == 0x6741) &&
+ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
return;
if (!radeon_encoder->enc_priv)
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 377f9cdb5b53..84013faa4756 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -470,8 +470,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
void *ptr;
-
- int i, r;
+ long r;
+ int i;
if (offset & 0x3F) {
DRM_ERROR("UVD messages must be 64 byte aligned!\n");
@@ -481,13 +481,13 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
MAX_SCHEDULE_TIMEOUT);
if (r <= 0) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ DRM_ERROR("Failed waiting for UVD message (%ld)!\n", r);
return r ? r : -ETIME;
}
r = radeon_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
+ DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
return r;
}
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 830bdd5e9b7c..8677c8271678 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
- ret = clk_prepare_enable(hdmi->vpll_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
- ret);
- return ret;
- }
-
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+ ret);
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 1f7353f0684a..798b542e5916 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = {
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
@@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = {
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
/*
@@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = {
static const struct vop_win_data rk3399_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3399_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &rk3288_win01_data,
+ { .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &rk3288_win23_data,
+ { .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &rk3288_win23_data,
+ { .base = 0x50, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 8cf5aeb9db6c..201f5175ecfe 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -5,6 +5,7 @@ config DRM_TEGRA
depends on COMMON_CLK
depends on DRM
depends on OF
+ select DRM_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 1f96e416fa08..d7a731d287d2 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_aux_bus.h>
#include <drm/drm_panel.h>
#include "dp.h"
@@ -570,6 +571,12 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
list_add_tail(&dpaux->list, &dpaux_list);
mutex_unlock(&dpaux_lock);
+ err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
+ if (err < 0) {
+ dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index 223ab2ceb7e6..3762d87759d9 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -63,7 +63,7 @@ static void falcon_copy_firmware_image(struct falcon *falcon,
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
- virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
+ virt[i] = le32_to_cpu(((__le32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 287dbc89ad64..783890e8d43a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -525,9 +525,11 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
if (ret)
return ret;
- ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
- if (ret)
- return ret;
+ /*
+ * post_crtc_powerdown will have called pm_runtime_put, so we
+ * don't need it here; otherwise we'll get the reference counting
+ * wrong.
+ */
return 0;
}
@@ -671,7 +673,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
- mode = &crtc_state->adjusted_mode;
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
mode->clock * 9 / 10) * 1000;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 053fbaf765ca..3a1626f261e5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
connected = true;
} else {
- unsigned long flags;
- u32 hotplug;
-
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- hotplug = HDMI_READ(HDMI_HOTPLUG);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
- if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+ if (vc4_hdmi->variant->hp_detect &&
+ vc4_hdmi->variant->hp_detect(vc4_hdmi))
connected = true;
}
@@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
unsigned long long tmds_rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return -EINVAL;
@@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
@@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
return channel_map;
}
+static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
+{
+ unsigned long flags;
+ u32 hotplug;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ hotplug = HDMI_READ(HDMI_HOTPLUG);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
+}
+
/* HDMI audio codec callbacks */
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
@@ -1741,6 +1749,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
dev_err(dev, "Couldn't register the HDMI codec: %ld\n", PTR_ERR(codec_pdev));
return PTR_ERR(codec_pdev);
}
+ vc4_hdmi->audio.codec_pdev = codec_pdev;
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
@@ -1780,6 +1789,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
}
+static void vc4_hdmi_audio_exit(struct vc4_hdmi *vc4_hdmi)
+{
+ platform_device_unregister(vc4_hdmi->audio.codec_pdev);
+ vc4_hdmi->audio.codec_pdev = NULL;
+}
+
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
@@ -2504,7 +2519,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
* vc4_hdmi_disable_scrambling() will thus run at boot, make
* sure it's disabled, and avoid any inconsistency.
*/
- vc4_hdmi->scdc_enabled = true;
+ if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
+ vc4_hdmi->scdc_enabled = true;
ret = variant->init_resources(vc4_hdmi);
if (ret)
@@ -2651,6 +2667,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
kfree(vc4_hdmi->hdmi_regset.regs);
kfree(vc4_hdmi->hd_regset.regs);
+ vc4_hdmi_audio_exit(vc4_hdmi);
vc4_hdmi_cec_exit(vc4_hdmi);
vc4_hdmi_hotplug_exit(vc4_hdmi);
vc4_hdmi_connector_destroy(&vc4_hdmi->connector);
@@ -2723,6 +2740,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
@@ -2751,6 +2769,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 36c0b082a43b..6ffdd4ec5fb6 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -102,6 +102,9 @@ struct vc4_hdmi_variant {
/* Enables HDR metadata */
bool supports_hdr;
+
+ /* Callback for hardware specific hotplug detect */
+ bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi);
};
/* HDMI audio information */
@@ -113,6 +116,7 @@ struct vc4_hdmi_audio {
struct snd_soc_dai_link_component platform;
struct snd_dmaengine_dai_dma_data dma_data;
struct hdmi_audio_infoframe infoframe;
+ struct platform_device *codec_pdev;
bool streaming;
};
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index e08e331e46ae..f87a8705f518 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -137,8 +137,15 @@ void host1x_syncpt_restore(struct host1x *host)
struct host1x_syncpt *sp_base = host->syncpt;
unsigned int i;
- for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
+ for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
+ /*
+ * Unassign syncpt from channels for purposes of Tegra186
+ * syncpoint protection. This prevents any channel from
+ * accessing it until it is reassigned.
+ */
+ host1x_hw_syncpt_assign_to_channel(host, sp_base + i, NULL);
host1x_hw_syncpt_restore(host, sp_base + i);
+ }
for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
@@ -227,27 +234,12 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
void *ref;
struct host1x_waitlist *waiter;
int err = 0, check_count = 0;
- u32 val;
if (value)
- *value = 0;
-
- /* first check cache */
- if (host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = host1x_syncpt_load(sp);
+ *value = host1x_syncpt_load(sp);
+ if (host1x_syncpt_is_expired(sp, thresh))
return 0;
- }
-
- /* try to read from register */
- val = host1x_hw_syncpt_load(sp->host, sp);
- if (host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = val;
-
- goto done;
- }
if (!timeout) {
err = -EAGAIN;
@@ -352,13 +344,6 @@ int host1x_syncpt_init(struct host1x *host)
for (i = 0; i < host->info->nb_pts; i++) {
syncpt[i].id = i;
syncpt[i].host = host;
-
- /*
- * Unassign syncpt from channels for purposes of Tegra186
- * syncpoint protection. This prevents any channel from
- * accessing it until it is reassigned.
- */
- host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
}
for (i = 0; i < host->info->nb_bases; i++)
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2503be0253d3..19fa734a9a79 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
{
union cmd_response cmd_resp;
- /* Get response with status within a max of 800 ms timeout */
+ /* Get response with status within a max of 1600 ms timeout */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response_v2.response == sensor_sts &&
cmd_resp.response_v2.status == 0 && (sid == 0xff ||
- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
return cmd_resp.response_v2.response;
return SENSOR_DISABLED;
@@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+{
+ if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
+ writel(0, privdata->mmio + AMD_P2C_MSG(4));
+ writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+ }
+}
+
+static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->clear_intr)
+ privdata->mp2_ops->clear_intr(privdata);
+}
+
+static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
+{
+ amd_sfh_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+{
+ int rc;
+
+ pci_intx(privdata->pdev, true);
+
+ rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
+ amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
+ if (rc) {
+ dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n",
+ privdata->pdev->irq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
+ pci_intx(mp2->pdev, false);
+ amd_sfh_clear_intr(mp2);
}
static const struct amd_mp2_ops amd_sfh_ops_v2 = {
@@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
.response = amd_sfh_wait_response_v2,
+ .clear_intr = amd_sfh_clear_intr_v2,
+ .init_intr = amd_sfh_irq_init_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
@@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata)
}
}
+static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->init_intr)
+ return privdata->mp2_ops->init_intr(privdata);
+
+ return 0;
+}
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
@@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
mp2_select_ops(privdata);
+ rc = amd_sfh_irq_init(privdata);
+ if (rc) {
+ dev_err(&pdev->dev, "amd_sfh_irq_init failed\n");
+ return rc;
+ }
+
rc = amd_sfh_hid_client_init(privdata);
- if (rc)
+ if (rc) {
+ amd_sfh_clear_intr(privdata);
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
+ }
+
+ amd_sfh_clear_intr(privdata);
return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
@@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
}
}
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
@@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
}
}
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
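
The timeout bump above goes through readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or the timeout expires. A minimal sketch of the same polling idiom (hypothetical register and ready bit, not the SFH command protocol):

	#include <linux/bits.h>
	#include <linux/iopoll.h>

	#define EXAMPLE_READY	BIT(0)

	/* Poll 'status_reg' roughly every 500 us until the ready bit is set,
	 * giving up after 1.6 s. Returns 0 on success, -ETIMEDOUT otherwise.
	 */
	static int example_wait_ready(void __iomem *status_reg)
	{
		u32 val;

		return readl_poll_timeout(status_reg, val, val & EXAMPLE_READY,
					  500, 1600000);
	}
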
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index ae30e059f847..97b99861fae2 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -49,7 +49,7 @@ union sfh_cmd_base {
} s;
struct {
u32 cmd_id : 4;
- u32 intr_enable : 1;
+ u32 intr_disable : 1;
u32 rsvd1 : 3;
u32 length : 7;
u32 mem_type : 1;
@@ -141,5 +141,7 @@ struct amd_mp2_ops {
void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
void (*stop_all)(struct amd_mp2_dev *privdata);
int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
};
#endif
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index be41f83b0289..76095bd53c65 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -27,6 +27,7 @@
#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+#define ILLUMINANCE_MASK GENMASK(14, 0)
int get_report_descriptor(int sensor_idx, u8 *rep_desc)
{
@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
get_common_inputs(&als_input.common_property, report_id);
/* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
if (supported_input == V2_STATUS)
- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+ als_input.illuminance_value =
+ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
else
als_input.illuminance_value =
(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
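
The ALS fix above masks the raw register value down to its 15-bit illuminance field instead of casting the whole word. A generic illustration of that masking idiom with GENMASK()/FIELD_GET() (hypothetical register layout; since the field starts at bit 0, the plain AND used in the patch is equivalent):

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	#define EXAMPLE_LUX_MASK	GENMASK(14, 0)	/* bits 14:0 hold the lux value */

	/* Extract the illuminance field, dropping whatever the upper bits carry. */
	static u32 example_lux_from_reg(u32 regval)
	{
		return FIELD_GET(EXAMPLE_LUX_MASK, regval);
	}
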
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 24802a4a636e..7dc89dc6b0f0 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 26c31d759914..81e7e404a5fc 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -860,7 +860,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_F22] = "F22", [KEY_F23] = "F23",
[KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD",
[KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3",
- [KEY_PROG4] = "Prog4", [KEY_SUSPEND] = "Suspend",
+ [KEY_PROG4] = "Prog4",
+ [KEY_ALL_APPLICATIONS] = "AllApplications",
+ [KEY_SUSPEND] = "Suspend",
[KEY_CLOSE] = "Close", [KEY_PLAY] = "Play",
[KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost",
[KEY_PRINT] = "Print", [KEY_HP] = "HP",
@@ -969,6 +971,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_ASSISTANT] = "Assistant",
[KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext",
[KEY_EMOJI_PICKER] = "EmojiPicker",
+ [KEY_DICTATE] = "Dictate",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 8e960d7b233b..9b42b0cdeef0 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
err_free:
+ usb_put_dev(udev);
kfree(priv);
return ret;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 85975031389b..78bd3ddda442 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1370,6 +1370,7 @@
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 112901d2d8d2..56ec27398a00 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -992,6 +992,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break;
case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break;
+ case 0x0d8: map_key_clear(KEY_DICTATE); break;
case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break;
case 0x0e0: map_abs_clear(ABS_VOLUME); break;
@@ -1083,6 +1084,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x29d: map_key_clear(KEY_KBD_LAYOUT_NEXT); break;
+ case 0x2a2: map_key_clear(KEY_ALL_APPLICATIONS); break;
+
case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 9af1dc8ae3a2..c066ba901867 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index b4dad66fa954..ec6c73f75ffe 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
struct regulator *vdd;
struct notifier_block nb;
- struct mutex regulator_mutex;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
container_of(nb, struct i2c_hid_of_goodix, nb);
int ret = NOTIFY_OK;
- mutex_lock(&ihid_goodix->regulator_mutex);
-
switch (event) {
case REGULATOR_EVENT_PRE_DISABLE:
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
break;
}
- mutex_unlock(&ihid_goodix->regulator_mutex);
-
return ret;
}
@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
if (!ihid_goodix)
return -ENOMEM;
- mutex_init(&ihid_goodix->regulator_mutex);
-
ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
* long. Holding the controller in reset apparently draws extra
* power.
*/
- mutex_lock(&ihid_goodix->regulator_mutex);
ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
- if (ret) {
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ if (ret)
return dev_err_probe(&client->dev, ret,
"regulator notifier request failed\n");
- }
/*
* If someone else is holding the regulator on (or the regulator is
* an always-on one) we might never be told to deassert reset. Do it
- * now. Here we'll assume that someone else might have _just
- * barely_ turned the regulator on so we'll do the full
- * "post_power_delay" just in case.
+ * now... and temporarily bump the regulator reference count just to
+ * make sure it is impossible for this to race with our own notifier!
+ * We also assume that someone else might have _just barely_ turned
+ * the regulator on so we'll do the full "post_power_delay" just in
+ * case.
*/
- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ regulator_disable(ihid_goodix->vdd);
+ }
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
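
The probe change above brackets the reset de-assert with regulator_enable()/regulator_disable() so the supply cannot be powered down (and the notifier fired) while the GPIO is being released. The same hold-a-reference idiom in isolation (hypothetical helper, any regulator consumer):

	#include <linux/regulator/consumer.h>

	/* Run 'fn' while holding an extra enable reference on 'reg', so another
	 * consumer dropping its reference cannot switch the supply off mid-way.
	 */
	static int example_with_supply_held(struct regulator *reg,
					    void (*fn)(void *data), void *data)
	{
		int ret = regulator_enable(reg);

		if (ret)
			return ret;

		fn(data);

		regulator_disable(reg);
		return 0;
	}
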
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index eb2833d2b5d0..832885198643 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -13,7 +13,7 @@
#include "hv_utils_transport.h"
static DEFINE_SPINLOCK(hvt_list_lock);
-static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+static LIST_HEAD(hvt_list);
static void hvt_reset(struct hvutil_transport *hvt)
{
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 17bf55fe3169..12a2b37e87f3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2028,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
ret = sysfs_create_group(kobj, &vmbus_chan_group);
@@ -2038,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
* The calling functions' error handling paths will cleanup the
* empty channel directory.
*/
+ kobject_put(kobj);
dev_err(device, "Unable to set up channel sysfs files\n");
return ret;
}
@@ -2079,7 +2082,6 @@ struct hv_device *vmbus_device_create(const guid_t *type,
return child_device_obj;
}
-static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
/*
* vmbus_device_register - Register the child device
*/
@@ -2120,8 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
- child_device_obj->device.dma_mask = &vmbus_dma_mask;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
return 0;
err_kset_unregister:
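
The vmbus fix above follows the usual kobject rule: once kobject_init_and_add() has been called, the object holds a reference even on failure and must be released with kobject_put(), never freed directly. A minimal sketch of that error-path pattern (hypothetical ktype and name):

	#include <linux/kobject.h>

	static void example_release(struct kobject *kobj)
	{
		/* kobject_put() ends up here once the last reference is dropped */
	}

	static struct kobj_type example_ktype = {
		.release = example_release,
	};

	static int example_add_kobj(struct kobject *kobj, struct kobject *parent)
	{
		int ret;

		ret = kobject_init_and_add(kobj, &example_ktype, parent, "example");
		if (ret)
			kobject_put(kobj);	/* drop the ref taken by init_and_add */

		return ret;
	}
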
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3501a3ead4ba..3ae961986fc3 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -214,12 +214,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index)
tzd = devm_thermal_zone_of_sensor_register(dev, index, tdata,
&hwmon_thermal_ops);
- /*
- * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV,
- * so ignore that error but forward any other error.
- */
- if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV))
- return PTR_ERR(tzd);
+ if (IS_ERR(tzd)) {
+ if (PTR_ERR(tzd) != -ENODEV)
+ return PTR_ERR(tzd);
+ dev_info(dev, "temp%d_input not attached to any thermal zone\n",
+ index + 1);
+ devm_kfree(dev, tdata);
+ return 0;
+ }
err = devm_add_action(dev, hwmon_thermal_remove_sensor, &tdata->node);
if (err)
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 414204f5704c..9c9e9f4ccb9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -59,7 +59,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP15XH103] = { "ncp15xh103", TYPE_NCPXXXH103 },
[NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 },
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
- [NTC_SSG1404001221] = { "ssg1404-001221", TYPE_NCPXXWB473 },
+ [NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
[NTC_LAST] = { },
};
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 776ee2237be2..ac2fbee1ba9c 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -911,6 +911,11 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
pmbus_update_sensor_data(client, s2);
regval = status & mask;
+ if (regval) {
+ ret = pmbus_write_byte_data(client, page, reg, regval);
+ if (ret)
+ goto unlock;
+ }
if (s1 && s2) {
s64 v1, v2;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 42da31c1ab70..8a6c6ee28556 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -488,7 +488,7 @@ config I2C_BRCMSTB
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64 || XTENSA
+ depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -680,7 +680,7 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
select I2C_SLAVE
help
Say Y here if you want to use the IIC bus controller on
@@ -935,7 +935,7 @@ config I2C_QCOM_GENI
config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Qualcomm SoCs.
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index dfc534065595..5149454eef4a 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -23,6 +23,11 @@
#define BCM2835_I2C_FIFO 0x10
#define BCM2835_I2C_DIV 0x14
#define BCM2835_I2C_DEL 0x18
+/*
+ * 16-bit field for the number of SCL cycles to wait after rising SCL
+ * before deciding the slave is not responding. 0 disables the
+ * timeout detection.
+ */
#define BCM2835_I2C_CLKT 0x1c
#define BCM2835_I2C_C_READ BIT(0)
@@ -474,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->quirks = of_device_get_match_data(&pdev->dev);
+ /*
+ * Disable the hardware clock stretching timeout. SMBUS
+ * specifies a limit for how long the device can stretch the
+ * clock, but core I2C doesn't.
+ */
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
ret = i2c_add_adapter(adap);
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 490ee3962645..b00f35c0b066 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
/* set the data in/out register size for compatible SoCs */
if (of_device_is_compatible(dev->device->of_node,
- "brcmstb,brcmper-i2c"))
+ "brcm,brcmper-i2c"))
dev->data_regsz = sizeof(u8);
else
dev->data_regsz = sizeof(u32);
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index c1de8eb66169..cf54f1cb4c57 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].adap.quirks = &cci->data->quirks;
cci->master[idx].adap.algo = &cci_algo;
cci->master[idx].adap.dev.parent = dev;
- cci->master[idx].adap.dev.of_node = child;
+ cci->master[idx].adap.dev.of_node = of_node_get(child);
cci->master[idx].master = idx;
cci->master[idx].cci = cci;
@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
continue;
ret = i2c_add_adapter(&cci->master[i].adap);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cci->master[i].adap.dev.of_node);
goto error_i2c;
+ }
}
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
return 0;
error_i2c:
- for (; i >= 0; i--) {
- if (cci->master[i].cci)
+ for (--i ; i >= 0; i--) {
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
}
error:
disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
int i;
for (i = 0; i < cci->data->num_masters; i++) {
- if (cci->master[i].cci)
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
cci_halt(cci, i);
}
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index e6081dd0a880..d11f668016a6 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -1783,11 +1783,14 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "Unable to register iio device\n");
- goto err_trigger_unregister;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
err_trigger_unregister:
bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
err_buffer_cleanup:
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 32989d91b982..f7fd9e046588 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -173,12 +173,20 @@ struct fxls8962af_data {
u16 upper_thres;
};
-const struct regmap_config fxls8962af_regmap_conf = {
+const struct regmap_config fxls8962af_i2c_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.max_register = FXLS8962AF_MAX_REG,
};
-EXPORT_SYMBOL_GPL(fxls8962af_regmap_conf);
+EXPORT_SYMBOL_GPL(fxls8962af_i2c_regmap_conf);
+
+const struct regmap_config fxls8962af_spi_regmap_conf = {
+ .reg_bits = 8,
+ .pad_bits = 8,
+ .val_bits = 8,
+ .max_register = FXLS8962AF_MAX_REG,
+};
+EXPORT_SYMBOL_GPL(fxls8962af_spi_regmap_conf);
enum {
fxls8962af_idx_x,
diff --git a/drivers/iio/accel/fxls8962af-i2c.c b/drivers/iio/accel/fxls8962af-i2c.c
index cfb004b20455..6bde9891effb 100644
--- a/drivers/iio/accel/fxls8962af-i2c.c
+++ b/drivers/iio/accel/fxls8962af-i2c.c
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct i2c_client *client)
{
struct regmap *regmap;
- regmap = devm_regmap_init_i2c(client, &fxls8962af_regmap_conf);
+ regmap = devm_regmap_init_i2c(client, &fxls8962af_i2c_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to initialize i2c regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/fxls8962af-spi.c b/drivers/iio/accel/fxls8962af-spi.c
index 57108d3d480b..6f4dff3238d3 100644
--- a/drivers/iio/accel/fxls8962af-spi.c
+++ b/drivers/iio/accel/fxls8962af-spi.c
@@ -18,7 +18,7 @@ static int fxls8962af_probe(struct spi_device *spi)
{
struct regmap *regmap;
- regmap = devm_regmap_init_spi(spi, &fxls8962af_regmap_conf);
+ regmap = devm_regmap_init_spi(spi, &fxls8962af_spi_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to initialize spi regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/fxls8962af.h b/drivers/iio/accel/fxls8962af.h
index b67572c3ef06..9cbe98c3ba9a 100644
--- a/drivers/iio/accel/fxls8962af.h
+++ b/drivers/iio/accel/fxls8962af.h
@@ -17,6 +17,7 @@ int fxls8962af_core_probe(struct device *dev, struct regmap *regmap, int irq);
int fxls8962af_core_remove(struct device *dev);
extern const struct dev_pm_ops fxls8962af_pm_ops;
-extern const struct regmap_config fxls8962af_regmap_conf;
+extern const struct regmap_config fxls8962af_i2c_regmap_conf;
+extern const struct regmap_config fxls8962af_spi_regmap_conf;
#endif /* _FXLS8962AF_H_ */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 0fe570316848..ac74cdcd2bc8 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1590,11 +1590,14 @@ static int kxcjk1013_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index 4c359fb05480..c53a3398b14c 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -495,11 +495,14 @@ static int mma9551_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto out_poweroff;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
out_poweroff:
mma9551_set_device_state(client, false);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 0570ab1cc064..5ff6bc70708b 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1134,12 +1134,15 @@ static int mma9553_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "unable to register iio device\n");
- goto out_poweroff;
+ goto err_pm_cleanup;
}
dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
out_poweroff:
mma9551_set_device_state(client, false);
return ret;
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index bc2cfa5f9592..b400bbe291aa 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -76,7 +76,7 @@
#define AD7124_CONFIG_REF_SEL(x) FIELD_PREP(AD7124_CONFIG_REF_SEL_MSK, x)
#define AD7124_CONFIG_PGA_MSK GENMASK(2, 0)
#define AD7124_CONFIG_PGA(x) FIELD_PREP(AD7124_CONFIG_PGA_MSK, x)
-#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(7, 6)
+#define AD7124_CONFIG_IN_BUFF_MSK GENMASK(6, 5)
#define AD7124_CONFIG_IN_BUFF(x) FIELD_PREP(AD7124_CONFIG_IN_BUFF_MSK, x)
/* AD7124_FILTER_X */
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index 42ea8bc7e780..adc5ceaef8c9 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -103,6 +103,7 @@ static int men_z188_probe(struct mcb_device *dev,
struct z188_adc *adc;
struct iio_dev *indio_dev;
struct resource *mem;
+ int ret;
indio_dev = devm_iio_device_alloc(&dev->dev, sizeof(struct z188_adc));
if (!indio_dev)
@@ -128,8 +129,14 @@ static int men_z188_probe(struct mcb_device *dev,
adc->mem = mem;
mcb_set_drvdata(dev, indio_dev);
- return iio_device_register(indio_dev);
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+err_unmap:
+ iounmap(adc->base);
err:
mcb_release_mem(mem);
return -ENXIO;
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index d84ae6b008c1..e8fc4d01f30b 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -388,7 +388,7 @@ static int tsc2046_adc_update_scan_mode(struct iio_dev *indio_dev,
mutex_lock(&priv->slock);
size = 0;
- for_each_set_bit(ch_idx, active_scan_mask, indio_dev->num_channels) {
+ for_each_set_bit(ch_idx, active_scan_mask, ARRAY_SIZE(priv->l)) {
size += tsc2046_adc_group_set_layout(priv, group, ch_idx);
tsc2046_adc_group_set_cmd(priv, group, ch_idx);
group++;
@@ -548,7 +548,7 @@ static int tsc2046_adc_setup_spi_msg(struct tsc2046_adc_priv *priv)
* enabled.
*/
size = 0;
- for (ch_idx = 0; ch_idx < priv->dcfg->num_channels; ch_idx++)
+ for (ch_idx = 0; ch_idx < ARRAY_SIZE(priv->l); ch_idx++)
size += tsc2046_adc_group_set_layout(priv, ch_idx, ch_idx);
priv->tx = devm_kzalloc(&priv->spi->dev, size, GFP_KERNEL);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index 5271073bb74e..acd230a6af35 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -134,7 +134,6 @@ struct ad74413r_state {
#define AD74413R_CH_EN_MASK(x) BIT(x)
#define AD74413R_REG_DIN_COMP_OUT 0x25
-#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x
#define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x))
#define AD74413R_ADC_RESULT_MAX GENMASK(15, 0)
@@ -288,7 +287,7 @@ static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
unsigned int offset = 0;
int ret;
- for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ for_each_set_bit_from(offset, mask, chip->ngpio) {
unsigned int real_offset = st->gpo_gpio_offsets[offset];
ret = ad74413r_set_gpo_config(st, real_offset,
@@ -316,7 +315,7 @@ static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset);
+ status &= BIT(real_offset);
return status ? 1 : 0;
}
@@ -334,11 +333,10 @@ static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
if (ret)
return ret;
- for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ for_each_set_bit_from(offset, mask, chip->ngpio) {
unsigned int real_offset = st->comp_gpio_offsets[offset];
- if (val & BIT(real_offset))
- *bits |= offset;
+ __assign_bit(offset, bits, val & BIT(real_offset));
}
return ret;
@@ -840,7 +838,7 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
{
struct ad74413r_state *st = iio_priv(indio_dev);
struct spi_transfer *xfer = st->adc_samples_xfer;
- u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE];
+ u8 *rx_buf = st->adc_samples_buf.rx_buf;
u8 *tx_buf = st->adc_samples_tx_buf;
unsigned int channel;
int ret = -EINVAL;
@@ -894,9 +892,10 @@ static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
spi_message_add_tail(xfer, &st->adc_samples_msg);
- xfer++;
tx_buf += AD74413R_FRAME_SIZE;
- rx_buf += AD74413R_FRAME_SIZE;
+ if (xfer != st->adc_samples_xfer)
+ rx_buf += AD74413R_FRAME_SIZE;
+ xfer++;
}
xfer->rx_buf = rx_buf;
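
The get_multiple() fix above replaces an OR of the bit *index* with __assign_bit(), which copies a boolean into a specific bit of a bitmap. Its semantics in isolation (hypothetical bitmaps):

	#include <linux/bitmap.h>
	#include <linux/bitops.h>

	/* Mirror bit 'src_nr' of 'src' into bit 'dst_nr' of 'dst': __assign_bit()
	 * sets the destination bit when the value is true and clears it otherwise.
	 */
	static void example_copy_bit(unsigned long *dst, unsigned int dst_nr,
				     const unsigned long *src, unsigned int src_nr)
	{
		__assign_bit(dst_nr, dst, test_bit(src_nr, src));
	}
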
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 6cdeb50143af..3f3c478e9baa 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -348,7 +348,7 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
vcm = regulator_get_voltage(st->reg);
- if (vcm >= 0 && vcm < 1800000)
+ if (vcm < 1800000)
mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
else if (vcm > 1800000 && vcm < 2600000)
mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 17b939a367ad..81a6d09788bd 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -1188,11 +1188,14 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ goto err_pm_cleanup;
}
return 0;
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_trigger_unregister:
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index ed129321a14d..f9b4540db1f4 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1403,6 +1403,7 @@ static int adis16480_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
const struct adis_data *adis16480_data;
+ irq_handler_t trigger_handler = NULL;
struct iio_dev *indio_dev;
struct adis16480 *st;
int ret;
@@ -1474,8 +1475,12 @@ static int adis16480_probe(struct spi_device *spi)
st->clk_freq = st->chip_info->int_clk;
}
+ /* Only use our trigger handler if burst mode is supported */
+ if (adis16480_data->burst_len)
+ trigger_handler = adis16480_trigger_handler;
+
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
- adis16480_trigger_handler);
+ trigger_handler);
if (ret)
return ret;
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 1dabfd615dab..f89724481df9 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1385,7 +1385,7 @@ static int kmx61_probe(struct i2c_client *client,
ret = iio_device_register(data->acc_indio_dev);
if (ret < 0) {
dev_err(&client->dev, "Failed to register acc iio device\n");
- goto err_buffer_cleanup_mag;
+ goto err_pm_cleanup;
}
ret = iio_device_register(data->mag_indio_dev);
@@ -1398,6 +1398,9 @@ static int kmx61_probe(struct i2c_client *client,
err_iio_unregister_acc:
iio_device_unregister(data->acc_indio_dev);
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(&client->dev);
+ pm_runtime_disable(&client->dev);
err_buffer_cleanup_mag:
if (client->irq > 0)
iio_triggered_buffer_cleanup(data->mag_indio_dev);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index 727b4b6ac696..93f0c6bce502 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -1374,8 +1374,12 @@ static int st_lsm6dsx_read_oneshot(struct st_lsm6dsx_sensor *sensor,
if (err < 0)
return err;
+ /*
+ * we need to wait for sensor settling time before
+ * reading data in order to avoid corrupted samples
+ */
delay = 1000000000 / sensor->odr;
- usleep_range(delay, 2 * delay);
+ usleep_range(3 * delay, 4 * delay);
err = st_lsm6dsx_read_locked(hw, addr, &data, sizeof(data));
if (err < 0)
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 94eb9f6cf128..208b5193c621 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1569,9 +1569,17 @@ static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg
}
if (copy_to_user(ival, &fd, sizeof(fd))) {
- put_unused_fd(fd);
- ret = -EFAULT;
- goto error_free_ib;
+ /*
+ * "Leak" the fd, as there's not much we can do about this
+ * anyway. 'fd' might have been closed already, as
+ * anon_inode_getfd() called fd_install() on it, which made
+ * it reachable by userland.
+ *
+ * Instead of allowing a malicious user to play tricks with
+ * us, rely on the process exit path to do any necessary
+ * cleanup, as in releasing the file, if still needed.
+ */
+ return -EFAULT;
}
return 0;
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index f96f53175349..3d4d21f979fa 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -962,13 +962,14 @@ int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_disable_runtime_pm;
+ goto err_pm_cleanup;
}
dev_dbg(dev, "Registered device %s\n", name);
return 0;
-err_disable_runtime_pm:
+err_pm_cleanup:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c903b74f46a4..35f0d5e7533d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work)
ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
if (ret) {
rdma_destroy_ah_attr(&ah_attr);
- return -EINVAL;
+ goto deref;
}
spin_lock_irq(&cm_id_priv->lock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 27a00ce2e101..50c53409ceb6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -67,8 +67,8 @@ static const char * const cma_events[] = {
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
-static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
- union ib_gid *mgid);
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type);
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
@@ -1846,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv,
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
- if (ndev) {
+ if (ndev && !send_only) {
+ enum ib_gid_type gid_type;
union ib_gid mgid;
- cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
- &mgid);
-
- if (!send_only)
- cma_igmp_send(ndev, &mgid, false);
-
- dev_put(ndev);
+ gid_type = id_priv->cma_dev->default_gid_type
+ [id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+ cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
+ gid_type);
+ cma_igmp_send(ndev, &mgid, false);
}
+ dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work);
}
@@ -3368,22 +3370,30 @@ err:
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
const struct sockaddr *dst_addr)
{
- if (!src_addr || !src_addr->sa_family) {
- src_addr = (struct sockaddr *) &id->route.addr.src_addr;
- src_addr->sa_family = dst_addr->sa_family;
- if (IS_ENABLED(CONFIG_IPV6) &&
- dst_addr->sa_family == AF_INET6) {
- struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
- struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
- } else if (dst_addr->sa_family == AF_IB) {
- ((struct sockaddr_ib *) src_addr)->sib_pkey =
- ((struct sockaddr_ib *) dst_addr)->sib_pkey;
- }
- }
- return rdma_bind_addr(id, src_addr);
+ struct sockaddr_storage zero_sock = {};
+
+ if (src_addr && src_addr->sa_family)
+ return rdma_bind_addr(id, src_addr);
+
+ /*
+ * When the src_addr is not specified, automatically supply an any addr
+ */
+ zero_sock.ss_family = dst_addr->sa_family;
+ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src_addr6 =
+ (struct sockaddr_in6 *)&zero_sock;
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *)dst_addr;
+
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ id->route.addr.dev_addr.bound_dev_if =
+ dst_addr6->sin6_scope_id;
+ } else if (dst_addr->sa_family == AF_IB) {
+ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
+ }
+ return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
}
/*
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2b72c4fa9550..9d6ac9dff39a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,6 +95,7 @@ struct ucma_context {
u64 uid;
struct list_head list;
+ struct list_head mc_list;
struct work_struct close_work;
};
@@ -105,6 +106,7 @@ struct ucma_multicast {
u64 uid;
u8 join_state;
+ struct list_head list;
struct sockaddr_storage addr;
};
@@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_WORK(&ctx->close_work, ucma_close_id);
init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
@@ -484,19 +487,19 @@ err1:
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
- struct ucma_multicast *mc;
- unsigned long index;
+ struct ucma_multicast *mc, *tmp;
- xa_for_each(&multicast_table, index, mc) {
- if (mc->ctx != ctx)
- continue;
+ xa_lock(&multicast_table);
+ list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
+ list_del(&mc->list);
/*
* At this point mc->ctx->ref is 0 so the mc cannot leave the
* lock on the reader and this is enough serialization
*/
- xa_erase(&multicast_table, index);
+ __xa_erase(&multicast_table, mc->id);
kfree(mc);
}
+ xa_unlock(&multicast_table);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
@@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
+ xa_lock(&multicast_table);
+ if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
GFP_KERNEL)) {
ret = -ENOMEM;
goto err_free_mc;
}
+ list_add_tail(&mc->list, &ctx->mc_list);
+ xa_unlock(&multicast_table);
+
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
@@ -1500,8 +1507,11 @@ err_leave_multicast:
mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err_xa_erase:
- xa_erase(&multicast_table, mc->id);
+ xa_lock(&multicast_table);
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
err_free_mc:
+ xa_unlock(&multicast_table);
kfree(mc);
err_put_ctx:
ucma_put_ctx(ctx);
@@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-EINVAL);
else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
- else
- __xa_erase(&multicast_table, mc->id);
- xa_unlock(&multicast_table);
if (IS_ERR(mc)) {
+ xa_unlock(&multicast_table);
ret = PTR_ERR(mc);
goto out;
}
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
+ xa_unlock(&multicast_table);
+
mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 909122934246..aec60d4888eb 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -55,7 +55,7 @@ union hfi1_ipoib_flow {
*/
struct ipoib_txreq {
struct sdma_txreq txreq;
- struct hfi1_sdma_header sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr;
int sdma_status;
int complete;
struct hfi1_ipoib_dev_priv *priv;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index e1a2b02bbd91..5d814afdf7f3 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
int ret;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
- return ret;
+ goto out_ret;
ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr),
dev);
if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev);
- return ret;
+ goto out_ret;
}
return 0;
+out_ret:
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
}
static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev);
@@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_rxq_deinit(priv->netdev);
free_percpu(dev->tstats);
-}
-
-static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
-{
- hfi1_ipoib_netdev_dtor(dev);
- free_netdev(dev);
+ dev->tstats = NULL;
}
static void hfi1_ipoib_set_id(struct net_device *dev, int id)
@@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
priv->port_num = port_num;
priv->netdev_ops = netdev->netdev_ops;
- netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
-
ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
rc = hfi1_ipoib_txreq_init(priv);
if (rc) {
dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
return rc;
}
rc = hfi1_ipoib_rxq_init(netdev);
if (rc) {
dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
+ hfi1_ipoib_txreq_deinit(priv);
return rc;
}
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index f4010890309f..d6bbdb8fcb50 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
dd_dev_warn(priv->dd,
"%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
__func__, tx->sdma_status,
- le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
tx->txq->sde->this_idx);
}
@@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
{
struct hfi1_devdata *dd = txp->dd;
struct sdma_txreq *txreq = &tx->txreq;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
u16 pkt_bytes =
sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
int ret;
@@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
struct rdma_ah_attr *ah_attr = txp->ah_attr;
@@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
if (likely(!ret)) {
tx_ok:
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
return NETDEV_TX_OK;
@@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
hfi1_ipoib_check_queue_depth(txq);
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
if (!netdev_xmit_more())
@@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
struct net_device *dev = priv->netdev;
u32 tx_ring_size, tx_item_size;
- int i;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ int i, j;
/*
* Ring holds 1 less than tx_ring_size
@@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct ipoib_txreq *tx;
+ tx_ring = &txq->tx_ring;
iowait_init(&txq->wait,
0,
hfi1_ipoib_flush_txq,
@@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- kcalloc_node(tx_ring_size, tx_item_size,
- GFP_KERNEL, priv->dd->node);
+ kvzalloc_node(array_size(tx_ring_size, tx_item_size),
+ GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
txq->tx_ring.max_items = tx_ring_size;
- txq->tx_ring.shift = ilog2(tx_ring_size);
+ txq->tx_ring.shift = ilog2(tx_item_size);
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ kzalloc_node(sizeof(*tx->sdma_hdr),
+ GFP_KERNEL, priv->dd->node);
netif_tx_napi_add(dev, &txq->napi,
hfi1_ipoib_poll_tx_ring,
@@ -746,7 +754,10 @@ free_txqs:
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(&txq->napi);
- kfree(txq->tx_ring.items);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
@@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
- int i;
+ int i, j;
for (i = 0; i < priv->netdev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(&txq->napi);
hfi1_ipoib_drain_tx_ring(txq);
- kfree(txq->tx_ring.items);
+ for (j = 0; j < tx_ring->max_items; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1c3d97229988..93b1650eacfa 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
if (!ew)
- break;
+ return;
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 0a3b28142c05..41c272980f91 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -541,7 +541,7 @@ static struct attribute *port_diagc_attributes[] = {
};
static const struct attribute_group port_diagc_group = {
- .name = "linkcontrol",
+ .name = "diag_counters",
.attrs = port_diagc_attributes,
};
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3305f2744bfa..ae50b56e8913 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -3073,6 +3073,8 @@ do_write:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
+ if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
+ goto inv_err;
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 368959ae9a8c..df03d84c6868 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}
-static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
-{
- return &qp->orq[qp->orq_put % qp->attrs.orq_size];
-}
-
static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
- struct siw_sqe *orq_e = orq_get_tail(qp);
+ struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 60116f20653c..875ea6f1b04a 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
spin_lock_irqsave(&qp->orq_lock, flags);
- rreq = orq_get_current(qp);
-
/* free current orq entry */
+ rreq = orq_get_current(qp);
WRITE_ONCE(rreq->flags, 0);
+ qp->orq_get++;
+
if (qp->tx_ctx.orq_fence) {
if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
@@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
rv = -EPROTO;
goto out;
}
- /* resume SQ processing */
+ /* resume SQ processing, if possible */
if (tx_waiting->sqe.opcode == SIW_OP_READ ||
tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
- rreq = orq_get_tail(qp);
+
+ /* SQ processing was stopped because of a full ORQ */
+ rreq = orq_get_free(qp);
if (unlikely(!rreq)) {
pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
rv = -EPROTO;
@@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
resume_tx = 1;
} else if (siw_orq_empty(qp)) {
+ /*
+ * SQ processing was stopped by fenced work request.
+ * Resume since all previous Read's are now completed.
+ */
qp->tx_ctx.orq_fence = 0;
resume_tx = 1;
- } else {
- pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
- qp_id(qp), qp->orq_get, qp->orq_put);
- rv = -EPROTO;
}
}
- qp->orq_get++;
out:
spin_unlock_irqrestore(&qp->orq_lock, flags);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index a3dd2cb6d5c9..54ef367b074a 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -313,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto err_atomic;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 7c3f98e57889..759b85f03331 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2682,6 +2682,8 @@ static void rtrs_clt_dev_release(struct device *dev)
struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
dev);
+ mutex_destroy(&clt->paths_ev_mutex);
+ mutex_destroy(&clt->paths_mutex);
kfree(clt);
}
@@ -2711,6 +2713,8 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
return ERR_PTR(-ENOMEM);
}
+ clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.release = rtrs_clt_dev_release;
uuid_gen(&clt->paths_uuid);
INIT_LIST_HEAD_RCU(&clt->paths_list);
clt->paths_num = paths_num;
@@ -2727,53 +2731,51 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
init_waitqueue_head(&clt->permits_wait);
mutex_init(&clt->paths_ev_mutex);
mutex_init(&clt->paths_mutex);
+ device_initialize(&clt->dev);
- clt->dev.class = rtrs_clt_dev_class;
- clt->dev.release = rtrs_clt_dev_release;
err = dev_set_name(&clt->dev, "%s", sessname);
if (err)
- goto err;
+ goto err_put;
+
/*
* Suppress user space notification until
* sysfs files are created
*/
dev_set_uevent_suppress(&clt->dev, true);
- err = device_register(&clt->dev);
- if (err) {
- put_device(&clt->dev);
- goto err;
- }
+ err = device_add(&clt->dev);
+ if (err)
+ goto err_put;
clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
if (!clt->kobj_paths) {
err = -ENOMEM;
- goto err_dev;
+ goto err_del;
}
err = rtrs_clt_create_sysfs_root_files(clt);
if (err) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
- goto err_dev;
+ goto err_del;
}
dev_set_uevent_suppress(&clt->dev, false);
kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
return clt;
-err_dev:
- device_unregister(&clt->dev);
-err:
+err_del:
+ device_del(&clt->dev);
+err_put:
free_percpu(clt->pcpu_path);
- kfree(clt);
+ put_device(&clt->dev);
return ERR_PTR(err);
}
static void free_clt(struct rtrs_clt_sess *clt)
{
- free_permits(clt);
free_percpu(clt->pcpu_path);
- mutex_destroy(&clt->paths_ev_mutex);
- mutex_destroy(&clt->paths_mutex);
- /* release callback will free clt in last put */
+
+ /*
+ * release callback will free clt and destroy mutexes in last put
+ */
device_unregister(&clt->dev);
}
@@ -2890,6 +2892,7 @@ void rtrs_clt_close(struct rtrs_clt_sess *clt)
rtrs_clt_destroy_path_files(clt_path, NULL);
kobject_put(&clt_path->kobj);
}
+ free_permits(clt);
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e174e853f8a4..285b766e4e70 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -4047,9 +4047,11 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
spin_unlock(&host->target_lock);
/*
- * Wait for tl_err and target port removal tasks.
+ * srp_queue_remove_work() queues a call to
+ * srp_remove_target(). The latter function cancels
+ * target->tl_err_work so waiting for the remove works to
+ * finish is sufficient.
*/
- flush_workqueue(system_long_wq);
flush_workqueue(srp_remove_wq);
kfree(host);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ccaeb2426385..c3139bc2aa0d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev)
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
+ /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
+ if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+ }
+
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 0c607da9ee10..9417ee0b1eff 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -556,7 +556,7 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
- depends on HAVE_CLK
+ depends on HAS_IOMEM && HAVE_CLK
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad on your Samsung mobile
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 47af62c12267..e1758d5ffe42 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -186,55 +186,21 @@ static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
return 0;
}
-static int elan_enable_power(struct elan_tp_data *data)
+static int elan_set_power(struct elan_tp_data *data, bool on)
{
int repeat = ETP_RETRY_COUNT;
int error;
- error = regulator_enable(data->vcc);
- if (error) {
- dev_err(&data->client->dev,
- "failed to enable regulator: %d\n", error);
- return error;
- }
-
do {
- error = data->ops->power_control(data->client, true);
+ error = data->ops->power_control(data->client, on);
if (error >= 0)
return 0;
msleep(30);
} while (--repeat > 0);
- dev_err(&data->client->dev, "failed to enable power: %d\n", error);
- return error;
-}
-
-static int elan_disable_power(struct elan_tp_data *data)
-{
- int repeat = ETP_RETRY_COUNT;
- int error;
-
- do {
- error = data->ops->power_control(data->client, false);
- if (!error) {
- error = regulator_disable(data->vcc);
- if (error) {
- dev_err(&data->client->dev,
- "failed to disable regulator: %d\n",
- error);
- /* Attempt to power the chip back up */
- data->ops->power_control(data->client, true);
- break;
- }
-
- return 0;
- }
-
- msleep(30);
- } while (--repeat > 0);
-
- dev_err(&data->client->dev, "failed to disable power: %d\n", error);
+ dev_err(&data->client->dev, "failed to set power %s: %d\n",
+ on ? "on" : "off", error);
return error;
}
@@ -1399,9 +1365,19 @@ static int __maybe_unused elan_suspend(struct device *dev)
/* Enable wake from IRQ */
data->irq_wake = (enable_irq_wake(client->irq) == 0);
} else {
- ret = elan_disable_power(data);
+ ret = elan_set_power(data, false);
+ if (ret)
+ goto err;
+
+ ret = regulator_disable(data->vcc);
+ if (ret) {
+ dev_err(dev, "error %d disabling regulator\n", ret);
+ /* Attempt to power the chip back up */
+ elan_set_power(data, true);
+ }
}
+err:
mutex_unlock(&data->sysfs_mutex);
return ret;
}
@@ -1412,12 +1388,18 @@ static int __maybe_unused elan_resume(struct device *dev)
struct elan_tp_data *data = i2c_get_clientdata(client);
int error;
- if (device_may_wakeup(dev) && data->irq_wake) {
+ if (!device_may_wakeup(dev)) {
+ error = regulator_enable(data->vcc);
+ if (error) {
+ dev_err(dev, "error %d enabling regulator\n", error);
+ goto err;
+ }
+ } else if (data->irq_wake) {
disable_irq_wake(client->irq);
data->irq_wake = false;
}
- error = elan_enable_power(data);
+ error = elan_set_power(data, true);
if (error) {
dev_err(dev, "power up when resuming failed: %d\n", error);
goto err;
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index a472489ccbad..164f6c757f6b 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client)
"Marking SMBus companion %s as gone\n",
dev_name(&smbdev->client->dev));
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &smbdev->psmouse->ps2dev.serio->dev);
serio_rescan(smbdev->psmouse->ps2dev.serio);
} else {
list_del(&smbdev->node);
@@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse)
kfree(smbdev);
} else {
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev);
psmouse_dbg(smbdev->psmouse,
"posting removal request for SMBus companion %s\n",
dev_name(&smbdev->client->dev));
@@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse,
if (smbdev->client) {
/* We have our companion device */
+ if (!device_link_add(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev,
+ DL_FLAG_STATELESS))
+ psmouse_warn(psmouse,
+ "failed to set up link with iSMBus companion %s\n",
+ dev_name(&smbdev->client->dev));
return 0;
}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index a3bfc7a41679..752e8ba4fecb 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/of.h>
@@ -805,21 +806,6 @@ static int goodix_reset(struct goodix_ts_data *ts)
}
#ifdef ACPI_GPIO_SUPPORT
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
-static const struct x86_cpu_id baytrail_cpu_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, },
- {}
-};
-
-static inline bool is_byt(void)
-{
- const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids);
-
- return !!id;
-}
-
static const struct acpi_gpio_params first_gpio = { 0, 0, false };
static const struct acpi_gpio_params second_gpio = { 1, 0, false };
@@ -878,7 +864,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
const struct acpi_gpio_mapping *gpio_mapping = NULL;
struct device *dev = &ts->client->dev;
LIST_HEAD(resources);
- int ret;
+ int irq, ret;
ts->gpio_count = 0;
ts->gpio_int_idx = -1;
@@ -891,6 +877,20 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
acpi_dev_free_resource_list(&resources);
+ /*
+ * CHT devices should have a GpioInt + a regular GPIO ACPI resource.
+ * Some CHT devices have a bug (there is also a bogus Interrupt
+ * resource, copied from a previous BYT based generation). i2c-core-acpi
+ * will use the non-working Interrupt resource, fix this up.
+ */
+ if (soc_intel_is_cht() && ts->gpio_count == 2 && ts->gpio_int_idx != -1) {
+ irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 0);
+ if (irq > 0 && irq != ts->client->irq) {
+ dev_warn(dev, "Overriding IRQ %d -> %d\n", ts->client->irq, irq);
+ ts->client->irq = irq;
+ }
+ }
+
if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_first_gpios;
@@ -903,7 +903,7 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD;
gpio_mapping = acpi_goodix_reset_only_gpios;
- } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
+ } else if (soc_intel_is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
gpio_mapping = acpi_goodix_int_last_gpios;
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 78d2ee99f37a..1b58611c8084 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm)
* extensions)
*/
wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
- if (!wm->touch_dev) {
- ret = -ENOMEM;
- goto touch_err;
- }
+ if (!wm->touch_dev)
+ return -ENOMEM;
+
platform_set_drvdata(wm->touch_dev, wm);
wm->touch_dev->dev.parent = wm->dev;
wm->touch_dev->dev.platform_data = pdata;
@@ -629,9 +628,6 @@ static int wm97xx_register_touch(struct wm97xx *wm)
return 0;
touch_reg_err:
platform_device_put(wm->touch_dev);
-touch_err:
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
return ret;
}
@@ -639,8 +635,6 @@ touch_err:
static void wm97xx_unregister_touch(struct wm97xx *wm)
{
platform_device_unregister(wm->touch_dev);
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
}
static int _wm97xx_probe(struct wm97xx *wm)
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 7c82c4f5fa6b..129ebc810de8 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -571,8 +571,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
#ifdef CONFIG_OF
static const struct of_device_id zinitix_of_match[] = {
+ { .compatible = "zinitix,bt402" },
+ { .compatible = "zinitix,bt403" },
+ { .compatible = "zinitix,bt404" },
+ { .compatible = "zinitix,bt412" },
+ { .compatible = "zinitix,bt413" },
+ { .compatible = "zinitix,bt431" },
+ { .compatible = "zinitix,bt432" },
+ { .compatible = "zinitix,bt531" },
{ .compatible = "zinitix,bt532" },
+ { .compatible = "zinitix,bt538" },
{ .compatible = "zinitix,bt541" },
+ { .compatible = "zinitix,bt548" },
+ { .compatible = "zinitix,bt554" },
+ { .compatible = "zinitix,at100" },
{ }
};
MODULE_DEVICE_TABLE(of, zinitix_of_match);
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 416815a525d6..bb95edf74415 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -14,6 +14,7 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
+extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ffc89c4fb120..47108ed44fbb 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -110,6 +110,7 @@
#define PASID_MASK 0x0000ffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0)
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index dc338acf3338..7bfe37e52e21 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
+#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@@ -657,6 +658,16 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+/*
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
@@ -834,6 +845,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
+ udelay(10);
}
if (WARN_ON(i >= LOOP_TIMEOUT))
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index b1bf4125b0f7..6608d1717574 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop)
dom = container_of(pgtable, struct protection_domain, iop);
- /* Update data structure */
- amd_iommu_domain_clr_pt_root(dom);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(dom);
-
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
put_pages_list(&freelist);
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 461f1844ed1f..a18b549951bb 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
- (MMIO_STATUS_EVT_INT_MASK | \
+ (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+ MMIO_STATUS_EVT_INT_MASK | \
MMIO_STATUS_PPR_INT_MASK | \
MMIO_STATUS_GALOG_INT_MASK)
@@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
while (status & AMD_IOMMU_INT_MASK) {
- /* Enable EVT and PPR and GA interrupts again */
+ /* Enable interrupt sources again */
writel(AMD_IOMMU_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
@@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
}
#endif
+ if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+ pr_info_ratelimited("IOMMU event log overflow\n");
+ amd_iommu_restart_event_logging(iommu);
+ }
+
/*
* Hardware bug: ERBT1312
* When re-enabling interrupt (by writing 1
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 92fea3fbbb11..5b196cfe9ed2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2738,7 +2738,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
- if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index f912fe45bea2..a67319597884 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
fn, &intel_ir_domain_ops,
iommu);
if (!iommu->ir_domain) {
- irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
- goto out_free_bitmap;
+ goto out_free_fwnode;
}
iommu->ir_msi_domain =
arch_create_remap_msi_irq_domain(iommu->ir_domain,
@@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (dmar_enable_qi(iommu)) {
pr_err("Failed to enable queued invalidation\n");
- goto out_free_bitmap;
+ goto out_free_ir_domain;
}
}
@@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
return 0;
+out_free_ir_domain:
+ if (iommu->ir_msi_domain)
+ irq_domain_remove(iommu->ir_msi_domain);
+ iommu->ir_msi_domain = NULL;
+ irq_domain_remove(iommu->ir_domain);
+ iommu->ir_domain = NULL;
+out_free_fwnode:
+ irq_domain_free_fwnode(fn);
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 50ee27bbd04e..06fee7416816 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -349,6 +349,7 @@ EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
* ioasid_get - obtain a reference to the IOASID
+ * @ioasid: the ID to get
*/
void ioasid_get(ioasid_t ioasid)
{
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8b86406b7162..107dcf5938d6 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
static void dev_iommu_free(struct device *dev)
{
- iommu_fwspec_free(dev);
- kfree(dev->iommu);
+ struct dev_iommu *param = dev->iommu;
+
dev->iommu = NULL;
+ if (param->fwspec) {
+ fwnode_handle_put(param->fwspec->iommu_fwnode);
+ kfree(param->fwspec);
+ }
+ kfree(param);
}
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
@@ -980,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group)
return ret;
}
-/**
- * iommu_group_for_each_dev - iterate over each device in the group
- * @group: the group
- * @data: caller opaque data to be passed to callback function
- * @fn: caller supplied callback function
- *
- * This function is called by group users to iterate over group devices.
- * Callers should hold a reference count to the group during callback.
- * The group->mutex is held across callbacks, which will block calls to
- * iommu_group_add/remove_device.
- */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@@ -1005,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
return ret;
}
-
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@@ -3032,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer to pass to bind callback
*
* Create a bond between device and address space, allowing the device to access
* the mm using the returned PASID. If a bond already exists between @device and
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 91749654fd49..980e4af3f06b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
}
/**
- * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
* @dev: iommu device
*
* This function performs the necessary checks to determine if the IOMMU
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e900e3c46903..2561ce8a2ce8 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -808,8 +808,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
return NULL;
mc = platform_get_drvdata(pdev);
- if (!mc)
+ if (!mc) {
+ put_device(&pdev->dev);
return NULL;
+ }
return mc->smmu;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 9e93ff2b6375..cd772973114a 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -5517,6 +5517,9 @@ int __init its_lpi_memreserve_init(void)
if (!efi_enabled(EFI_CONFIG_TABLES))
return 0;
+ if (list_empty(&its_nodes))
+ return 0;
+
gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"irqchip/arm/gicv3/memreserve:online",
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 259065d271ef..09cc98266d30 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -398,3 +398,4 @@ out_free_priv:
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dcbd6d201619..997ace47bbd5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(md->queue);
+ blk_mark_disk_dead(md->disk);
/*
* Take suspend_lock so that presuspend and postsuspend methods
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 5881d05a76eb..4d38bd7dadd6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5869,10 +5869,6 @@ int md_run(struct mddev *mddev)
nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
- /* Set the NOWAIT flags if all underlying devices support it */
- if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
-
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
@@ -6010,6 +6006,10 @@ int md_run(struct mddev *mddev)
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+ /* Set the NOWAIT flags if all underlying devices support it */
+ if (nowait)
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index bb9c4512c968..9fbfe784d710 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -114,6 +114,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
if (offset + count > EE1004_PAGE_SIZE)
count = EE1004_PAGE_SIZE - offset;
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+
return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 4ccbf43e6bfa..aa1682b94a23 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
}
if (copy_to_user(argp, &bp, sizeof(bp))) {
- dma_buf_put(buf->dmabuf);
+ /*
+ * The usercopy failed, but we can't do much about it, as
+ * dma_buf_fd() already called fd_install() and made the
+ * file descriptor accessible for the current process. It
+ * might already be closed and dmabuf no longer valid when
+ * we reach this point. Therefore "leak" the fd and rely on
+ * the process exit path to do any required cleanup.
+ */
return -EFAULT;
}
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4e61b28a002f..8d718aa56d33 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
- int retries = 0;
do {
u32 status;
int err;
+ int retries = 0;
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
- mmc_wait_for_req(host, mrq);
+ mmc_wait_for_req(host, mrq);
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
+ err = mmc_send_status(card, &status);
if (err)
goto error_exit;
- }
- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
- continue;
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
- retries = 0;
+ if (!mrq->cmd->error)
+ break;
+ }
if (mrq->cmd->error ||
mrq->data->error ||
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 45f578793980..bd87012c220c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -67,7 +67,7 @@ static const unsigned int sd_au_size[] = {
__res & __mask; \
})
-#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
struct sd_busy_data {
@@ -1664,6 +1664,12 @@ static int sd_poweroff_notify(struct mmc_card *card)
goto out;
}
+ /* Find out when the command is completed. */
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
cb_data.card = card;
cb_data.reg_buf = reg_buf;
err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 16d1c7a43d33..b6eb75f4bbfc 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
writel(0, host->base + REG_INTERRUPT_MASK);
writel(0, host->base + REG_POWER_CONTROL);
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
+ mmc_free_host(mmc);
return 0;
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index a593b1fbd69e..0f3658b36513 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
+ int ret;
u32 value;
struct device *dev = mmc_dev(host->mmc);
if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
- of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+ }
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index bcc595c70a9f..104dcd702870 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -405,6 +405,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 6ed6c51fac69..d503821a3e60 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
}
}
- if (erasesize)
- div_u64_rem(len, (uint32_t)erasesize, &rem);
-
if (len == 0 || erasesize == 0 || erasesize > len
- || erasesize > UINT_MAX || rem) {
+ || erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+ if (rem) {
+ parse_err("len is not multiple of erasesize\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 70f492dce158..eef87b28d6c8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -546,6 +546,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
config.stride = 1;
config.read_only = true;
config.root_only = true;
+ config.ignore_wp = true;
config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
config.priv = mtd;
@@ -833,6 +834,7 @@ static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
config.owner = THIS_MODULE;
config.type = NVMEM_TYPE_OTP;
config.root_only = true;
+ config.ignore_wp = true;
config.reg_read = reg_read;
config.size = size;
config.of_node = np;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 20408b7db540..d986ab4e4c35 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -42,7 +42,8 @@ config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
- select OMAP_GPMC if ARCH_K3
+ select MEMORY
+ select OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index f75929783b94..aee78f5f4f15 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
- if (!ret) {
+ if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (*err_addr)
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 1b64c5a5140d..ded4df473928 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->hw.must_apply_timings = false;
ret = gpmi_nfc_apply_timings(this);
if (ret)
- return ret;
+ goto out_pm;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2414,6 +2414,7 @@ unmap:
this->bch = false;
+out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index efe0ffe4f1ab..9054559e52dd 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
struct ingenic_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 7c6efa3b6255..1a77542c6d67 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,7 +2,6 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -3073,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
- ret = qcom_nandc_alloc(nandc);
- if (ret)
- goto err_nandc_alloc;
-
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;
@@ -3085,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
goto err_aon_clk;
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;
@@ -3096,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
return 0;
err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
-
return ret;
}
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
index 06a818cd2433..4311b89d8df0 100644
--- a/drivers/mtd/parsers/qcomsmempart.c
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ int ret, i, j, tmpparts, numparts = 0;
struct smem_flash_pentry *pentry;
struct smem_flash_ptable *ptable;
- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
struct mtd_partition *parts;
- int ret, i, numparts;
char *name, *c;
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
pr_debug("Parsing partition table info from SMEM\n");
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
- pr_err("Error reading partition table header\n");
+ if (PTR_ERR(ptable) != -EPROBE_DEFER)
+ pr_err("Error reading partition table header\n");
return PTR_ERR(ptable);
}
@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
}
/* Ensure that # of partitions is less than the max we have allocated */
- numparts = le32_to_cpu(ptable->numparts);
- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ tmpparts = le32_to_cpu(ptable->numparts);
+ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
pr_err("Partition numbers exceed the max limit\n");
return -EINVAL;
}
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
return PTR_ERR(ptable);
}
+ for (i = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] != '\0')
+ numparts++;
+ }
+
parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
- for (i = 0; i < numparts; i++) {
+ for (i = 0, j = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] == '\0')
continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
for (c = name; *c != '\0'; c++)
*c = tolower(*c);
- parts[i].name = name;
- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
- parts[i].mask_flags = pentry->attr;
- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ parts[j].name = name;
+ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[j].mask_flags = pentry->attr;
+ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
i, pentry->name, le32_to_cpu(pentry->offset),
le32_to_cpu(pentry->length), pentry->attr);
+ j++;
}
pr_debug("SMEM partition table found: ver: %d len: %d\n",
- le32_to_cpu(ptable->version), numparts);
+ le32_to_cpu(ptable->version), tmpparts);
*pparts = parts;
return numparts;
out_free_parts:
- while (--i >= 0)
- kfree(parts[i].name);
+ while (--j >= 0)
+ kfree(parts[j].name);
kfree(parts);
*pparts = NULL;
@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
+
+ kfree(pparts);
}
static const struct of_device_id qcomsmem_of_match_table[] = {
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6382e1937cca..c580acb8b1d3 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+ return -EINVAL;
+
priv->ci = ci;
mm = &ci->misc_map;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6006c2e8fa2b..a86b1f71762e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
-
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
@@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
@@ -1994,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
@@ -2278,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2312,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 238b56d77c36..aebeb46e6fa6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2379,10 +2379,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b7dc1c32875f..acd74725831f 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1715,15 +1715,15 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ devm_can_led_init(ndev);
+ gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev,
"register_candev() failed, error %d\n", err);
goto fail_candev;
}
- spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
- gpriv->ch[priv->channel] = priv;
dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
return 0;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 2ed2370a3166..2d73ebbf3836 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -1787,7 +1787,7 @@ static int es58x_open(struct net_device *netdev)
struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
int ret;
- if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ if (!es58x_dev->opened_channel_cnt) {
ret = es58x_alloc_rx_urbs(es58x_dev);
if (ret)
return ret;
@@ -1805,12 +1805,13 @@ static int es58x_open(struct net_device *netdev)
if (ret)
goto free_urbs;
+ es58x_dev->opened_channel_cnt++;
netif_start_queue(netdev);
return ret;
free_urbs:
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
netdev_err(netdev, "%s: Could not open the network device: %pe\n",
__func__, ERR_PTR(ret));
@@ -1845,7 +1846,8 @@ static int es58x_stop(struct net_device *netdev)
es58x_flush_pending_tx_msg(netdev);
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_dev->opened_channel_cnt--;
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
return 0;
@@ -2215,7 +2217,6 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
init_usb_anchor(&es58x_dev->tx_urbs_idle);
init_usb_anchor(&es58x_dev->tx_urbs_busy);
atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
- atomic_set(&es58x_dev->opened_channel_cnt, 0);
usb_set_intfdata(intf, es58x_dev);
es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..e5033cb5e695 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -373,8 +373,6 @@ struct es58x_operators {
* queue wake/stop logic should prevent this URB from getting
* empty. Please refer to es58x_get_tx_urb() for more details.
* @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- * and es58x_stop()).
* @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
* was called.
* @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -384,6 +382,10 @@ struct es58x_operators {
* in RX branches.
* @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_close()) guarantee that the network
+ * stack big kernel lock (a.k.a. rtnl_mutex) is held.
* @rx_cmd_buf_len: Length of @rx_cmd_buf.
* @rx_cmd_buf: The device might split the URB commands in an
* arbitrary amount of pieces. This buffer is used to concatenate
@@ -406,7 +408,6 @@ struct es58x_device {
struct usb_anchor tx_urbs_busy;
struct usb_anchor tx_urbs_idle;
atomic_t tx_urbs_idle_cnt;
- atomic_t opened_channel_cnt;
u64 ktime_req_ns;
s64 realtime_diff_ns;
@@ -415,6 +416,7 @@ struct es58x_device {
u16 rx_max_packet_size;
u8 num_can_ch;
+ u8 opened_channel_cnt;
u16 rx_cmd_buf_len;
union es58x_urb_cmd rx_cmd_buf;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index b487e3fe770a..d35749fad1ef 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -191,8 +191,8 @@ struct gs_can {
struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
- atomic_t active_channels;
struct usb_device *udev;
+ u8 active_channels;
};
/* 'allocate' a tx context.
@@ -589,7 +589,7 @@ static int gs_can_open(struct net_device *netdev)
if (rc)
return rc;
- if (atomic_add_return(1, &parent->active_channels) == 1) {
+ if (!parent->active_channels) {
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
@@ -690,6 +690,7 @@ static int gs_can_open(struct net_device *netdev)
dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -705,7 +706,8 @@ static int gs_can_close(struct net_device *netdev)
netif_stop_queue(netdev);
/* Stop polling */
- if (atomic_dec_and_test(&parent->active_channels))
+ parent->active_channels--;
+ if (!parent->active_channels)
usb_kill_anchored_urbs(&parent->rx_submitted);
/* Stop sending URBs */
@@ -984,8 +986,6 @@ static int gs_usb_probe(struct usb_interface *intf,
init_usb_anchor(&dev->rx_submitted);
- atomic_set(&dev->active_channels, 0);
-
usb_set_intfdata(intf, dev);
dev->udev = interface_to_usbdev(intf);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7b1457a6e327..0029d279616f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT753x and MT7621 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select MEDIATEK_GE_PHY
help
This enables support for the MediaTek MT7530, MT7531, and MT7621
Ethernet switch chips.
@@ -81,6 +82,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 33499fcd8848..6afb5db8244c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
@@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
+ if (err && dn) {
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
+ }
return err;
}
@@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d55784d19fa4..3969d89fa4db 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -10,6 +10,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
@@ -1310,7 +1317,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 46ed953e787e..8a7a8093a156 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
+ int err;
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
@@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@@ -2145,8 +2150,10 @@ disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
- if (mdio_np)
+ if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2170,6 +2177,7 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 55dbda04ea62..243f8ad6d06e 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -26,7 +26,7 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
struct dsa_switch *ds = dev->ds;
u8 port_member = 0, cpu_port;
const struct dsa_port *dp;
- int i;
+ int i, j;
if (!dsa_is_user_port(ds, port))
return;
@@ -45,13 +45,33 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
continue;
if (!dsa_port_bridge_same(dp, other_dp))
continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
+ continue;
- if (other_p->stp_state == BR_STATE_FORWARDING &&
- p->stp_state == BR_STATE_FORWARDING) {
+ if (p->stp_state == BR_STATE_FORWARDING) {
val |= BIT(port);
port_member |= BIT(i);
}
+ /* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+ third_dp = dsa_to_port(ds, j);
+ if (dsa_port_bridge_same(other_dp, third_dp))
+ val |= BIT(j);
+ }
+
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b82512e5b33b..ff3c267d0f26 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
if (priv->irq)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 58ca684d73f7..ab1676553714 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2284,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -3399,7 +3406,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return err;
}
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
@@ -3424,14 +3431,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
- return err;
+ goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
+ goto out;
}
if (external)
@@ -3440,21 +3447,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
list_add(&mdio_bus->list, &chip->mdios);
return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
+ mdiobus_free(bus);
}
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index bf8d38239e7e..33f0ceae381d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
@@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 8c1c9da61602..f2f1608a476c 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
}
/* Needed in order to initialize the bus mutex lock */
- rc = of_mdiobus_register(bus, NULL);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index da0d7e68643a..c39de2a4c1fe 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -378,7 +378,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@@ -1091,7 +1091,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 492ac383f16d..a3593290886f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;
+ /* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
}
}
@@ -2550,6 +2552,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2579,8 +2589,10 @@ skip_data:
if (!last || context_next)
goto read_again;
- if (!skb)
+ if (!skb || error) {
+ dev_kfree_skb(skb);
goto next_packet;
+ }
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index efdcf484a510..2af3da4b2d05 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -425,6 +425,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..f50604f3e541 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..b4381cd41979 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index e20aafeb4ca9..b97ed9b5f685 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8216,7 +8216,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting\n");
+ "dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 774c1f1a57c3..eedb48d945ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -100,6 +100,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4f94136a011a..b1c98d1408b8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4747,8 +4747,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return rc;
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
req->mask = cpu_to_le32(vnic->rx_mask);
return hwrm_req_send_silent(bp, req);
}
@@ -7787,6 +7789,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return 0;
}
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8639,6 +8654,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -8648,7 +8666,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -8659,6 +8677,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9850,8 +9869,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
resc_reinit = true;
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
fw_reset = true;
- else if (bp->fw_health && !bp->fw_health->status_reliable)
- bnxt_try_map_fw_health_reg(bp);
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10330,13 +10349,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
goto half_open_err;
}
- rc = bnxt_alloc_mem(bp, false);
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10344,7 +10365,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
dev_close(bp->dev);
return rc;
}
@@ -10354,9 +10375,10 @@ half_open_err:
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10772,7 +10794,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
@@ -10849,9 +10871,10 @@ skip_uc:
!bnxt_promisc_ok(bp))
vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 440dfeb4948b..666fc1e7a7d2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1921,6 +1921,7 @@ struct bnxt {
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 4da31b1b84f9..f6e21fac0e69 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -367,6 +367,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
}
}
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
@@ -374,8 +384,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
struct hwrm_fw_livepatch_query_input *query_req;
struct hwrm_fw_livepatch_output *patch_resp;
struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
u32 installed = 0;
- u16 flags;
u8 target;
int rc;
@@ -394,7 +405,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
hwrm_req_drop(bp, query_req);
return rc;
}
- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
patch_resp = hwrm_req_hold(bp, patch_req);
@@ -407,12 +417,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
flags = le16_to_cpu(query_resp->status_flags);
- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
continue;
- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
- !strncmp(query_resp->active_ver, query_resp->install_ver,
- sizeof(query_resp->active_ver)))
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
continue;
+ }
+
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
patch_req->fw_target = target;
rc = hwrm_req_send(bp, patch_req);
@@ -424,8 +442,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
if (!rc && !installed) {
- NL_SET_ERR_MSG_MOD(extack, "No live patches found");
- rc = -ENOENT;
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
hwrm_req_drop(bp, query_req);
hwrm_req_drop(bp, patch_req);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 003330e8cd58..8aaa2335f848 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -25,6 +25,7 @@
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
@@ -1969,6 +1970,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
fec->active_fec |= ETHTOOL_FEC_LLRS;
break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
}
return 0;
}
@@ -3454,7 +3458,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -3548,9 +3552,12 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
+ bnxt_ulp_stop(bp);
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ bnxt_ulp_start(bp, rc);
return;
+ }
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3560,6 +3567,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -3585,7 +3593,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 566c9487ef55..b01d42928a53 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -644,17 +644,23 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
- usleep_range(1, 5);
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i), req_type,
+ hwrm_total_timeout(i) + j, req_type,
le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index d52bd2d63aec..c98032e38188 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -90,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}
-#define HWRM_VALID_BIT_DELAY_USEC 150
+#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a363da928e8b..98498a76ae16 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4712,7 +4712,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index da41eee2f25c..a06003bfa04b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 691605c15265..d5356db7539a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
return 0;
}
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- bool tx_pause, rx_pause;
- int new_speed;
-
- /* We store "no link" as speed 0 */
- if (!phydev->link)
- new_speed = 0;
- else
- new_speed = phydev->speed;
-
- /* Grab pause settings from PHY if configured to do so */
- if (priv->aneg_pause) {
- rx_pause = tx_pause = phydev->pause;
- if (phydev->asym_pause)
- tx_pause = !rx_pause;
- } else {
- rx_pause = priv->rx_pause;
- tx_pause = priv->tx_pause;
- }
-
- /* Link hasn't changed, do nothing */
- if (phydev->speed == priv->cur_speed &&
- phydev->duplex == priv->cur_duplex &&
- rx_pause == priv->rx_pause &&
- tx_pause == priv->tx_pause)
- return;
-
- /* Print status if we have a link or we had one and just lost it,
- * don't print otherwise.
- */
- if (new_speed || priv->cur_speed)
- phy_print_status(phydev);
-
- priv->cur_speed = new_speed;
- priv->cur_duplex = phydev->duplex;
- priv->rx_pause = rx_pause;
- priv->tx_pause = tx_pause;
-
- /* Link is down, do nothing else */
- if (!new_speed)
- return;
-
- /* Disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Reset the adapter asynchronously */
- schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->dev);
- struct device_node *np = pdev->dev.of_node;
- struct phy_device *phydev;
- phy_interface_t phy_intf;
- int err;
-
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
-
- phydev = phy_find_first(priv->mii_bus);
- if (!phydev) {
- netdev_info(netdev, "%s: no PHY found\n", netdev->name);
- return -ENODEV;
- }
-
- phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, phy_intf);
-
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
- return PTR_ERR(phydev);
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phydev);
-
- /* Display what we found */
- phy_attached_info(phydev);
-
- return 0;
-}
-
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -1410,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
return err;
}
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
{
- struct ftgmac100 *priv = container_of(work, struct ftgmac100,
- reset_task);
struct net_device *netdev = priv->netdev;
int err;
@@ -1459,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+
+ ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
+
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
+ return;
+
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
+
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
+
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
+
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
+ * order consistent to prevent deadlock.
+ */
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+
+ ftgmac100_reset(priv);
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+
+ phydev = phy_find_first(priv->mii_bus);
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, phydev_name(phydev),
+ &ftgmac100_adjust_link, phy_intf);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phydev);
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index e985ae008a97..0f90d2d5bb60 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4523,12 +4523,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+
+ unregister_netdev(net_dev);
rtnl_lock();
dpaa2_eth_disconnect_mac(priv);
rtnl_unlock();
- unregister_netdev(net_dev);
-
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
dpaa2_eth_dl_free(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..cacd454ac696 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 2ad7f57f7e5b..f7621ab672b9 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
*/
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
- u32 tail, head;
+ int tail, head;
int i;
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 2068199445bd..e4e98aa7745f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -609,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
*work_done = work_cnt;
+ skb_record_rx_queue(skb, rx->q_num);
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index bda7a2a9d211..b423e94956f1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -110,6 +110,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1424,7 +1425,7 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
- release_resources(adapter);
+ ibmvnic_disable_irqs(adapter);
return rc;
}
@@ -1474,9 +1475,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
- release_resources(adapter);
- release_rx_pools(adapter);
- release_tx_pools(adapter);
goto out;
}
}
@@ -1493,6 +1491,13 @@ out:
adapter->state = VNIC_OPEN;
rc = 0;
}
+
+ if (rc) {
+ release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ }
+
return rc;
}
@@ -2208,6 +2213,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
}
/*
+ * Initialize the init_done completion and return code values. We
+ * can get a transport event just after registering the CRQ and the
+ * tasklet will use this to communicate the transport event. To ensure
+ * we don't miss the notification/error, initialize these _before_
+ * registering the CRQ.
+ */
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
+{
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+}
+
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2313,6 +2331,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_init_done(adapter);
+
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
rc = init_crq_queue(adapter);
} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
@@ -2456,7 +2476,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- reinit_completion(&adapter->init_done);
+ reinit_init_done(adapter);
+
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -2597,23 +2618,82 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_adapter *adapter;
- bool saved_state = false;
+ unsigned int timeout = 5000;
struct ibmvnic_rwi *tmprwi;
+ bool saved_state = false;
struct ibmvnic_rwi *rwi;
unsigned long flags;
- u32 reset_state;
+ struct device *dev;
+ bool need_reset;
int num_fails = 0;
+ u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ dev = &adapter->vdev->dev;
- if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ /* Wait for ibmvnic_probe() to complete. If probe is taking too long
+ * or if another reset is in progress, defer work for now. If probe
+ * eventually fails it will flush and terminate our work.
+ *
+ * Three possibilities here:
+ * 1. Adapter being removed - just return
+ * 2. Timed out on probe or another reset in progress - delay the work
+ * 3. Completed probe - perform any resets in queue
+ */
+ if (adapter->state == VNIC_PROBING &&
+ !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
+ dev_err(dev, "Reset thread timed out on probe");
queue_delayed_work(system_long_wq,
&adapter->ibmvnic_delayed_reset,
IBMVNIC_RESET_DELAY);
return;
}
+ /* adapter is done with probe (i.e. state is never VNIC_PROBING now) */
+ if (adapter->state == VNIC_REMOVING)
+ return;
+
+ /* ->rwi_list is stable now (no one else is removing entries) */
+
+ /* ibmvnic_probe() may have purged the reset queue after we were
+ * scheduled to process a reset, so there may be no resets to process.
+ * Before setting the ->resetting bit though, we have to make sure
+ * that there is in fact a reset to process. Otherwise we may race
+ * with ibmvnic_open() and end up leaving the vnic down:
+ *
+ * __ibmvnic_reset() ibmvnic_open()
+ * ----------------- --------------
+ *
+ * set ->resetting bit
+ * find ->resetting bit is set
+ * set ->state to IBMVNIC_OPEN (i.e
+ * assume reset will open device)
+ * return
+ * find reset queue empty
+ * return
+ *
+ * Neither performed vnic login/open and vnic stays down
+ *
+ * If we hold the lock and conditionally set the bit, either we
+ * or ibmvnic_open() will complete the open.
+ */
+ need_reset = false;
+ spin_lock(&adapter->rwi_lock);
+ if (!list_empty(&adapter->rwi_list)) {
+ if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ queue_delayed_work(system_long_wq,
+ &adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
+ } else {
+ need_reset = true;
+ }
+ }
+ spin_unlock(&adapter->rwi_lock);
+
+ if (!need_reset)
+ return;
+
rwi = get_next_rwi(adapter);
while (rwi) {
spin_lock_irqsave(&adapter->state_lock, flags);
@@ -2730,12 +2810,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work)
__ibmvnic_reset(&adapter->ibmvnic_reset);
}
+static void flush_reset_queue(struct ibmvnic_adapter *adapter)
+{
+ struct list_head *entry, *tmp_entry;
+
+ if (!list_empty(&adapter->rwi_list)) {
+ list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
+ list_del(entry);
+ kfree(list_entry(entry, struct ibmvnic_rwi, list));
+ }
+ }
+}
+
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
enum ibmvnic_reset_reason reason)
{
- struct list_head *entry, *tmp_entry;
- struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ struct ibmvnic_rwi *rwi, *tmp;
unsigned long flags;
int ret;
@@ -2754,13 +2845,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- if (adapter->state == VNIC_PROBING) {
- netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = -EAGAIN;
- ret = EAGAIN;
- goto err;
- }
-
list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
@@ -2778,10 +2862,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
/* if we just received a transport event,
* flush reset queue and process this reset
*/
- if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
- list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
- list_del(entry);
- }
+ if (adapter->force_reset_recovery)
+ flush_reset_queue(adapter);
+
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
@@ -5316,9 +5399,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
}
if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
if (!adapter->init_done_rc)
adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
}
break;
@@ -5341,6 +5424,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
adapter->fw_done_rc = -EIO;
complete(&adapter->fw_done);
}
+
+ /* if we got here during crq-init, retry crq-init */
+ if (!completion_done(&adapter->init_done)) {
+ adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
+ }
+
if (!completion_done(&adapter->stats_done))
complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
@@ -5657,10 +5747,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
adapter->from_passive_init = false;
- if (reset)
- reinit_completion(&adapter->init_done);
-
- adapter->init_done_rc = 0;
rc = ibmvnic_send_crq_init(adapter);
if (rc) {
dev_err(dev, "Send crq init failed with error %d\n", rc);
@@ -5674,12 +5760,14 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (adapter->init_done_rc) {
release_crq_queue(adapter);
+ dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
return adapter->init_done_rc;
}
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
+ dev_err(dev, "CRQ-init failed, passive-init\n");
return -EINVAL;
}
@@ -5719,6 +5807,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ unsigned long flags;
bool init_success;
int rc;
@@ -5763,6 +5852,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->rwi_lock);
spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
+ init_completion(&adapter->probe_done);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
init_completion(&adapter->reset_done);
@@ -5773,6 +5863,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_success = false;
do {
+ reinit_init_done(adapter);
+
+ /* clear any failovers we got in the previous pass
+ * since we are reinitializing the CRQ
+ */
+ adapter->failover_pending = false;
+
+ /* If we had already initialized CRQ, we may have one or
+ * more resets queued already. Discard those and release
+ * the CRQ before initializing the CRQ again.
+ */
+ release_crq_queue(adapter);
+
+ /* Since we are still in PROBING state, __ibmvnic_reset()
+ * will not access the ->rwi_list and since we released CRQ,
+ * we won't get _new_ transport events. But there may be an
+ * ongoing ibmvnic_reset() call. So serialize access to
+ * rwi_list. If we win the race, ibmvnic_reset() could add
+ * a reset after we purged, but that's OK - we just may end
+ * up with an extra reset (i.e. similar to having two or more
+ * resets in the queue at once).
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
rc = init_crq_queue(adapter);
if (rc) {
dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
@@ -5804,12 +5921,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto ibmvnic_register_fail;
- }
- dev_info(&dev->dev, "ibmvnic registered\n");
if (init_success) {
adapter->state = VNIC_PROBED;
@@ -5822,6 +5933,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ goto ibmvnic_register_fail;
+ }
+ dev_info(&dev->dev, "ibmvnic registered\n");
+
+ complete(&adapter->probe_done);
+
return 0;
ibmvnic_register_fail:
@@ -5836,6 +5957,17 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+
+ /* Clean up the worker thread after releasing the CRQ so we don't
+ * get transport events (i.e. new work items for the worker thread).
+ */
+ adapter->state = VNIC_REMOVING;
+ complete(&adapter->probe_done);
+ flush_work(&adapter->ibmvnic_reset);
+ flush_delayed_work(&adapter->ibmvnic_delayed_reset);
+
+ flush_reset_queue(adapter);
+
mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
@@ -5912,10 +6044,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
be64_to_cpu(session_token));
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_SESSION_ERR_DETECTED, session_token, 0, 0);
- if (rc)
+ if (rc) {
netdev_err(netdev,
"H_VIOCTL initiated failover failed, rc %ld\n",
rc);
+ goto last_resort;
+ }
+
+ return count;
last_resort:
netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4a7a56ff74ce..fa2d607a7b1b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -930,6 +930,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_pool *tso_pool;
+ struct completion probe_done;
struct completion init_done;
int init_done_rc;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c3def0ee7788..8d06c9d8ff8b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -115,7 +115,8 @@ enum e1000_boards {
board_pch_lpt,
board_pch_spt,
board_pch_cnp,
- board_pch_tgp
+ board_pch_tgp,
+ board_pch_adp
};
struct e1000_ps_page {
@@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
+extern const struct e1000_info e1000_pch_adp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index bcf680e83811..13382df2f2ef 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -630,6 +630,7 @@ struct e1000_phy_info {
bool disable_polarity_correction;
bool is_mdix;
bool polarity_correction;
+ bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 5e4fc9b4e2ad..d60e2016d03c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -2050,6 +2050,10 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
bool blocked = false;
int i = 0;
+ /* Check the PHY (LCD) reset flag */
+ if (hw->phy.reset_disable)
+ return true;
+
while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
(i++ < 30))
usleep_range(10000, 11000);
@@ -4136,9 +4140,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
if (!(data & valid_csum_mask)) {
- e_dbg("NVM Checksum Invalid\n");
+ e_dbg("NVM Checksum valid bit not set\n");
- if (hw->mac.type < e1000_pch_cnp) {
+ if (hw->mac.type < e1000_pch_tgp) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
@@ -6021,3 +6025,23 @@ const struct e1000_info e1000_pch_tgp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_adp_info = {
+ .mac = e1000_pch_adp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2504b11c3169..638a3ddd7ada 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -271,6 +271,7 @@
#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+#define I217_MEMPWR_MOEM 0x1000
/* Receive Address Initial CRC Calculation */
#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 635a95927e93..c5bdef3ffe26 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
+ [board_pch_adp] = &e1000_pch_adp_info,
};
struct e1000_reg_info {
@@ -6341,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
@@ -6490,7 +6492,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@@ -6984,8 +6987,21 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_data;
int rc;
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */
+ e1e_rphy(hw, I217_MEMPWR, &phy_data);
+ phy_data |= I217_MEMPWR_MOEM;
+ e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+ /* Disable LCD reset */
+ hw->phy.reset_disable = true;
+ }
+
e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev);
@@ -7007,6 +7023,8 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 phy_data;
int rc;
/* Introduce S0ix implementation */
@@ -7017,6 +7035,17 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
if (rc)
return rc;
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */
+ e1e_rphy(hw, I217_MEMPWR, &phy_data);
+ phy_data &= ~I217_MEMPWR_MOEM;
+ e1e_wphy(hw, I217_MEMPWR, phy_data);
+
+ /* Enable LCD reset */
+ hw->phy.reset_disable = false;
+ }
+
return e1000e_pm_thaw(dev);
}
@@ -7898,22 +7927,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
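
The e1000e suspend/resume hunks toggle the I217_MEMPWR_MOEM bit with a plain read-modify-write of a 16-bit PHY register. A self-contained sketch of that access pattern (the register file and numbers below are stand-ins, not the real e1e_rphy()/e1e_wphy() interface):

#include <stdint.h>
#include <stdio.h>

#define MEMPWR_REG  26      /* illustrative register number */
#define MEMPWR_MOEM 0x1000  /* same bit position as I217_MEMPWR_MOEM */

static uint16_t fake_phy[32];   /* stand-in for the PHY register file */

static uint16_t phy_read(unsigned int reg)          { return fake_phy[reg]; }
static void phy_write(unsigned int reg, uint16_t v) { fake_phy[reg] = v; }

/* Read-modify-write of one bit, as the suspend path sets and the
 * resume path clears the MOEM bit above. */
static void phy_update_bit(unsigned int reg, uint16_t mask, int set)
{
        uint16_t val = phy_read(reg);

        if (set)
                val |= mask;
        else
                val &= ~mask;
        phy_write(reg, val);
}

int main(void)
{
        phy_update_bit(MEMPWR_REG, MEMPWR_MOEM, 1);  /* suspend: mask OEM bits */
        phy_update_bit(MEMPWR_REG, MEMPWR_MOEM, 0);  /* resume: unmask again */
        printf("reg %d = 0x%04x\n", MEMPWR_REG, fake_phy[MEMPWR_REG]);
        return 0;
}
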
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2e02cc68cd3f..80c5cecaf2b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -144,6 +144,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f70c478dafdb..31b03fe78d3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10853,6 +10853,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -12212,6 +12215,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return pf->alloc_rss_size;
pf->alloc_rss_size = new_rss_size;
@@ -13038,6 +13043,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);
+ /* VSI shall be deleted in a moment; just return -EINVAL */
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -15928,8 +15937,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
- while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE;
+ * once these flags are set, i40e_rebuild() should not be called, as
+ * i40e_prep_for_reset() always returns early.
+ */
+ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
+ set_bit(__I40E_IN_REMOVE, pf->state);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -16128,6 +16142,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+
i40e_reset_and_rebuild(pf, false, false);
}
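
The i40e hunks replace the passive wait on __I40E_RESET_RECOVERY_PENDING with test_and_set_bit() and then latch __I40E_IN_REMOVE so every later reset or rebuild path returns early. A compact C11 sketch of that latch, assuming nothing about the driver beyond the pattern itself:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool in_remove;   /* analogue of __I40E_IN_REMOVE */

/* Remove path: latch the flag once; it is never cleared. */
static void mark_removing(void)
{
        atomic_store(&in_remove, true);
}

/* Reset/rebuild entry points check the latch first and bail out,
 * like the test_bit(__I40E_IN_REMOVE, pf->state) guards above. */
static bool reset_and_rebuild(void)
{
        if (atomic_load(&in_remove))
                return false;   /* device is going away; do nothing */

        /* ... normal rebuild work would happen here ... */
        return true;
}

In the hunk above the remove path first wins __I40E_RESET_RECOVERY_PENDING and only then sets the latch, so no rebuild can be in flight at the moment the flag appears.
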
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 59806d1f7e79..89423947ee65 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -201,6 +201,10 @@ enum iavf_state_t {
__IAVF_RUNNING, /* opened, working */
};
+enum iavf_critical_section_t {
+ __IAVF_IN_REMOVE_TASK, /* device being removed */
+};
+
#define IAVF_CLOUD_FIELD_OMAC 0x01
#define IAVF_CLOUD_FIELD_IMAC 0x02
#define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -246,7 +250,6 @@ struct iavf_adapter {
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
- struct mutex remove_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
@@ -284,6 +287,7 @@ struct iavf_adapter {
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8125b9120615..dcf24264c7ea 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -302,8 +302,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
rd32(hw, IAVF_VFINT_ICR01);
rd32(hw, IAVF_VFINT_ICR0_ENA1);
- /* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state != __IAVF_REMOVE)
+ /* schedule work on the private workqueue */
+ queue_work(iavf_wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -1136,8 +1137,7 @@ void iavf_down(struct iavf_adapter *adapter)
rss->state = IAVF_ADV_RSS_DEL_REQUEST;
spin_unlock_bh(&adapter->adv_rss_lock);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
- adapter->state != __IAVF_RESETTING) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
@@ -2374,17 +2374,22 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock))
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
goto restart_watchdog;
+ }
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
- if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
- adapter->state != __IAVF_RESETTING) {
- iavf_change_state(adapter, __IAVF_RESETTING);
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+ queue_work(iavf_wq, &adapter->reset_task);
+ return;
}
switch (adapter->state) {
@@ -2419,6 +2424,15 @@ static void iavf_watchdog_task(struct work_struct *work)
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Do not update the state and do not reschedule the
+ * watchdog task; iavf_remove should handle this state,
+ * as the task could otherwise loop forever
+ */
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
@@ -2435,6 +2449,17 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Set state to __IAVF_INIT_FAILED and perform remove
+ * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
+ * doesn't bring the state back to __IAVF_COMM_FAILED.
+ */
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
@@ -2507,7 +2532,8 @@ static void iavf_watchdog_task(struct work_struct *work)
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state >= __IAVF_DOWN)
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
@@ -2601,13 +2627,13 @@ static void iavf_reset_task(struct work_struct *work)
/* When the device is being removed it doesn't make sense to run the
* reset task; just return in such a case.
*/
- if (mutex_is_locked(&adapter->remove_lock))
- return;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state != __IAVF_REMOVE)
+ queue_work(iavf_wq, &adapter->reset_task);
- if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
- schedule_work(&adapter->reset_task);
return;
}
+
while (!mutex_trylock(&adapter->client_lock))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
@@ -2662,6 +2688,7 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2670,8 +2697,7 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = ((adapter->state == __IAVF_RUNNING) ||
- (adapter->state == __IAVF_RESETTING));
+ running = adapter->state == __IAVF_RUNNING;
if (running) {
netdev->flags &= ~IFF_UP;
@@ -2826,13 +2852,19 @@ static void iavf_adminq_task(struct work_struct *work)
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto out;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ queue_work(iavf_wq, &adapter->adminq_task);
+ goto out;
+ }
+
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
goto out;
- if (iavf_lock_timeout(&adapter->crit_lock, 200))
- goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2848,6 +2880,24 @@ static void iavf_adminq_task(struct work_struct *work)
} while (pending);
mutex_unlock(&adapter->crit_lock);
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
+ if (adapter->netdev_registered ||
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ netdev_update_features(netdev);
+ rtnl_unlock();
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features
+ (adapter, 0, netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+ }
+
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
if ((adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
adapter->state == __IAVF_RESETTING)
@@ -3800,11 +3850,12 @@ static int iavf_close(struct net_device *netdev)
struct iavf_adapter *adapter = netdev_priv(netdev);
int status;
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return 0;
+ mutex_lock(&adapter->crit_lock);
- while (!mutex_trylock(&adapter->crit_lock))
- usleep_range(500, 1000);
+ if (adapter->state <= __IAVF_DOWN_PENDING) {
+ mutex_unlock(&adapter->crit_lock);
+ return 0;
+ }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
@@ -3853,8 +3904,11 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
iavf_notify_client_l2_params(&adapter->vsi);
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ queue_work(iavf_wq, &adapter->reset_task);
+ }
return 0;
}
@@ -4431,7 +4485,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
mutex_init(&adapter->crit_lock);
mutex_init(&adapter->client_lock);
- mutex_init(&adapter->remove_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -4547,7 +4600,6 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- enum iavf_state_t prev_state = adapter->last_state;
struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
@@ -4556,14 +4608,30 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
int err;
- /* Indicate we are in remove and not to run reset_task */
- mutex_lock(&adapter->remove_lock);
- cancel_work_sync(&adapter->reset_task);
+
+ set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
+ /* Wait until port initialization is complete.
+ * There are flows where register/unregister netdev may race.
+ */
+ while (1) {
+ mutex_lock(&adapter->crit_lock);
+ if (adapter->state == __IAVF_RUNNING ||
+ adapter->state == __IAVF_DOWN ||
+ adapter->state == __IAVF_INIT_FAILED) {
+ mutex_unlock(&adapter->crit_lock);
+ break;
+ }
+
+ mutex_unlock(&adapter->crit_lock);
+ usleep_range(500, 1000);
+ }
cancel_delayed_work_sync(&adapter->watchdog_task);
- cancel_delayed_work_sync(&adapter->client_task);
+
if (adapter->netdev_registered) {
- unregister_netdev(netdev);
+ rtnl_lock();
+ unregister_netdevice(netdev);
adapter->netdev_registered = false;
+ rtnl_unlock();
}
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_del_device(adapter);
@@ -4572,6 +4640,10 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
+ mutex_lock(&adapter->crit_lock);
+ dev_info(&adapter->pdev->dev, "Remove device\n");
+ iavf_change_state(adapter, __IAVF_REMOVE);
+
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -4579,37 +4651,24 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- dev_info(&adapter->pdev->dev, "Removing device\n");
+ iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
- iavf_change_state(adapter, __IAVF_REMOVE);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->adminq_task);
+ cancel_delayed_work_sync(&adapter->client_task);
+
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
- iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
- /* In case we enter iavf_remove from erroneous state, free traffic irqs
- * here, so as to not cause a kernel crash, when calling
- * iavf_reset_interrupt_capability.
- */
- if ((adapter->last_state == __IAVF_RESETTING &&
- prev_state != __IAVF_DOWN) ||
- (adapter->last_state == __IAVF_RUNNING &&
- !(netdev->flags & IFF_UP)))
- iavf_free_traffic_irqs(adapter);
-
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
- cancel_delayed_work_sync(&adapter->watchdog_task);
-
- cancel_work_sync(&adapter->adminq_task);
-
iavf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -4621,8 +4680,6 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
- mutex_unlock(&adapter->remove_lock);
- mutex_destroy(&adapter->remove_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
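
Several iavf work handlers above switch from blocking lock acquisition to mutex_trylock(): if the lock is busy and the adapter is being removed they give up, otherwise they requeue themselves instead of sleeping inside the workqueue. A small pthread-based sketch of that shape (requeue_work() and the adapter layout are hypothetical stand-ins):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct adapter {
        pthread_mutex_t crit_lock;
        atomic_bool removing;        /* plays the role of the __IAVF_REMOVE state */
};

/* Hypothetical stand-in for queue_work(): run the handler again later. */
void requeue_work(void (*fn)(struct adapter *), struct adapter *a);

/* Work handler: never block on crit_lock; run now, give up because the
 * device is going away, or try again later. */
static void adminq_task(struct adapter *a)
{
        if (pthread_mutex_trylock(&a->crit_lock) != 0) {
                if (atomic_load(&a->removing))
                        return;                  /* remove path owns the lock */
                requeue_work(adminq_task, a);    /* busy: retry later */
                return;
        }

        /* ... process admin-queue events under the lock ... */

        pthread_mutex_unlock(&a->crit_lock);
}
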
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 5ee1d118fd30..88844d68e150 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2146,29 +2146,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
sizeof(adapter->vlan_v2_caps)));
iavf_process_config(adapter);
-
- /* unlock crit_lock before acquiring rtnl_lock as other
- * processes holding rtnl_lock could be waiting for the same
- * crit_lock
- */
- mutex_unlock(&adapter->crit_lock);
- /* VLAN capabilities can change during VFR, so make sure to
- * update the netdev features with the new capabilities
- */
- rtnl_lock();
- netdev_update_features(netdev);
- rtnl_unlock();
- if (iavf_lock_timeout(&adapter->crit_lock, 10000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
- __FUNCTION__);
-
- /* Request VLAN offload settings */
- if (VLAN_V2_ALLOWED(adapter))
- iavf_set_vlan_offload_features(adapter, 0,
- netdev->features);
-
- iavf_set_queue_vlan_tag_loc(adapter);
-
+ adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4e16d185077d..473b1f6be9de 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -280,7 +280,6 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
- ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
@@ -483,6 +482,7 @@ enum ice_pf_flags {
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ ICE_FLAG_PLUG_AUX_DEV,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -887,7 +887,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- ice_plug_aux_dev(pf);
+ set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 408d15a5b0e3..e2af99a763ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3340,9 +3340,10 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
!ice_fw_supports_report_dflt_cfg(hw)) {
- struct ice_link_default_override_tlv tlv;
+ struct ice_link_default_override_tlv tlv = { 0 };
- if (ice_get_link_default_override(&tlv, pi))
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 864692b157b6..73edc24d81d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -44,6 +44,7 @@ ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
ctrl_vsi->rxq_map[vf->vf_id];
rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
rule_info.flags_info.act_valid = true;
+ rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
vf->repr->mac_rule);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index e375ac849aec..4f954db01b92 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -204,11 +204,7 @@ ice_lag_unlink(struct ice_lag *lag,
lag->upper_netdev = NULL;
}
- if (lag->peer_netdev) {
- dev_put(lag->peer_netdev);
- lag->peer_netdev = NULL;
- }
-
+ lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
@@ -216,6 +212,32 @@ ice_lag_unlink(struct ice_lag *lag,
}
/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+ struct ice_pf *pf = lag->pf;
+
+ /* Check that this event is for this netdev
+ * and that we are in an aggregate.
+ */
+ if (netdev != lag->netdev || !lag->bonded)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
+ }
+ /* perform some cleanup in case we come back */
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
- ice_lag_unlink(lag, ptr);
+ ice_lag_unregister(lag, netdev);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d981dc6f2323..85a612838a89 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -568,6 +568,7 @@ struct ice_tx_ctx_desc {
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c187cf04fcf..53256aca27c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1684,6 +1684,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 30814435f779..f3c346e13b7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1799,7 +1799,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
+ mutex_lock(&pf->vf[i].cfg_lock);
ice_reset_vf(&pf->vf[i], false);
+ mutex_unlock(&pf->vf[i].cfg_lock);
}
}
}
@@ -2253,6 +2255,9 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@@ -8525,6 +8530,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +8543,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
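
ice_features_check() now derives the MAC and IP header lengths from skb_network_offset() and skb_network_header_len() and drops offloads when a length is too large or odd. The same arithmetic, sketched over plain offsets (the limits below are illustrative, not the ICE_TXD_* values):

#include <stdbool.h>
#include <stddef.h>

#define MACLEN_MAX 127   /* illustrative, not ICE_TXD_MACLEN_MAX */
#define IPLEN_MAX  255   /* illustrative, not ICE_TXD_IPLEN_MAX */

struct fake_pkt {
        size_t network_offset;     /* start of the L3 header from frame start */
        size_t transport_offset;   /* start of the L4 header from frame start */
};

/* Both header lengths must fit the descriptor field and be even,
 * mirroring the two "len > MAX || len & 0x1" checks above. */
static bool headers_offloadable(const struct fake_pkt *p)
{
        size_t mac_len = p->network_offset;
        size_t ip_len  = p->transport_offset - p->network_offset;

        if (mac_len > MACLEN_MAX || (mac_len & 0x1))
                return false;
        if (ip_len > IPLEN_MAX || (ip_len & 0x1))
                return false;
        return true;
}
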
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index dc1b0e9e6df5..695b6dd61dc2 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -47,6 +47,7 @@ enum ice_protocol_type {
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
+ ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index ae291d442539..000c39d163a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -1533,9 +1533,12 @@ exit:
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
struct timespec64 now, then;
+ int ret;
then = ns_to_timespec64(delta);
- ice_ptp_gettimex64(info, &now, NULL);
+ ret = ice_ptp_gettimex64(info, &now, NULL);
+ if (ret)
+ return ret;
now = timespec64_add(now, then);
return ice_ptp_settime64(info, (const struct timespec64 *)&now);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 11ae0bee3590..475ec2afa210 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -4537,6 +4537,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
break;
@@ -5305,7 +5306,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN) {
+ if (rinfo->tun_type != ICE_NON_TUN &&
+ rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
s_rule->pdata.lkup_tx_rx.hdr,
pkt_offsets);
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index e8aab664270a..65cf32eb4046 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -709,7 +709,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+
headers->l4_key.dst_port = match.key->dst;
headers->l4_mask.dst_port = match.mask->dst;
}
@@ -718,7 +718,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+
headers->l4_key.src_port = match.key->src;
headers->l4_mask.src_port = match.mask->src;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 39b80124d282..408f78e3eb13 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -500,8 +500,6 @@ void ice_free_vfs(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
unsigned int tmp, i;
- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
-
if (!pf->vf)
return;
@@ -519,22 +517,26 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- ice_dis_vf_qs(&pf->vf[i]);
-
tmp = pf->num_alloc_vfs;
pf->num_qps_per_vf = 0;
pf->num_alloc_vfs = 0;
for (i = 0; i < tmp; i++) {
- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(&pf->vf[i]);
- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
- ice_free_vf_res(&pf->vf[i]);
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
}
- mutex_destroy(&pf->vf[i].cfg_lock);
+ mutex_unlock(&vf->cfg_lock);
+
+ mutex_destroy(&vf->cfg_lock);
}
if (ice_sriov_free_msix_res(pf))
@@ -570,7 +572,6 @@ void ice_free_vfs(struct ice_pf *pf)
i);
clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
@@ -1498,6 +1499,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
+ mutex_lock(&vf->cfg_lock);
+
vf->driver_caps = 0;
ice_vc_set_default_allowlist(vf);
@@ -1512,6 +1515,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
}
if (ice_is_eswitch_mode_switchdev(pf))
@@ -1562,6 +1567,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
+ lockdep_assert_held(&vf->cfg_lock);
+
dev = ice_pf_to_dev(pf);
if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
@@ -2061,9 +2068,12 @@ void ice_process_vflr_event(struct ice_pf *pf)
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx))
+ if (reg & BIT(bit_idx)) {
/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ mutex_lock(&vf->cfg_lock);
ice_reset_vf(vf, true);
+ mutex_unlock(&vf->cfg_lock);
+ }
}
}
@@ -2140,7 +2150,9 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
if (!vf)
return;
+ mutex_lock(&vf->cfg_lock);
ice_vc_reset_vf(vf);
+ mutex_unlock(&vf->cfg_lock);
}
/**
@@ -4625,10 +4637,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
struct device *dev;
int err = 0;
- /* if de-init is underway, don't process messages from VF */
- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
- return;
-
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id)) {
err = -EINVAL;
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 5cad31c3c7b0..40dbf4b43234 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -746,8 +746,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +777,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b3fd8e5cd85b..6a5e9cf6b5da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -390,12 +390,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
u32 cmd_type;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
- !netif_carrier_ok(xdp_ring->netdev)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
}
+ if (!netif_carrier_ok(xdp_ring->netdev))
+ break;
+
if (!xsk_tx_peek_desc(pool, &desc))
break;
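
The ixgbe XSK transmit hunk separates two stop conditions that were previously folded together: running out of descriptors clears work_done so the poll is rescheduled, while a down carrier simply ends this pass. A compact sketch of that control flow (all helpers are hypothetical stand-ins):

#include <stdbool.h>

/* Hypothetical stand-ins for ixgbe_desc_unused(), netif_carrier_ok()
 * and xsk_tx_peek_desc(). */
bool ring_has_room(void);
bool carrier_ok(void);
bool peek_next_frame(void);
void transmit_frame(void);

/* Sets *work_done to false only when the ring ran out of descriptors,
 * i.e. when the caller should poll again soon. */
static void xmit_budgeted(unsigned int budget, bool *work_done)
{
        *work_done = true;

        while (budget-- > 0) {
                if (!ring_has_room()) {
                        *work_done = false;   /* out of descriptors: come back later */
                        break;
                }
                if (!carrier_ok())
                        break;                /* link down: stop for now */
                if (!peek_next_frame())
                        break;                /* nothing queued to send */
                transmit_frame();
        }
}
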
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0015fcf1df2b..0f293acd17e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index f99adbf26ab4..04345b929d8e 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 105247582684..143ca8be5eb5 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2704,6 +2704,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
static struct platform_device *port_platdev[3];
+static void mv643xx_eth_shared_of_remove(void)
+{
+ int n;
+
+ for (n = 0; n < 3; n++) {
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+}
+
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct device_node *pnp)
{
@@ -2740,7 +2750,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
return -EINVAL;
}
- of_get_mac_address(pnp, ppd.mac_addr);
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+ if (ret)
+ return ret;
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2804,21 +2816,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
of_node_put(pnp);
+ mv643xx_eth_shared_of_remove();
return ret;
}
}
return 0;
}
-static void mv643xx_eth_shared_of_remove(void)
-{
- int n;
-
- for (n = 0; n < 3; n++) {
- platform_device_del(port_platdev[n]);
- port_platdev[n] = NULL;
- }
-}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
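
The mv643xx_eth change moves the remove helper above the probe loop so that a failure while adding port N can unwind the ports that were already added. The general shape of that cleanup, sketched with hypothetical per-port helpers:

#include <stdbool.h>

#define NUM_PORTS 3

/* Hypothetical per-port setup/teardown; remove_port() is expected to
 * tolerate slots that were never populated, the way
 * mv643xx_eth_shared_of_remove() walks all of port_platdev[]. */
bool add_port(int n);
void remove_port(int n);

static void remove_all_ports(void)
{
        for (int n = 0; n < NUM_PORTS; n++)
                remove_port(n);
}

static int probe_ports(void)
{
        for (int n = 0; n < NUM_PORTS; n++) {
                if (!add_port(n)) {
                        remove_all_ports();   /* unwind everything added so far */
                        return -1;
                }
        }
        return 0;
}
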
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7cdbf8b8bbf6..1a835b48791b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6870,6 +6870,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
@@ -6940,9 +6943,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.supported_interfaces);
}
- port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
- port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
-
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 812e6810cb3b..c14e06ca64d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_ll {
@@ -241,8 +241,8 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
union {
- struct mlx5_mtt inline_mtts[0];
- struct mlx5_klm inline_klms[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
+ DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
};
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 00449df98a5e..c1e07496c89c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
- *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+ /* Hardware treats 0 as "unlimited", set at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 9c076aa20306..b6f5c1bcdbcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *priv;
-
- /* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
- return false;
-
- priv = netdev_priv(netdev);
- rpriv = priv->ppriv;
-
- /* Egress acl forward to vport is supported only non-uplink representor */
- return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+ return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}
static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
u16 fwd_vport_num;
int err;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_bond *bond;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return NOTIFY_DONE;
+
+ bond = container_of(nb, struct mlx5e_rep_bond, nb);
+ priv = netdev_priv(netdev);
+ rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+ /* Verify the VF representor is on the same device as the bond handling the netevent. */
+ if (rpriv->uplink_priv.bond != bond)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_CHANGELOWERSTATE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index c6d2f8c78db7..48dc121b2cb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
}
br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
- err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
@@ -509,7 +509,9 @@ err_register_swdev_blk:
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
+ rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
+ rtnl_unlock();
}
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
return;
cancel_delayed_work_sync(&br_offloads->update_work);
- unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
destroy_workqueue(br_offloads->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
index 26efa33de56f..9cc844bd00f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
@@ -16,11 +16,13 @@ struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ bool ct_clear;
bool encap;
bool decap;
bool mpls_push;
bool ptype_host;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
struct pedit_headers_action hdrs[__PEDIT_CMD_MAX];
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
int if_count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 06ec30cdb269..58cc33f1363d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -27,8 +27,13 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
int err;
+ /* It's redundant to do ct clear more than once. */
+ if (clear_action && parse_state->ct_clear)
+ return 0;
+
err = mlx5_tc_ct_parse_action(parse_state->ct_priv, attr,
&attr->parse_attr->mod_hdr_acts,
act, parse_state->extack);
@@ -40,6 +45,8 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (mlx5e_is_eswitch_flow(parse_state->flow))
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->ct_clear = clear_action;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index c614fc7fdc9c..2e615e0ba972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -177,6 +177,12 @@ parse_mirred_encap(struct mlx5e_tc_act_parse_state *parse_state,
return -ENOMEM;
parse_state->encap = false;
+
+ if (parse_state->mpls_push) {
+ memcpy(&parse_attr->mpls_info[esw_attr->out_count],
+ &parse_state->mpls_info, sizeof(parse_state->mpls_info));
+ parse_state->mpls_push = false;
+ }
esw_attr->dests[esw_attr->out_count].flags |= MLX5_ESW_DEST_ENCAP;
esw_attr->out_count++;
/* attr->dests[].rep is resolved when we handle encap */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
index 784fc4f68b1e..89ca88c78840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c
@@ -22,6 +22,16 @@ tc_act_can_offload_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
return true;
}
+static void
+copy_mpls_info(struct mlx5e_mpls_info *mpls_info,
+ const struct flow_action_entry *act)
+{
+ mpls_info->label = act->mpls_push.label;
+ mpls_info->tc = act->mpls_push.tc;
+ mpls_info->bos = act->mpls_push.bos;
+ mpls_info->ttl = act->mpls_push.ttl;
+}
+
static int
tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
@@ -29,6 +39,7 @@ tc_act_parse_mpls_push(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
parse_state->mpls_push = true;
+ copy_mpls_info(&parse_state->mpls_info, act);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index f832c26ff2c3..70b40ae384e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -35,6 +35,7 @@ enum {
struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_mpls_info mpls_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 9918ed8c059b..d39d0dae22fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -750,6 +750,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
+ const struct mlx5e_mpls_info *mpls_info;
unsigned long tbl_time_before = 0;
struct mlx5e_encap_entry *e;
struct mlx5e_encap_key key;
@@ -760,6 +761,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = parse_attr->tun_info[out_index];
+ mpls_info = &parse_attr->mpls_info[out_index];
family = ip_tunnel_info_af(tun_info);
key.ip_tun_key = &tun_info->key;
key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
@@ -810,6 +812,7 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
goto out_err_init;
}
e->tun_info = tun_info;
+ memcpy(&e->mpls_info, mpls_info, sizeof(*mpls_info));
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
if (err)
goto out_err_init;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 60952b33b568..c5b1617d556f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -30,16 +30,15 @@ static int generate_ip_tun_hdr(char buf[],
struct mlx5e_encap_entry *r)
{
const struct ip_tunnel_key *tun_key = &r->tun_info->key;
+ const struct mlx5e_mpls_info *mpls_info = &r->mpls_info;
struct udphdr *udp = (struct udphdr *)(buf);
struct mpls_shim_hdr *mpls;
- u32 tun_id;
- tun_id = be32_to_cpu(tunnel_id_to_key32(tun_key->tun_id));
mpls = (struct mpls_shim_hdr *)(udp + 1);
*ip_proto = IPPROTO_UDP;
udp->dest = tun_key->tp_dst;
- *mpls = mpls_entry_encode(tun_id, tun_key->ttl, tun_key->tos, true);
+ *mpls = mpls_entry_encode(mpls_info->label, mpls_info->ttl, mpls_info->tc, mpls_info->bos);
return 0;
}
@@ -60,37 +59,31 @@ static int parse_tunnel(struct mlx5e_priv *priv,
void *headers_v)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct flow_match_enc_keyid enc_keyid;
struct flow_match_mpls match;
void *misc2_c;
void *misc2_v;
- misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters_2);
- misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters_2);
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
- return 0;
-
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
- return 0;
-
- flow_rule_match_enc_keyid(rule, &enc_keyid);
-
- if (!enc_keyid.mask->keyid)
- return 0;
-
if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID))
+ return -EOPNOTSUPP;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS))
+ return 0;
+
flow_rule_match_mpls(rule, &match);
/* Only support matching the first LSE */
if (match.mask->used_lses != 1)
return -EOPNOTSUPP;
+ misc2_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+ misc2_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
MLX5_SET(fte_match_set_misc2, misc2_c,
outer_first_mpls_over_udp.mpls_label,
match.mask->ls[0].mpls_label);
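The rewritten generate_ip_tun_hdr() above now builds the MPLS label stack entry from the label/tc/bos/ttl values captured at parse time instead of deriving them from the tunnel id. A minimal standalone sketch of that 32-bit layout (RFC 3032: 20-bit label, 3-bit TC, bottom-of-stack bit, 8-bit TTL, sent in network byte order) — the helper name below is a placeholder, not the kernel's mpls_entry_encode():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Pack one MPLS label stack entry: label(20) | tc(3) | bos(1) | ttl(8). */
static uint32_t mpls_lse_encode(uint32_t label, uint8_t tc, uint8_t bos, uint8_t ttl)
{
    uint32_t lse = ((label & 0xfffff) << 12) |
                   ((uint32_t)(tc & 0x7) << 9) |
                   ((uint32_t)(bos & 0x1) << 8) |
                   ttl;
    return htonl(lse);                 /* on-wire (big-endian) representation */
}

int main(void)
{
    /* label 100, tc 0, bottom of stack, ttl 64 -> prints 0x00064140 */
    printf("0x%08x\n", ntohl(mpls_lse_encode(100, 0, 1, 64)));
    return 0;
}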
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 4cdf8e5b24c2..b789af07829c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+}
+
struct mlx5e_shampo_umr {
u16 len;
};
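mlx5e_shampo_get_cqe_header_index() masks the CQE's header entry index with hd_per_wq - 1, i.e. it wraps the index into a ring whose size is assumed to be a power of two. A small standalone illustration of why that mask is equivalent to a modulo:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* For a power-of-two size, idx & (size - 1) equals idx % size,
 * because size - 1 has exactly the low log2(size) bits set. */
static uint16_t ring_wrap(uint16_t idx, uint16_t size)
{
    assert(size && (size & (size - 1)) == 0);   /* size must be 2^n */
    return idx & (size - 1);
}

int main(void)
{
    printf("%u %u\n", ring_wrap(1029, 1024), 1029 % 1024);   /* both print 5 */
    return 0;
}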
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 338d65e2c9ce..56e10c84a706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
+ memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
+ MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
dseg++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 2db9573a3fe6..b56fea142c24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
/* Tunnel mode */
if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
if (xo->proto == IPPROTO_IPV6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | IP | [TCP | UDP] */
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index b98db50c3418..428881e0adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -131,14 +131,17 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
- struct xfrm_offload *xo = xfrm_offload(skb);
+ u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
return false;
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (xo->inner_ipproto) {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ inner_ipproto = xfrm_offload(skb)->inner_ipproto;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 57d755db1cf5..6e80585d731f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1792,7 +1792,7 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
if (size_read < 0) {
netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
__func__, size_read);
- return 0;
+ return size_read;
}
i += size_read;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b01dacb6f527..b3f7520dfd08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -183,6 +183,13 @@ struct mlx5e_decap_entry {
struct rcu_head rcu;
};
+struct mlx5e_mpls_info {
+ u32 label;
+ u8 tc;
+ u8 bos;
+ u8 ttl;
+};
+
struct mlx5e_encap_entry {
/* attached neigh hash entry */
struct mlx5e_neigh_hash_entry *nhe;
@@ -196,6 +203,7 @@ struct mlx5e_encap_entry {
struct list_head route_list;
struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
+ struct mlx5e_mpls_info mpls_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e86ccc22fb82..6530d7bd5045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1117,7 +1117,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct tcphdr *skb_tcp_hd)
{
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
struct tcphdr *last_tcp_hd;
void *last_hd_addr;
@@ -1349,7 +1349,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
}
/* True when explicitly set via priv flag, or XDP prog is loaded */
- if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
+ get_cqe_tls_offload(cqe))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
@@ -1871,7 +1872,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}
-static void
+static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
@@ -1895,7 +1896,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
if (unlikely(!skb))
- return;
+ return NULL;
/* queue up for recycling/reuse */
page_ref_inc(head->page);
@@ -1907,7 +1908,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
ALIGN(head_size, sizeof(long)));
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
- return;
+ return NULL;
}
prefetchw(skb->data);
@@ -1918,9 +1919,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
skb->tail += head_size;
skb->len += head_size;
}
- rq->hw_gro_data->skb = skb;
- NAPI_GRO_CB(skb)->count = 1;
- skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+ return skb;
}
static void
@@ -1973,13 +1972,14 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u16 head_size = cqe->shampo.header_size;
struct sk_buff **skb = &rq->hw_gro_data->skb;
bool flush = cqe->shampo.flush;
bool match = cqe->shampo.match;
@@ -2011,9 +2011,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (likely(head_size))
+ *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ else
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+ page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
+
+ NAPI_GRO_CB(*skb)->count = 1;
+ skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
} else {
NAPI_GRO_CB(*skb)->count++;
if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2034,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
}
- di = &wi->umr.dma_info[page_idx];
- mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ if (likely(head_size)) {
+ di = &wi->umr.dma_info[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ }
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
if (flush)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 8c9163d2c646..08a75654f5f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -334,6 +334,7 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
buf[count] = st.st_func(priv);
netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+ count++;
}
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 26e326fe503c..00f1d16db456 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1254,9 +1254,6 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
- if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return;
-
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
@@ -1272,6 +1269,9 @@ static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
struct ethtool_fec_stats *fec_stats)
{
+ if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
+ return;
+
fec_set_corrected_bits_total(priv, fec_stats);
fec_set_block_stats(priv, fec_stats);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d908a7e1406..b27532a9301e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1414,7 +1414,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_out;
- if (!attr->chain && esw_attr->int_port) {
+ if (!attr->chain && esw_attr->int_port &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* If decap route device is internal port, change the
* source vport value in reg_c0 back to uplink just in
* case the rule performs goto chain > 0. If we have a miss
@@ -3191,6 +3192,30 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
}
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
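The new checks in actions_match_supported() rely on the bit trick !(~actions & (FWD | DROP)): inverting the flag word clears exactly the bits that are set, so the AND is zero only when both required bits are present. A standalone sketch with placeholder flag values (not the mlx5 definitions):

#include <stdbool.h>
#include <stdio.h>

#define ACT_FWD_DEST (1u << 0)   /* placeholder flag values */
#define ACT_DROP     (1u << 1)
#define ACT_MOD_HDR  (1u << 2)

/* True when every bit of 'required' is set in 'actions'. */
static bool has_all(unsigned int actions, unsigned int required)
{
    return !(~actions & required);
}

int main(void)
{
    unsigned int actions = ACT_FWD_DEST | ACT_DROP;

    if (has_all(actions, ACT_FWD_DEST | ACT_DROP))
        printf("reject: forward+drop in one rule\n");
    if ((actions & ACT_MOD_HDR) && (actions & ACT_DROP))
        printf("reject: modify header with drop\n");
    return 0;
}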
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 7fd33b356cc8..ee7ecb88adc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
int cpy1_sz = 2 * ETH_ALEN;
int cpy2_sz = ihs - cpy1_sz;
- memcpy(vhdr, skb->data, cpy1_sz);
+ memcpy(&vhdr->addrs, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index f690f430f40f..05e08cec5a8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1574,6 +1574,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads;
+ ASSERT_RTNL();
+
br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
@@ -1590,6 +1592,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+ ASSERT_RTNL();
+
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
index 3401188e0a60..51ac24e6ec3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
__field(unsigned int, used)
),
TP_fast_assign(
- strncpy(__entry->dev_name,
+ strscpy(__entry->dev_name,
netdev_name(fdb->dev),
IFNAMSIZ);
memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 11bbcd5f5b8b..694c54066955 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -697,7 +697,7 @@ void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
}
int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- u32 min_rate, u32 max_rate)
+ u32 max_rate, u32 min_rate)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9a7b25692505..cfcd72bad9af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2838,10 +2838,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
return false;
- if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
- mlx5_ecpf_vport_exists(esw->dev))
- return false;
-
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b628917e38e4..537c82b9aa53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2074,6 +2074,8 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
fte->node.del_hw_func = NULL;
up_write_ref_node(&fte->node, false);
tree_put_node(&fte->node, false);
+ } else {
+ up_write_ref_node(&fte->node, false);
}
kfree(handle);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 0b0234f9d694..84dbe46d5ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -132,7 +132,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer(&fw_reset->timer);
+ del_timer_sync(&fw_reset->timer);
}
static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index d5e47630e284..1e8ec4f236b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -127,6 +127,10 @@ u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
+ if (!chains->dev->priv.eswitch ||
+ chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
+ return 1;
+
/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO;
}
@@ -211,7 +215,7 @@ static int
create_chain_restore(struct fs_chain *chain)
{
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2c774f367199..bba72b220cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
/* Check log_max_qp from HCA caps to set in current profile */
if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
- prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
} else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
prof->log_max_qp,
@@ -1840,10 +1840,12 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
{ PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1ef2b6a848c1..7b16a1188aab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
switch (module_id) {
case MLX5_MODULE_ID_SFP:
- mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
- mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}
- if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
- size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+ size = MLX5_EEPROM_PAGE_LENGTH - offset;
query.size = size;
+ query.offset = offset;
return mlx5_query_mcia(dev, &query, data);
}
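The corrected clamp keeps a module EEPROM read inside the current 256-byte page: when offset + size would cross the page boundary, the size is cut down to the bytes remaining in the page rather than to an off-by-offset value. A standalone restatement of that arithmetic (the constant stands in for MLX5_EEPROM_PAGE_LENGTH):

#include <stdio.h>

#define PAGE_LEN 256   /* stand-in for MLX5_EEPROM_PAGE_LENGTH */

/* Shrink a read so it never crosses the end of the current page. */
static unsigned int clamp_to_page(unsigned int offset, unsigned int size)
{
    if (offset + size > PAGE_LEN)
        size = PAGE_LEN - offset;
    return size;
}

int main(void)
{
    printf("%u\n", clamp_to_page(250, 16));   /* only 6 bytes left in the page */
    return 0;
}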
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 7f6fd9c5e371..e289cfdbce07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,7 +4,6 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
struct mlx5dr_icm_pool {
enum mlx5dr_icm_type icm_type;
@@ -136,37 +135,35 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
kvfree(icm_mr);
}
-static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
- chunk->ste_arr = kvzalloc(chunk->num_of_entries *
- sizeof(chunk->ste_arr[0]), GFP_KERNEL);
- if (!chunk->ste_arr)
- return -ENOMEM;
-
- chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
- DR_STE_SIZE_REDUCED, GFP_KERNEL);
- if (!chunk->hw_ste_arr)
- goto out_free_ste_arr;
-
- chunk->miss_list = kvmalloc(chunk->num_of_entries *
- sizeof(chunk->miss_list[0]), GFP_KERNEL);
- if (!chunk->miss_list)
- goto out_free_hw_ste_arr;
+ /* We support only one type of STE size, both for ConnectX-5 and later
+ * devices. Once the support for match STE which has a larger tag is
+ * added (32B instead of 16B), the STE size for devices later than
+ * ConnectX-5 needs to account for that.
+ */
+ return DR_STE_SIZE_REDUCED;
+}
- return 0;
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ int index = offset / DR_STE_SIZE;
-out_free_hw_ste_arr:
- kvfree(chunk->hw_ste_arr);
-out_free_ste_arr:
- kvfree(chunk->ste_arr);
- return -ENOMEM;
+ chunk->ste_arr = &buddy->ste_arr[index];
+ chunk->miss_list = &buddy->miss_list[index];
+ chunk->hw_ste_arr = buddy->hw_ste_arr +
+ index * dr_icm_buddy_get_ste_size(buddy);
}
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
- kvfree(chunk->miss_list);
- kvfree(chunk->hw_ste_arr);
- kvfree(chunk->ste_arr);
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+
+ memset(chunk->hw_ste_arr, 0,
+ chunk->num_of_entries * dr_icm_buddy_get_ste_size(buddy));
+ memset(chunk->ste_arr, 0,
+ chunk->num_of_entries * sizeof(chunk->ste_arr[0]));
}
static enum mlx5dr_icm_type
@@ -189,6 +186,44 @@ static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
kvfree(chunk);
}
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ int num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+ buddy->ste_arr = kvcalloc(num_of_entries,
+ sizeof(struct mlx5dr_ste), GFP_KERNEL);
+ if (!buddy->ste_arr)
+ return -ENOMEM;
+
+ /* Preallocate full STE size on non-ConnectX-5 devices since
+ * we need to support both full and reduced with the same cache.
+ */
+ buddy->hw_ste_arr = kvcalloc(num_of_entries,
+ dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+ if (!buddy->hw_ste_arr)
+ goto free_ste_arr;
+
+ buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
+ if (!buddy->miss_list)
+ goto free_hw_ste_arr;
+
+ return 0;
+
+free_hw_ste_arr:
+ kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+ kvfree(buddy->ste_arr);
+ return -ENOMEM;
+}
+
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ kvfree(buddy->ste_arr);
+ kvfree(buddy->hw_ste_arr);
+ kvfree(buddy->miss_list);
+}
+
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
struct mlx5dr_icm_buddy_mem *buddy;
@@ -208,11 +243,19 @@ static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
buddy->icm_mr = icm_mr;
buddy->pool = pool;
+ if (pool->icm_type == DR_ICM_TYPE_STE) {
+ /* Reduce allocations by preallocating and reusing the STE structures */
+ if (dr_icm_buddy_init_ste_cache(buddy))
+ goto err_cleanup_buddy;
+ }
+
/* add it to the -start- of the list in order to search in it first */
list_add(&buddy->list_node, &pool->buddy_mem_list);
return 0;
+err_cleanup_buddy:
+ mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
kvfree(buddy);
free_mr:
@@ -234,6 +277,9 @@ static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
mlx5dr_buddy_cleanup(buddy);
+ if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_buddy_cleanup_ste_cache(buddy);
+
kvfree(buddy);
}
@@ -261,34 +307,30 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
chunk->byte_size =
mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
chunk->seg = seg;
+ chunk->buddy_mem = buddy_mem_pool;
- if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
- mlx5dr_err(pool->dmn,
- "Failed to init ste arrays (order: %d)\n",
- chunk_size);
- goto out_free_chunk;
- }
+ if (pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_chunk_ste_init(chunk, offset);
buddy_mem_pool->used_memory += chunk->byte_size;
- chunk->buddy_mem = buddy_mem_pool;
INIT_LIST_HEAD(&chunk->chunk_list);
/* chunk now is part of the used_list */
list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
return chunk;
-
-out_free_chunk:
- kvfree(chunk);
- return NULL;
}
static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
- return true;
+ int allow_hot_size;
+
+ /* sync when hot memory reaches half of the pool size */
+ allow_hot_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+ pool->icm_type) / 2;
- return false;
+ return pool->hot_memory_size > allow_hot_size;
}
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
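dr_icm_chunk_ste_init() no longer allocates per-chunk arrays; it computes index = offset / DR_STE_SIZE and points the chunk at slices of arrays preallocated once per buddy. A minimal standalone sketch of carving such views out of one big cache (names and sizes are illustrative only):

#include <stdlib.h>
#include <stdio.h>

#define ENTRY_SIZE 64              /* stand-in for DR_STE_SIZE */

struct entry { char data[ENTRY_SIZE]; };

struct cache {
    struct entry *entries;         /* one allocation for the whole buddy */
    size_t nentries;
};

struct chunk_view {
    struct entry *first;           /* points into cache->entries, not owned */
    size_t count;
};

static struct chunk_view chunk_init(struct cache *c, size_t byte_offset, size_t count)
{
    struct chunk_view v = { &c->entries[byte_offset / ENTRY_SIZE], count };
    return v;
}

int main(void)
{
    struct cache c = { calloc(1024, sizeof(struct entry)), 1024 };
    struct chunk_view v = chunk_init(&c, 128 * ENTRY_SIZE, 16);

    printf("chunk starts at index %zu\n", (size_t)(v.first - c.entries));   /* 128 */
    free(c.entries);
    return 0;
}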
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index e87cf498c77b..38971fe1dfe1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -13,18 +13,6 @@ static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
return (spec->dmac_47_16 || spec->dmac_15_0);
}
-static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
- spec->src_ip_63_32 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
-{
- return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
- spec->dst_ip_63_32 || spec->dst_ip_31_0);
-}
-
static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
{
return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
@@ -503,11 +491,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.outer))
+ if (DR_MASK_IS_DST_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.outer))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
@@ -610,11 +598,11 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
&mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
- if (dr_mask_is_dst_addr_set(&mask.inner))
+ if (DR_MASK_IS_DST_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
- if (dr_mask_is_src_addr_set(&mask.inner))
+ if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
&mask, inner, rx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 7e61742e58a0..187e29b409b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -602,12 +602,34 @@ int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
used_hw_action_num);
}
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+ struct mlx5dr_match_spec *spec)
+{
+ if (spec->ip_version) {
+ if (spec->ip_version != 0xf) {
+ mlx5dr_err(dmn,
+ "Partial ip_version mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+ } else if (spec->ethertype != 0xffff &&
+ (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+ mlx5dr_err(dmn,
+ "Partial/no ethertype mask with src/dst IP is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value)
{
- if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
+ if (value)
+ return 0;
+
+ if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
mlx5dr_err(dmn,
"Partial mask source_port is not supported\n");
@@ -621,6 +643,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
}
}
+ if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->outer))
+ return -EINVAL;
+
+ if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+ dr_ste_build_pre_check_spec(dmn, &mask->inner))
+ return -EINVAL;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 1b3d484b99be..55fcb751e24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -798,6 +798,16 @@ struct mlx5dr_match_param {
(_misc3)->icmpv4_code || \
(_misc3)->icmpv4_header_data)
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+ (_spec)->src_ip_95_64 || \
+ (_spec)->src_ip_63_32 || \
+ (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+ (_spec)->dst_ip_95_64 || \
+ (_spec)->dst_ip_63_32 || \
+ (_spec)->dst_ip_31_0)
+
struct mlx5dr_esw_caps {
u64 drop_icm_address_rx;
u64 drop_icm_address_tx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index a476da2424f8..3f311462bedf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -233,7 +233,11 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
+/* We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 34
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -403,9 +407,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
enum mlx5_flow_destination_type type = dst->dest_attr.type;
u32 id;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
- num_term_actions >= MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -478,8 +482,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
MLX5_FLOW_DESTINATION_TYPE_COUNTER)
continue;
- if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
- err = -ENOSPC;
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
goto free_actions;
}
@@ -499,14 +504,28 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
- if (term_actions->reformat)
+ if (term_actions->reformat) {
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->reformat;
+ }
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
actions[num_actions++] = term_actions->dest;
} else if (num_term_actions > 1) {
bool ignore_flow_level =
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+ if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+ fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+ err = -EOPNOTSUPP;
+ goto free_actions;
+ }
tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
term_actions,
num_term_actions,
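The bumped MLX5_FLOW_CONTEXT_ACTION_MAX of 34 follows directly from the comment: 32 destinations plus a counter plus one multi-destination table action, and every append is now guarded so overflowing the array returns -EOPNOTSUPP instead of writing out of bounds. A standalone sketch of that bounded-append pattern (struct and helper names are invented for illustration):

#include <errno.h>
#include <stdio.h>

/* 32 destinations + a counter + one multi-destination table action */
#define MAX_ACTIONS (32 + 1 + 1)

struct action_list {
    void *actions[MAX_ACTIONS];
    int   num;
};

/* Check capacity before every append so the array can never overflow. */
static int action_append(struct action_list *l, void *act)
{
    if (l->num == MAX_ACTIONS)
        return -EOPNOTSUPP;
    l->actions[l->num++] = act;
    return 0;
}

int main(void)
{
    struct action_list l = { .num = 0 };
    int dummy, err = 0;

    for (int i = 0; i < MAX_ACTIONS + 1 && !err; i++)
        err = action_append(&l, &dummy);
    printf("stored %d actions, last append: %d\n", l.num, err);   /* 34, -EOPNOTSUPP */
    return 0;
}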
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c7c93131b762..dfa223415fe2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -160,6 +160,11 @@ struct mlx5dr_icm_buddy_mem {
* sync_ste command sets them free.
*/
struct list_head hot_list;
+
+ /* Memory optimisation */
+ struct mlx5dr_ste *ste_arr;
+ struct list_head *miss_list;
+ u8 *hw_ste_arr;
};
int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 59783fc46a7b..10b866e9f726 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev,
stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
- for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ idx];
stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index a1acc9b461f2..d40e18ce3293 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -16,6 +16,8 @@
#include <linux/phylink.h>
#include <linux/hrtimer.h>
+#include "sparx5_main_regs.h"
+
/* Target chip type */
enum spx5_target_chiptype {
SPX5_TARGET_CT_7546 = 0x7546, /* SparX-5-64 Enterprise */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index dc7e5ea6ec15..148d431fcde4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
skb_put(skb, byte_cnt - ETH_FCS_LEN);
eth_skb_pad(skb);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
netdev->stats.rx_bytes += skb->len;
netdev->stats.rx_packets++;
+ netif_rx(skb);
}
static int sparx5_inject(struct sparx5 *sparx5,
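The sparx5 receive path now updates the byte and packet counters before netif_rx(), because handing the skb to the stack passes ownership and its fields may no longer be touched afterwards. A standalone sketch of the general read-before-handoff ordering (types and names are illustrative):

#include <stdlib.h>
#include <stdio.h>

struct buf { size_t len; char *data; };

static void consume(struct buf *b)     /* takes ownership, like netif_rx() */
{
    free(b->data);
    free(b);
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    size_t rx_bytes = 0, rx_packets = 0;

    b->len = 60;
    b->data = malloc(b->len);

    rx_bytes += b->len;                /* account first ... */
    rx_packets++;
    consume(b);                        /* ... then hand the buffer off */

    printf("%zu packets, %zu bytes\n", rx_packets, rx_bytes);
    return 0;
}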
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 4ce490a25f33..8e56ffa1c4f7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -58,16 +58,6 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
struct sparx5 *sparx5 = port->sparx5;
int ret;
- /* Make the port a member of the VLAN */
- set_bit(port->portno, sparx5->vlan_mask[vid]);
- ret = sparx5_vlant_set_mask(sparx5, vid);
- if (ret)
- return ret;
-
- /* Default ingress vlan classification */
- if (pvid)
- port->pvid = vid;
-
/* Untagged egress vlan classification */
if (untagged && port->vid != vid) {
if (port->vid) {
@@ -79,6 +69,16 @@ int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
port->vid = vid;
}
+ /* Make the port a member of the VLAN */
+ set_bit(port->portno, sparx5->vlan_mask[vid]);
+ ret = sparx5_vlant_set_mask(sparx5, vid);
+ if (ret)
+ return ret;
+
+ /* Default ingress vlan classification */
+ if (pvid)
+ port->pvid = vid;
+
sparx5_vlan_port_apply(sparx5, port);
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 455293aa6343..fd3ceb74620d 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -549,14 +549,18 @@ EXPORT_SYMBOL(ocelot_vlan_add);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool del_pvid = false;
int err;
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ del_pvid = true;
+
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
@@ -1432,6 +1436,8 @@ static void
ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_EV_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@@ -1440,6 +1446,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -1448,6 +1456,8 @@ static void
ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_GEN_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@@ -1456,6 +1466,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -1737,12 +1749,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
}
EXPORT_SYMBOL(ocelot_get_strings);
+/* Caller must hold &ocelot->stats_lock */
static void ocelot_update_stats(struct ocelot *ocelot)
{
int i, j;
- mutex_lock(&ocelot->stats_lock);
-
for (i = 0; i < ocelot->num_phys_ports; i++) {
/* Configure the port to read the stats from */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
@@ -1761,8 +1772,6 @@ static void ocelot_update_stats(struct ocelot *ocelot)
~(u64)U32_MAX) + val;
}
}
-
- mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_check_stats_work(struct work_struct *work)
@@ -1771,7 +1780,9 @@ static void ocelot_check_stats_work(struct work_struct *work)
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
+ mutex_lock(&ocelot->stats_lock);
ocelot_update_stats(ocelot);
+ mutex_unlock(&ocelot->stats_lock);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
@@ -1781,12 +1792,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i;
+ mutex_lock(&ocelot->stats_lock);
+
/* check and update now */
ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
+
+ mutex_unlock(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
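The stats refresh helper now documents that the caller must hold &ocelot->stats_lock, and both call sites (the periodic worker and the ethtool read) take the lock around the refresh plus whatever they do with the counters. A small pthread-based sketch of that caller-holds-lock pattern (all names are placeholders):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long stats[4];

/* Caller must hold stats_lock */
static void update_stats(void)
{
    stats[0]++;                        /* refresh from hardware in a real driver */
}

static void get_stats(unsigned long long *out, size_t n)
{
    pthread_mutex_lock(&stats_lock);
    update_stats();                    /* refresh and copy under one critical section */
    memcpy(out, stats, n * sizeof(*out));
    pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
    unsigned long long snap[4];

    get_stats(snap, 4);
    printf("%llu\n", snap[0]);
    return 0;
}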
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 784292b16290..1543e47456d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_gretap(netdev))
return true;
+ if (netif_is_ip6gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index dfb4468fe287..cb43651ea9ba 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -922,8 +922,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
int port, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
- int ida_idx = NFP_MAX_MAC_INDEX, err;
struct nfp_tun_offloaded_mac *entry;
+ int ida_idx = -1, err;
u16 nfp_mac_idx = 0;
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
@@ -997,7 +997,7 @@ err_remove_hash:
err_free_entry:
kfree(entry);
err_free_ida:
- if (ida_idx != NFP_MAX_MAC_INDEX)
+ if (ida_idx != -1)
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
return err;
@@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_repr_priv *repr_priv;
struct nfp_tun_offloaded_mac *entry;
struct nfp_repr *repr;
+ u16 nfp_mac_idx;
int ida_idx;
entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
- u16 nfp_mac_idx;
-
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
@@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
- u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
@@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ else
+ nfp_mac_idx = entry->index;
+
/* If MAC has global ID then extract and free the ida entry. */
- if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 32161a56726c..2881f5b2b5f4 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -2285,18 +2285,18 @@ static int __init sxgbe_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion\n", __func__);
- return -EINVAL;
+ return 1;
}
__setup("sxgbeeth=", sxgbe_cmdline_opt);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index be6bfd6b7ec7..50baf62b2cbc 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -163,9 +163,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
spin_lock_bh(&mcdi->iface_lock);
++mcdi->seqno;
+ seqno = mcdi->seqno & SEQ_MASK;
spin_unlock_bh(&mcdi->iface_lock);
- seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index dd6f69ced4ee..fc9cef9dcefc 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1648,7 +1648,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev,
return ret;
if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
return ret;
- }
+ }
memcpy(data, eebuf+eeprom->offset, eeprom->len);
return 0;
}
@@ -1667,11 +1667,11 @@ static int smc911x_ethtool_seteeprom(struct net_device *dev,
return ret;
/* write byte */
if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
- return ret;
+ return ret;
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
return ret;
- }
- return 0;
+ }
+ return 0;
}
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index dde5b772a5af..c3f10a92b62b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -49,13 +49,15 @@ struct visconti_eth {
void __iomem *reg;
u32 phy_intf_sel;
struct clk *phy_ref_clk;
+ struct device *dev;
spinlock_t lock; /* lock to protect register update */
};
static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
{
struct visconti_eth *dwmac = priv;
- unsigned int val, clk_sel_val;
+ struct net_device *netdev = dev_get_drvdata(dwmac->dev);
+ unsigned int val, clk_sel_val = 0;
unsigned long flags;
spin_lock_irqsave(&dwmac->lock, flags);
@@ -85,7 +87,9 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
break;
default:
/* No bit control */
- break;
+ netdev_err(netdev, "Unsupported speed request (%d)", speed);
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+ return;
}
writel(val, dwmac->reg + MAC_CTRL_REG);
@@ -229,6 +233,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
+ dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 1914ad698cab..acd70b9a3173 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -150,6 +150,7 @@
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
+#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 164dff5ec32e..abfb3cd5958d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -21,10 +21,18 @@
#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
+#define GMAC4_REG_SPACE_SIZE 0x116C
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
#define XGMAC_ETHTOOL_NAME "st_xgmac"
+/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
+ *
+ * It is here because dwmac_dma.h and dwmac4_dma.h can not be included at the
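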
+ * same time due to the conflicting macro names.
+ */
+#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
+
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
@@ -434,6 +442,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
if (priv->plat->has_xgmac)
return XGMAC_REGSIZE * 4;
+ else if (priv->plat->has_gmac4)
+ return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
@@ -446,8 +456,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- if (!priv->plat->has_xgmac) {
- /* Copy DMA registers to where ethtool expects them */
+ /* Copy DMA registers to where ethtool expects them */
+ if (priv->plat->has_gmac4) {
+ /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
+ NUM_DWMAC4_DMA_REGS * 4);
+ } else if (!priv->plat->has_xgmac) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 074e2cdfb0fa..a7ec9f4d46ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -145,15 +145,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
static void get_systime(void __iomem *ioaddr, u64 *systime)
{
- u64 ns;
-
- /* Get the TSSS value */
- ns = readl(ioaddr + PTP_STNSR);
- /* Get the TSS and convert sec time value to nanosecond */
- ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+ u64 ns, sec0, sec1;
+
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ do {
+ sec0 = sec1;
+ /* Get the TSSS value */
+ ns = readl_relaxed(ioaddr + PTP_STNSR);
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ } while (sec0 != sec1);
if (systime)
- *systime = ns;
+ *systime = ns + (sec1 * 1000000000ULL);
}
static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
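The reworked get_systime() samples the seconds register, then the nanoseconds register, then the seconds register again, and retries until the two seconds reads match, so a rollover between the two registers cannot produce a time that is almost a full second off. A standalone sketch of that consistent-read loop, with plain variables standing in for the PTP_STSR/PTP_STNSR registers:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t hw_sec = 41, hw_nsec = 999999999u;   /* fake registers */

static uint64_t read_systime(void)
{
    uint32_t sec0, sec1, nsec;

    sec1 = hw_sec;
    do {
        sec0 = sec1;
        nsec = hw_nsec;          /* nanoseconds sampled between the two reads */
        sec1 = hw_sec;
    } while (sec0 != sec1);      /* retry if the seconds rolled over mid-read */

    return (uint64_t)sec1 * 1000000000ull + nsec;
}

int main(void)
{
    printf("%llu ns\n", (unsigned long long)read_systime());
    return 0;
}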
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 639a753266e6..422e3225f476 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2262,6 +2262,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
stmmac_stop_tx(priv, priv->ioaddr, chan);
}
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ u32 chan;
+
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
/**
* stmmac_start_all_dma - start all RX and TX DMA channels
* @priv: driver private structure
@@ -2904,8 +2921,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3761,6 +3780,7 @@ static int stmmac_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -6510,8 +6530,10 @@ int stmmac_xdp_open(struct net_device *dev)
}
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* Adjust Split header */
sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6572,6 +6594,7 @@ int stmmac_xdp_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -7252,6 +7275,10 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
@@ -7270,8 +7297,6 @@ int stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
@@ -7449,6 +7474,7 @@ int stmmac_resume(struct device *dev)
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
@@ -7465,7 +7491,7 @@ static int __init stmmac_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
if (kstrtoint(opt + 6, 0, &debug))
@@ -7496,11 +7522,11 @@ static int __init stmmac_cmdline_opt(char *str)
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion", __func__);
- return -EINVAL;
+ return 1;
}
__setup("stmmaceth=", stmmac_cmdline_opt);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index b900ab5aef2a..64c7e26c3b75 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1433,6 +1433,8 @@ static int temac_probe(struct platform_device *pdev)
lp->indirect_lock = devm_kmalloc(&pdev->dev,
sizeof(*lp->indirect_lock),
GFP_KERNEL);
+ if (!lp->indirect_lock)
+ return -ENOMEM;
spin_lock_init(lp->indirect_lock);
}
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index b1fc153125d9..45c3c4a1101b 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -668,11 +668,11 @@ static void sixpack_close(struct tty_struct *tty)
*/
netif_stop_queue(sp->dev);
+ unregister_netdev(sp->dev);
+
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);
- unregister_netdev(sp->dev);
-
/* Free all 6pack frame buffers after unreg. */
kfree(sp->rbuff);
kfree(sp->xbuff);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 7d67f41387f5..4f5ef8a9a9a8 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -100,6 +100,7 @@ struct at86rf230_local {
unsigned long cal_timeout;
bool is_tx;
bool is_tx_from_off;
+ bool was_tx;
u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
@@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context)
if (ctx->free)
kfree(ctx);
- ieee802154_wake_queue(lp->hw);
+ if (lp->was_tx) {
+ lp->was_tx = 0;
+ dev_kfree_skb_any(lp->tx_skb);
+ ieee802154_wake_queue(lp->hw);
+ }
}
static void
@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- lp->is_tx = 0;
+ if (lp->is_tx) {
+ lp->was_tx = 1;
+ lp->is_tx = 0;
+ }
+
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_async_error_recover_complete);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index ece6ff6049f6..2bc730fd260e 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete(
status
);
if (status != MAC_TRANSACTION_OVERFLOW) {
+ dev_kfree_skb_any(priv->tx_skb);
ieee802154_wake_queue(priv->hw);
return 0;
}
@@ -2974,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
ca8210_hw->phy->cca_ed_level = -9800;
ca8210_hw->phy->symbol_duration = 16;
- ca8210_hw->phy->lifs_period = 40;
- ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
ca8210_hw->flags =
IEEE802154_HW_AFILT |
IEEE802154_HW_OMIT_CKSUM |
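
The lifs_period/sifs_period change above stores the interframe spacings in the same time unit as symbol_duration rather than as raw symbol counts. IEEE 802.15.4 defines LIFS as 40 symbols and SIFS as 12 symbols; with the 16 µs symbol set just above (2.4 GHz O-QPSK PHY), that works out as below. The identical mcr20a change further down follows the same arithmetic. Standalone check only, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int symbol_duration = 16;	/* us, 2.4 GHz O-QPSK PHY */
	unsigned int lifs = 40 * symbol_duration;
	unsigned int sifs = 12 * symbol_duration;

	/* prints: LIFS = 640 us, SIFS = 192 us */
	printf("LIFS = %u us, SIFS = %u us\n", lifs, sifs);
	return 0;
}
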
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 8caa61ec718f..36f1c5aa98fc 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
goto err_pib;
}
+ pib->channel = 13;
rcu_assign_pointer(phy->pib, pib);
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 8dc04e2590b1..383231b85464 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
dev_dbg(printdev(lp), "%s\n", __func__);
phy->symbol_duration = 16;
- phy->lifs_period = 40;
- phy->sifs_period = 12;
+ phy->lifs_period = 40 * phy->symbol_duration;
+ phy->sifs_period = 12 * phy->symbol_duration;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
IEEE802154_HW_AFILT |
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index d037682fb7ad..6782c2cbf542 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -2,7 +2,9 @@ config QCOM_IPA
tristate "Qualcomm IPA support"
depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST
+ depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index b1c6c0fcb654..f2989aac47a6 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
+#include "linux/soc/qcom/qcom_aoss.h"
+
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
@@ -64,6 +66,7 @@ enum ipa_power_flag {
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
+ * @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
@@ -72,6 +75,7 @@ enum ipa_power_flag {
struct ipa_power {
struct device *dev;
struct clk *core;
+ struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
@@ -382,6 +386,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa)
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
+static int ipa_power_retention_init(struct ipa_power *power)
+{
+ struct qmp *qmp = qmp_get(power->dev);
+
+ if (IS_ERR(qmp)) {
+ if (PTR_ERR(qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We assume any other error means it's not defined/needed */
+ qmp = NULL;
+ }
+ power->qmp = qmp;
+
+ return 0;
+}
+
+static void ipa_power_retention_exit(struct ipa_power *power)
+{
+ qmp_put(power->qmp);
+ power->qmp = NULL;
+}
+
+/* Control register retention on power collapse */
+void ipa_power_retention(struct ipa *ipa, bool enable)
+{
+ static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
+ struct ipa_power *power = ipa->power;
+ char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */
+ int ret;
+
+ if (!power->qmp)
+ return; /* Not needed on this platform */
+
+ (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');
+
+ ret = qmp_send(power->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(power->dev, "error %d sending QMP %sable request\n",
+ ret, enable ? "en" : "dis");
+}
+
int ipa_power_setup(struct ipa *ipa)
{
int ret;
@@ -438,12 +483,18 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
if (ret)
goto err_kfree;
+ ret = ipa_power_retention_init(power);
+ if (ret)
+ goto err_interconnect_exit;
+
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
+err_interconnect_exit:
+ ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
@@ -460,6 +511,7 @@ void ipa_power_exit(struct ipa_power *power)
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
+ ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);
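
On the buffer sizing in ipa_power_retention() above: the format string is 36 characters long, and replacing "%c" with a single digit leaves a 35-character message, so the 36-byte buffer (a multiple of 4, as the in-line comment notes) holds the formatted string plus its terminating NUL exactly. A standalone check of that arithmetic, outside the driver:

#include <stdio.h>
#include <string.h>

int main(void)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	char buf[36];
	int n = snprintf(buf, sizeof(buf), fmt, '1');

	/* prints: fmt=36 chars, message=35 chars (fits buf[36] with NUL) */
	printf("fmt=%zu chars, message=%d chars\n", strlen(fmt), n);
	return 0;
}
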
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 2151805d7fbb..6f84f057a209 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa);
void ipa_power_modem_queue_active(struct ipa *ipa);
/**
+ * ipa_power_retention() - Control register retention on power collapse
+ * @ipa: IPA pointer
+ * @enable: Whether retention should be enabled or disabled
+ */
+void ipa_power_retention(struct ipa *ipa, bool enable);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 856e55a080a7..fe11910518d9 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -11,6 +11,7 @@
#include "ipa.h"
#include "ipa_uc.h"
+#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
@@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
+ ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
@@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (ipa->uc_loaded)
+ ipa_power_retention(ipa, false);
+
if (!ipa->uc_powered)
return;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 16aa3a478e9e..3d0874331763 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3870,6 +3870,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
macsec_del_dev(macsec);
@@ -3884,18 +3896,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
- /* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
- const struct macsec_ops *ops;
- struct macsec_context ctx;
-
- ops = macsec_get_ops(netdev_priv(dev), &ctx);
- if (ops) {
- ctx.secy = &macsec->secy;
- macsec_offload(ops->mdo_del_secy, &ctx);
- }
- }
-
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -4018,6 +4018,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
!macsec_check_offload(macsec->offload, macsec))
return -EOPNOTSUPP;
+ /* send_sci must be set to true when transmit sci explicitly is set */
+ if ((data && data[IFLA_MACSEC_SCI]) &&
+ (data && data[IFLA_MACSEC_INC_SCI])) {
+ u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
+
+ if (!send_sci)
+ return -EINVAL;
+ }
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index eaa6fb3224bc..62723a7faa2d 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
mctp_serial_push(dev, c[i]);
}
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
@@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty)
int idx = dev->idx;
unregister_netdev(dev->netdev);
- cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 966c3b4ad59d..e2273588c75b 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = {
{ .compatible = "aspeed,ast2600-mdio", },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
static struct platform_driver aspeed_mdio_driver = {
.driver = {
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 5f4cd24a0241..4eba5a91075c 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -200,7 +200,11 @@ static int ipq_mdio_reset(struct mii_bus *bus)
if (ret)
return ret;
- return clk_prepare_enable(priv->mdio_clk);
+ ret = clk_prepare_enable(priv->mdio_clk);
+ if (ret == 0)
+ mdelay(10);
+
+ return ret;
}
static int ipq4019_mdio_probe(struct platform_device *pdev)
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 4300261e2f9e..378ee779061c 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
if (err)
goto err_fib6_rt_nh_del;
- fib6_event->rt_arr[i]->trap = true;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
- fib6_event->rt_arr[i]->trap = false;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
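
The netdevsim change above wraps the assignments to rt_arr[i]->trap in WRITE_ONCE() because the field is also read without the lock; the annotation documents the concurrent access and keeps the compiler from tearing or fusing the store. A rough userspace analogue, using a relaxed C11 atomic in place of the kernel primitive (illustration only, names made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct route {
	atomic_bool trap;	/* read by a lockless fast path */
};

static void route_set_trap(struct route *rt, bool val)
{
	/* analogue of WRITE_ONCE(rt->trap, val) */
	atomic_store_explicit(&rt->trap, val, memory_order_relaxed);
}

static bool route_read_trap(struct route *rt)
{
	/* analogue of READ_ONCE(rt->trap) on the reader side */
	return atomic_load_explicit(&rt->trap, memory_order_relaxed);
}

int main(void)
{
	struct route rt;

	atomic_init(&rt.trap, false);
	route_set_trap(&rt, true);
	printf("trap=%d\n", route_read_trap(&rt));
	return 0;
}
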
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 5b6c0d120e09..29aa811af430 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -1688,19 +1688,19 @@ static int qca808x_read_status(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->link && phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SMII;
-
- /* generate seed as a lower random value to make PHY linked as SLAVE easily,
- * except for master/slave configuration fault detected.
- * the reason for not putting this code into the function link_change_notify is
- * the corner case where the link partner is also the qca8081 PHY and the seed
- * value is configured as the same value, the link can't be up and no link change
- * occurs.
- */
- if (!phydev->link) {
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+ /* generate seed as a lower random value to make PHY linked as SLAVE easily,
+ * except for master/slave configuration fault detected.
+ * the reason for not putting this code into the function link_change_notify is
+ * the corner case where the link partner is also the qca8081 PHY and the seed
+ * value is configured as the same value, the link can't be up and no link change
+ * occurs.
+ */
if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) {
qca808x_phy_ms_seed_enable(phydev, false);
} else {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index fa71fb7a66b5..2429db614b59 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -553,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
else
mscr = 0;
- return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1121_PHY_MSCR_REG,
- MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
+ return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -569,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
+ changed = err;
+
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- changed = err;
+ changed |= err;
err = genphy_config_aneg(phydev);
if (err < 0)
@@ -1213,16 +1215,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
{
int err;
- err = genphy_soft_reset(phydev);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ err = genphy_config_aneg(phydev);
if (err < 0)
return err;
- err = genphy_config_aneg(phydev);
- return 0;
+ return genphy_soft_reset(phydev);
}
static int m88e1118_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index b7a5ae20edd5..68ee434f9dea 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
static int mt7531_phy_config_init(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
-
mtk_gephy_config_init(phydev);
/* PHY link down power saving enable */
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1a627ba4b850..a31098981a65 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1468,58 +1468,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
- /* This check is no longer done by usbnet */
- if (skb->len < dev->net->hard_header_len)
+ /* At the end of the SKB, there's a header telling us how many packets
+ * are bundled into this buffer and where we can find an array of
+ * per-packet metadata (which contains elements encoded into u16).
+ */
+ if (skb->len < 4)
return 0;
-
skb_trim(skb, skb->len - 4);
rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
-
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
+
+ if (pkt_cnt == 0)
+ return 0;
+
+ /* Make sure that the bounds of the metadata array are inside the SKB
+ * (and in front of the counter at the end).
+ */
+ if (pkt_cnt * 2 + hdr_off > skb->len)
+ return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
- while (pkt_cnt--) {
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, hdr_off);
+
+ for (; ; pkt_cnt--, pkt_hdr++) {
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
- /* Check CRC or runt packet */
- if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
- (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
- continue;
- }
-
- if (pkt_cnt == 0) {
- skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(skb, 2);
- skb_set_tail_pointer(skb, skb->len);
- skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(skb, pkt_hdr);
- return 1;
- }
+ if (pkt_len > skb->len)
+ return 0;
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
+ /* Check CRC or runt packet */
+ if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
+ pkt_len >= 2 + ETH_HLEN) {
+ bool last = (pkt_cnt == 0);
+
+ if (last) {
+ ax_skb = skb;
+ } else {
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
+ return 0;
+ }
ax_skb->len = pkt_len;
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
+
+ if (last)
+ return 1;
+
usbnet_skb_return(dev, ax_skb);
- } else {
- return 0;
}
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
+ /* Trim this packet away from the SKB */
+ if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ return 0;
}
- return 1;
}
static struct sk_buff *
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index eb3817d70f2b..9b4dfa3001d6 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
@@ -640,6 +645,13 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
/* reported with some C860 units */
.idProduct = 0x9050, /* C-860 */
ZAURUS_MASTER_INTERFACE,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 82bb5ed94c48..c0b8b4aa78f3 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FN990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e303b522efb5..15f91d691bba 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
+ unsigned int len;
int nframes;
int x;
- int offset;
+ unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
@@ -1790,8 +1790,8 @@ next_ndp:
break;
}
- /* sanity checking */
- if (((offset + len) > skb_in->len) ||
+ /* sanity checking - watch out for integer wrap */
+ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index cd33955df0b6..6a769df0b421 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -146,7 +146,7 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
iphone->rx_urb->transfer_dma);
usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
iphone->tx_urb->transfer_dma);
@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
usb_fill_bulk_urb(dev->rx_urb, udev,
usb_rcvbulkpipe(udev, dev->bulk_in),
- dev->rx_buf, IPHETH_BUF_SIZE,
+ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
ipheth_rcvbulk_callback,
dev);
dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 37e5f3495362..3353e761016d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1400,6 +1400,8 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index b658510cc9a4..5a53e63d33a6 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* ignore the CRC length */
len = (skb->data[1] | (skb->data[2] << 8)) - 4;
- if (len > ETH_FRAME_LEN)
+ if (len > ETH_FRAME_LEN || len > skb->len)
return 0;
/* the last packet of current skb */
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 8e717a0b559b..7984f2157d22 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -315,6 +320,13 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
/* reported with some C860 units */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 354a963075c5..d29fb9759cc9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
- if (!rq->rx_notify_masked) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (!READ_ONCE(rq->rx_notify_masked) &&
+ napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
}
}
@@ -912,8 +913,10 @@ static int veth_poll(struct napi_struct *napi, int budget)
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
+ }
}
}
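
Both veth hunks above switch from setting rx_notify_masked and then calling napi_schedule() to first claiming the schedule via napi_schedule_prep() and only then publishing the flag, so two contexts cannot both believe they armed NAPI. A loose userspace analogue of that claim-then-publish pattern, with an atomic test-and-set standing in for napi_schedule_prep() (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct rq {
	atomic_flag napi_claimed;	/* stands in for NAPI_STATE_SCHED */
	atomic_bool rx_notify_masked;
};

static void try_schedule(struct rq *rq, int cpu)
{
	if (!atomic_load_explicit(&rq->rx_notify_masked,
				  memory_order_relaxed) &&
	    !atomic_flag_test_and_set(&rq->napi_claimed)) {
		/* we won the claim, now it is safe to publish the flag */
		atomic_store_explicit(&rq->rx_notify_masked, true,
				      memory_order_relaxed);
		printf("cpu%d schedules the poller\n", cpu);
	}
}

int main(void)
{
	struct rq rq = { .napi_claimed = ATOMIC_FLAG_INIT };

	atomic_init(&rq.rx_notify_masked, false);
	try_schedule(&rq, 0);
	try_schedule(&rq, 1);	/* loses the claim, does nothing */
	return 0;
}
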
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 0eb13e5df517..d99140960a82 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
{
struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
- char *alt_path;
+ char *alt_path = NULL;
int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+ if (fwctx->req->board_type)
+ alt_path = brcm_alt_fw_path(first->path,
+ fwctx->req->board_type);
if (alt_path) {
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
index 1364b0014488..208e73a16051 100644
--- a/drivers/net/wireless/intel/Makefile
+++ b/drivers/net/wireless/intel/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_IPW2200) += ipw2x00/
obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_IWLWIFI) += iwlwifi/
+obj-$(CONFIG_IWLMEI) += iwlwifi/
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c21c0c68849a..85e704283755 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
-config IWLWIFI_BCAST_FILTERING
- bool "Enable broadcast filtering"
- depends on IWLMVM
- help
- Say Y here to enable default bcast filtering configuration.
-
- Enabling broadcast filtering will drop any incoming wireless
- broadcast frames, except some very specific predefined
- patterns (e.g. incoming arp requests).
-
- If unsure, don't enable this option, as some programs might
- expect incoming broadcasts for their normal operations.
-
menu "Debugging Options"
config IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 790c96df58cb..c17ab53fcd8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -888,10 +888,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0703e41403a6..35b8856e511f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -502,11 +502,6 @@ enum iwl_legacy_cmds {
DEBUG_LOG_MSG = 0xf7,
/**
- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
- */
- BCAST_FILTER_CMD = 0xcf,
-
- /**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
MCAST_FILTER_CMD = 0xd0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
index dd62a63956b3..e44c70b7c790 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- * start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
- BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset: starting offset of this pattern.
- * @reserved1: reserved
- * @val: value to match - big endian (MSB is the first
- * byte to match from offset pos).
- * @mask: mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
- u8 offset_type;
- u8 offset;
- __le16 reserved1;
- __be32 val;
- __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
- BCAST_FILTER_FRAME_TYPE_ALL = 0,
- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- * only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
- u8 discard;
- u8 frame_type;
- u8 num_attrs;
- u8 reserved1;
- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
- u8 default_discard;
- u8 reserved1;
- __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
- u8 disable;
- u8 max_bcast_filters;
- u8 max_macs;
- u8 reserved1;
- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 173a6991587b..4a7723eb8c1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -752,7 +752,6 @@ struct iwl_lq_cmd {
u8 iwl_fw_rate_idx_to_plcp(int idx);
u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index e4ebda64cd52..efc6540d31af 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -181,7 +181,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
@@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index a21c3befd93b..a835214611ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
u32 iwl_new_rate_from_v1(u32 rate_v1)
{
u32 rate_v2 = 0;
@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
} else {
u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
- WARN_ON(legacy_rate < 0);
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
rate_v2 |= legacy_rate;
if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
}
IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return -1;
-}
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
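
On the WARN_ON change above: iwl_legacy_rate_to_fw_idx() returns u32, so its old "-1" sentinel became 0xffffffff and the check legacy_rate < 0 could never fire; the hunk moves to the named IWL_RATE_INVALID sentinel plus a fallback rate instead. A short standalone demonstration of why a signed test on an unsigned value is dead code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t legacy_rate = (uint32_t)-1;	/* old "-1" error return */

	/* prints: legacy_rate=4294967295, (legacy_rate < 0)=0 */
	printf("legacy_rate=%u, (legacy_rate < 0)=%d\n",
	       legacy_rate, legacy_rate < 0);
	return 0;
}
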
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index f90d4662c164..8e10ba88afb3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -329,6 +329,7 @@ enum {
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_3160 (0x0000164)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 83e3b731ad29..6651e78b39ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1707,6 +1707,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ /* drv has just been freed by the release */
+ failure = false;
free:
if (failure)
iwl_dealloc_ucode(drv);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index dd58c8f9aa11..04addf964d83 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -553,8 +553,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_REQ,
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index d9733aaf6f6e..2f7f0f994ca3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -146,6 +146,7 @@ struct iwl_mei_filters {
* @csme_taking_ownership: true when CSME is taking ownership. Used to remember
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
* @csa_throttle_end_wk: used when &csa_throttled is true
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
@@ -165,6 +166,7 @@ struct iwl_mei {
bool amt_enabled;
bool csa_throttled;
bool csme_taking_ownership;
+ bool link_prot_state;
struct delayed_work csa_throttle_end_wk;
spinlock_t data_q_lock;
@@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
- dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
- ret);
mem->ctrl = NULL;
return ret;
@@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+ mei->link_prot_state = status->link_prot_state;
+
/*
* Update the Rfkill state in case the host does not own the device:
* if we are in Link Protection, ask to not touch the device, else,
@@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* we have already a SAP connection */
- if (iwl_mei_is_connected())
+ if (iwl_mei_is_connected()) {
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state);
+ }
}
ret = 0;
@@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
#endif /* CONFIG_DEBUG_FS */
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
/*
* iwl_mei_probe - the probe function called by the mei bus enumeration
*
@@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
static int iwl_mei_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
+ int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
struct iwl_mei *mei;
int ret;
@@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
- /*
- * The CSME firmware needs to boot the internal WLAN client. Wait here
- * so that the DMA map request will succeed.
- */
- msleep(20);
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (!ret)
+ break;
+ /*
+ * The CSME firmware needs to boot the internal WLAN client.
+ * This can take time in certain configurations (usually
+ * upon resume and when the whole CSME firmware is shut down
+ * during suspend).
+ *
+ * Wait a bit before retrying and hope we'll succeed next time.
+ */
- ret = iwl_mei_alloc_shared_mem(cldev);
- if (ret)
+ dev_dbg(&cldev->dev,
+ "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+ ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+ msleep(100);
+ alloc_retry--;
+ } while (alloc_retry);
+
+ if (ret) {
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
goto free;
+ }
iwl_mei_init_shared_mem(mei);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 5f966af69720..468102a95e1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
bool match;
if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
- !pskb_may_pull(skb, skb_network_offset(skb) +
- sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+ !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
return false;
iphdrlen = ip_hdrlen(skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index fb4920b01dbb..445c94adb076 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -5,6 +5,7 @@
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/vmalloc.h>
+#include <linux/err.h>
#include <linux/ieee80211.h>
#include <linux/netdevice.h>
@@ -1369,189 +1370,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
return count;
}
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- const struct iwl_fw_bcast_filter *filter;
- char *buf;
- int bufsz = 1024;
- int i, j, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
- filter = &cmd.filters[i];
-
- ADD_TEXT("Filter [%d]:\n", i);
- ADD_TEXT("\tDiscard=%d\n", filter->discard);
- ADD_TEXT("\tFrame Type: %s\n",
- filter->frame_type ? "IPv4" : "Generic");
-
- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
- const struct iwl_fw_bcast_filter_attr *attr;
-
- attr = &filter->attrs[j];
- if (!attr->mask)
- break;
-
- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
- j, attr->offset,
- attr->offset_type ? "IP End" :
- "Payload Start",
- be32_to_cpu(attr->mask),
- be32_to_cpu(attr->val),
- le16_to_cpu(attr->reserved1));
- }
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- int pos, next_pos;
- struct iwl_fw_bcast_filter filter = {};
- struct iwl_bcast_filter_cmd cmd;
- u32 filter_id, attr_id, mask, value;
- int err = 0;
-
- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
- &filter.frame_type, &pos) != 3)
- return -EINVAL;
-
- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
- return -EINVAL;
-
- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
- attr_id++) {
- struct iwl_fw_bcast_filter_attr *attr =
- &filter.attrs[attr_id];
-
- if (pos >= count)
- break;
-
- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
- &attr->offset, &attr->offset_type,
- &mask, &value, &next_pos) != 4)
- return -EINVAL;
-
- attr->mask = cpu_to_be32(mask);
- attr->val = cpu_to_be32(value);
- if (mask)
- filter.num_attrs++;
-
- pos += next_pos;
- }
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
- &filter, sizeof(filter));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- char *buf;
- int bufsz = 1024;
- int i, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
- i, mac->default_discard, mac->attached_filters);
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_bcast_filter_cmd cmd;
- struct iwl_fw_bcast_mac mac = {};
- u32 mac_id, attached_filters;
- int err = 0;
-
- if (!mvm->bcast_filters)
- return -ENOENT;
-
- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
- &attached_filters) != 3)
- return -EINVAL;
-
- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
- mac.default_discard > 1 ||
- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
- return -EINVAL;
-
- mac.attached_filters = cpu_to_le16(attached_filters);
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
- &mac, sizeof(mac));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-#endif
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1881,11 +1699,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
#ifdef CONFIG_ACPI
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
@@ -2045,7 +1858,6 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
{
struct dentry *bcast_dir __maybe_unused;
- char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
@@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
- bcast_dir = debugfs_create_dir("bcast_filtering",
- mvm->debugfs_dir);
-
- debugfs_create_bool("override", 0600, bcast_dir,
- &mvm->dbgfs_bcast_filtering.override);
-
- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
- bcast_dir, 0600);
- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
- bcast_dir, 0600);
- }
-#endif
-
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
@@ -2142,6 +1939,11 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
* Create a symlink with mac80211. It will be removed when mac80211
* exits (before the opmode exits which removes the target.)
*/
- snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
- debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
+ if (!IS_ERR(mvm->debugfs_dir)) {
+ char buf[100];
+
+ snprintf(buf, 100, "../../%pd2", mvm->debugfs_dir->d_parent);
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir,
+ buf);
+ }
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6f4690e56a46..ae589b3b8c46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1741,7 +1741,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0)
ret = iwl_mvm_sar_geo_init(mvm);
- else if (ret < 0)
+ if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 65f4fe3ef504..709a3df57b10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- * be the vif's ip address. in case there is not a single
- * ip address (0, or more than 1), this attribute will
- * be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- * the LSB bytes of the vif's mac address
- */
-enum {
- BC_FILTER_MAGIC_NONE = 0,
- BC_FILTER_MAGIC_IP,
- BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
- {
- /* arp */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
- .attrs = {
- {
- /* frame type - arp, hw type - ethernet */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header),
- .val = cpu_to_be32(0x08060001),
- .mask = cpu_to_be32(0xffffffff),
- },
- {
- /* arp dest ip */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header) + 2 +
- sizeof(struct arphdr) +
- ETH_ALEN + sizeof(__be32) +
- ETH_ALEN,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
- },
- },
- },
- {
- /* dhcp offer bcast */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
- .attrs = {
- {
- /* udp dest port - 68 (bootp client)*/
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = offsetof(struct udphdr, dest),
- .val = cpu_to_be32(0x00440000),
- .mask = cpu_to_be32(0xffff0000),
- },
- {
- /* dhcp - lsb bytes of client hw address */
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = 38,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
- },
- },
- },
- /* last filter must be empty */
- {},
-};
-#endif
-
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.max_peers = IWL_MVM_TOF_MAX_APS,
.report_ap_tsf = 1,
@@ -299,7 +226,6 @@ static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
- [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
@@ -693,11 +619,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* assign default bcast filtering configuration */
- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -1853,162 +1774,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
- struct iwl_mvm *mvm;
- struct iwl_bcast_filter_cmd *cmd;
- u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
- const struct iwl_fw_bcast_filter *in_filter,
- struct iwl_fw_bcast_filter *out_filter)
-{
- struct iwl_fw_bcast_filter_attr *attr;
- int i;
-
- memcpy(out_filter, in_filter, sizeof(*out_filter));
-
- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
- attr = &out_filter->attrs[i];
-
- if (!attr->mask)
- break;
-
- switch (attr->reserved1) {
- case cpu_to_le16(BC_FILTER_MAGIC_IP):
- if (vif->bss_conf.arp_addr_cnt != 1) {
- attr->mask = 0;
- continue;
- }
-
- attr->val = vif->bss_conf.arp_addr_list[0];
- break;
- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
- attr->val = *(__be32 *)&vif->addr[2];
- break;
- default:
- break;
- }
- attr->reserved1 = 0;
- out_filter->num_attrs++;
- }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_bcast_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct iwl_bcast_filter_cmd *cmd = data->cmd;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_fw_bcast_mac *bcast_mac;
- int i;
-
- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
- return;
-
- bcast_mac = &cmd->macs[mvmvif->id];
-
- /*
- * enable filtering only for associated stations, but not for P2P
- * Clients
- */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
- !vif->bss_conf.assoc)
- return;
-
- bcast_mac->default_discard = 1;
-
- /* copy all configured filters */
- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
- /*
- * Make sure we don't exceed our filters limit.
- * if there is still a valid filter to be configured,
- * be on the safe side and just allow bcast for this mac.
- */
- if (WARN_ON_ONCE(data->current_filter >=
- ARRAY_SIZE(cmd->filters))) {
- bcast_mac->default_discard = 0;
- bcast_mac->attached_filters = 0;
- break;
- }
-
- iwl_mvm_set_bcast_filter(vif,
- &mvm->bcast_filters[i],
- &cmd->filters[data->current_filter]);
-
- /* skip current filter if it contains no attributes */
- if (!cmd->filters[data->current_filter].num_attrs)
- continue;
-
- /* attach the filter to current mac */
- bcast_mac->attached_filters |=
- cpu_to_le16(BIT(data->current_filter));
-
- data->current_filter++;
- }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd)
-{
- struct iwl_bcast_iter_data iter_data = {
- .mvm = mvm,
- .cmd = cmd,
- };
-
- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
- return false;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
- cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* use debugfs filters/macs if override is configured */
- if (mvm->dbgfs_bcast_filtering.override) {
- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
- sizeof(cmd->filters));
- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
- sizeof(cmd->macs));
- return true;
- }
-#endif
-
- /* if no filters are configured, do nothing */
- if (!mvm->bcast_filters)
- return false;
-
- /* configure and attach these filters for each associated sta vif */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bcast_filter_iterator, &iter_data);
-
- return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- struct iwl_bcast_filter_cmd cmd;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
- return 0;
-
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
-
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -2520,7 +2285,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
iwl_mvm_recalc_multicast(mvm);
- iwl_mvm_configure_bcast_filter(mvm);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -2570,11 +2334,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
- if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
- iwl_mvm_configure_bcast_filter(mvm);
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_apply_fw_smps_request(vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1dcbb0eb63c3..d78f40730594 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -884,17 +884,6 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* broadcast filters to configure for each associated station */
- const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct {
- bool override;
- struct iwl_bcast_filter_cmd cmd;
- } dbgfs_bcast_filtering;
-#endif
-#endif
-
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
struct iwl_mvm_int_sta snif_sta;
@@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -2225,7 +2212,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
- mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 87630d38dc52..1f8b97995b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -469,7 +469,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(MCC_CHUB_UPDATE_CMD),
HCMD_NAME(MARKER_CMD),
HCMD_NAME(BT_PROFILE_NOTIFICATION),
- HCMD_NAME(BCAST_FILTER_CMD),
HCMD_NAME(MCAST_FILTER_CMD),
HCMD_NAME(REPLY_SF_CFG_CMD),
HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6fa2c12f7955..9213f8518f10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
struct ieee80211_tx_rate *r = &info->status.rates[0];
if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 6)
+ TX_CMD, 0) <= 6)
rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
info->status.antenna =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
index 78450366312b..080a1587caa5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c
@@ -71,12 +71,13 @@ static int iwl_mvm_vendor_host_get_ownership(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
mutex_lock(&mvm->mutex);
- iwl_mvm_mei_get_ownership(mvm);
+ ret = iwl_mvm_mei_get_ownership(mvm);
mutex_unlock(&mvm->mutex);
- return 0;
+ return ret;
}
static const struct wiphy_vendor_command iwl_mvm_vendor_commands[] = {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0febdcacbd42..94f40c4d2421 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a63386a01232..ef14584fc0a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 8d54f9face2f..fc5725f6daee 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ if (!ieee80211_tx_prepare_skb(hwsim->hw,
+ hwsim->hw_scan_vif,
+ probe,
+ hwsim->tmp_chan->band,
+ NULL)) {
+ kfree_skb(probe);
+ continue;
+ }
+
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
@@ -3770,6 +3779,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
+
+ if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+ txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
out:
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d24b7a7993aa..990360d75cb6 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,6 +256,7 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
+ xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -675,7 +676,6 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
/* Not interested in this watch anymore. */
unregister_hotplug_status_watch(be);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
}
kfree(str);
}
@@ -824,15 +824,11 @@ static void connect(struct backend_info *be)
xenvif_carrier_on(be->vif);
unregister_hotplug_status_watch(be);
- if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
- NULL, hotplug_status_changed,
- "%s/%s", dev->nodename,
- "hotplug-status");
- if (err)
- goto err;
+ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
+ hotplug_status_changed,
+ "%s/%s", dev->nodename, "hotplug-status");
+ if (!err)
be->have_hotplug_status_watch = 1;
- }
netif_tx_wake_all_queues(be->vif->dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8b18246ad999..7748f07e2cf1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -842,6 +842,28 @@ static int xennet_close(struct net_device *dev)
return 0;
}
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+ struct netfront_queue *queue = &info->queues[i];
+
+ if (netif_running(info->netdev))
+ napi_disable(&queue->napi);
+ netif_napi_del(&queue->napi);
+ }
+
+ kfree(info->queues);
+ info->queues = NULL;
+}
+
+static void xennet_uninit(struct net_device *dev)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ xennet_destroy_queues(np);
+}
+
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
unsigned long flags;
@@ -1611,6 +1633,7 @@ static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
static const struct net_device_ops xennet_netdev_ops = {
+ .ndo_uninit = xennet_uninit,
.ndo_open = xennet_open,
.ndo_stop = xennet_close,
.ndo_start_xmit = xennet_start_xmit,
@@ -2103,22 +2126,6 @@ error:
return err;
}
-static void xennet_destroy_queues(struct netfront_info *info)
-{
- unsigned int i;
-
- for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
- struct netfront_queue *queue = &info->queues[i];
-
- if (netif_running(info->netdev))
- napi_disable(&queue->napi);
- netif_napi_del(&queue->napi);
- }
-
- kfree(info->queues);
- info->queues = NULL;
-}
-
static int xennet_create_page_pool(struct netfront_queue *queue)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index fede05151f69..4081fc538ff4 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -168,6 +168,18 @@ static enum ntb_topo gen4_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
return NTB_TOPO_NONE;
}
+static enum ntb_topo spr_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
+{
+ switch (ppd & SPR_PPD_TOPO_MASK) {
+ case SPR_PPD_TOPO_B2B_USD:
+ return NTB_TOPO_B2B_USD;
+ case SPR_PPD_TOPO_B2B_DSD:
+ return NTB_TOPO_B2B_DSD;
+ }
+
+ return NTB_TOPO_NONE;
+}
+
int gen4_init_dev(struct intel_ntb_dev *ndev)
{
struct pci_dev *pdev = ndev->ntb.pdev;
@@ -183,7 +195,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
}
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
- ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ if (pdev_is_ICX(pdev))
+ ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
+ else if (pdev_is_SPR(pdev))
+ ndev->ntb.topo = spr_ppd_topo(ndev, ppd1);
dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd1,
ntb_topo_string(ndev->ntb.topo));
if (ndev->ntb.topo == NTB_TOPO_NONE)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
index 3fcd3fdce9ed..f91323eaf5ce 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -49,10 +49,14 @@
#define GEN4_PPD_CLEAR_TRN 0x0001
#define GEN4_PPD_LINKTRN 0x0008
#define GEN4_PPD_CONN_MASK 0x0300
+#define SPR_PPD_CONN_MASK 0x0700
#define GEN4_PPD_CONN_B2B 0x0200
#define GEN4_PPD_DEV_MASK 0x1000
#define GEN4_PPD_DEV_DSD 0x1000
#define GEN4_PPD_DEV_USD 0x0000
+#define SPR_PPD_DEV_MASK 0x4000
+#define SPR_PPD_DEV_DSD 0x4000
+#define SPR_PPD_DEV_USD 0x0000
#define GEN4_LINK_CTRL_LINK_DISABLE 0x0010
#define GEN4_SLOTSTS 0xb05a
@@ -62,6 +66,10 @@
#define GEN4_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_USD)
#define GEN4_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | GEN4_PPD_DEV_DSD)
+#define SPR_PPD_TOPO_MASK (SPR_PPD_CONN_MASK | SPR_PPD_DEV_MASK)
+#define SPR_PPD_TOPO_B2B_USD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_USD)
+#define SPR_PPD_TOPO_B2B_DSD (GEN4_PPD_CONN_B2B | SPR_PPD_DEV_DSD)
+
#define GEN4_DB_COUNT 32
#define GEN4_DB_LINK 32
#define GEN4_DB_LINK_BIT BIT_ULL(GEN4_DB_LINK)
@@ -112,4 +120,12 @@ static inline int pdev_is_ICX(struct pci_dev *pdev)
return 0;
}
+static inline int pdev_is_SPR(struct pci_dev *pdev)
+{
+ if (pdev_is_gen4(pdev) &&
+ pdev->revision > PCI_DEVICE_REVISION_ICX_MAX)
+ return 1;
+ return 0;
+}
+
#endif
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index dd683cb58d09..6295e55ef85e 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -33,7 +33,6 @@ int ntb_msi_init(struct ntb_dev *ntb,
{
phys_addr_t mw_phys_addr;
resource_size_t mw_size;
- size_t struct_size;
int peer_widx;
int peers;
int ret;
@@ -43,9 +42,8 @@ int ntb_msi_init(struct ntb_dev *ntb,
if (peers <= 0)
return -EINVAL;
- struct_size = sizeof(*ntb->msi) + sizeof(*ntb->msi->peer_mws) * peers;
-
- ntb->msi = devm_kzalloc(&ntb->dev, struct_size, GFP_KERNEL);
+ ntb->msi = devm_kzalloc(&ntb->dev, struct_size(ntb->msi, peer_mws, peers),
+ GFP_KERNEL);
if (!ntb->msi)
return -ENOMEM;
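
The allocation above switches from open-coded size arithmetic to the struct_size() helper from <linux/overflow.h>, which computes the size of a structure ending in a flexible array member and saturates instead of wrapping on overflow. A standalone sketch of the arithmetic it stands in for, using a hypothetical structure (not the NTB one):

    #include <stddef.h>
    #include <stdlib.h>

    struct msi_sketch {
            int irq_base;                                 /* stand-in fixed fields */
            struct { unsigned long addr; } peer_mws[];    /* flexible array member */
    };

    static void *alloc_msi_sketch(size_t peers)
    {
            /* struct_size(ptr, peer_mws, peers) is essentially this sum,
             * plus overflow checking that the open-coded form lacked. */
            size_t sz = sizeof(struct msi_sketch) +
                        peers * sizeof(((struct msi_sketch *)0)->peer_mws[0]);

            return calloc(1, sz);
    }

    int main(void)
    {
            void *msi = alloc_msi_sketch(4);   /* room for 4 peer memory windows */

            free(msi);
            return 0;
    }
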
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5e0bfda04bd7..fd4720d37cc0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_complete_batch_req(struct request *req)
{
+ trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
nvme_end_req_zoned(req);
}
@@ -1722,7 +1723,7 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0;
}
-static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
+static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
{
struct nvme_ctrl *ctrl = ns->ctrl;
@@ -1738,7 +1739,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
- return 0;
+ return;
+
if (ctrl->ops->flags & NVME_F_FABRICS) {
/*
* The NVMe over Fabrics specification only supports metadata as
@@ -1746,7 +1748,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* remap the separate metadata buffer from the block layer.
*/
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
- return -EINVAL;
+ return;
ns->features |= NVME_NS_EXT_LBAS;
@@ -1773,8 +1775,6 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
else
ns->features |= NVME_NS_METADATA_SUPPORTED;
}
-
- return 0;
}
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
@@ -1915,9 +1915,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
ns->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue);
- ret = nvme_configure_metadata(ns, id);
- if (ret)
- goto out_unfreeze;
+ nvme_configure_metadata(ns, id);
nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id);
@@ -1933,7 +1931,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
if (blk_queue_is_zoned(ns->queue)) {
ret = nvme_revalidate_zones(ns);
if (ret && !nvme_first_scan(ns->disk))
- goto out;
+ return ret;
}
if (nvme_ns_head_multipath(ns->head)) {
@@ -1948,16 +1946,16 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
return 0;
out_unfreeze:
- blk_mq_unfreeze_queue(ns->disk->queue);
-out:
/*
* If probing fails due an unsupported feature, hide the block device,
* but still allow other access.
*/
if (ret == -ENODEV) {
ns->disk->flags |= GENHD_FL_HIDDEN;
+ set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
+ blk_mq_unfreeze_queue(ns->disk->queue);
return ret;
}
@@ -4253,7 +4251,14 @@ static void nvme_async_event_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, async_event_work);
nvme_aen_uevent(ctrl);
- ctrl->ops->submit_async_event(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -4566,7 +4571,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
- blk_set_queue_dying(ns->queue);
+ blk_mark_disk_dead(ns->disk);
nvme_start_ns_queue(ns);
set_capacity_and_notify(ns->disk, 0);
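
The new comment in nvme_async_event_work() documents an ordering contract rather than a local fix: the work only submits an AER while the controller is LIVE, and the transports (see the rdma.c and tcp.c hunks below, which add flush_work()) must flush this work after leaving LIVE and before freeing the admin queue. A generic workqueue sketch of why that ordering closes the race, assuming the work item has already been set up with INIT_WORK():

    #include <linux/workqueue.h>
    #include <linux/atomic.h>

    static atomic_t ctrl_live = ATOMIC_INIT(1);   /* stand-in for ctrl->state */
    static struct work_struct async_event_work;   /* assumed INIT_WORK()ed    */

    static void async_event_fn(struct work_struct *work)
    {
            if (!atomic_read(&ctrl_live))
                    return;   /* no longer LIVE: do not touch the admin queue */
            /* ... submit the async event on the (still valid) admin queue ... */
    }

    static void error_recovery_teardown(void)
    {
            atomic_set(&ctrl_live, 0);          /* 1: leave LIVE first           */
            flush_work(&async_event_work);      /* 2: wait out an in-flight run  */
            /* 3: only now is it safe to tear down or free the admin queue      */
    }
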
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c3203ff1c654..1e3a09cad961 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -170,6 +170,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f8bf6606eb2f..ff775235534c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- blk_set_queue_dying(head->disk->queue);
+ blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 850f84d204d0..9c55e4be8a39 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ceb28675fdf..65e00c64a588 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -44,6 +44,8 @@ struct nvme_tcp_request {
u32 data_len;
u32 pdu_len;
u32 pdu_sent;
+ u32 h2cdata_left;
+ u32 h2cdata_offset;
u16 ttag;
__le16 status;
struct list_head entry;
@@ -95,6 +97,7 @@ struct nvme_tcp_queue {
struct nvme_tcp_request *request;
int queue_size;
+ u32 maxh2cdata;
size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl;
unsigned long flags;
@@ -572,23 +575,26 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
return ret;
}
-static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
- struct nvme_tcp_r2t_pdu *pdu)
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
{
struct nvme_tcp_data_pdu *data = req->pdu;
struct nvme_tcp_queue *queue = req->queue;
struct request *rq = blk_mq_rq_from_pdu(req);
+ u32 h2cdata_sent = req->pdu_len;
u8 hdgst = nvme_tcp_hdgst_len(queue);
u8 ddgst = nvme_tcp_ddgst_len(queue);
req->state = NVME_TCP_SEND_H2C_PDU;
req->offset = 0;
- req->pdu_len = le32_to_cpu(pdu->r2t_length);
+ req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
req->pdu_sent = 0;
+ req->h2cdata_left -= req->pdu_len;
+ req->h2cdata_offset += h2cdata_sent;
memset(data, 0, sizeof(*data));
data->hdr.type = nvme_tcp_h2c_data;
- data->hdr.flags = NVME_TCP_F_DATA_LAST;
+ if (!req->h2cdata_left)
+ data->hdr.flags = NVME_TCP_F_DATA_LAST;
if (queue->hdr_digest)
data->hdr.flags |= NVME_TCP_F_HDGST;
if (queue->data_digest)
@@ -597,9 +603,9 @@ static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->hdr.pdo = data->hdr.hlen + hdgst;
data->hdr.plen =
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
- data->ttag = pdu->ttag;
+ data->ttag = req->ttag;
data->command_id = nvme_cid(rq);
- data->data_offset = pdu->r2t_offset;
+ data->data_offset = cpu_to_le32(req->h2cdata_offset);
data->data_length = cpu_to_le32(req->pdu_len);
}
@@ -609,6 +615,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
struct nvme_tcp_request *req;
struct request *rq;
u32 r2t_length = le32_to_cpu(pdu->r2t_length);
+ u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
@@ -633,14 +640,19 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return -EPROTO;
}
- if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+ if (unlikely(r2t_offset < req->data_sent)) {
dev_err(queue->ctrl->ctrl.device,
"req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+ rq->tag, r2t_offset, req->data_sent);
return -EPROTO;
}
- nvme_tcp_setup_h2c_data_pdu(req, pdu);
+ req->pdu_len = 0;
+ req->h2cdata_left = r2t_length;
+ req->h2cdata_offset = r2t_offset;
+ req->ttag = pdu->ttag;
+
+ nvme_tcp_setup_h2c_data_pdu(req);
nvme_tcp_queue_request(req, false, true);
return 0;
@@ -913,13 +925,22 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
int req_data_len = req->data_len;
+ u32 h2cdata_left = req->h2cdata_left;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
@@ -964,7 +985,10 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
req->state = NVME_TCP_SEND_DDGST;
req->offset = 0;
} else {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
}
return 1;
}
@@ -1022,9 +1046,14 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
if (queue->hdr_digest && !req->offset)
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
- ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
- offset_in_page(pdu) + req->offset, len,
- MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ if (!req->h2cdata_left)
+ ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
+ else
+ ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE);
if (unlikely(ret <= 0))
return ret;
@@ -1044,6 +1073,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
size_t offset = req->offset;
+ u32 h2cdata_left = req->h2cdata_left;
int ret;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
@@ -1061,7 +1091,10 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
return ret;
if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
- nvme_tcp_done_send_req(queue);
+ if (h2cdata_left)
+ nvme_tcp_setup_h2c_data_pdu(req);
+ else
+ nvme_tcp_done_send_req(queue);
return 1;
}
@@ -1253,6 +1286,7 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
struct msghdr msg = {};
struct kvec iov;
bool ctrl_hdgst, ctrl_ddgst;
+ u32 maxh2cdata;
int ret;
icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
@@ -1336,6 +1370,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
goto free_icresp;
}
+ maxh2cdata = le32_to_cpu(icresp->maxdata);
+ if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
+ pr_err("queue %d: invalid maxh2cdata returned %u\n",
+ nvme_tcp_queue_id(queue), maxh2cdata);
+ goto free_icresp;
+ }
+ queue->maxh2cdata = maxh2cdata;
+
ret = 0;
free_icresp:
kfree(icresp);
@@ -2096,6 +2138,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
@@ -2320,6 +2363,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_sent = 0;
req->pdu_len = 0;
req->pdu_sent = 0;
+ req->h2cdata_left = 0;
req->data_len = blk_rq_nr_phys_segments(rq) ?
blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
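
The MAXH2CDATA value negotiated in the ICResp caps how much data one H2CData PDU may carry, so a single R2T is now answered with as many H2C PDUs as needed, only the final one flagged DATA_LAST; an ICResp advertising a value that is not dword-aligned or is below the transport's minimum (NVME_TCP_MIN_MAXH2CDATA) is rejected. A standalone sketch of the chunking arithmetic:

    /* Standalone sketch of the R2T-to-H2C chunking introduced above: a
     * solicited transfer of r2t_length bytes is emitted as PDUs of at
     * most maxh2cdata bytes each, only the last one flagged DATA_LAST. */
    #include <stdint.h>
    #include <stdio.h>

    static void emit_h2c_pdus(uint32_t r2t_offset, uint32_t r2t_length,
                              uint32_t maxh2cdata)
    {
            uint32_t left = r2t_length, offset = r2t_offset;

            while (left) {
                    uint32_t len = left < maxh2cdata ? left : maxh2cdata;

                    printf("H2CData: offset=%u len=%u%s\n", offset, len,
                           left == len ? " [LAST]" : "");
                    offset += len;
                    left -= len;
            }
    }

    int main(void)
    {
            emit_h2c_pdus(0, 70000, 0x10000);   /* two PDUs: 65536 + 4464 */
            return 0;
    }
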
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 23a38dcf0fc4..9fd1602b539d 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -771,7 +771,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (config->wp_gpio)
nvmem->wp_gpio = config->wp_gpio;
- else
+ else if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ad85ff6474ff..ec315b060cd5 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -648,8 +648,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
}
fdt_scan_reserved_mem();
- fdt_init_reserved_mem();
fdt_reserve_elfcorehdr();
+ fdt_init_reserved_mem();
}
/**
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 70992103c07d..2c2fb161b572 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -513,24 +513,24 @@ static void __init of_unittest_parse_phandle_with_args(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells", 1, &args);
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
rc = of_count_phandle_with_args(np, "phandle-list-bad-args",
"#phandle-cells");
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-a: #phandle-cells = 3 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -670,12 +670,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void)
memset(&args, 0, sizeof(args));
EXPECT_BEGIN(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-args",
"phandle", 1, &args);
EXPECT_END(KERN_INFO,
- "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found -1");
+ "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1");
unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
}
@@ -1257,12 +1257,12 @@ static void __init of_unittest_platform_populate(void)
unittest(pdev, "device 2 creation failed\n");
EXPECT_BEGIN(KERN_INFO,
- "platform testcase-data:testcase-device2: IRQ index 0 not found");
+ "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
irq = platform_get_irq(pdev, 0);
EXPECT_END(KERN_INFO,
- "platform testcase-data:testcase-device2: IRQ index 0 not found");
+ "platform testcase-data:testcase-device2: error -ENXIO: IRQ index 0 not found");
unittest(irq < 0 && irq != -EPROBE_DEFER,
"device parsing error failed - %d\n", irq);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 059566f54429..9be007c9420f 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usg_calls++;
#endif
- while(sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ccio_unmap_page(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e60690d38d67..374b9199878d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
- while (sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
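
Both hunks above (ccio-dma.c and sba_iommu.c) reorder the unmap loop so the remaining-entry count is tested before sg_dma_len() is read; with the old order, once the mapped entries were consumed the condition still dereferenced the next scatterlist entry, which may lie past the end of the list. A minimal standalone sketch of the corrected loop shape:

    struct sg_sketch { unsigned int dma_len; };   /* stand-in for scatterlist */

    static void unmap_all(struct sg_sketch *sg, int nents)
    {
            while (nents && sg->dma_len) {   /* count gates every access */
                    /* ... unmap sg->dma_len bytes for this entry ... */
                    ++sg;
                    nents--;
            }
    }
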
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 489586a4cdc7..768d33f9ebc8 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -356,8 +356,8 @@ static int j721e_pcie_probe(struct platform_device *pdev)
const struct j721e_pcie_data *data;
struct cdns_pcie *cdns_pcie;
struct j721e_pcie *pcie;
- struct cdns_pcie_rc *rc;
- struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc = NULL;
+ struct cdns_pcie_ep *ep = NULL;
struct gpio_desc *gpiod;
void __iomem *base;
struct clk *clk;
@@ -376,6 +376,46 @@ static int j721e_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
+ switch (mode) {
+ case PCI_MODE_RC:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ case PCI_MODE_EP:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return 0;
+ }
+
pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
@@ -426,28 +466,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
- if (!bridge) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
-
- if (!data->byte_access_allowed)
- bridge->ops = &cdns_ti_pcie_host_ops;
- rc = pci_host_bridge_priv(bridge);
- rc->quirk_retrain_flag = data->quirk_retrain_flag;
- rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &rc->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
@@ -497,23 +515,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
- ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &ep->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
dev_err(dev, "Failed to init phy\n");
@@ -525,8 +526,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_pcie_setup;
break;
- default:
- dev_err(dev, "INVALID device type %d\n", mode);
}
return 0;
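
Moving the RC/EP-specific allocation ahead of the resource-acquiring part of probe is what lets the early exits return plain error codes: everything allocated there is devm-managed and needs no explicit unwinding, while failures after pm_runtime_get_sync() still jump to the existing err_get_sync/err_pcie_setup labels. A small ordering sketch (illustrative only, not the driver's code):

    #include <linux/device.h>
    #include <linux/slab.h>

    static int probe_order_sketch(struct device *dev)
    {
            void *priv = devm_kzalloc(dev, 64, GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;   /* nothing to undo yet: plain return is fine */

            /* Only after the easy-to-clean-up part: runtime PM, clocks,
             * PHYs, ...  From here on, every failure must goto a label
             * that releases exactly what has been acquired so far. */
            return 0;
    }
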
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index fa6886d66488..c625fc6bb287 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -756,22 +756,28 @@ static int __exit kirin_pcie_remove(struct platform_device *pdev)
return 0;
}
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
static const struct of_device_id kirin_pcie_match[] = {
- {
- .compatible = "hisilicon,kirin960-pcie",
- .data = (void *)PCIE_KIRIN_INTERNAL_PHY
- },
- {
- .compatible = "hisilicon,kirin970-pcie",
- .data = (void *)PCIE_KIRIN_EXTERNAL_PHY
- },
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
{},
};
static int kirin_pcie_probe(struct platform_device *pdev)
{
- enum pcie_kirin_phy_type phy_type;
struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
int ret;
@@ -781,13 +787,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
- phy_type = (long)of_device_get_match_data(dev);
- if (!phy_type) {
+ data = of_device_get_match_data(dev);
+ if (!data) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
-
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
return -ENOMEM;
@@ -800,7 +805,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->ops = &kirin_dw_pcie_ops;
pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
- kirin_pcie->type = phy_type;
+ kirin_pcie->type = data->phy_type;
ret = kirin_pcie_get_resource(kirin_pcie, pdev);
if (ret)
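
The match data here moves from an enum value cast through the .data pointer to a pointer to a const per-SoC struct, the usual shape once more than one parameter may be needed per compatible. A minimal sketch of the pattern (driver and struct names are illustrative, not the kirin driver's):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct foo_data {
            int phy_type;
            /* room to grow: clock names, quirk flags, ... */
    };

    static const struct foo_data foo_v1_data = { .phy_type = 1 };
    static const struct foo_data foo_v2_data = { .phy_type = 2 };

    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo-v1", .data = &foo_v1_data },
            { .compatible = "vendor,foo-v2", .data = &foo_v2_data },
            { }
    };

    static int foo_probe(struct platform_device *pdev)
    {
            const struct foo_data *data = of_device_get_match_data(&pdev->dev);

            if (!data)
                    return -EINVAL;   /* no match data: refuse to probe */
            /* use data->phy_type ... */
            return 0;
    }
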
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 20ea2ee330b8..ae0bc2fee4ca 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -2155,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
if (!hv_dev)
continue;
- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+ * (e.g. in a KDUMP kernel) or with NUMA disabled via
+ * "numa=off". In those cases, adjust the host provided
+ * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
put_pcichild(hv_dev);
}
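
numa_map_to_online_node() maps a possibly-offline node to a nearby online one, and the added bound check ignores node numbers this kernel cannot represent at all (for example in a kdump boot or with "numa=off"). A small sketch of the guard, generic rather than Hyper-V specific:

    #include <linux/device.h>
    #include <linux/numa.h>
    #include <linux/nodemask.h>

    static void assign_device_node(struct device *dev, u32 fw_node, bool has_affinity)
    {
            if (!has_affinity || fw_node >= num_possible_nodes())
                    return;   /* keep the default node assignment */

            /* clamp an offline node to an online one before binding */
            set_dev_node(dev, numa_map_to_online_node(fw_node));
    }
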
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 71258ea3d35f..f8e82c5e2d87 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1329,7 +1329,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
* indirectly via kernel emulated PCI bridge driver.
*/
mvebu_pcie_setup_hw(port);
- mvebu_pcie_set_local_dev_nr(port, 0);
+ mvebu_pcie_set_local_dev_nr(port, 1);
+ mvebu_pcie_set_local_bus_nr(port, 0);
}
pcie->nports = i;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index cc166c683638..eb05cceab964 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -99,11 +99,13 @@ struct vmd_irq {
* @srcu: SRCU struct for local synchronization.
* @count: number of child IRQs assigned to this vector; used to track
* sharing.
+ * @virq: The underlying VMD Linux interrupt number
*/
struct vmd_irq_list {
struct list_head irq_list;
struct srcu_struct srcu;
unsigned int count;
+ unsigned int virq;
};
struct vmd_dev {
@@ -253,7 +255,6 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
struct msi_desc *desc = arg->desc;
struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
- unsigned int index, vector;
if (!vmdirq)
return -ENOMEM;
@@ -261,10 +262,8 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
INIT_LIST_HEAD(&vmdirq->node);
vmdirq->irq = vmd_next_irq(vmd, desc);
vmdirq->virq = virq;
- index = index_from_irqs(vmd, vmdirq->irq);
- vector = pci_irq_vector(vmd->dev, index);
- irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
+ irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
handle_untracked_irq, vmd, NULL);
return 0;
}
@@ -685,7 +684,8 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
return err;
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
- err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
+ vmd->irqs[i].virq = pci_irq_vector(dev, i);
+ err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,
vmd->name, &vmd->irqs[i]);
if (err)
@@ -969,7 +969,7 @@ static int vmd_suspend(struct device *dev)
int i;
for (i = 0; i < vmd->msix_count; i++)
- devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
+ devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);
return 0;
}
@@ -981,7 +981,7 @@ static int vmd_resume(struct device *dev)
int err, i;
for (i = 0; i < vmd->msix_count; i++) {
- err = devm_request_irq(dev, pci_irq_vector(pdev, i),
+ err = devm_request_irq(dev, vmd->irqs[i].virq,
vmd_irq, IRQF_NO_THREAD,
vmd->name, &vmd->irqs[i]);
if (err)
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index c19c7ca58186..9037a7827eca 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -1111,7 +1111,8 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
if (!desc)
return cpu_possible_mask;
- if (WARN_ON_ONCE(!desc->affinity))
+ /* MSI[X] interrupts can be allocated without affinity descriptor */
+ if (!desc->affinity)
return NULL;
/*
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index bda630889f95..604feeb84ee4 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -166,6 +166,9 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = -1;
+
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
@@ -314,10 +317,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- int status, capabilities, irq_services, i, nr_service;
- int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = {
- [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1
- };
+ int status, capabilities, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
/* Enable PCI Express port device */
status = pci_enable_device(dev);
@@ -330,32 +331,18 @@ int pcie_port_device_register(struct pci_dev *dev)
return 0;
pci_set_master(dev);
-
- irq_services = 0;
- if (IS_ENABLED(CONFIG_PCIE_PME))
- irq_services |= PCIE_PORT_SERVICE_PME;
- if (IS_ENABLED(CONFIG_PCIEAER))
- irq_services |= PCIE_PORT_SERVICE_AER;
- if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
- irq_services |= PCIE_PORT_SERVICE_HP;
- if (IS_ENABLED(CONFIG_PCIE_DPC))
- irq_services |= PCIE_PORT_SERVICE_DPC;
- irq_services &= capabilities;
-
- if (irq_services) {
- /*
- * Initialize service IRQs. Don't use service devices that
- * require interrupts if there is no way to generate them.
- * However, some drivers may have a polling mode (e.g.
- * pciehp_poll_mode) that can be used in the absence of IRQs.
- * Allow them to determine if that is to be used.
- */
- status = pcie_init_service_irqs(dev, irqs, irq_services);
- if (status) {
- irq_services &= PCIE_PORT_SERVICE_HP;
- if (!irq_services)
- goto error_disable;
- }
+ /*
+ * Initialize service irqs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
+ */
+ status = pcie_init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_HP;
+ if (!capabilities)
+ goto error_disable;
}
/* Allocate child services if any */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index d2dd6a6cda60..65f7f6b0576c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5344,11 +5344,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
*/
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
- if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
- (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
- (pdev->device == 0x7341 && pdev->revision != 0x00))
- return;
-
if (pdev->device == 0x15d8) {
if (pdev->revision == 0xcf &&
pdev->subsystem_vendor == 0xea50 &&
@@ -5370,10 +5365,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
/* AMD Iceland dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
/* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats);
/* AMD Navi14 dGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats);
/* AMD Raven platform iGPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index cd2332bf0e31..fdbd64c03e12 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy)
(DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) |
(DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24));
regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1,
- DIV_ROUND_UP(priv->config.clk_pre, temp));
+ DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE));
regmap_write(priv->regmap, MIPI_DSI_HS_TIM,
DIV_ROUND_UP(priv->config.hs_exit, temp) |
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index f81e23742079..849c4204f550 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -97,8 +97,7 @@ config PHY_BRCM_USB
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB if ARCH_BRCMSTB
- default ARCH_BCM4908
- default ARCH_BRCMSTB
+ default ARCH_BCM4908 || ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
This driver is required by the USB XHCI, EHCI and OHCI
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 116fb23aebd9..0f1deb6e0eab 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -18,6 +18,7 @@
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/suspend.h>
#include "phy-brcm-usb-init.h"
@@ -70,12 +71,35 @@ struct brcm_usb_phy_data {
int init_count;
int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+ struct notifier_block pm_notifier;
+ bool pm_active;
};
static s8 *node_reg_names[BRCM_REGS_MAX] = {
"crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
};
+static int brcm_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event,
+ void *unused)
+{
+ struct brcm_usb_phy_data *priv =
+ container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ priv->pm_active = true;
+ break;
+ case PM_POST_RESTORE:
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ priv->pm_active = false;
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
{
struct phy *gphy = dev_id;
@@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
/*
* Use a lock to make sure a second caller waits until
* the base phy is inited before using it.
@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
dev_dbg(&gphy->dev, "EXIT\n");
if (phy->id == BRCM_USB_PHY_2_0)
brcm_usb_uninit_eohci(&priv->ini);
@@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (err)
return err;
+ priv->pm_notifier.notifier_call = brcm_pm_notifier;
+ register_pm_notifier(&priv->pm_notifier);
+
mutex_init(&priv->mutex);
/* make sure invert settings are correct */
@@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
static int brcm_usb_phy_remove(struct platform_device *pdev)
{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
+
sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
+ unregister_pm_notifier(&priv->pm_notifier);
return 0;
}
@@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
+ dev_dbg(dev, "SUSPEND\n");
priv->ini.wake_enabled = device_may_wakeup(dev);
if (priv->phys[BRCM_USB_PHY_3_0].inited)
brcm_usb_uninit_xhci(&priv->ini);
@@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev)
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ dev_dbg(dev, "RESUME\n");
if (priv->wake_irq >= 0)
disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index da24acd26666..e265647e29a2 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -1338,7 +1338,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct cdns_sierra_data *data;
unsigned int id_value;
- int i, ret, node = 0;
+ int ret, node = 0;
void __iomem *base;
struct device_node *dn = dev->of_node, *child;
@@ -1416,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- goto put_child2;
+ of_node_put(child);
+ goto put_control;
}
if (!sp->autoconf) {
@@ -1424,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
}
@@ -1434,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
@@ -1446,26 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (sp->num_lanes > SIERRA_MAX_LANES) {
ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
- goto put_child2;
+ goto put_control;
}
/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1) {
ret = cdns_sierra_phy_configure_multilink(sp);
if (ret)
- goto put_child2;
+ goto put_control;
}
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-
-put_child:
- node++;
-put_child2:
- for (i = 0; i < node; i++)
- reset_control_put(sp->phys[i].lnk_rst);
- of_node_put(child);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_control;
+ }
+
+ return 0;
+
+put_control:
+ while (--node >= 0)
+ reset_control_put(sp->phys[node].lnk_rst);
clk_disable:
cdns_sierra_phy_disable_clocks(sp);
reset_control_assert(sp->apb_rst);
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 6d307102f4f6..8ee7682b8e93 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -992,7 +992,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
/* no efuse, ignore it */
if (!instance->efuse_intr &&
!instance->efuse_rx_imp &&
- !instance->efuse_rx_imp) {
+ !instance->efuse_tx_imp) {
dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
instance->efuse_sw_en = 0;
break;
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 288c9c67aa74..ccb4045685cd 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
cfg->clk_miss = 0;
cfg->clk_post = 60000 + 52 * ui;
- cfg->clk_pre = 8000;
+ cfg->clk_pre = 8;
cfg->clk_prepare = 38000;
cfg->clk_settle = 95000;
cfg->clk_term_en = 0;
@@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg)
if (cfg->clk_post < (60000 + 52 * ui))
return -EINVAL;
- if (cfg->clk_pre < 8000)
+ if (cfg->clk_pre < 8)
return -EINVAL;
if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000)
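
clk_pre is specified by the D-PHY spec in UI (bit times), not picoseconds, so the default and the validation now use 8 UI, and the consumers (the Amlogic hunk above and the Rockchip hunk below) convert UI to HS byte-clock cycles with DIV_ROUND_UP(clk_pre, BITS_PER_BYTE). A worked example of the conversion:

    /* Worked example of the unit change: one HS byte clock period is
     * 8 UI, so the counter the PHYs program is the UI value rounded up
     * to byte-clock cycles. */
    #include <stdio.h>

    #define BITS_PER_BYTE 8
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int clk_pre_ui = 8;   /* spec minimum: 8 UI */

            printf("clk_pre counter = %u byte-clock cycle(s)\n",
                   DIV_ROUND_UP(clk_pre_ui, BITS_PER_BYTE));   /* -> 1 */
            return 0;
    }
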
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 347dc79a18c1..630e01b5c19b 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -5,6 +5,7 @@
* Author: Wyon Bi <bivvy.bi@rock-chips.com>
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
@@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
/*
* The value of counter for HS Tlpx Time
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 2ce9bfd783d4..007a23c78d56 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc)
ret = __stm32_usbphyc_pll_disable(usbphyc);
if (ret)
- return ret;
+ goto dec_n_pll_cons;
}
ret = stm32_usbphyc_regulators_enable(usbphyc);
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index b3384c31637a..da546c35d1d5 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = {
{ .val = 1, .div = 2, },
{ .val = 2, .div = 4, },
{ .val = 3, .div = 8, },
+ { /* sentinel */ },
};
static const struct wiz_clk_div_sel clk_div_sel[] = {
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index f478d8a17115..9be9535ad7ab 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -134,7 +134,8 @@
#define PROT_BUS_WIDTH_10 0x0
#define PROT_BUS_WIDTH_20 0x1
#define PROT_BUS_WIDTH_40 0x2
-#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2)
+#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2)
/* Number of GT lanes */
#define NUM_LANES 4
@@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
+ u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
/* Set SGMII protocol TX and RX bus width to 10 bits. */
- xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
- xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+ xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
+ xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
xpsgtr_bypass_scrambler_8b10b(gtr_phy);
}
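
Each lane owns a 2-bit field in the TX/RX protocol bus-width registers, so a plain write would clobber the other lanes' settings; the patch switches to a read-modify-write keyed by per-lane mask/shift macros. A standalone sketch of the field update, with the register modeled as a plain variable and GENMASK expanded for host compilation:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define WIDTH_SHIFT(n) ((n) * 2)
    #define WIDTH_MASK(n)  GENMASK((n) * 2 + 1, (n) * 2)
    #define WIDTH_10       0x0u

    int main(void)
    {
            uint32_t reg = 0xff;            /* all four lanes have some width set */
            unsigned int lane = 1;

            /* clear only lane 1's 2-bit field, then set it to 10-bit width */
            reg = (reg & ~WIDTH_MASK(lane)) | (WIDTH_10 << WIDTH_SHIFT(lane));
            printf("reg = 0x%02x\n", reg);  /* 0xf3: other lanes untouched */
            return 0;
    }
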
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 08c364d611f5..f64d29f614ec 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -42,9 +42,9 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
+obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
-obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_THUNDERBAY) += pinctrl-thunderbay.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 5123f4c33854..ac1e400bbbac 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -35,6 +35,7 @@ config PINCTRL_BCM63XX
select PINCONF
select GENERIC_PINCONF
select GPIOLIB
+ select REGMAP
select GPIO_REGMAP
config PINCTRL_BCM6318
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index c4ebfa852b42..47e433e09c5c 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -1269,16 +1269,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents) {
- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_remove;
}
if (is_7211) {
pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
sizeof(*pc->wake_irq),
GFP_KERNEL);
- if (!pc->wake_irq)
- return -ENOMEM;
+ if (!pc->wake_irq) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
}
/*
@@ -1306,8 +1308,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
len = strlen(dev_name(pc->dev)) + 16;
name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
+ if (!name) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
@@ -1326,11 +1330,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
err = gpiochip_add_data(&pc->gpio_chip, pc);
if (err) {
dev_err(dev, "could not add GPIO chip\n");
- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
- return err;
+ goto out_remove;
}
return 0;
+
+out_remove:
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
+ return err;
}
static struct platform_driver bcm2835_pinctrl_driver = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index abffda1fd51e..1d5818269076 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1471,8 +1471,9 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
offset = cctx->intr_lines[intr_line];
if (offset == CHV_INVALID_HWIRQ) {
- dev_err(dev, "interrupt on unused interrupt line %u\n", intr_line);
- continue;
+ dev_warn_once(dev, "interrupt on unmapped interrupt line %u\n", intr_line);
+ /* Some boards expect hwirq 0 to trigger in this case */
+ offset = 0;
}
generic_handle_domain_irq(gc->irq.domain, offset);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85750974d182..826d494f3cc6 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -451,8 +451,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
value &= ~PADCFG0_PMODE_MASK;
value |= PADCFG0_PMODE_GPIO;
- /* Disable input and output buffers */
- value |= PADCFG0_GPIORXDIS;
+ /* Disable TX buffer and enable RX (this will be input) */
+ value &= ~PADCFG0_GPIORXDIS;
value |= PADCFG0_GPIOTXDIS;
/* Disable SCI/SMI/NMI generation */
@@ -497,9 +497,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
intel_gpio_set_gpio_mode(padcfg0);
- /* Disable TX buffer and enable RX (this will be input) */
- __intel_gpio_set_direction(padcfg0, true);
-
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -1115,9 +1112,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
intel_gpio_set_gpio_mode(reg);
- /* Disable TX buffer and enable RX (this will be input) */
- __intel_gpio_set_direction(reg, true);
-
value = readl(reg);
value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
@@ -1216,6 +1210,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return IRQ_RETVAL(ret);
}
+static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
+{
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *community;
+ void __iomem *base;
+ unsigned int gpp;
+
+ community = &pctrl->communities[i];
+ base = community->regs;
+
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ /* Mask and clear all interrupts */
+ writel(0, base + community->ie_offset + gpp * 4);
+ writel(0xffff, base + community->is_offset + gpp * 4);
+ }
+ }
+}
+
+static int intel_gpio_irq_init_hw(struct gpio_chip *gc)
+{
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ /*
+ * Make sure the interrupt lines are in a proper state before
+ * further configuration.
+ */
+ intel_gpio_irq_init(pctrl);
+
+ return 0;
+}
+
static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
@@ -1320,6 +1347,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
girq->num_parents = 0;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
+ girq->init_hw = intel_gpio_irq_init_hw;
ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
@@ -1695,26 +1723,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev)
}
EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
-static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
-{
- size_t i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *community;
- void __iomem *base;
- unsigned int gpp;
-
- community = &pctrl->communities[i];
- base = community->regs;
-
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- /* Mask and clear all interrupts */
- writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + community->is_offset + gpp * 4);
- }
- }
-}
-
static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
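
Relocating the mask-and-clear loop into a gpio_irq_chip ->init_hw() callback means gpiolib also runs it when the chip is registered, not only on resume, so no stale or firmware-enabled interrupt can fire before the driver has set types and handlers. A minimal sketch of the hook-up (driver names are hypothetical; the callback signature is the gpiolib one):

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>
    #include <linux/io.h>

    struct foo_pinctrl { void __iomem *regs; };          /* hypothetical */

    static void foo_mask_and_clear_all(struct foo_pinctrl *p)
    {
            /* write the per-bank interrupt-enable and status registers here */
    }

    static int foo_gpio_irq_init_hw(struct gpio_chip *gc)
    {
            foo_mask_and_clear_all(gpiochip_get_data(gc));
            return 0;
    }

    static void foo_setup_girq(struct gpio_chip *chip)
    {
            struct gpio_irq_chip *girq = &chip->irq;

            girq->default_type = IRQ_TYPE_NONE;
            girq->handler = handle_bad_irq;
            girq->init_hw = foo_gpio_irq_init_hw;   /* runs from gpiochip_add_data() */
    }
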
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 0bcd19597e4a..3ddaeffc0415 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -749,7 +749,6 @@ static const struct acpi_device_id tgl_pinctrl_acpi_match[] = {
{ "INT34C5", (kernel_ulong_t)&tgllp_soc_data },
{ "INT34C6", (kernel_ulong_t)&tglh_soc_data },
{ "INTC1055", (kernel_ulong_t)&tgllp_soc_data },
- { "INTC1057", (kernel_ulong_t)&tgllp_soc_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c
index 49e32684dbb2..ecab6bf63dc6 100644
--- a/drivers/pinctrl/pinctrl-k210.c
+++ b/drivers/pinctrl/pinctrl-k210.c
@@ -482,7 +482,7 @@ static int k210_pinconf_get_drive(unsigned int max_strength_ua)
{
int i;
- for (i = K210_PC_DRIVE_MAX; i; i--) {
+ for (i = K210_PC_DRIVE_MAX; i >= 0; i--) {
if (k210_pinconf_drive_strength[i] <= max_strength_ua)
return i;
}
@@ -527,7 +527,7 @@ static int k210_pinconf_set_param(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_UP:
if (!arg)
return -EINVAL;
- val |= K210_PC_PD;
+ val |= K210_PC_PU;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg *= 1000;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 8e081c90bdb2..639f1130e989 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -137,7 +137,8 @@ static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit)
static inline u32 sgpio_get_addr(struct sgpio_priv *priv, u32 rno, u32 off)
{
- return priv->properties->regoff[rno] + off;
+ return (priv->properties->regoff[rno] + off) *
+ regmap_get_reg_stride(priv->regs);
}
static u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c
index 0b912152a405..266da41a6162 100644
--- a/drivers/pinctrl/pinctrl-starfive.c
+++ b/drivers/pinctrl/pinctrl-starfive.c
@@ -1164,6 +1164,7 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static struct irq_chip starfive_irq_chip = {
+ .name = "StarFive GPIO",
.irq_ack = starfive_irq_ack,
.irq_mask = starfive_irq_mask,
.irq_mask_ack = starfive_irq_mask_ack,
@@ -1308,7 +1309,6 @@ static int starfive_probe(struct platform_device *pdev)
sfp->gc.ngpio = NR_GPIOS;
starfive_irq_chip.parent_device = dev;
- starfive_irq_chip.name = sfp->gc.label;
sfp->gc.irq.chip = &starfive_irq_chip;
sfp->gc.irq.parent_handler = starfive_gpio_irq_handler;
diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
index b5b47f4dd774..79d44bca039e 100644
--- a/drivers/pinctrl/pinctrl-thunderbay.c
+++ b/drivers/pinctrl/pinctrl-thunderbay.c
@@ -773,63 +773,42 @@ static int thunderbay_build_groups(struct thunderbay_pinctrl *tpc)
static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct function_desc *funcs)
{
- struct function_desc *function = funcs;
int i;
/* Assign the groups for each function */
- for (i = 0; i < tpc->soc->npins; i++) {
- const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i;
- struct thunderbay_mux_desc *pin_mux = pin_info->drv_data;
-
- while (pin_mux->name) {
- const char **grp;
- int j, grp_num, match = 0;
- size_t grp_size;
- struct function_desc *func;
-
- for (j = 0; j < tpc->nfuncs; j++) {
- if (!strcmp(pin_mux->name, function[j].name)) {
- match = 1;
- break;
- }
- }
-
- if (!match)
- return -EINVAL;
-
- func = function + j;
- grp_num = func->num_group_names;
- grp_size = sizeof(*func->group_names);
-
- if (!func->group_names) {
- func->group_names = devm_kcalloc(tpc->dev,
- grp_num,
- grp_size,
- GFP_KERNEL);
- if (!func->group_names) {
- kfree(func);
- return -ENOMEM;
- }
+ for (i = 0; i < tpc->nfuncs; i++) {
+ struct function_desc *func = &funcs[i];
+ const char **group_names;
+ unsigned int grp_idx = 0;
+ int j;
+
+ group_names = devm_kcalloc(tpc->dev, func->num_group_names,
+ sizeof(*group_names), GFP_KERNEL);
+ if (!group_names)
+ return -ENOMEM;
+
+ for (j = 0; j < tpc->soc->npins; j++) {
+ const struct pinctrl_pin_desc *pin_info = &thunderbay_pins[j];
+ struct thunderbay_mux_desc *pin_mux;
+
+ for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) {
+ if (!strcmp(pin_mux->name, func->name))
+ group_names[grp_idx++] = pin_info->name;
}
-
- grp = func->group_names;
- while (*grp)
- grp++;
-
- *grp = pin_info->name;
- pin_mux++;
}
+
+ func->group_names = group_names;
}
/* Add all functions */
for (i = 0; i < tpc->nfuncs; i++) {
pinmux_generic_add_function(tpc->pctrl,
- function[i].name,
- function[i].group_names,
- function[i].num_group_names,
- function[i].data);
+ funcs[i].name,
+ funcs[i].group_names,
+ funcs[i].num_group_names,
+ funcs[i].data);
}
- kfree(function);
+ kfree(funcs);
return 0;
}
@@ -839,27 +818,30 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
void *ptr;
int pin;
- /* Total number of functions is unknown at this point. Allocate first. */
+ /*
+ * Allocate the maximum possible number of functions. Assume every pin
+ * is part of 8 (hw maximum) globally unique muxes.
+ */
tpc->nfuncs = 0;
thunderbay_funcs = kcalloc(tpc->soc->npins * 8,
sizeof(*thunderbay_funcs), GFP_KERNEL);
if (!thunderbay_funcs)
return -ENOMEM;
- /* Find total number of functions and each's properties */
+ /* Setup 1 function for each unique mux */
for (pin = 0; pin < tpc->soc->npins; pin++) {
const struct pinctrl_pin_desc *pin_info = thunderbay_pins + pin;
- struct thunderbay_mux_desc *pin_mux = pin_info->drv_data;
+ struct thunderbay_mux_desc *pin_mux;
- while (pin_mux->name) {
- struct function_desc *func = thunderbay_funcs;
+ for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) {
+ struct function_desc *func;
- while (func->name) {
+ /* Check if we already have a function for this mux */
+ for (func = thunderbay_funcs; func->name; func++) {
if (!strcmp(pin_mux->name, func->name)) {
func->num_group_names++;
break;
}
- func++;
}
if (!func->name) {
@@ -868,8 +850,6 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
func->data = (int *)&pin_mux->mode;
tpc->nfuncs++;
}
-
- pin_mux++;
}
}
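
For illustration, a minimal standalone model of the reworked thunderbay_add_functions() inner loop: for one function, walk every pin and collect the names of pins whose mux list mentions that function; the pin and mux names here are made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pin_desc {
	const char *name;
	const char *const *muxes;	/* NULL-terminated mux name list */
};

int main(void)
{
	static const char *const pin0_muxes[] = { "i2c0", "gpio", NULL };
	static const char *const pin1_muxes[] = { "i2c0", "uart0", NULL };
	static const struct pin_desc pins[] = {
		{ "PIN0", pin0_muxes },
		{ "PIN1", pin1_muxes },
	};
	const char *func = "i2c0";
	const char **group_names;
	unsigned int grp_idx = 0;
	size_t i;

	group_names = calloc(2, sizeof(*group_names));
	if (!group_names)
		return 1;

	for (i = 0; i < 2; i++) {
		const char *const *mux;

		/* A pin belongs to the group if one of its muxes matches */
		for (mux = pins[i].muxes; *mux; mux++)
			if (!strcmp(*mux, func))
				group_names[grp_idx++] = pins[i].name;
	}

	for (i = 0; i < grp_idx; i++)
		printf("%s -> %s\n", func, group_names[i]);
	free(group_names);
	return 0;
}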
diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c
index 42da6bd399ee..e14012209992 100644
--- a/drivers/pinctrl/pinctrl-zynqmp.c
+++ b/drivers/pinctrl/pinctrl-zynqmp.c
@@ -809,7 +809,6 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
unsigned int *npins)
{
struct pinctrl_pin_desc *pins, *pin;
- char **pin_names;
int ret;
int i;
@@ -821,14 +820,13 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev,
if (!pins)
return -ENOMEM;
- pin_names = devm_kasprintf_strarray(dev, ZYNQMP_PIN_PREFIX, *npins);
- if (IS_ERR(pin_names))
- return PTR_ERR(pin_names);
-
for (i = 0; i < *npins; i++) {
pin = &pins[i];
pin->number = i;
- pin->name = pin_names[i];
+ pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
+ ZYNQMP_PIN_PREFIX, i);
+ if (!pin->name)
+ return -ENOMEM;
}
*zynqmp_pins = pins;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index ce1917e230f4..152b71226a80 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -363,16 +363,16 @@ static const struct sunxi_desc_pin h616_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
- SUNXI_FUNCTION(0x3, "i2s3"), /* DO0 */
+ SUNXI_FUNCTION(0x3, "i2s3_dout0"), /* DO0 */
SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x5, "i2s3"), /* DI1 */
+ SUNXI_FUNCTION(0x5, "i2s3_din1"), /* DI1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)), /* PH_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2s3"), /* DI0 */
+ SUNXI_FUNCTION(0x3, "i2s3_din0"), /* DI0 */
SUNXI_FUNCTION(0x4, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "i2s3"), /* DO1 */
+ SUNXI_FUNCTION(0x5, "i2s3_dout1"), /* DO1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)), /* PH_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 80d6750c74a6..1f401377ff60 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -36,6 +36,13 @@
#include "../core.h"
#include "pinctrl-sunxi.h"
+/*
+ * These lock classes tell lockdep that GPIO IRQs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key sunxi_pinctrl_irq_lock_class;
+static struct lock_class_key sunxi_pinctrl_irq_request_class;
+
static struct irq_chip sunxi_pinctrl_edge_irq_chip;
static struct irq_chip sunxi_pinctrl_level_irq_chip;
@@ -837,7 +844,8 @@ static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
{
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
- return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+ chip->base + offset, true);
}
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -890,7 +898,8 @@ static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
sunxi_pinctrl_gpio_set(chip, offset, value);
- return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL,
+ chip->base + offset, false);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -1555,6 +1564,8 @@ int sunxi_pinctrl_init_with_variant(struct platform_device *pdev,
for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
int irqno = irq_create_mapping(pctl->domain, i);
+ irq_set_lockdep_class(irqno, &sunxi_pinctrl_irq_lock_class,
+ &sunxi_pinctrl_irq_request_class);
irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
handle_edge_irq);
irq_set_chip_data(irqno, pctl);
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 5f0578e25f71..463f1ec5c14e 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ARM64 || X86 || COMPILE_TEST
default y
help
Say Y here to get to see options for platform-specific device drivers
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index abac3eec565e..444ec81ba02d 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -232,14 +232,21 @@ static int mshw0011_bix(struct mshw0011_data *cdata, struct bix *bix)
}
bix->last_full_charg_capacity = ret;
- /* get serial number */
+ /*
+ * Get the serial number. On some devices (with an unofficial replacement
+ * battery?) reading any of the serial number range addresses gets nacked;
+ * in this case just leave the serial number empty.
+ */
ret = i2c_smbus_read_i2c_block_data(client, MSHW0011_BAT0_REG_SERIAL_NO,
sizeof(buf), buf);
- if (ret != sizeof(buf)) {
+ if (ret == -EREMOTEIO) {
+ /* no serial number available */
+ } else if (ret != sizeof(buf)) {
dev_err(&client->dev, "Error reading serial no: %d\n", ret);
return ret;
+ } else {
+ snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
}
- snprintf(bix->serial, ARRAY_SIZE(bix->serial), "%3pE%6pE", buf + 7, buf);
/* get cycle count */
ret = i2c_smbus_read_word_data(client, MSHW0011_BAT0_REG_CYCLE_CNT);
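
For illustration, a minimal standalone sketch of the serial-number error handling above, where a NAK (-EREMOTEIO) leaves the serial empty rather than failing the whole query; read_serial() is a stand-in for the SMBus block read and the formatting is simplified:

#include <errno.h>
#include <stdio.h>

#define SERIAL_LEN 10

/* stand-in: pretend the device NAKs the serial number registers */
static int read_serial(char *buf, int len)
{
	(void)buf;
	(void)len;
	return -EREMOTEIO;
}

int main(void)
{
	char buf[SERIAL_LEN] = "";
	char serial[32] = "";
	int ret = read_serial(buf, sizeof(buf));

	if (ret == -EREMOTEIO) {
		/* no serial number available, leave it empty */
	} else if (ret != sizeof(buf)) {
		fprintf(stderr, "Error reading serial no: %d\n", ret);
		return 1;
	} else {
		snprintf(serial, sizeof(serial), "%.*s", (int)sizeof(buf), buf);
	}
	printf("serial: \"%s\"\n", serial);
	return 0;
}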
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index f794343d6aaa..b1103f85a85a 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
@@ -85,6 +86,9 @@
#define PMC_MSG_DELAY_MIN_US 50
#define RESPONSE_REGISTER_LOOP_MAX 20000
+/* QoS request to let CPUs enter idle states, but not the deepest ones */
+#define AMD_PMC_MAX_IDLE_STATE_LATENCY 3
+
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
@@ -124,12 +128,14 @@ struct amd_pmc_dev {
u32 cpu_id;
u32 active_ips;
/* SMU version information */
- u16 major;
- u16 minor;
- u16 rev;
+ u8 smu_program;
+ u8 major;
+ u8 minor;
+ u8 rev;
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
+ struct pm_qos_request amd_pmc_pm_qos_req;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
@@ -180,11 +186,13 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
if (rc)
return rc;
- dev->major = (val >> 16) & GENMASK(15, 0);
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
dev->minor = (val >> 8) & GENMASK(7, 0);
dev->rev = (val >> 0) & GENMASK(7, 0);
- dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev);
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
return 0;
}
@@ -226,7 +234,7 @@ static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
return 0;
}
-const struct file_operations amd_pmc_stb_debugfs_fops = {
+static const struct file_operations amd_pmc_stb_debugfs_fops = {
.owner = THIS_MODULE,
.open = amd_pmc_stb_debugfs_open,
.read = amd_pmc_stb_debugfs_read,
@@ -518,6 +526,14 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
rc = rtc_alarm_irq_enable(rtc_device, 0);
dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
+ /*
+ * Prevent CPUs from entering deep idle states while sending OS_HINT,
+ * which is otherwise generally safe to send when at least one of the CPUs
+ * is not in a deep idle state.
+ */
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req, AMD_PMC_MAX_IDLE_STATE_LATENCY);
+ wake_up_all_idle_cpus();
+
return rc;
}
@@ -535,24 +551,31 @@ static int __maybe_unused amd_pmc_suspend(struct device *dev)
/* Activate CZN specific RTC functionality */
if (pdev->cpu_id == AMD_CPU_ID_CZN) {
rc = amd_pmc_verify_czn_rtc(pdev, &arg);
- if (rc < 0)
- return rc;
+ if (rc)
+ goto fail;
}
/* Dump the IdleMask before we send hint to SMU */
amd_pmc_idlemask_read(pdev, dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
- if (rc)
+ if (rc) {
dev_err(pdev->dev, "suspend failed\n");
+ goto fail;
+ }
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF);
- if (rc) {
+ if (rc) {
dev_err(pdev->dev, "error writing to STB\n");
- return rc;
+ goto fail;
}
+ return 0;
+fail:
+ if (pdev->cpu_id == AMD_CPU_ID_CZN)
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
return rc;
}
@@ -576,12 +599,15 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
/* Write data incremented by 1 to distinguish in stb_read */
if (enable_stb)
rc = amd_pmc_write_stb(pdev, AMD_PMC_STB_PREDEF + 1);
- if (rc) {
+ if (rc)
dev_err(pdev->dev, "error writing to STB\n");
- return rc;
- }
- return 0;
+ /* Restore the QoS request back to defaults if it was set */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN)
+ cpu_latency_qos_update_request(&pdev->amd_pmc_pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+
+ return rc;
}
static const struct dev_pm_ops amd_pmc_pm_ops = {
@@ -719,6 +745,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
+ cpu_latency_qos_add_request(&dev->amd_pmc_pm_qos_req, PM_QOS_DEFAULT_VALUE);
return 0;
err_pci_dev_put:
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index d4ef8f362ee6..6fd0c9fea82d 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -250,7 +250,7 @@ static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum,
return 0;
}
-struct hid_ll_driver tf103c_dock_hid_ll_driver = {
+static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
.parse = tf103c_dock_hid_parse,
.start = tf103c_dock_hid_start,
.stop = tf103c_dock_hid_stop,
@@ -921,7 +921,7 @@ static int __maybe_unused tf103c_dock_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
+static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
static const struct acpi_device_id tf103c_dock_acpi_match[] = {
{"NPCE69A"},
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a3b83b22a3b1..2104a2621e50 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- if (err == -ENODEV)
+ if (err == -ENODEV || err == -ENODATA)
return 0;
return err;
}
diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c
index 0374bc742513..e4299cfa2205 100644
--- a/drivers/platform/x86/intel/crystal_cove_charger.c
+++ b/drivers/platform/x86/intel/crystal_cove_charger.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#define CHGRIRQ_REG 0x0a
+#define MCHGRIRQ_REG 0x17
struct crystal_cove_charger_data {
struct mutex buslock; /* irq_bus_lock */
@@ -25,8 +26,8 @@ struct crystal_cove_charger_data {
struct irq_domain *irq_domain;
int irq;
int charger_irq;
- bool irq_enabled;
- bool irq_is_enabled;
+ u8 mask;
+ u8 new_mask;
};
static irqreturn_t crystal_cove_charger_irq(int irq, void *data)
@@ -53,13 +54,9 @@ static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- if (charger->irq_is_enabled != charger->irq_enabled) {
- if (charger->irq_enabled)
- enable_irq(charger->irq);
- else
- disable_irq(charger->irq);
-
- charger->irq_is_enabled = charger->irq_enabled;
+ if (charger->mask != charger->new_mask) {
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask);
+ charger->mask = charger->new_mask;
}
mutex_unlock(&charger->buslock);
@@ -69,14 +66,14 @@ static void crystal_cove_charger_irq_unmask(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- charger->irq_enabled = true;
+ charger->new_mask &= ~BIT(data->hwirq);
}
static void crystal_cove_charger_irq_mask(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- charger->irq_enabled = false;
+ charger->new_mask |= BIT(data->hwirq);
}
static void crystal_cove_charger_rm_irq_domain(void *data)
@@ -130,10 +127,13 @@ static int crystal_cove_charger_probe(struct platform_device *pdev)
irq_set_nested_thread(charger->charger_irq, true);
irq_set_noprobe(charger->charger_irq);
+ /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */
+ charger->mask = charger->new_mask = BIT(0);
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask);
+
ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL,
crystal_cove_charger_irq,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- KBUILD_MODNAME, charger);
+ IRQF_ONESHOT, KBUILD_MODNAME, charger);
if (ret)
return dev_err_probe(&pdev->dev, ret, "requesting irq\n");
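
For illustration, a minimal standalone sketch of the cached-mask pattern the charger IRQ chip uses now: mask/unmask only update new_mask, and the hardware register (modelled by write_reg()) is written once, in the bus-sync-unlock step, and only when the value actually changed:

#include <stdint.h>
#include <stdio.h>

struct charger {
	uint8_t mask;      /* value last written to hardware */
	uint8_t new_mask;  /* value requested by the IRQ core */
};

static void write_reg(uint8_t val)
{
	printf("MCHGRIRQ <- 0x%02x\n", val);
}

static void irq_mask(struct charger *c, unsigned int hwirq)
{
	c->new_mask |= 1u << hwirq;
}

static void irq_unmask(struct charger *c, unsigned int hwirq)
{
	c->new_mask &= ~(1u << hwirq);
}

static void irq_bus_sync_unlock(struct charger *c)
{
	if (c->mask != c->new_mask) {
		write_reg(c->new_mask);
		c->mask = c->new_mask;
	}
}

int main(void)
{
	struct charger c = { .mask = 0x01, .new_mask = 0x01 }; /* bit 0 masked */

	irq_unmask(&c, 0);
	irq_bus_sync_unlock(&c);	/* writes 0x00 */
	irq_mask(&c, 0);
	irq_unmask(&c, 0);
	irq_bus_sync_unlock(&c);	/* no net change, no write */
	return 0;
}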
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index f93d437fd192..525f09a3b5ff 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
.dev_id = "i2c-INT347A:00",
.table = {
GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW)
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+ { }
}
};
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index c9a85eb2e860..e8424e70d81d 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-static DEFINE_MUTEX(punit_misc_dev_lock);
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one shared misc device for all ISST interfaces */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;
@@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
int i, ret = 0;
/* Fail open, if a module is going away */
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
} else {
misc_device_open++;
}
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return ret;
}
@@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
{
int i;
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
misc_device_open--;
for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
if (cb->registered)
module_put(cb->owner);
}
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return 0;
}
@@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = {
.fops = &isst_if_char_driver_ops,
};
+static int isst_misc_reg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ if (!misc_usage_count) {
+ misc_device_ret = isst_if_cpu_info_init();
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ misc_device_ret = misc_register(&isst_if_char_driver);
+ if (misc_device_ret) {
+ isst_if_cpu_info_exit();
+ goto unlock_exit;
+ }
+ }
+ misc_usage_count++;
+
+unlock_exit:
+ mutex_unlock(&punit_misc_dev_reg_lock);
+
+ return misc_device_ret;
+}
+
+static void isst_misc_unreg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_usage_count)
+ misc_usage_count--;
+ if (!misc_usage_count && !misc_device_ret) {
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
+ }
+ mutex_unlock(&punit_misc_dev_reg_lock);
+}
+
/**
* isst_if_cdev_register() - Register callback for IOCTL
* @device_type: The device type this callback handling.
@@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = {
*/
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
- if (misc_device_ret)
- return misc_device_ret;
+ int ret;
if (device_type >= ISST_IF_DEV_MAX)
return -EINVAL;
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
+ /* Device is already open, we don't want to add new callbacks */
if (misc_device_open) {
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return -EAGAIN;
}
- if (!misc_usage_count) {
- int ret;
-
- misc_device_ret = misc_register(&isst_if_char_driver);
- if (misc_device_ret)
- goto unlock_exit;
-
- ret = isst_if_cpu_info_init();
- if (ret) {
- misc_deregister(&isst_if_char_driver);
- misc_device_ret = ret;
- goto unlock_exit;
- }
- }
memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
punit_callbacks[device_type].registered = 1;
- misc_usage_count++;
-unlock_exit:
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
- return misc_device_ret;
+ ret = isst_misc_reg();
+ if (ret) {
+ /*
+ * No need for the mutex here: misc device registration failed,
+ * so no one can open the device yet. Hence no contention.
+ */
+ punit_callbacks[device_type].registered = 0;
+ return ret;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
*/
void isst_if_cdev_unregister(int device_type)
{
- mutex_lock(&punit_misc_dev_lock);
- misc_usage_count--;
+ isst_misc_unreg();
+ mutex_lock(&punit_misc_dev_open_lock);
punit_callbacks[device_type].registered = 0;
if (device_type == ISST_IF_DEV_MBOX)
isst_delete_hash();
- if (!misc_usage_count && !misc_device_ret) {
- misc_deregister(&isst_if_char_driver);
- isst_if_cpu_info_exit();
- }
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
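
For illustration, a minimal standalone sketch of the reference-counted register/unregister helpers, where the shared misc device is created by the first user and torn down with the last; a pthread mutex and printf() stand in for the kernel lock and device:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static int usage_count;

static int misc_reg(void)
{
	pthread_mutex_lock(&reg_lock);
	if (!usage_count)
		printf("registering shared misc device\n");
	usage_count++;
	pthread_mutex_unlock(&reg_lock);
	return 0;
}

static void misc_unreg(void)
{
	pthread_mutex_lock(&reg_lock);
	if (usage_count)
		usage_count--;
	if (!usage_count)
		printf("deregistering shared misc device\n");
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	misc_reg();	/* first caller registers */
	misc_reg();	/* second caller only bumps the count */
	misc_unreg();
	misc_unreg();	/* last caller deregisters */
	return 0;
}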
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 098180fb1cfc..3424b080db77 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8679,9 +8679,10 @@ static const struct attribute_group fan_driver_attr_group = {
.attrs = fan_driver_attributes,
};
-#define TPACPI_FAN_Q1 0x0001 /* Unitialized HFSP */
-#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
-#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8702,6 +8703,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
+ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
+ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8730,6 +8733,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
quirks = tpacpi_check_quirks(fan_quirk_table,
ARRAY_SIZE(fan_quirk_table));
+ if (quirks & TPACPI_FAN_NOFAN) {
+ pr_info("No integrated ThinkPad fan available\n");
+ return -ENODEV;
+ }
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -10112,6 +10120,9 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */
#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
+#define DYTC_FC_MMC 27 /* MMC Mode supported */
+
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
@@ -10324,6 +10335,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (dytc_version < 5)
return -ENODEV;
+ /* Check what capabilities are supported. Currently MMC is needed */
+ err = dytc_command(DYTC_CMD_FUNC_CAP, &output);
+ if (err)
+ return err;
+ if (!(output & BIT(DYTC_FC_MMC))) {
+ dbg_printk(TPACPI_DBG_INIT, " DYTC MMC mode not supported\n");
+ return -ENODEV;
+ }
+
dbg_printk(TPACPI_DBG_INIT,
"DYTC version %d: thermal mode available\n", dytc_version);
/*
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 494f23052678..bc97bfa8e8a6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -770,6 +770,21 @@ static const struct ts_dmi_data predia_basic_data = {
.properties = predia_basic_props,
};
+static const struct property_entry rwc_nanote_p8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rwc_nanote_p8_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rwc_nanote_p8_props,
+};
+
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1395,6 +1410,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* RWC NANOTE P8 */
+ .driver_data = (void *)&rwc_nanote_p8_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
+ },
+ },
+ {
/* Schneider SCT101CTM */
.driver_data = (void *)&schneider_sct101ctm_data,
.matches = {
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 3ba63ad91b28..9360a8a92486 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -26,6 +26,7 @@
#include <linux/string.h>
/* For gpio_get_desc() which is EXPORT_SYMBOL_GPL() */
#include "../../gpio/gpiolib.h"
+#include "../../gpio/gpiolib-acpi.h"
/*
* Helper code to get Linux IRQ numbers given a description of the IRQ source
@@ -47,7 +48,7 @@ struct x86_acpi_irq_data {
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
};
-static int x86_acpi_irq_helper_gpiochip_find(struct gpio_chip *gc, void *data)
+static int gpiochip_find_match_label(struct gpio_chip *gc, void *data)
{
return gc->label && !strcmp(gc->label, data);
}
@@ -73,7 +74,7 @@ static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
return irq;
case X86_ACPI_IRQ_TYPE_GPIOINT:
/* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */
- chip = gpiochip_find(data->chip, x86_acpi_irq_helper_gpiochip_find);
+ chip = gpiochip_find(data->chip, gpiochip_find_match_label);
if (!chip) {
pr_err("error cannot find GPIO chip %s\n", data->chip);
return -ENODEV;
@@ -143,14 +144,17 @@ struct x86_serdev_info {
};
struct x86_dev_info {
+ char *invalid_aei_gpiochip;
const char * const *modules;
- struct gpiod_lookup_table **gpiod_lookup_tables;
+ struct gpiod_lookup_table * const *gpiod_lookup_tables;
const struct x86_i2c_client_info *i2c_client_info;
const struct platform_device_info *pdev_info;
const struct x86_serdev_info *serdev_info;
int i2c_client_count;
int pdev_count;
int serdev_count;
+ int (*init)(void);
+ void (*exit)(void);
};
/* Generic / shared bq24190 settings */
@@ -187,8 +191,8 @@ static struct bq24190_platform_data bq24190_pdata = {
};
static const char * const bq24190_modules[] __initconst = {
- "crystal_cove_charger", /* For the bq24190 IRQ */
- "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
+ "intel_crystal_cove_charger", /* For the bq24190 IRQ */
+ "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
NULL
};
@@ -302,7 +306,7 @@ static struct gpiod_lookup_table asus_me176c_goodix_gpios = {
},
};
-static struct gpiod_lookup_table *asus_me176c_gpios[] = {
+static struct gpiod_lookup_table * const asus_me176c_gpios[] = {
&int3496_gpo2_pin22_gpios,
&asus_me176c_goodix_gpios,
NULL
@@ -317,6 +321,7 @@ static const struct x86_dev_info asus_me176c_info __initconst = {
.serdev_count = ARRAY_SIZE(asus_me176c_serdevs),
.gpiod_lookup_tables = asus_me176c_gpios,
.modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
};
/* Asus TF103C tablets have an Android factory img with everything hardcoded */
@@ -405,7 +410,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table *asus_tf103c_gpios[] = {
+static struct gpiod_lookup_table * const asus_tf103c_gpios[] = {
&int3496_gpo2_pin22_gpios,
NULL
};
@@ -417,6 +422,7 @@ static const struct x86_dev_info asus_tf103c_info __initconst = {
.pdev_count = ARRAY_SIZE(int3496_pdevs),
.gpiod_lookup_tables = asus_tf103c_gpios,
.modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
};
/*
@@ -490,6 +496,39 @@ static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
};
+#define CZC_EC_EXTRA_PORT 0x68
+#define CZC_EC_ANDROID_KEYS 0x63
+
+static int __init czc_p10t_init(void)
+{
+ /*
+ * The device boots up in "Windows 7" mode, where the home button sends a
+ * Windows-specific key sequence (Left Meta + D) and the second button
+ * sends an unknown one while also toggling the Radio Kill Switch.
+ * This is a surprising behavior when the second button is labeled "Back".
+ *
+ * The vendor-supplied Android-x86 build switches the device to an "Android"
+ * mode by writing value 0x63 to the I/O port 0x68. This seems to just
+ * set bit 6 on address 0x96 in the EC region; switching the bit directly
+ * seems to achieve the same result. It uses a "p10t_switcher" to do the
+ * job. It doesn't seem to be able to do anything else, and no other use
+ * of the port 0x68 is known.
+ *
+ * In the Android mode, the home button sends just a single scancode,
+ * which can be handled in Linux userspace more reasonably and the back
+ * button only sends a scancode without toggling the kill switch.
+ * The scancode can then be mapped either to Back or RF Kill functionality
+ * in userspace, depending on how the button is labeled on that particular
+ * model.
+ */
+ outb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT);
+ return 0;
+}
+
+static const struct x86_dev_info czc_p10t __initconst = {
+ .init = czc_p10t_init,
+};
+
/*
* Whitelabel (sold as various brands) TM800A550L tablets.
* These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices
@@ -559,7 +598,7 @@ static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = {
},
};
-static struct gpiod_lookup_table *whitelabel_tm800a550l_gpios[] = {
+static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = {
&whitelabel_tm800a550l_goodix_gpios,
NULL
};
@@ -642,6 +681,24 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&chuwi_hi8_info,
},
{
+ /* CZC P10T */
+ .ident = "CZC ODEON TPC-10 (\"P10T\")",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CZC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
+ /* A variant of CZC P10T */
+ .ident = "ViewSonic ViewPad 10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -669,7 +726,8 @@ static int serdev_count;
static struct i2c_client **i2c_clients;
static struct platform_device **pdevs;
static struct serdev_device **serdevs;
-static struct gpiod_lookup_table **gpiod_lookup_tables;
+static struct gpiod_lookup_table * const *gpiod_lookup_tables;
+static void (*exit_handler)(void);
static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info,
int idx)
@@ -787,6 +845,9 @@ static void x86_android_tablet_cleanup(void)
kfree(i2c_clients);
+ if (exit_handler)
+ exit_handler();
+
for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
gpiod_remove_lookup_table(gpiod_lookup_tables[i]);
}
@@ -795,6 +856,7 @@ static __init int x86_android_tablet_init(void)
{
const struct x86_dev_info *dev_info;
const struct dmi_system_id *id;
+ struct gpio_chip *chip;
int i, ret = 0;
id = dmi_first_match(x86_android_tablet_ids);
@@ -804,6 +866,20 @@ static __init int x86_android_tablet_init(void)
dev_info = id->driver_data;
/*
+ * The broken DSDTs on these devices often also include broken
+ * _AEI (ACPI Event Interrupt) handlers; disable these.
+ */
+ if (dev_info->invalid_aei_gpiochip) {
+ chip = gpiochip_find(dev_info->invalid_aei_gpiochip,
+ gpiochip_find_match_label);
+ if (!chip) {
+ pr_err("error cannot find GPIO chip %s\n", dev_info->invalid_aei_gpiochip);
+ return -ENODEV;
+ }
+ acpi_gpiochip_free_interrupts(chip);
+ }
+
+ /*
* Since this runs from module_init() it cannot use -EPROBE_DEFER,
* instead pre-load any modules which are listed as requirements.
*/
@@ -814,6 +890,15 @@ static __init int x86_android_tablet_init(void)
for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
gpiod_add_lookup_table(gpiod_lookup_tables[i]);
+ if (dev_info->init) {
+ ret = dev_info->init();
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ exit_handler = dev_info->exit;
+ }
+
i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL);
if (!i2c_clients) {
x86_android_tablet_cleanup();
@@ -865,6 +950,6 @@ static __init int x86_android_tablet_init(void)
module_init(x86_android_tablet_init);
module_exit(x86_android_tablet_cleanup);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
MODULE_LICENSE("GPL");
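
For illustration, a minimal standalone sketch of the optional per-board init()/exit() hooks added to struct x86_dev_info; czc_init() merely stands in for the EC port write done on real hardware:

#include <stdio.h>

struct dev_info {
	const char *name;
	int (*init)(void);
	void (*exit)(void);
};

static int czc_init(void)
{
	printf("switching EC to Android key mode\n");
	return 0;
}

static const struct dev_info czc_p10t = {
	.name = "CZC P10T",
	.init = czc_init,
	/* .exit left NULL: nothing to undo */
};

int main(void)
{
	const struct dev_info *dev = &czc_p10t;
	void (*exit_handler)(void) = NULL;

	if (dev->init) {
		if (dev->init())
			return 1;
		exit_handler = dev->exit;
	}

	/* ... normal device instantiation would happen here ... */

	if (exit_handler)
		exit_handler();
	return 0;
}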
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index b274942dc46a..01ad84fd147c 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
BQ256XX_WDT_BIT_SHIFT);
ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret == -ENOMEM)
+ return ret;
+
if (ret) {
dev_warn(bq->dev, "battery info missing, default values will be applied\n");
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 0c87ad0dbf71..728e2a6cc9c3 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (ret) {
/* Allocate an empty battery */
cw_bat->battery = devm_kzalloc(&client->dev,
- sizeof(cw_bat->battery),
+ sizeof(*cw_bat->battery),
GFP_KERNEL);
if (!cw_bat->battery)
return -ENOMEM;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 0f1b5a7d2a89..17ad5f0d13b2 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -607,7 +607,7 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
}
static void
-__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u32 adj_val)
{
u32 select, ctrl;
@@ -615,7 +615,7 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
iowrite32(adj_val, &bp->reg->offset_ns);
- iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+ iowrite32(NSEC_PER_SEC, &bp->reg->offset_window_ns);
ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
@@ -624,6 +624,22 @@ __ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
iowrite32(select >> 16, &bp->reg->select);
}
+static void
+ptp_ocp_adjtime_coarse(struct ptp_ocp *bp, u64 delta_ns)
+{
+ struct timespec64 ts;
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
+ if (likely(!err)) {
+ timespec64_add_ns(&ts, delta_ns);
+ __ptp_ocp_settime_locked(bp, &ts);
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
static int
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
{
@@ -631,6 +647,11 @@ ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
unsigned long flags;
u32 adj_ns, sign;
+ if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) {
+ ptp_ocp_adjtime_coarse(bp, delta_ns);
+ return 0;
+ }
+
sign = delta_ns < 0 ? BIT(31) : 0;
adj_ns = sign ? -delta_ns : delta_ns;
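
For illustration, a minimal standalone sketch of the adjtime split above: offsets larger than one second take the coarse gettime/settime path, smaller ones keep using the hardware offset register; the register programming is reduced to a printf():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

static void adjtime(int64_t delta_ns)
{
	uint32_t sign, adj_ns;

	if (delta_ns > NSEC_PER_SEC || -delta_ns > NSEC_PER_SEC) {
		printf("coarse: gettime + %" PRId64 " ns + settime\n", delta_ns);
		return;
	}

	/* fine path: program sign + magnitude into the offset register */
	sign = delta_ns < 0 ? 1u << 31 : 0;
	adj_ns = (uint32_t)(sign ? -delta_ns : delta_ns);
	printf("fine: offset_ns=0x%08" PRIx32 "\n", sign | adj_ns);
}

int main(void)
{
	adjtime(500);			/* fine */
	adjtime(-3 * NSEC_PER_SEC);	/* coarse */
	return 0;
}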
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 86aa4141efa9..d2553970a67b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -6014,9 +6014,8 @@ core_initcall(regulator_init);
static int regulator_late_cleanup(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const struct regulator_ops *ops = rdev->desc->ops;
struct regulation_constraints *c = rdev->constraints;
- int enabled, ret;
+ int ret;
if (c && c->always_on)
return 0;
@@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data)
if (rdev->use_count)
goto unlock;
- /* If we can't read the status assume it's always on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- /* But if reading the status failed, assume that it's off. */
- if (enabled <= 0)
+ /* If reading the status failed, assume that it's off. */
+ if (_regulator_is_enabled(rdev) <= 0)
goto unlock;
if (have_full_constraints()) {
diff --git a/drivers/regulator/da9121-regulator.c b/drivers/regulator/da9121-regulator.c
index 6f21223a488e..eb9df485bd8a 100644
--- a/drivers/regulator/da9121-regulator.c
+++ b/drivers/regulator/da9121-regulator.c
@@ -87,16 +87,16 @@ static struct da9121_range da9121_3A_1phase_current = {
};
static struct da9121_range da914x_40A_4phase_current = {
- .val_min = 14000000,
- .val_max = 80000000,
- .val_stp = 2000000,
+ .val_min = 26000000,
+ .val_max = 78000000,
+ .val_stp = 4000000,
.reg_min = 1,
.reg_max = 14,
};
static struct da9121_range da914x_20A_2phase_current = {
- .val_min = 7000000,
- .val_max = 40000000,
+ .val_min = 13000000,
+ .val_max = 39000000,
.val_stp = 2000000,
.reg_min = 1,
.reg_max = 14,
@@ -561,7 +561,7 @@ static const struct regulator_desc da9217_reg = {
};
#define DA914X_MIN_MV 500
-#define DA914X_MAX_MV 1000
+#define DA914X_MAX_MV 1300
#define DA914X_STEP_MV 10
#define DA914X_MIN_SEL (DA914X_MIN_MV / DA914X_STEP_MV)
#define DA914X_N_VOLTAGES (((DA914X_MAX_MV - DA914X_MIN_MV) / DA914X_STEP_MV) \
@@ -585,10 +585,6 @@ static const struct regulator_desc da9141_reg = {
.vsel_mask = DA9121_MASK_BUCK_BUCKx_5_CHx_A_VOUT,
.enable_reg = DA9121_REG_BUCK_BUCK1_0,
.enable_mask = DA9121_MASK_BUCK_BUCKx_0_CHx_EN,
- /* Default value of BUCK_BUCK1_0.CH1_SRC_DVC_UP */
- .ramp_delay = 20000,
- /* tBUCK_EN */
- .enable_time = 20,
};
static const struct regulator_desc da9142_reg = {
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index fbc56b043071..b8bf76c170fe 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -140,7 +141,7 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
node = of_get_child_by_name(chip->dev->of_node, "regulators");
if (!node) {
dev_err(chip->dev, "regulators node not found\n");
- return PTR_ERR(node);
+ return -ENODEV;
}
for (i = 0; i < chip->info->num_outputs; ++i)
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index cd938a26b76c..3b1cd0c96a74 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1180,7 +1180,7 @@ static int io_subchannel_chp_event(struct subchannel *sch,
else
path_event[chpid] = PE_NONE;
}
- if (cdev)
+ if (cdev && cdev->drv && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
break;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 9be273c320e2..a826456c6075 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -508,7 +508,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
- u32 fr_len;
+ u64 crc_err;
+ u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fc_stats *stats;
@@ -542,6 +543,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ put_cpu();
+
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
@@ -624,16 +630,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
- stats = per_cpu_ptr(lport->stats, smp_processor_id());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ fr_crc = le32_to_cpu(fr_crc(fp));
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
+ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ crc_err = (stats->InvalidCRCCount++);
+ put_cpu();
+ if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
- stats->InvalidCRCCount++;
kfree_skb(skb);
return;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 2f53a2ee024a..ebf5ec38891b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -400,8 +400,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot,
struct hisi_sas_dq *dq,
struct hisi_sas_device *sas_dev,
- struct hisi_sas_internal_abort *abort,
- struct hisi_sas_tmf_task *tmf)
+ struct hisi_sas_internal_abort *abort)
{
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue;
@@ -427,8 +426,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->tmf = tmf;
- slot->is_internal = tmf;
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -587,7 +584,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
slot->is_internal = tmf;
/* protect task_prep and start_delivery sequence */
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);
return 0;
@@ -1380,12 +1377,13 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
+ struct hisi_sas_tmf_task tmf = {};
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
@@ -1396,7 +1394,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, NULL);
+ s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_err(dev, "ata disk %016llx de-reset failed\n",
SAS_ADDR(device->sas_addr));
@@ -2067,7 +2065,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
slot->port = port;
slot->is_internal = true;
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 4878c94761f9..98cabe09c040 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -592,6 +592,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -1161,6 +1162,16 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
@@ -1182,9 +1193,6 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7a7f17d71811..fa8415259cb8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
@@ -3978,8 +3981,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index db5ccae1b63d..f936833c9909 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -4607,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Added for Vendor specifc support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
/* The driver has a VALID PLOGI but the rport has
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a56f01f659f8..558f7d2559c4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 7d717a4ac14d..fdf5e777bf11 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1961,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 1bc0db572d9e..430abebf99f1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -13363,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
@@ -13413,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index c814e5071712..9ec310b795c3 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -2692,7 +2692,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2724,8 +2723,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2767,7 +2764,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2853,20 +2849,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
/*See the comments for mpi_ssp_completion */
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 160ee8b228c9..32edda3e55c6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -769,8 +769,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
tmf->tmf);
+
+ if (ccb)
+ ccb->task = NULL;
goto ex_err;
}
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 2530d1365556..9d20f8009b89 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -2185,9 +2185,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
@@ -2794,9 +2794,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
@@ -2821,7 +2821,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2854,8 +2853,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2904,11 +2901,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
return;
}
break;
@@ -3008,24 +3000,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
- }
}
/*See the comments for mpi_ssp_completion */
@@ -3931,6 +3905,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
/**
* process_one_iomb - process one outbound Queue memory block
* @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
* @piomb: IO message buffer
*/
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 5916ed7662d5..4eb89aa4a39d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->list_tmf_work = NULL;
}
}
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
- if (!found) {
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ if (!found)
goto check_cleanup_reqs;
- }
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->state = CLEANUP_RECV;
unlock:
spin_unlock_bh(&conn->session->back_lock);
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
wake_up_interruptible(&qedi_conn->wait_queue);
return;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 3520b9384428..f4e6c68ac99e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ int ret;
+ struct sbitmap sb_backup;
+
+ /*
+ * Reallocate if a new shift was calculated, which happens when a new
+ * default queue depth is set up after calling ->slave_configure.
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+ * The request queue has to be frozen while the budget map is
+ * reallocated; the disk isn't added yet at this point, so freezing
+ * is fast.
+ */
+ if (need_free) {
+ blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue);
+ }
+ return ret;
+}
+
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
@@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
- if (sbitmap_init_node(&sdev->budget_map,
- scsi_device_max_queue_depth(sdev),
- sbitmap_calculate_shift(depth),
- GFP_KERNEL, sdev->request_queue->node,
- false, true)) {
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
put_device(&starget->dev);
kfree(sdev);
goto out;
@@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
return SCSI_SCAN_NO_RESPONSE;
}
+
+ /*
+ * The queue_depth is often changed in ->slave_configure.
+ * Set up budget map again since memory consumption of
+ * the map depends on actual queue depth.
+ */
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
}
if (sdev->scsi_level >= SCSI_3)
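For context on the scsi_realloc_sdev_budget_map() addition above: low-level drivers routinely change the queue depth from their ->slave_configure callback, which invalidates the sbitmap shift chosen when the device was first allocated, so scsi_add_lun() rebuilds the budget map afterwards. A minimal sketch of such a callback follows; the function name and depth value are illustrative assumptions, not part of the patch.

#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>

/* Hypothetical LLD ->slave_configure callback, for illustration only. */
static int example_slave_configure(struct scsi_device *sdev)
{
	/*
	 * Changing the depth here is what makes scsi_add_lun() call
	 * scsi_realloc_sdev_budget_map() again with the new value.
	 */
	scsi_change_queue_depth(sdev, 64);
	return 0;
}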
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 50b12d60dc1b..9349557b8a01 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2681,7 +2681,7 @@ static int ufshcd_map_queues(struct Scsi_Host *shost)
break;
case HCTX_TYPE_READ:
map->nr_queues = 0;
- break;
+ continue;
default:
WARN_ON_ONCE(true);
}
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 72771e018c42..258894ed234b 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
}
lpc_ctrl->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpc_ctrl->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(lpc_ctrl->clk);
- }
+ if (IS_ERR(lpc_ctrl->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+ "couldn't get clock\n");
rc = clk_prepare_enable(lpc_ctrl->clk);
if (rc) {
dev_err(dev, "couldn't enable clock\n");
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 072473a16f4d..5ed2fc1c53a0 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -28,7 +28,6 @@ struct fsl_soc_die_attr {
static struct guts *guts;
static struct soc_device_attribute soc_dev_attr;
static struct soc_device *soc_dev;
-static struct device_node *root;
/* SoC die attribute definition for QorIQ platform */
@@ -138,7 +137,7 @@ static u32 fsl_guts_get_svr(void)
static int fsl_guts_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
const struct fsl_soc_die_attr *soc_die;
const char *machine;
@@ -159,8 +158,14 @@ static int fsl_guts_probe(struct platform_device *pdev)
root = of_find_node_by_path("/");
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
- if (machine)
- soc_dev_attr.machine = machine;
+ if (machine) {
+ soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine) {
+ of_node_put(root);
+ return -ENOMEM;
+ }
+ }
+ of_node_put(root);
svr = fsl_guts_get_svr();
soc_die = fsl_soc_die_match(svr, fsl_soc_die);
@@ -195,7 +200,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
static int fsl_guts_remove(struct platform_device *dev)
{
soc_device_unregister(soc_dev);
- of_node_put(root);
return 0;
}
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 4d38c80f8be8..b3c226eb5292 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -147,7 +147,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
* memory mapped space.
* The BRG clock is the QE clock divided by 2.
* It was set up long ago during the initial boot phase and is
- * is given to us.
+ * given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
@@ -421,7 +421,7 @@ static void qe_upload_microcode(const void *base,
for (i = 0; i < be32_to_cpu(ucode->count); i++)
iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);
-
+
/* Set I-RAM Ready Register */
iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}
diff --git a/drivers/soc/fsl/qe/qe_io.c b/drivers/soc/fsl/qe/qe_io.c
index e277c827bdf3..a5e2d0e5ab51 100644
--- a/drivers/soc/fsl/qe/qe_io.c
+++ b/drivers/soc/fsl/qe/qe_io.c
@@ -35,6 +35,8 @@ int par_io_init(struct device_node *np)
if (ret)
return ret;
par_io = ioremap(res.start, resource_size(&res));
+ if (!par_io)
+ return -ENOMEM;
if (!of_property_read_u32(np, "num-ports", &num_ports))
num_par_io_ports = num_ports;
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3e59d479d001..3cb123016b3e 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -382,7 +382,8 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
return 0;
out_clk_disable:
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+ if (!domain->keep_clocks)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return ret;
}
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 670cc82d17dc..ca75b14931ec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -411,17 +411,12 @@ out:
return ret;
}
-static int init_clks(struct platform_device *pdev, struct clk **clk)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
{
int i;
- for (i = CLK_NONE + 1; i < CLK_MAX; i++) {
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- if (IS_ERR(clk[i]))
- return PTR_ERR(clk[i]);
- }
-
- return 0;
}
static struct scp *init_scp(struct platform_device *pdev,
@@ -431,7 +426,7 @@ static struct scp *init_scp(struct platform_device *pdev,
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
struct clk *clk[CLK_MAX];
@@ -486,9 +481,7 @@ static struct scp *init_scp(struct platform_device *pdev,
pd_data->num_domains = num;
- ret = init_clks(pdev, clk);
- if (ret)
- return ERR_PTR(ret);
+ init_clks(pdev, clk);
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index a9f8b224322e..02e319508cc6 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -31,7 +31,7 @@ config EXYNOS_USI
help
Enable support for USI block. USI (Universal Serial Interface) is an
IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
- ExynosAutoV0. USI block can be configured to provide one of the
+ ExynosAutoV9. USI block can be configured to provide one of the
following serial protocols: UART, SPI or High Speed I2C.
This driver allows one to configure USI for desired protocol, which
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index c9a769b8594b..86c76211b3d3 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -585,7 +585,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
u32 rd = 0;
u32 wr = 0;
- if (qspi->base[CHIP_SELECT]) {
+ if (cs >= 0 && qspi->base[CHIP_SELECT]) {
rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
wr = (rd & ~0xff) | (1 << cs);
if (rd == wr)
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index c208efeadd18..0bc7daa7afc8 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
writel_relaxed(0, spicc->base + SPICC_INTREG);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
0, NULL, spicc);
if (ret) {
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index a15de10ee286..753bd313e6fd 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -624,7 +624,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
else
mdata->state = MTK_SPI_IDLE;
- if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+ if (!master->can_dma(master, NULL, trans)) {
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 553b6b9d0222..c6a1bb09be05 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -585,6 +585,12 @@ static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
{
struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ if (atomic_read(&rs->state) & RXDMA)
+ dmaengine_terminate_sync(ctlr->dma_rx);
+ if (atomic_read(&rs->state) & TXDMA)
+ dmaengine_terminate_sync(ctlr->dma_tx);
+ atomic_set(&rs->state, 0);
+ spi_enable_chip(rs, false);
rs->slave_abort = true;
spi_finalize_current_transfer(ctlr);
@@ -654,7 +660,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct resource *mem;
struct device_node *np = pdev->dev.of_node;
- u32 rsd_nsecs;
+ u32 rsd_nsecs, num_cs;
bool slave_mode;
slave_mode = of_property_read_bool(np, "spi-slave");
@@ -764,8 +770,9 @@ static int rockchip_spi_probe(struct platform_device *pdev)
* rk spi0 has two native cs, spi1..5 one cs only
* if num-cs is missing in the dts, default to 1
*/
- if (of_property_read_u16(np, "num-cs", &ctlr->num_chipselect))
- ctlr->num_chipselect = 1;
+ if (of_property_read_u32(np, "num-cs", &num_cs))
+ num_cs = 1;
+ ctlr->num_chipselect = num_cs;
ctlr->use_gpio_descriptors = true;
}
ctlr->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 514337c86d2c..ffdc55f87e82 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -688,7 +688,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- ctrl = spi_alloc_master(dev, sizeof(*qspi));
+ ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
@@ -697,58 +697,46 @@ static int stm32_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->io_base)) {
- ret = PTR_ERR(qspi->io_base);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->io_base))
+ return PTR_ERR(qspi->io_base);
qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->mm_base)) {
- ret = PTR_ERR(qspi->mm_base);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->mm_base))
+ return PTR_ERR(qspi->mm_base);
qspi->mm_size = resource_size(res);
- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
- ret = -EINVAL;
- goto err_master_put;
- }
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+ return -EINVAL;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_master_put;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_master_put;
+ return ret;
}
init_completion(&qspi->data_completion);
init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(qspi->clk)) {
- ret = PTR_ERR(qspi->clk);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
qspi->clk_rate = clk_get_rate(qspi->clk);
- if (!qspi->clk_rate) {
- ret = -EINVAL;
- goto err_master_put;
- }
+ if (!qspi->clk_rate)
+ return -EINVAL;
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err_master_put;
+ return ret;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
@@ -784,7 +772,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
- ret = devm_spi_register_master(dev, ctrl);
+ ret = spi_register_master(ctrl);
if (ret)
goto err_pm_runtime_free;
@@ -806,8 +794,6 @@ err_dma_free:
stm32_qspi_dma_free(qspi);
err_clk_disable:
clk_disable_unprepare(qspi->clk);
-err_master_put:
- spi_master_put(qspi->ctrl);
return ret;
}
@@ -817,6 +803,7 @@ static int stm32_qspi_remove(struct platform_device *pdev)
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(qspi->dev);
+ spi_unregister_master(qspi->ctrl);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 9bd3fd1652f7..7fc24505a72c 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -221,7 +221,6 @@ struct stm32_spi;
* time between frames (if driver has this functionality)
* @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
- * @can_dma: routine to determine if the transfer is eligible for DMA use
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
* @dma_rx_cb: routine to call after DMA RX channel operation is complete
@@ -232,7 +231,7 @@ struct stm32_spi;
* @baud_rate_div_min: minimum baud rate divisor
* @baud_rate_div_max: maximum baud rate divisor
* @has_fifo: boolean to know if fifo is used for driver
- * @has_startbit: boolean to know if start bit is used to start transfer
+ * @flags: compatible-specific SPI controller flags used at registration time
*/
struct stm32_spi_cfg {
const struct stm32_spi_regspec *regs;
@@ -253,6 +252,7 @@ struct stm32_spi_cfg {
unsigned int baud_rate_div_min;
unsigned int baud_rate_div_max;
bool has_fifo;
+ u16 flags;
};
/**
@@ -1722,6 +1722,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
.baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
.baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
.has_fifo = false,
+ .flags = SPI_MASTER_MUST_TX,
};
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
@@ -1854,7 +1855,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- master->flags = SPI_MASTER_MUST_TX;
+ master->flags = spi->cfg->flags;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 342ee8d2c476..cc0da4822231 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
ret);
- goto out_disable_clk;
+ goto out_release_dma;
}
dma_tx_burst = caps.max_burst;
}
@@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(master->dma_rx)) {
if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto out_disable_clk;
+ goto out_release_dma;
}
master->dma_rx = NULL;
dma_rx_burst = INT_MAX;
@@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
ret);
- goto out_disable_clk;
+ goto out_release_dma;
}
dma_rx_burst = caps.max_burst;
}
@@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
- goto out_disable_clk;
+ goto out_release_dma;
return 0;
+out_release_dma:
+ if (!IS_ERR_OR_NULL(master->dma_rx)) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (!IS_ERR_OR_NULL(master->dma_tx)) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
out_disable_clk:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index cfa222c9bd5e..78f31b61a2aa 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -570,6 +570,9 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
if (op->dummy.nbytes) {
tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
memset(tmpbuf, 0xff, op->dummy.nbytes);
reinit_completion(&xqspi->data_completion);
xqspi->txbuf = tmpbuf;
diff --git a/drivers/staging/fbtft/fb_st7789v.c b/drivers/staging/fbtft/fb_st7789v.c
index abe9395a0aef..861a154144e6 100644
--- a/drivers/staging/fbtft/fb_st7789v.c
+++ b/drivers/staging/fbtft/fb_st7789v.c
@@ -144,6 +144,8 @@ static int init_display(struct fbtft_par *par)
{
int rc;
+ par->fbtftops.reset(par);
+
rc = init_tearing_effect_line(par);
if (rc)
return rc;
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 4cdec34e23d2..55677efc0138 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -334,7 +334,10 @@ static int __init fbtft_driver_module_init(void) \
ret = spi_register_driver(&fbtft_driver_spi_driver); \
if (ret < 0) \
return ret; \
- return platform_driver_register(&fbtft_driver_platform_driver); \
+ ret = platform_driver_register(&fbtft_driver_platform_driver); \
+ if (ret < 0) \
+ spi_unregister_driver(&fbtft_driver_spi_driver); \
+ return ret; \
} \
\
static void __exit fbtft_driver_module_exit(void) \
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 6759a6261500..3a2e4582db8e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ rcu_read_lock();
service = handle_to_service(handle);
- if (WARN_ON(!service))
+ if (WARN_ON(!service)) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
user_service = (struct user_service *)service->base.userdata;
instance = user_service->instance;
- if (!instance || instance->closing)
+ if (!instance || instance->closing) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
+
+ /*
+ * Since we hop between different synchronization mechanisms here,
+ * taking an extra service reference keeps the implementation simple.
+ */
+ vchiq_service_get(service);
+ rcu_read_unlock();
vchiq_log_trace(vchiq_arm_log_level,
"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
@@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
bulk_userdata);
if (status != VCHIQ_SUCCESS) {
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return status;
}
}
@@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
if (wait_for_completion_interruptible(&user_service->remove_event)) {
vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_RETRY;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_ERROR;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
header = NULL;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
if (skip_completion)
return VCHIQ_SUCCESS;
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 1ca320885fad..17a6f51d3089 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -158,6 +158,7 @@ void optee_remove_common(struct optee *optee)
optee_unregister_devices();
optee_notif_uninit(optee);
+ teedev_close_context(optee->ctx);
/*
* The two devices have to be unregistered before we can free the
* other resources.
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 20a1b1a3d965..f744ab15bf2c 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
*/
static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
struct tee_shm *shm;
@@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+ shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
@@ -493,14 +494,13 @@ err_bad_param:
}
static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
- struct optee *optee = tee_get_drvdata(ctx->teedev);
-
arg->ret_origin = TEEC_ORIGIN_COMMS;
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
- handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
@@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
}
}
-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
- struct optee_msg_arg *arg)
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+ u32 cmd, struct optee_msg_arg *arg)
{
switch (cmd) {
case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
- handle_ffa_rpc_func_cmd(ctx, arg);
+ handle_ffa_rpc_func_cmd(ctx, optee, arg);
break;
case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
/* Interrupt delivered by now */
@@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
* above.
*/
cond_resched();
- optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
data->data0 = cmd;
data->data1 = 0;
@@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
.data2 = (u32)(shm->sec_world_id >> 32),
.data3 = shm->offset,
};
- struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
- unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
- struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+ struct optee_msg_arg *rpc_arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
return optee_ffa_yielding_call(ctx, &data, rpc_arg);
}
@@ -793,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
const struct ffa_dev_ops *ffa_ops;
unsigned int rpc_arg_count;
+ struct tee_shm_pool *pool;
struct tee_device *teedev;
+ struct tee_context *ctx;
struct optee *optee;
int rc;
@@ -813,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (!optee)
return -ENOMEM;
- optee->pool = optee_ffa_config_dyn_shm();
- if (IS_ERR(optee->pool)) {
- rc = PTR_ERR(optee->pool);
- optee->pool = NULL;
- goto err;
+ pool = optee_ffa_config_dyn_shm();
+ if (IS_ERR(pool)) {
+ rc = PTR_ERR(pool);
+ goto err_free_optee;
}
+ optee->pool = pool;
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
@@ -829,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_free_pool;
}
optee->teedev = teedev;
@@ -837,50 +848,59 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
- rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_rhashtable_free;
}
+ optee->ctx = ctx;
+ rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+ if (rc)
+ goto err_close_ctx;
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
- }
+ if (rc)
+ goto err_unregister_devices;
pr_info("initialized driver\n");
return 0;
-err:
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
+
+err_unregister_devices:
+ optee_unregister_devices();
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_rhashtable_free:
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
tee_device_unregister(optee->teedev);
- if (optee->pool)
- tee_shm_pool_free(optee->pool);
+err_free_pool:
+ tee_shm_pool_free(pool);
+err_free_optee:
kfree(optee);
return rc;
}
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
index a28fa03dcd0e..05212842b0a5 100644
--- a/drivers/tee/optee/notif.c
+++ b/drivers/tee/optee/notif.c
@@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key)
void optee_notif_uninit(struct optee *optee)
{
- kfree(optee->notif.bitmap);
+ bitmap_free(optee->notif.bitmap);
}
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 46f74ab07c7e..92bc47bef95f 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,7 +53,6 @@ struct optee_call_queue {
struct optee_notif {
u_int max_key;
- struct tee_context *ctx;
/* Serializes access to the elements below in this struct */
spinlock_t lock;
struct list_head db;
@@ -134,9 +133,10 @@ struct optee_ops {
/**
* struct optee - main service struct
* @supp_teedev: supplicant device
+ * @teedev: client device
* @ops: internal callbacks for different ways to reach secure
* world
- * @teedev: client device
+ * @ctx: driver internal TEE context
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
@@ -152,6 +152,7 @@ struct optee {
struct tee_device *supp_teedev;
struct tee_device *teedev;
const struct optee_ops *ops;
+ struct tee_context *ctx;
union {
struct optee_smc smc;
struct optee_ffa ffa;
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 449d6a72d289..c517d310249f 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
- /* Check that the memref is covered by the shm object */
- if (p->u.memref.size) {
- size_t o = p->u.memref.shm_offs +
- p->u.memref.size - 1;
-
- rc = tee_shm_get_pa(shm, o, NULL);
- if (rc)
- return rc;
- }
-
return 0;
}
@@ -622,6 +612,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
}
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
@@ -651,7 +642,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc(optee->ctx, sz,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -747,7 +739,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
free_pages_list(call_ctx);
- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -776,7 +768,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1,
+ shm = tee_shm_alloc(optee->ctx, param->a1,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
reg_pair_from_64(&param->a1, &param->a2, pa);
@@ -954,57 +946,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->notif.ctx);
+ optee_smc_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
- struct tee_context *ctx;
int rc;
- ctx = teedev_open(optee->teedev);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- optee->notif.ctx = ctx;
rc = request_threaded_irq(irq, notif_irq_handler,
notif_irq_thread_fn,
0, "optee_notification", optee);
if (rc)
- goto err_close_ctx;
+ return rc;
optee->smc.notif_irq = irq;
return 0;
-
-err_close_ctx:
- teedev_close_context(optee->notif.ctx);
- optee->notif.ctx = NULL;
-
- return rc;
}
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
- if (optee->notif.ctx) {
- optee_smc_stop_async_notif(optee->notif.ctx);
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ optee_smc_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
free_irq(optee->smc.notif_irq, optee);
irq_dispose_mapping(optee->smc.notif_irq);
}
-
- /*
- * The thread normally working with optee->notif.ctx was
- * stopped with free_irq() above.
- *
- * Note we're not using teedev_close_context() or
- * tee_client_close_context() since we have already called
- * tee_device_put() while initializing to avoid a circular
- * reference counting.
- */
- teedev_close_context(optee->notif.ctx);
}
}
@@ -1366,6 +1335,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+ struct tee_context *ctx;
u32 max_notif_value;
u32 sec_caps;
int rc;
@@ -1446,9 +1416,15 @@ static int optee_probe(struct platform_device *pdev)
optee->pool = pool;
platform_set_drvdata(pdev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx)) {
+ rc = PTR_ERR(ctx);
+ goto err_supp_uninit;
+ }
+ optee->ctx = ctx;
rc = optee_notif_init(optee, max_notif_value);
if (rc)
- goto err_supp_uninit;
+ goto err_close_ctx;
if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
unsigned int irq;
@@ -1496,6 +1472,8 @@ err_disable_shm_cache:
optee_unregister_devices();
err_notif_uninit:
optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
err_supp_uninit:
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 72acb1f61849..4f478812cb51 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -404,6 +404,10 @@ static void int3400_notify(acpi_handle handle,
thermal_prop[3] = kasprintf(GFP_KERNEL, "EVENT=%d", therm_event);
thermal_prop[4] = NULL;
kobject_uevent_env(&priv->thermal->device.kobj, KOBJ_CHANGE, thermal_prop);
+ kfree(thermal_prop[0]);
+ kfree(thermal_prop[1]);
+ kfree(thermal_prop[2]);
+ kfree(thermal_prop[3]);
}
static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index a16dd4d5d710..73e68cce292e 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -419,11 +419,12 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
for (i = 0; i < tz->trips; i++) {
enum thermal_trip_type type;
- int temp, hyst;
+ int temp, hyst = 0;
tz->ops->get_trip_type(tz, i, &type);
tz->ops->get_trip_temp(tz, i, &temp);
- tz->ops->get_trip_hyst(tz, i, &hyst);
+ if (tz->ops->get_trip_hyst)
+ tz->ops->get_trip_hyst(tz, i, &hyst);
if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) ||
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0b1808e3a912..fa92f727fdf8 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -439,7 +439,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
modembits |= MDM_RTR;
if (dlci->modem_tx & TIOCM_RI)
modembits |= MDM_IC;
- if (dlci->modem_tx & TIOCM_CD)
+ if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
modembits |= MDM_DV;
return modembits;
}
@@ -448,7 +448,7 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
* gsm_print_packet - display a frame for debug
* @hdr: header to print before decode
* @addr: address EA from the frame
- * @cr: C/R bit from the frame
+ * @cr: C/R bit seen as initiator
* @control: control including PF bit
* @data: following data bytes
* @dlen: length of data
@@ -548,7 +548,7 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
* gsm_send - send a control frame
* @gsm: our GSM mux
* @addr: address for control frame
- * @cr: command/response bit
+ * @cr: command/response bit seen as initiator
* @control: control byte including PF bit
*
* Format up and transmit a control frame. These do not go via the
@@ -563,11 +563,15 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
int len;
u8 cbuf[10];
u8 ibuf[3];
+ int ocr;
+
+ /* toggle C/R coding if not initiator */
+ ocr = cr ^ (gsm->initiator ? 0 : 1);
switch (gsm->encoding) {
case 0:
cbuf[0] = GSM0_SOF;
- cbuf[1] = (addr << 2) | (cr << 1) | EA;
+ cbuf[1] = (addr << 2) | (ocr << 1) | EA;
cbuf[2] = control;
cbuf[3] = EA; /* Length of data = 0 */
cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3);
@@ -577,7 +581,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
case 1:
case 2:
/* Control frame + packing (but not frame stuffing) in mode 1 */
- ibuf[0] = (addr << 2) | (cr << 1) | EA;
+ ibuf[0] = (addr << 2) | (ocr << 1) | EA;
ibuf[1] = control;
ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2);
/* Stuffing may double the size worst case */
@@ -611,7 +615,7 @@ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
{
- gsm_send(gsm, addr, 1, control);
+ gsm_send(gsm, addr, 0, control);
}
/**
@@ -1017,25 +1021,25 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
* @tty: virtual tty bound to the DLCI
* @dlci: DLCI to affect
* @modem: modem bits (full EA)
- * @clen: command length
+ * @slen: number of signal octets
*
* Used when a modem control message or line state inline in adaption
* layer 2 is processed. Sort out the local modem state and throttles
*/
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
- u32 modem, int clen)
+ u32 modem, int slen)
{
int mlines = 0;
u8 brk = 0;
int fc;
- /* The modem status command can either contain one octet (v.24 signals)
- or two octets (v.24 signals + break signals). The length field will
- either be 2 or 3 respectively. This is specified in section
- 5.4.6.3.7 of the 27.010 mux spec. */
+ /* The modem status command can either contain one octet (V.24 signals)
+ * or two octets (V.24 signals + break signals). This is specified in
+ * section 5.4.6.3.7 of the 07.10 mux spec.
+ */
- if (clen == 2)
+ if (slen == 1)
modem = modem & 0x7f;
else {
brk = modem & 0x7f;
@@ -1092,6 +1096,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
+ int slen;
const u8 *dp = data;
struct tty_struct *tty;
@@ -1111,6 +1116,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
return;
dlci = gsm->dlci[addr];
+ slen = len;
while (gsm_read_ea(&modem, *dp++) == 0) {
len--;
if (len == 0)
@@ -1127,7 +1133,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
modem |= (brk & 0x7f);
}
tty = tty_port_tty_get(&dlci->port);
- gsm_process_modem(tty, dlci, modem, clen);
+ gsm_process_modem(tty, dlci, modem, slen);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
@@ -1451,6 +1457,9 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
if (dlci->addr != 0) {
tty_port_tty_hangup(&dlci->port, false);
kfifo_reset(&dlci->fifo);
+ /* Ensure that gsmtty_open() can return. */
+ tty_port_set_initialized(&dlci->port, 0);
+ wake_up_interruptible(&dlci->port.open_wait);
} else
dlci->gsm->dead = true;
/* Unregister gsmtty driver,report gsmtty dev remove uevent for user */
@@ -1514,7 +1523,7 @@ static void gsm_dlci_t1(struct timer_list *t)
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
- gsm_dlci_close(dlci);
+ gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;
@@ -1593,6 +1602,7 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
struct tty_struct *tty;
unsigned int modem = 0;
int len = clen;
+ int slen = 0;
if (debug & 16)
pr_debug("%d bytes for tty\n", len);
@@ -1605,12 +1615,14 @@ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
case 2: /* Asynchronous serial with line state in each frame */
while (gsm_read_ea(&modem, *data++) == 0) {
len--;
+ slen++;
if (len == 0)
return;
}
+ slen++;
tty = tty_port_tty_get(port);
if (tty) {
- gsm_process_modem(tty, dlci, modem, clen);
+ gsm_process_modem(tty, dlci, modem, slen);
tty_kref_put(tty);
}
fallthrough;
@@ -1748,7 +1760,12 @@ static void gsm_dlci_release(struct gsm_dlci *dlci)
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
- tty_hangup(tty);
+ /* We cannot use tty_hangup() because in tty_kref_put() the tty
+ * driver assumes that the hangup queue is free and reuses it to
+ * queue release_one_tty() -> NULL pointer panic in
+ * process_one_work().
+ */
+ tty_vhangup(tty);
tty_port_tty_set(&dlci->port, NULL);
tty_kref_put(tty);
@@ -1800,10 +1817,10 @@ static void gsm_queue(struct gsm_mux *gsm)
goto invalid;
cr = gsm->address & 1; /* C/R bit */
+ cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */
gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
- cr ^= 1 - gsm->initiator; /* Flip so 1 always means command */
dlci = gsm->dlci[address];
switch (gsm->control) {
@@ -3234,9 +3251,9 @@ static void gsmtty_throttle(struct tty_struct *tty)
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
- dlci->modem_tx &= ~TIOCM_DTR;
+ dlci->modem_tx &= ~TIOCM_RTS;
dlci->throttled = true;
- /* Send an MSC with DTR cleared */
+ /* Send an MSC with RTS cleared */
gsmtty_modem_update(dlci, 0);
}
@@ -3246,9 +3263,9 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
if (dlci->state == DLCI_CLOSED)
return;
if (C_CRTSCTS(tty))
- dlci->modem_tx |= TIOCM_DTR;
+ dlci->modem_tx |= TIOCM_RTS;
dlci->throttled = false;
- /* Send an MSC with DTR set */
+ /* Send an MSC with RTS set */
gsmtty_modem_update(dlci, 0);
}
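For the gsm_send() and gsm_queue() changes above: in the 07.10 multiplexer protocol the C/R bit is coded relative to the initiator, so a responder transmits the inverted value and flips it back on receive so that 1 always means command internally. The expression used by the patch can be read as the small helper below, shown purely as an illustration (the helper name is an assumption, not part of the patch).

#include <linux/types.h>

/*
 * Illustration only: map the logical command/response bit (1 = command)
 * to the on-wire C/R coding. An initiator sends the logical value as-is;
 * a responder sends it inverted, mirroring "cr ^ (gsm->initiator ? 0 : 1)".
 */
static inline int gsm_wire_cr(int cr, bool initiator)
{
	return cr ^ (initiator ? 0 : 1);
}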
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 8933ef1f83c0..efc72104c840 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1329,7 +1329,7 @@ handle_newline:
put_tty_queue(c, ldata);
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
return;
}
}
@@ -1561,7 +1561,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
if (read_cnt(ldata)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
}
}
@@ -1926,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return false;
canon_head = smp_load_acquire(&ldata->canon_head);
- n = min(*nr + 1, canon_head - ldata->read_tail);
+ n = min(*nr, canon_head - ldata->read_tail);
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
@@ -1948,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
n += N_TTY_BUF_SIZE;
c = n + found;
- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
- c = min(*nr, c);
+ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- }
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 673cda3d011d..948d0a1c6ae8 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
unsigned long address;
int err;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
if (!dev->irq && (dev->id.sversion == 0xad))
dev->irq = iosapic_serial_irq(dev);
#endif
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
index 025b055363c3..95ff10f25d58 100644
--- a/drivers/tty/serial/8250/8250_pericom.c
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -117,7 +117,7 @@ static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *i
uart.port.private_data = pericom;
uart.port.iotype = UPIO_PORT;
uart.port.uartclk = 921600 * 16;
- uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER;
+ uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
uart.port.set_divisor = pericom_do_set_divisor;
for (i = 0; i < nr && i < maxnr; i++) {
unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 64e7e6c8145f..38d1c0748533 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -734,12 +734,15 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
+ struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0))
msleep(port->rs485.delay_rts_before_send);
+ mutex_lock(&s->efr_lock);
sc16is7xx_handle_tx(port);
+ mutex_unlock(&s->efr_lock);
}
static void sc16is7xx_reconf_rs485(struct uart_port *port)
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 3639bb6dc372..58013698635f 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -599,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa)
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
return -ENXIO;
- vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1);
vsa.console--;
+ vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(vsa.console);
if (ret) {
@@ -845,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty,
return -ENXIO;
arg--;
+ arg = array_index_nospec(arg, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(arg);
console_unlock();
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 8f8405b0d608..5509d3847af4 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -130,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = {
static void ulpi_dev_release(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(to_ulpi_dev(dev));
}
@@ -247,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
ret = ulpi_read_id(ulpi);
- if (ret)
+ if (ret) {
+ of_node_put(ulpi->dev.of_node);
return ret;
+ }
ret = device_register(&ulpi->dev);
- if (ret)
+ if (ret) {
+ put_device(&ulpi->dev);
return ret;
+ }
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
ulpi->id.vendor, ulpi->id.product);
@@ -299,7 +304,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
*/
void ulpi_unregister_interface(struct ulpi *ulpi)
{
- of_node_put(ulpi->dev.of_node);
device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index c2bbf97a79be..d5bc36ca5b1f 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -602,11 +602,14 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return retval;
}
- find_and_link_peer(hub, port1);
-
retval = component_add(&port_dev->dev, &connector_ops);
- if (retval)
+ if (retval) {
dev_warn(&port_dev->dev, "failed to add component\n");
+ device_unregister(&port_dev->dev);
+ return retval;
+ }
+
+ find_and_link_peer(hub, port1);
/*
* Enable runtime pm and hold a refernce that hub_configure()
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 8a63da3ab39d..88c337bf564f 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1418,6 +1418,7 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg);
void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2);
int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode);
#define dwc2_is_device_connected(hsotg) (hsotg->connected)
+#define dwc2_is_device_enabled(hsotg) (hsotg->enabled)
int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg);
int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup);
int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg);
@@ -1454,6 +1455,7 @@ static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg,
int testmode)
{ return 0; }
#define dwc2_is_device_connected(hsotg) (0)
+#define dwc2_is_device_enabled(hsotg) (0)
static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg,
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index 1b39c4776369..d8d6493bc457 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -130,8 +130,10 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
already = dwc2_ovr_bvalid(hsotg, true);
- /* This clear DCTL.SFTDISCON bit */
- dwc2_hsotg_core_connect(hsotg);
+ if (dwc2_is_device_enabled(hsotg)) {
+ /* This clear DCTL.SFTDISCON bit */
+ dwc2_hsotg_core_connect(hsotg);
+ }
} else {
if (dwc2_is_device_mode(hsotg)) {
if (!dwc2_ovr_bvalid(hsotg, false))
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7ff8fc8f79a9..06d0e88ec8af 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -43,6 +43,7 @@
#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -85,8 +86,8 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
static struct gpiod_lookup_table platform_bytcr_gpios = {
.dev_id = "0000:00:16.0",
.table = {
- GPIO_LOOKUP("INT33FC:00", 54, "reset", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("INT33FC:02", 14, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:00", 54, "cs", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:02", 14, "reset", GPIO_ACTIVE_HIGH),
{}
},
};
@@ -119,6 +120,13 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
{}
};
+static const struct property_entry dwc3_pci_intel_byt_properties[] = {
+ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"),
+ PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
+ {}
+};
+
static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("dr_mode", "otg"),
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
@@ -161,6 +169,10 @@ static const struct software_node dwc3_pci_intel_swnode = {
.properties = dwc3_pci_intel_properties,
};
+static const struct software_node dwc3_pci_intel_byt_swnode = {
+ .properties = dwc3_pci_intel_byt_properties,
+};
+
static const struct software_node dwc3_pci_intel_mrfld_swnode = {
.properties = dwc3_pci_mrfld_properties,
};
@@ -344,7 +356,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
- (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ (kernel_ulong_t) &dwc3_pci_intel_byt_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
(kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
@@ -409,6 +421,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index e14ac15e24c3..a6f3a9b38789 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -99,7 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct phy *usb3_phy;
- int ret;
+ int ret = 0;
u32 reg;
usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 520031ba38aa..a0c883f19a41 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1291,6 +1291,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
+ /*
+ * As per data book 4.2.3.2 TRB Control Bit Rules section
+ *
+ * The controller autonomously checks the HWO field of a TRB to determine if the
+ * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+ * is valid before setting the HWO field to '1'. In most systems, this means that
+ * software must update the fourth DWORD of a TRB last.
+ *
+ * However, there is a possibility of CPU re-ordering here, which can
+ * cause the controller to observe the HWO bit set prematurely.
+ * Add a write memory barrier to prevent CPU re-ordering.
+ */
+ wmb();
trb->ctrl |= DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_enq(dep);
@@ -4147,9 +4160,11 @@ static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
+ local_bh_disable();
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_process_event_buf(evt);
spin_unlock_irqrestore(&dwc->lock, flags);
+ local_bh_enable();
return ret;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 16f9e3423c9f..9315313108c9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1988,6 +1988,9 @@ unknown:
if (w_index != 0x5 || (w_value >> 8))
break;
interface = w_value & 0xFF;
+ if (interface >= MAX_CONFIG_INTERFACES ||
+ !os_desc_cfg->interface[interface])
+ break;
buf[6] = w_index;
count = count_ext_prop(os_desc_cfg,
interface);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 25ad1e97a458..1922fd02043c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs)
static void ffs_data_closed(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles,
- ffs->eps_count);
- ffs->epfiles = NULL;
- }
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock,
+ flags);
+
+ if (epfiles)
+ ffs_epfiles_destroy(epfiles,
+ ffs->eps_count);
+
if (ffs->setup_state == FFS_SETUP_PENDING)
__ffs_ep0_stall(ffs);
} else {
@@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
static void ffs_data_clear(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
ffs_closed(ffs);
BUG_ON(ffs->gadget);
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock, flags);
+
+ /*
+ * A race is possible between ffs_func_eps_disable() and
+ * ffs_epfile_release(); keeping a local copy of epfiles
+ * protects us from a use-after-free.
+ */
+ if (epfiles) {
+ ffs_epfiles_destroy(epfiles, ffs->eps_count);
ffs->epfiles = NULL;
}
@@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
static void ffs_func_eps_disable(struct ffs_function *func)
{
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = func->ffs->epfiles;
- unsigned count = func->ffs->eps_count;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ count = func->ffs->eps_count;
+ epfile = func->ffs->epfiles;
+ ep = func->eps;
while (count--) {
/* pending requests get nuked */
if (ep->ep)
@@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func)
static int ffs_func_eps_enable(struct ffs_function *func)
{
- struct ffs_data *ffs = func->ffs;
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = ffs->epfiles;
- unsigned count = ffs->eps_count;
+ struct ffs_data *ffs;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ ffs = func->ffs;
+ ep = func->eps;
+ epfile = ffs->epfiles;
+ count = ffs->eps_count;
while(count--) {
ep->ep->driver_data = ep;
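As an aside (not taken from the patch itself), the f_fs hunks above all apply the same teardown discipline: snapshot and clear the shared epfiles pointer while holding eps_lock, and destroy only the detached copy, while readers such as ffs_func_eps_disable() dereference the pointer only under that same lock. A stripped-down, hypothetical version of the pattern, with invented names:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo;                             /* stand-in for the shared object */

struct foo_ctx {
        spinlock_t       lock;
        struct foo      *obj;           /* only dereferenced under ->lock */
};

static void foo_ctx_clear(struct foo_ctx *c)
{
        struct foo *obj;
        unsigned long flags;

        /* Detach the shared pointer while holding the lock. */
        spin_lock_irqsave(&c->lock, flags);
        obj = c->obj;
        c->obj = NULL;
        spin_unlock_irqrestore(&c->lock, flags);

        /*
         * Readers take the same lock around every use, so once the pointer
         * is detached nobody can still be using the object and freeing the
         * private copy cannot trigger a use-after-free.
         */
        kfree(obj);
}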
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 36fa6ef0581b..097a709549d6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
@@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 431d5a7d737e..00b3f6b3bb31 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params,
rndis_set_cmplt_type *resp;
rndis_resp_t *r;
+ BufLength = le32_to_cpu(buf->InformationBufferLength);
+ BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+ if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+ (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+ return -EINVAL;
+
r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
- BufLength = le32_to_cpu(buf->InformationBufferLength);
- BufOffset = le32_to_cpu(buf->InformationBufferOffset);
-
#ifdef VERBOSE_DEBUG
pr_debug("%s: Length: %d\n", __func__, BufLength);
pr_debug("%s: Offset: %d\n", __func__, BufOffset);
@@ -919,6 +922,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
params->resp_avail = resp_avail;
params->v = v;
INIT_LIST_HEAD(&params->resp_queue);
+ spin_lock_init(&params->resp_lock);
pr_debug("%s: configNr = %d\n", __func__, i);
return params;
@@ -1012,12 +1016,14 @@ void rndis_free_response(struct rndis_params *params, u8 *buf)
{
rndis_resp_t *r, *n;
+ spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (r->buf == buf) {
list_del(&r->list);
kfree(r);
}
}
+ spin_unlock(&params->resp_lock);
}
EXPORT_SYMBOL_GPL(rndis_free_response);
@@ -1027,14 +1033,17 @@ u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
if (!length) return NULL;
+ spin_lock(&params->resp_lock);
list_for_each_entry_safe(r, n, &params->resp_queue, list) {
if (!r->send) {
r->send = 1;
*length = r->length;
+ spin_unlock(&params->resp_lock);
return r->buf;
}
}
+ spin_unlock(&params->resp_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(rndis_get_next_response);
@@ -1051,7 +1060,9 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
r->length = length;
r->send = 0;
+ spin_lock(&params->resp_lock);
list_add_tail(&r->list, &params->resp_queue);
+ spin_unlock(&params->resp_lock);
return r;
}
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index f6167f7fea82..6206b8b7490f 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -174,6 +174,7 @@ typedef struct rndis_params {
void (*resp_avail)(void *v);
void *v;
struct list_head resp_queue;
+ spinlock_t resp_lock;
} rndis_params;
/* RNDIS Message parser and other useless functions */
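As an aside (not taken from the patch itself), the new resp_lock enforces the standard rule for resp_queue: every path that adds to, walks or empties the shared list takes the same spinlock. A self-contained, hypothetical miniature of that arrangement:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct resp {
        struct list_head list;
        u32              length;
};

struct resp_ctx {
        struct list_head queue;         /* shared between contexts */
        spinlock_t       lock;          /* protects ->queue */
};

static int resp_add(struct resp_ctx *ctx, u32 length)
{
        struct resp *r = kzalloc(sizeof(*r), GFP_ATOMIC);

        if (!r)
                return -ENOMEM;
        r->length = length;

        spin_lock(&ctx->lock);
        list_add_tail(&r->list, &ctx->queue);
        spin_unlock(&ctx->lock);
        return 0;
}

static void resp_free_all(struct resp_ctx *ctx)
{
        struct resp *r, *n;

        spin_lock(&ctx->lock);
        list_for_each_entry_safe(r, n, &ctx->queue, list) {
                list_del(&r->list);
                kfree(r);
        }
        spin_unlock(&ctx->lock);
}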
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index c5a2c734234a..d86c3a36441e 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
ret = -EBUSY;
goto out_unlock;
}
- if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
+ if (in != usb_endpoint_dir_in(ep->ep->desc)) {
dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
ret = -EINVAL;
goto out_unlock;
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 57d417a7c3e0..601829a6b4ba 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev,
switch (role) {
case USB_ROLE_NONE:
usb3->connection_state = USB_ROLE_NONE;
+ if (cur_role == USB_ROLE_HOST)
+ device_release_driver(host);
if (usb3->driver)
usb3_disconnect(usb3);
usb3_vbus_out(usb3, false);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 6ce886fb7bfe..2907fad04e2c 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1615,6 +1615,8 @@ static void xudc_getstatus(struct xusb_udc *udc)
break;
case USB_RECIP_ENDPOINT:
epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum >= XUSB_MAX_ENDPOINTS)
+ goto stall;
target_ep = &udc->ep[epnum];
epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
@@ -1682,6 +1684,10 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
case USB_RECIP_ENDPOINT:
if (!udc->setup.wValue) {
endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (endpoint >= XUSB_MAX_ENDPOINTS) {
+ xudc_ep0_stall(udc);
+ return;
+ }
target_ep = &udc->ep[endpoint];
outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
outinbit = outinbit >> 7;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index dc357cabb265..2d378543bc3a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1091,6 +1091,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
bool pending_portevent = false;
+ bool reinit_xhc = false;
if (!hcd->state)
return 0;
@@ -1107,10 +1108,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
- if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
- hibernated = true;
- if (!hibernated) {
+ if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+ reinit_xhc = true;
+
+ if (!reinit_xhc) {
/*
* Some controllers might lose power during suspend, so wait
* for controller not ready bit to clear, just as in xHC init.
@@ -1143,12 +1145,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
- temp = readl(&xhci->op_regs->status);
}
- /* If restore operation fails, re-initialize the HC during resume */
- if ((temp & STS_SRE) || hibernated) {
+ temp = readl(&xhci->op_regs->status);
+ /* re-initialize the HC on Restore Error, or Host Controller Error */
+ if (temp & (STS_SRE | STS_HCE)) {
+ reinit_xhc = true;
+ xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+ }
+
+ if (reinit_xhc) {
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1604,9 +1611,12 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
struct urb_priv *urb_priv;
int num_tds;
- if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
- true, true, __func__) <= 0)
+ if (!urb)
return -EINVAL;
+ ret = xhci_check_args(hcd, urb->dev, urb->ep,
+ true, true, __func__);
+ if (ret <= 0)
+ return ret ? ret : -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
@@ -3323,7 +3333,7 @@ static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
return -EINVAL;
ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
if (ret <= 0)
- return -EINVAL;
+ return ret ? ret : -EINVAL;
if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
" descriptor for ep 0x%x does not support streams\n",
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 507deef1f709..04c4e3fed094 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
+ if (of_property_read_u8(np, "boost-up", &hub->boost_up))
+ hub->boost_up = USB251XB_DEF_BOOST_UP;
+
cproperty_char = of_get_property(np, "manufacturer", NULL);
strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
@@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
* may be as soon as needed.
*/
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
- hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_57 = USB251XB_DEF_BOOST_57;
hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 29f4b87a9e74..2798fca71261 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -81,10 +81,10 @@
#define CH341_QUIRK_SIMULATE_BREAK BIT(1)
static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1a86, 0x5512) },
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
+ { USB_DEVICE(0x2184, 0x0057) },
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x9986, 0x7523) },
{ },
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8a60c0d56863..a27f7efcec6a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port);
static void cp210x_disable_event_mode(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
@@ -68,6 +69,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
+ { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4edebd14ef29..49c08f07c969 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
@@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 755858ca20ba..d1a9564697a4 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1506,6 +1506,9 @@
#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
+#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */
+#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */
+#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */
#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42420bfc983c..e7755d9cfc61 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -198,6 +198,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5821E 0x81d7
#define DELL_PRODUCT_5821E_ESIM 0x81e0
+#define DELL_PRODUCT_5829E_ESIM 0x81e4
+#define DELL_PRODUCT_5829E 0x81e6
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -1063,6 +1065,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E_ESIM),
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E),
+ .driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
+ .driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1273,10 +1279,16 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */
.driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701a, 0xff), /* Telit LE910R1 (RNDIS) */
+ .driver_info = NCTRL(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */
+ .driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9201), /* Telit LE910R1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1649,6 +1661,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 6d27a5b5e3ca..7ffcda94d323 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -761,12 +761,12 @@ static int tps6598x_probe(struct i2c_client *client)
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
- return ret;
+ goto err_clear_mask;
trace_tps6598x_status(status);
ret = tps6598x_read32(tps, TPS_REG_SYSTEM_CONF, &conf);
if (ret < 0)
- return ret;
+ goto err_clear_mask;
/*
* This fwnode has a "compatible" property, but is never populated as a
@@ -855,7 +855,8 @@ err_role_put:
usb_role_switch_put(tps->role_sw);
err_fwnode_put:
fwnode_handle_put(fwnode);
-
+err_clear_mask:
+ tps6598x_write64(tps, TPS_REG_INT_MASK1, 0);
return ret;
}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index d6ca1c7ad513..37f0b4274113 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -629,16 +629,18 @@ err:
return ret;
}
-static int vhost_vsock_stop(struct vhost_vsock *vsock)
+static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
size_t i;
- int ret;
+ int ret = 0;
mutex_lock(&vsock->dev.mutex);
- ret = vhost_dev_check_owner(&vsock->dev);
- if (ret)
- goto err;
+ if (check_owner) {
+ ret = vhost_dev_check_owner(&vsock->dev);
+ if (ret)
+ goto err;
+ }
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
struct vhost_virtqueue *vq = &vsock->vqs[i];
@@ -753,7 +755,12 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
- vhost_vsock_stop(vsock);
+ /* Don't check the owner: we are in the release path, so the vsock
+ * device must be stopped in any case.
+ * vhost_vsock_stop() cannot fail here, so we don't need to check the
+ * return code.
+ */
+ vhost_vsock_stop(vsock, false);
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
@@ -868,7 +875,7 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (start)
return vhost_vsock_start(vsock);
else
- return vhost_vsock_stop(vsock);
+ return vhost_vsock_stop(vsock, true);
case VHOST_GET_FEATURES:
features = VHOST_VSOCK_FEATURES;
if (copy_to_user(argp, &features, sizeof(features)))
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 840d9813b0bc..fcc46380e7c9 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE
help
Low-level framebuffer-based console driver.
+config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ bool "Enable legacy fbcon hardware acceleration code"
+ depends on FRAMEBUFFER_CONSOLE
+ default y if PARISC
+ default n
+ help
+ This option enables the fbcon (framebuffer text-based) hardware
+ acceleration for graphics drivers which were written for the fbdev
+ graphics interface.
+
+ On modern, mainstream machines (like x86-64) or when running a modern
+ Linux distribution, those fbdev drivers usually aren't used, so enabling
+ this option has no effect; it is best left disabled on such machines.
+
+ If you compile this kernel for older machines which still require the
+ fbdev drivers, you may want to say Y.
+
+ If unsure, select n.
+
config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
bool "Map the console to the primary display device"
depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 01fae2c96965..f98e8f298bc1 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -43,6 +43,21 @@ static void update_attr(u8 *dst, u8 *src, int attribute,
}
}
+static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_copyarea area;
+
+ area.sx = sx * vc->vc_font.width;
+ area.sy = sy * vc->vc_font.height;
+ area.dx = dx * vc->vc_font.width;
+ area.dy = dy * vc->vc_font.height;
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -378,6 +393,7 @@ static int bit_update_start(struct fb_info *info)
void fbcon_set_bitops(struct fbcon_ops *ops)
{
+ ops->bmove = bit_bmove;
ops->clear = bit_clear;
ops->putcs = bit_putcs;
ops->clear_margins = bit_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 99ecd9a6d844..2fc1b80a26ad 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -173,6 +173,8 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
@@ -180,8 +182,16 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
+static __inline__ void ywrap_up(struct vc_data *vc, int count);
+static __inline__ void ywrap_down(struct vc_data *vc, int count);
+static __inline__ void ypan_up(struct vc_data *vc, int count);
+static __inline__ void ypan_down(struct vc_data *vc, int count);
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
@@ -1125,6 +1135,14 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->graphics = 0;
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
+ !(info->flags & FBINFO_HWACCEL_DISABLED))
+ p->scrollmode = SCROLL_MOVE;
+ else /* default to something safe */
+ p->scrollmode = SCROLL_REDRAW;
+#endif
+
/*
* ++guenther: console.c:vc_allocate() relies on initializing
* vc_{cols,rows}, but we must not set those if we are only
@@ -1211,13 +1229,14 @@ finished:
* This system is now divided into two levels because of complications
* caused by hardware scrolling. Top level functions:
*
- * fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
+ * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
*
* handles y values in range [0, scr_height-1] that correspond to real
* screen positions. y_wrap shift means that first line of bitmap may be
* anywhere on this display. These functions convert lineoffsets to
* bitmap offsets and deal with the wrap-around case by splitting blits.
*
+ * fbcon_bmove_physical_8() -- These are fast implementations
* fbcon_clear_physical_8() -- of original fbcon_XXX fns.
* fbcon_putc_physical_8() -- (font width != 8) may be added later
*
@@ -1390,6 +1409,224 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
}
}
+static __inline__ void ywrap_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+ if (p->yscroll >= p->vrows) /* Deal with wrap */
+ p->yscroll -= p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ywrap_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) /* Deal with wrap */
+ p->yscroll += p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll += count;
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ ops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll -= p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ p->yscroll -= p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) {
+ ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll += p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+
+ if (p->yscroll < 0) {
+ p->yscroll += p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+{
+ unsigned short *s = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+ unsigned short attr = 1;
+
+ do {
+ c = scr_readw(s);
+ if (attr != (c & 0xff00)) {
+ attr = c & 0xff00;
+ if (s > start) {
+ fbcon_putcs(vc, start, s - start,
+ dy, x);
+ x += s - start;
+ start = s;
+ }
+ }
+ console_conditional_schedule();
+ s++;
+ } while (s < le);
+ if (s > start)
+ fbcon_putcs(vc, start, s - start, dy, x);
+ console_conditional_schedule();
+ dy++;
+ }
+}
+
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+ struct fbcon_display *p, int line, int count, int ycount)
+{
+ int offset = ycount * vc->vc_cols;
+ unsigned short *d = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+ unsigned short *s = d + offset;
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+
+ do {
+ c = scr_readw(s);
+
+ if (c == scr_readw(d)) {
+ if (s > start) {
+ ops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s-start);
+ x += s - start + 1;
+ start = s + 1;
+ } else {
+ x++;
+ start++;
+ }
+ }
+
+ scr_writew(c, d);
+ console_conditional_schedule();
+ s++;
+ d++;
+ } while (s < le);
+ if (s > start)
+ ops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s-start);
+ console_conditional_schedule();
+ if (ycount > 0)
+ line++;
+ else {
+ line--;
+ /* NOTE: We subtract two lines from these pointers */
+ s -= vc->vc_size_row;
+ d -= vc->vc_size_row;
+ }
+ }
+}
+
static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int offset)
{
@@ -1450,6 +1687,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_display *p = &fb_display[vc->vc_num];
+ int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
return true;
@@ -1466,32 +1704,291 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, t, b - t - count,
- count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- (b - count)),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, t, b - t - count,
+ count);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ywrap_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_redraw_move(vc, p, 0, t, count);
+ ypan_up_redraw(vc, t, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b,
+ vc->vc_rows - b, b);
+ } else
+ fbcon_redraw_move(vc, p, t + count, b - t - count, t);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ypan_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_up:
+ fbcon_redraw(vc, p, t, b - t - count,
+ count * vc->vc_cols);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
+ break;
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, b - 1, b - t - count,
- -count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- t),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_down;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+ -count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ywrap_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ypan_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
+ b - count);
+ ypan_down_redraw(vc, t, count);
+ if (t > 0)
+ fbcon_redraw_move(vc, p, count, t, 0);
+ } else
+ fbcon_redraw_move(vc, p, t, b - t - count, t + count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_down:
+ fbcon_redraw(vc, p, b - 1, b - t - count,
+ -count * vc->vc_cols);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
}
return false;
}
+
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+ if (!width || !height)
+ return;
+
+ /* Split blits that cross the physical y_wrap boundary.
+ * The pathological case involves 4 blits, so recursive code is
+ * preferable to an unrolled case.
+ *
+ * Recursive invocations don't need to erase the cursor over and
+ * over again, so we use fbcon_bmove_rec()
+ */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
+ p->vrows - p->yscroll);
+}
+
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ u_int b;
+
+ if (sy < y_break && sy + height > y_break) {
+ b = y_break - sy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+
+ if (dy < y_break && dy + height > y_break) {
+ b = y_break - dy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+}
+
+static void updatescrollmode_accel(struct fbcon_display *p,
+ struct fb_info *info,
+ struct vc_data *vc)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ struct fbcon_ops *ops = info->fbcon_par;
+ int cap = info->flags;
+ u16 t = 0;
+ int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
+ info->fix.xpanstep);
+ int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
+ int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
+ info->var.xres_virtual);
+ int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
+ divides(ypan, vc->vc_font.height) && vyres > yres;
+ int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
+ divides(ywrap, vc->vc_font.height) &&
+ divides(vc->vc_font.height, vyres) &&
+ divides(vc->vc_font.height, yres);
+ int reading_fast = cap & FBINFO_READS_FAST;
+ int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+ int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+
+ if (good_wrap || good_pan) {
+ if (reading_fast || fast_copyarea)
+ p->scrollmode = good_wrap ?
+ SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
+ else
+ p->scrollmode = good_wrap ? SCROLL_REDRAW :
+ SCROLL_PAN_REDRAW;
+ } else {
+ if (reading_fast || (fast_copyarea && !fast_imageblit))
+ p->scrollmode = SCROLL_MOVE;
+ else
+ p->scrollmode = SCROLL_REDRAW;
+ }
+#endif
+}
+
static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
@@ -1507,6 +2004,9 @@ static void updatescrollmode(struct fbcon_display *p,
p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
if ((yres % fh) && (vyres % fh < yres % fh))
p->vrows--;
+
+ /* update scrollmode in case hardware acceleration is used */
+ updatescrollmode_accel(p, info, vc);
}
#define PITCH(w) (((w) + 7) >> 3)
@@ -1664,7 +2164,21 @@ static int fbcon_switch(struct vc_data *vc)
updatescrollmode(p, info, vc);
- scrollback_phys_max = 0;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_WRAP_MOVE:
+ scrollback_phys_max = p->vrows - vc->vc_rows;
+ break;
+ case SCROLL_PAN_MOVE:
+ case SCROLL_PAN_REDRAW:
+ scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
+ if (scrollback_phys_max < 0)
+ scrollback_phys_max = 0;
+ break;
+ default:
+ scrollback_phys_max = 0;
+ break;
+ }
+
scrollback_max = 0;
scrollback_current = 0;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index a00603b4451a..969d41ecede5 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -29,6 +29,9 @@ struct fbcon_display {
/* Filled in by the low-level console driver */
const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ u_short scrollmode; /* Scroll Method, use fb_scrollmode() */
+#endif
u_short inverse; /* != 0 text black on white as default */
short yscroll; /* Hardware scrolling */
int vrows; /* number of virtual rows */
@@ -51,6 +54,8 @@ struct fbcon_display {
};
struct fbcon_ops {
+ void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
@@ -149,6 +154,73 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
+ /*
+ * Scroll Method
+ */
+
+/* There are several methods fbcon can use to move text around the screen:
+ *
+ * Operation Pan Wrap
+ *---------------------------------------------
+ * SCROLL_MOVE copyarea No No
+ * SCROLL_PAN_MOVE copyarea Yes No
+ * SCROLL_WRAP_MOVE copyarea No Yes
+ * SCROLL_REDRAW imageblit No No
+ * SCROLL_PAN_REDRAW imageblit Yes No
+ * SCROLL_WRAP_REDRAW imageblit No Yes
+ *
+ * (SCROLL_WRAP_REDRAW is not implemented yet)
+ *
+ * In general, fbcon will choose the best scrolling
+ * method based on the rule below:
+ *
+ * Pan/Wrap > accel imageblit > accel copyarea >
+ * soft imageblit > (soft copyarea)
+ *
+ * Exception to the rule: Pan + accel copyarea is
+ * preferred over Pan + accel imageblit.
+ *
+ * The above is typical for PCI/AGP cards. Unless
+ * overridden, fbcon will never use soft copyarea.
+ *
+ * If you need to override the above rule, set the
+ * appropriate flags in fb_info->flags. For example,
+ * to prefer copyarea over imageblit, set
+ * FBINFO_READS_FAST.
+ *
+ * Other notes:
+ * + use the hardware engine to move the text
+ * (hw-accelerated copyarea() and fillrect())
+ * + use hardware-supported panning on a large virtual screen
+ * + amifb can not only pan, but also wrap the display by N lines
+ * (i.e. visible line i = physical line (i+N) % yres).
+ * + read what's already rendered on the screen and
+ * write it in a different place (this is cfb_copyarea())
+ * + re-render the text to the screen
+ *
+ * Whether to use wrapping or panning can only be figured out at
+ * runtime (when we know whether our font height is a multiple
+ * of the pan/wrap step)
+ *
+ */
+
+#define SCROLL_MOVE 0x001
+#define SCROLL_PAN_MOVE 0x002
+#define SCROLL_WRAP_MOVE 0x003
+#define SCROLL_REDRAW 0x004
+#define SCROLL_PAN_REDRAW 0x005
+
+static inline u_short fb_scrollmode(struct fbcon_display *fb)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ return fb->scrollmode;
+#else
+ /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */
+ return SCROLL_REDRAW;
+#endif
+}
+
+
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
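As an aside (not taken from the patch itself), the selection rule documented above keys off capability flags that the underlying fbdev driver sets in fb_info->flags; with CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION enabled, updatescrollmode_accel() maps them to one of the SCROLL_* methods. A hedged sketch of the driver side — the function name is invented, the FBINFO_* flags are the real ones from <linux/fb.h>:

#include <linux/fb.h>

static void examplefb_advertise_accel(struct fb_info *info)
{
        /*
         * A device with a hardware blitter plus hardware y-panning: with
         * legacy fbcon acceleration enabled, the rule above can then pick
         * SCROLL_PAN_MOVE instead of falling back to SCROLL_REDRAW.
         */
        info->flags = FBINFO_DEFAULT |
                      FBINFO_HWACCEL_FILLRECT |
                      FBINFO_HWACCEL_COPYAREA |
                      FBINFO_HWACCEL_YPAN;
}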
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index ffa78936eaab..2789ace79634 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -59,12 +59,31 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+
+ area.sx = sy * vc->vc_font.height;
+ area.sy = vyres - ((sx + width) * vc->vc_font.width);
+ area.dx = dy * vc->vc_font.height;
+ area.dy = vyres - ((dx + width) * vc->vc_font.width);
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
@@ -121,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -210,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -368,7 +387,7 @@ static int ccw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
u32 yoffset;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
int err;
yoffset = (vyres - info->var.yres) - ops->var.xoffset;
@@ -383,6 +402,7 @@ static int ccw_update_start(struct fb_info *info)
void fbcon_rotate_ccw(struct fbcon_ops *ops)
{
+ ops->bmove = ccw_bmove;
ops->clear = ccw_clear;
ops->putcs = ccw_putcs;
ops->clear_margins = ccw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 92e5b7fb51ee..86a254c1b2b7 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -44,12 +44,31 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sx = vxres - ((sy + height) * vc->vc_font.height);
+ area.sy = sx * vc->vc_font.width;
+ area.dx = vxres - ((dy + height) * vc->vc_font.height);
+ area.dy = dx * vc->vc_font.width;
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
@@ -106,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -193,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -350,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
static int cw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
u32 xoffset;
int err;
@@ -366,6 +385,7 @@ static int cw_update_start(struct fb_info *info)
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
+ ops->bmove = cw_bmove;
ops->clear = cw_clear;
ops->putcs = cw_putcs;
ops->clear_margins = cw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
index b528b2e54283..01cbe303b8a2 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.h
+++ b/drivers/video/fbdev/core/fbcon_rotate.h
@@ -11,6 +11,15 @@
#ifndef _FBCON_ROTATE_H
#define _FBCON_ROTATE_H
+#define GETVYRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \
+ (i)->var.yres : (i)->var.yres_virtual; })
+
+#define GETVXRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
+ (i)->var.xres : (i)->var.xres_virtual; })
+
+
static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat)
{
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 09619bd8e021..23bc045769d0 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -44,13 +44,33 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sy = vyres - ((sy + height) * vc->vc_font.height);
+ area.sx = vxres - ((sx + width) * vc->vc_font.width);
+ area.dy = vyres - ((dy + height) * vc->vc_font.height);
+ area.dx = vxres - ((dx + width) * vc->vc_font.width);
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
@@ -142,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -239,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -390,8 +410,8 @@ static int ud_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
int err;
xoffset = vxres - info->var.xres - ops->var.xoffset;
@@ -409,6 +429,7 @@ static int ud_update_start(struct fb_info *info)
void fbcon_rotate_ud(struct fbcon_ops *ops)
{
+ ops->bmove = ud_bmove;
ops->clear = ud_clear;
ops->putcs = ud_putcs;
ops->clear_margins = ud_clear_margins;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 0fa7ede94fa6..13083ad8d751 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1160,6 +1160,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
ret = fbcon_set_con2fb_map_ioctl(argp);
break;
case FBIOBLANK:
+ if (arg > FB_BLANK_POWERDOWN)
+ return -EINVAL;
console_lock();
lock_fb_info(info);
ret = fb_blank(info, arg);
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 72af95053bcb..2768eff247ba 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -16,6 +16,21 @@
#include <asm/types.h>
#include "fbcon.h"
+static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_tilearea area;
+
+ area.sx = sx;
+ area.sy = sy;
+ area.dx = dx;
+ area.dy = dy;
+ area.height = height;
+ area.width = width;
+
+ info->tileops->fb_tilecopy(info, &area);
+}
+
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -118,6 +133,7 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
+ ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 0fe922f726e9..bcacfb6934fa 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
}
/**
- * xxxfb_copyarea - OBSOLETE function.
+ * xxxfb_copyarea - REQUIRED function. Can use the generic routines on
+ * non-accelerated, packed-pixel hardware.
* Copies one area of the screen to another area.
- * Will be deleted in a future version
*
* @info: frame buffer structure that represents a single frame buffer
* @area: Structure providing the data to copy the framebuffer contents
* from one region to another.
*
- * This drawing operation copied a rectangular area from one area of the
+ * This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
@@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = {
.fb_setcolreg = xxxfb_setcolreg,
.fb_blank = xxxfb_blank,
.fb_pan_display = xxxfb_pan_display,
- .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
- .fb_copyarea = xxxfb_copyarea, /* Obsolete */
- .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
+ .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
+ .fb_copyarea = xxxfb_copyarea, /* Needed !!! */
+ .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
.fb_cursor = xxxfb_cursor, /* Optional !!! */
.fb_sync = xxxfb_sync,
.fb_ioctl = xxxfb_ioctl,
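As an aside (not taken from the patch itself), the "generic routines" the updated comment refers to are the packed-pixel software helpers cfb_fillrect(), cfb_copyarea() and cfb_imageblit(); a driver for unaccelerated hardware can wire them straight into its fb_ops, roughly like this (driver name invented):

#include <linux/fb.h>
#include <linux/module.h>

static const struct fb_ops examplefb_ops = {
        .owner          = THIS_MODULE,
        .fb_fillrect    = cfb_fillrect,         /* generic software fill */
        .fb_copyarea    = cfb_copyarea,         /* generic software copy */
        .fb_imageblit   = cfb_imageblit,        /* generic software blit */
};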
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 2c890f4f2cbc..72d4e3f193af 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -264,7 +264,7 @@ struct xen_device_domain_owner {
};
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
-static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
+static LIST_HEAD(dev_domain_list);
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 6aab046c98e2..79df61fe0e59 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -96,12 +96,8 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
dentry, dentry, from_kuid(&init_user_ns, uid),
any);
ret = NULL;
-
- if (d_inode(dentry))
- ret = v9fs_fid_find_inode(d_inode(dentry), uid);
-
/* we'll recheck under lock if there's anything to look in */
- if (!ret && dentry->d_fsdata) {
+ if (dentry->d_fsdata) {
struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata;
spin_lock(&dentry->d_lock);
@@ -113,6 +109,9 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
}
}
spin_unlock(&dentry->d_lock);
+ } else {
+ if (dentry->d_inode)
+ ret = v9fs_fid_find_inode(dentry->d_inode, uid);
}
return ret;
diff --git a/fs/Kconfig b/fs/Kconfig
index 7a2b11c0b803..6c7dc1387beb 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -369,8 +369,8 @@ source "fs/ksmbd/Kconfig"
config SMBFS_COMMON
tristate
- default y if CIFS=y
- default m if CIFS=m
+ default y if CIFS=y || SMB_SERVER=y
+ default m if CIFS=m || SMB_SERVER=m
source "fs/coda/Kconfig"
source "fs/afs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dab324aea08f..208a74e0b00e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -96,7 +96,7 @@ obj-$(CONFIG_EXPORTFS) += exportfs/
obj-$(CONFIG_NFSD) += nfsd/
obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
-obj-$(CONFIG_UNICODE) += unicode/
+obj-y += unicode/
obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/
obj-$(CONFIG_CIFS) += cifs/
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 605017eb9349..d61543fbd652 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1117,7 +1117,7 @@ out_free_interp:
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
- if (alignment > ELF_MIN_ALIGN) {
+ if (interpreter || alignment > ELF_MIN_ALIGN) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
@@ -1135,14 +1135,25 @@ out_free_interp:
* is then page aligned.
*/
load_bias = ELF_PAGESTART(load_bias - vaddr);
- }
- /*
- * Calculate the entire size of the ELF mapping (total_size).
- * (Note that load_addr_set is set to true later once the
- * initial mapping is performed.)
- */
- if (!load_addr_set) {
+ /*
+ * Calculate the entire size of the ELF mapping
+ * (total_size), used for the initial mapping,
+ * due to load_addr_set which is set to true later
+ * once the initial mapping is performed.
+ *
+ * Note that this is only sensible when the LOAD
+ * segments are contiguous (or overlapping). If
+ * used for LOADs that are far apart, this would
+ * cause the holes between LOADs to be mapped,
+ * running the risk of having the mapping fail,
+ * as it would be larger than the ELF file itself.
+ *
+ * As a result, only ET_DYN does this, since
+ * some ET_EXEC (e.g. ia64) may have large virtual
+ * memory holes between LOADs.
+ *
+ */
total_size = total_mapping_size(elf_phdata,
elf_ex->e_phnum);
if (!total_size) {
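
The comment added above notes that sizing the initial mapping only makes sense when the PT_LOAD segments are contiguous, because the total size spans from the first loadable address to the end of the last one. A simplified user-space sketch of that span calculation, assuming 4 KiB pages and illustrative program headers (it mirrors the intent of total_mapping_size(), not the kernel helper itself):

#include <elf.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~((uint64_t)4095))	/* assume 4 KiB pages for the sketch */

/* span from the page containing the first PT_LOAD vaddr to the end of the last one */
static uint64_t total_load_span(const Elf64_Phdr *phdr, size_t nr)
{
	const Elf64_Phdr *first = NULL, *last = NULL;
	size_t i;

	for (i = 0; i < nr; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;
		if (!first)
			first = &phdr[i];
		last = &phdr[i];
	}
	if (!first)
		return 0;
	return last->p_vaddr + last->p_memsz - (first->p_vaddr & PAGE_MASK);
}

int main(void)
{
	Elf64_Phdr loads[2] = {
		{ .p_type = PT_LOAD, .p_vaddr = 0x1000, .p_memsz = 0x2000 },
		{ .p_type = PT_LOAD, .p_vaddr = 0x4000, .p_memsz = 0x1000 },
	};

	/* prints 0x4000: holes between far-apart LOADs would inflate this */
	printf("total span: %#llx\n",
	       (unsigned long long)total_load_span(loads, 2));
	return 0;
}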
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index c07f35719ee3..e1eae7ea823a 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -817,20 +817,16 @@ static struct file_system_type bm_fs_type = {
};
MODULE_ALIAS_FS("binfmt_misc");
-static struct ctl_table_header *binfmt_misc_header;
-
static int __init init_misc_binfmt(void)
{
int err = register_filesystem(&bm_fs_type);
if (!err)
insert_binfmt(&misc_format);
- binfmt_misc_header = register_sysctl_mount_point("fs/binfmt_misc");
- return 0;
+ return err;
}
static void __exit exit_misc_binfmt(void)
{
- unregister_sysctl_table(binfmt_misc_header);
unregister_binfmt(&misc_format);
unregister_filesystem(&bm_fs_type);
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1db24e6d6d90..8202ad6aa131 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -124,7 +124,16 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
{
if (refcount_dec_and_test(&cache->refs)) {
WARN_ON(cache->pinned > 0);
- WARN_ON(cache->reserved > 0);
+ /*
+ * If there was a failure to cleanup a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on reserved > 0 in that
+ * case.
+ */
+ if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
+ WARN_ON(cache->reserved > 0);
/*
* A block_group shouldn't be on the discard_list anymore.
@@ -2544,6 +2553,19 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
int ret;
bool dirty_bg_running;
+ /*
+ * This can only happen when we are doing read-only scrub on read-only
+ * mount.
+ * In that case we should not start a new transaction on read-only fs.
+ * Thus here we skip all chunk allocations.
+ */
+ if (sb_rdonly(fs_info->sb)) {
+ mutex_lock(&fs_info->ro_block_group_mutex);
+ ret = inc_block_group_ro(cache, 0);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+ return ret;
+ }
+
do {
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
@@ -3974,9 +3996,22 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
* important and indicates a real bug if this happens.
*/
if (WARN_ON(space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
space_info->bytes_may_use > 0))
btrfs_dump_space_info(info, space_info, 0, 0);
+
+ /*
+ * If there was a failure to cleanup a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on bytes_reserved > 0 in
+ * that case.
+ */
+ if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
+ if (WARN_ON(space_info->bytes_reserved > 0))
+ btrfs_dump_space_info(info, space_info, 0, 0);
+ }
+
WARN_ON(space_info->reclaim_size > 0);
list_del(&space_info->list);
btrfs_sysfs_remove_space_info(space_info);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b4a9b1c58d22..947f04789389 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -145,6 +145,9 @@ enum {
BTRFS_FS_STATE_DUMMY_FS_INFO,
BTRFS_FS_STATE_NO_CSUMS,
+
+ /* Indicates there was an error cleaning up a log tree. */
+ BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
};
#define BTRFS_BACKREF_REV_MAX 256
@@ -3288,7 +3291,7 @@ void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
int __init btrfs_auto_defrag_init(void);
void __cold btrfs_auto_defrag_exit(void);
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode);
+ struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
@@ -3593,6 +3596,9 @@ do { \
#define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
&(fs_info)->fs_state)))
+#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
+ (unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
+ &(fs_info)->fs_state)))
__printf(5, 6)
__cold
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 5a36add21305..c28ceddefae4 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -261,6 +261,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
+ set_bit(EXTENT_FLAG_MERGED, &em->flags);
rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
@@ -278,6 +279,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
RB_CLEAR_NODE(&merge->rb_node);
em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
+ set_bit(EXTENT_FLAG_MERGED, &em->flags);
free_extent_map(merge);
}
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 8e217337dff9..d2fa32ffe304 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -25,6 +25,8 @@ enum {
EXTENT_FLAG_FILLING,
/* filesystem extent mapping type */
EXTENT_FLAG_FS_MAPPING,
+ /* This em is merged from two or more physically adjacent ems */
+ EXTENT_FLAG_MERGED,
};
struct extent_map {
@@ -40,6 +42,12 @@ struct extent_map {
u64 ram_bytes;
u64 block_start;
u64 block_len;
+
+ /*
+ * Generation of the extent map, for merged em it's the highest
+ * generation of all merged ems.
+ * For non-merged extents, it's from btrfs_file_extent_item::generation.
+ */
u64 generation;
unsigned long flags;
/* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 11204dbbe053..a0179cc62913 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -50,11 +50,14 @@ struct inode_defrag {
/* root objectid */
u64 root;
- /* last offset we were able to defrag */
- u64 last_offset;
-
- /* if we've wrapped around back to zero once already */
- int cycled;
+ /*
+ * The extent size threshold for autodefrag.
+ *
+ * This value is different for compressed/non-compressed extents,
+ * thus it needs to be passed in from the higher layer
+ * (i.e. inode_should_defrag()).

+ */
+ u32 extent_thresh;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
@@ -107,8 +110,8 @@ static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
*/
if (defrag->transid < entry->transid)
entry->transid = defrag->transid;
- if (defrag->last_offset > entry->last_offset)
- entry->last_offset = defrag->last_offset;
+ entry->extent_thresh = min(defrag->extent_thresh,
+ entry->extent_thresh);
return -EEXIST;
}
}
@@ -134,7 +137,7 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
* enabled
*/
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct btrfs_inode *inode)
+ struct btrfs_inode *inode, u32 extent_thresh)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -160,6 +163,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
defrag->root = root->root_key.objectid;
+ defrag->extent_thresh = extent_thresh;
spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
@@ -179,34 +183,6 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
}
/*
- * Requeue the defrag object. If there is a defrag object that points to
- * the same inode in the tree, we will merge them together (by
- * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
- */
-static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
- struct inode_defrag *defrag)
-{
- struct btrfs_fs_info *fs_info = inode->root->fs_info;
- int ret;
-
- if (!__need_auto_defrag(fs_info))
- goto out;
-
- /*
- * Here we don't check the IN_DEFRAG flag, because we need merge
- * them together.
- */
- spin_lock(&fs_info->defrag_inodes_lock);
- ret = __btrfs_add_inode_defrag(inode, defrag);
- spin_unlock(&fs_info->defrag_inodes_lock);
- if (ret)
- goto out;
- return;
-out:
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
-}
-
-/*
* pick the defragable inode that we want, if it doesn't exist, we will get
* the next one.
*/
@@ -278,8 +254,14 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
struct btrfs_root *inode_root;
struct inode *inode;
struct btrfs_ioctl_defrag_range_args range;
- int num_defrag;
- int ret;
+ int ret = 0;
+ u64 cur = 0;
+
+again:
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state))
+ goto cleanup;
+ if (!__need_auto_defrag(fs_info))
+ goto cleanup;
/* get the inode */
inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
@@ -295,39 +277,30 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
goto cleanup;
}
+ if (cur >= i_size_read(inode)) {
+ iput(inode);
+ goto cleanup;
+ }
+
/* do a chunk of defrag */
clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
memset(&range, 0, sizeof(range));
range.len = (u64)-1;
- range.start = defrag->last_offset;
+ range.start = cur;
+ range.extent_thresh = defrag->extent_thresh;
sb_start_write(fs_info->sb);
- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+ ret = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
BTRFS_DEFRAG_BATCH);
sb_end_write(fs_info->sb);
- /*
- * if we filled the whole defrag batch, there
- * must be more work to do. Queue this defrag
- * again
- */
- if (num_defrag == BTRFS_DEFRAG_BATCH) {
- defrag->last_offset = range.start;
- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
- } else if (defrag->last_offset && !defrag->cycled) {
- /*
- * we didn't fill our defrag batch, but
- * we didn't start at zero. Make sure we loop
- * around to the start of the file.
- */
- defrag->last_offset = 0;
- defrag->cycled = 1;
- btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
- } else {
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
-
iput(inode);
- return 0;
+
+ if (ret < 0)
+ goto cleanup;
+
+ cur = max(cur + fs_info->sectorsize, range.start);
+ goto again;
+
cleanup:
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
return ret;
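
The rework above drops the requeue/cycled bookkeeping and instead keeps a local cursor that only ever moves forward, stopping once it passes i_size or an error occurs. A hedged user-space model of that control flow; run_one_chunk() and CHUNK are illustrative names, not btrfs APIs:

#include <stdio.h>

#define CHUNK 256	/* pretend each pass defrags up to 256 "units" */

/* pretend defrag pass: returns the new cursor position, or -1 on error */
static long run_one_chunk(long start, long isize)
{
	long end = start + CHUNK;
	return end < isize ? end : isize;
}

static int run_defrag(long isize)
{
	long cur = 0;

	while (cur < isize) {		/* replaces the old requeue/cycled dance */
		long next = run_one_chunk(cur, isize);
		if (next < 0)
			return -1;	/* bail out; the record is freed by the caller */
		cur = next > cur ? next : cur + 1;	/* always make forward progress */
		printf("scanned up to %ld\n", cur);
	}
	return 0;
}

int main(void)
{
	return run_defrag(1000);
}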
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3b2403b6127f..76e530f76e3c 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -560,12 +560,12 @@ static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
}
static inline void inode_should_defrag(struct btrfs_inode *inode,
- u64 start, u64 end, u64 num_bytes, u64 small_write)
+ u64 start, u64 end, u64 num_bytes, u32 small_write)
{
/* If this is a small write inside eof, kick off a defrag */
if (num_bytes < small_write &&
(start > 0 || end + 1 < inode->disk_i_size))
- btrfs_add_inode_defrag(NULL, inode);
+ btrfs_add_inode_defrag(NULL, inode, small_write);
}
/*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index d8af6620a941..8d47ec5fc4f4 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -805,10 +805,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto fail;
}
- spin_lock(&fs_info->trans_lock);
- list_add(&pending_snapshot->list,
- &trans->transaction->pending_snapshots);
- spin_unlock(&fs_info->trans_lock);
+ trans->pending_snapshot = pending_snapshot;
ret = btrfs_commit_transaction(trans);
if (ret)
@@ -1015,8 +1012,155 @@ out:
return ret;
}
+/*
+ * Defrag specific helper to get an extent map.
+ *
+ * Differences between this and btrfs_get_extent() are:
+ *
+ * - No extent_map will be added to inode->extent_tree
+ * To reduce memory usage in the long run.
+ *
+ * - Extra optimization to skip file extents older than @newer_than
+ * By using btrfs_search_forward() we can skip entire file ranges that
+ * have extents created in past transactions, because btrfs_search_forward()
+ * will not visit leaves and nodes with a generation smaller than given
+ * minimal generation threshold (@newer_than).
+ *
+ * Return valid em if we find a file extent matching the requirement.
+ * Return NULL if we can not find a file extent matching the requirement.
+ *
+ * Return ERR_PTR() for error.
+ */
+static struct extent_map *defrag_get_extent(struct btrfs_inode *inode,
+ u64 start, u64 newer_than)
+{
+ struct btrfs_root *root = inode->root;
+ struct btrfs_file_extent_item *fi;
+ struct btrfs_path path = { 0 };
+ struct extent_map *em;
+ struct btrfs_key key;
+ u64 ino = btrfs_ino(inode);
+ int ret;
+
+ em = alloc_extent_map();
+ if (!em) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ key.objectid = ino;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ if (newer_than) {
+ ret = btrfs_search_forward(root, &key, &path, newer_than);
+ if (ret < 0)
+ goto err;
+ /* Can't find anything newer */
+ if (ret > 0)
+ goto not_found;
+ } else {
+ ret = btrfs_search_slot(NULL, root, &key, &path, 0, 0);
+ if (ret < 0)
+ goto err;
+ }
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0])) {
+ /*
+ * If btrfs_search_slot() makes the path point beyond nritems,
+ * we should not have an empty leaf, as this inode must at
+ * least have its INODE_ITEM.
+ */
+ ASSERT(btrfs_header_nritems(path.nodes[0]));
+ path.slots[0] = btrfs_header_nritems(path.nodes[0]) - 1;
+ }
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ /* Perfect match, no need to go one slot back */
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY &&
+ key.offset == start)
+ goto iterate;
+
+ /* We didn't find a perfect match, needs to go one slot back */
+ if (path.slots[0] > 0) {
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+ if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
+ path.slots[0]--;
+ }
+
+iterate:
+ /* Iterate through the path to find a file extent covering @start */
+ while (true) {
+ u64 extent_end;
+
+ if (path.slots[0] >= btrfs_header_nritems(path.nodes[0]))
+ goto next;
+
+ btrfs_item_key_to_cpu(path.nodes[0], &key, path.slots[0]);
+
+ /*
+ * We may go one slot back to INODE_REF/XATTR item, then
+ * need to go forward until we reach an EXTENT_DATA.
+ * But we should still have the correct ino as key.objectid.
+ */
+ if (WARN_ON(key.objectid < ino) || key.type < BTRFS_EXTENT_DATA_KEY)
+ goto next;
+
+ /* It's beyond our target range, definitely no extent found */
+ if (key.objectid > ino || key.type > BTRFS_EXTENT_DATA_KEY)
+ goto not_found;
+
+ /*
+ * | |<- File extent ->|
+ * \- start
+ *
+ * This means there is a hole between start and key.offset.
+ */
+ if (key.offset > start) {
+ em->start = start;
+ em->orig_start = start;
+ em->block_start = EXTENT_MAP_HOLE;
+ em->len = key.offset - start;
+ break;
+ }
+
+ fi = btrfs_item_ptr(path.nodes[0], path.slots[0],
+ struct btrfs_file_extent_item);
+ extent_end = btrfs_file_extent_end(&path);
+
+ /*
+ * |<- file extent ->| |
+ * \- start
+ *
+ * We haven't reached start, search next slot.
+ */
+ if (extent_end <= start)
+ goto next;
+
+ /* Now this extent covers @start, convert it to em */
+ btrfs_extent_item_to_extent_map(inode, &path, fi, false, em);
+ break;
+next:
+ ret = btrfs_next_item(root, &path);
+ if (ret < 0)
+ goto err;
+ if (ret > 0)
+ goto not_found;
+ }
+ btrfs_release_path(&path);
+ return em;
+
+not_found:
+ btrfs_release_path(&path);
+ free_extent_map(em);
+ return NULL;
+
+err:
+ btrfs_release_path(&path);
+ free_extent_map(em);
+ return ERR_PTR(ret);
+}
+
static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
- bool locked)
+ u64 newer_than, bool locked)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -1031,6 +1175,20 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
em = lookup_extent_mapping(em_tree, start, sectorsize);
read_unlock(&em_tree->lock);
+ /*
+ * We can get a merged extent, in that case, we need to re-search
+ * tree to get the original em for defrag.
+ *
+ * If @newer_than is 0 or em::generation < newer_than, we can trust
+ * this em, as either we don't care about the generation, or the
+ * merged extent map will be rejected anyway.
+ */
+ if (em && test_bit(EXTENT_FLAG_MERGED, &em->flags) &&
+ newer_than && em->generation >= newer_than) {
+ free_extent_map(em);
+ em = NULL;
+ }
+
if (!em) {
struct extent_state *cached = NULL;
u64 end = start + sectorsize - 1;
@@ -1038,7 +1196,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
/* get the big lock and read metadata off disk */
if (!locked)
lock_extent_bits(io_tree, start, end, &cached);
- em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, sectorsize);
+ em = defrag_get_extent(BTRFS_I(inode), start, newer_than);
if (!locked)
unlock_extent_cached(io_tree, start, end, &cached);
@@ -1049,23 +1207,42 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start,
return em;
}
+static u32 get_extent_max_capacity(const struct extent_map *em)
+{
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+ return BTRFS_MAX_COMPRESSED;
+ return BTRFS_MAX_EXTENT_SIZE;
+}
+
static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
bool locked)
{
struct extent_map *next;
- bool ret = true;
+ bool ret = false;
/* this is the last extent */
if (em->start + em->len >= i_size_read(inode))
return false;
- next = defrag_lookup_extent(inode, em->start + em->len, locked);
+ /*
+ * We want to check if the next extent can be merged with the current
+ * one, which can be an extent created in a past generation, so we pass
+ * a minimum generation of 0 to defrag_lookup_extent().
+ */
+ next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+ /* No more em or hole */
if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
- ret = false;
- else if ((em->block_start + em->block_len == next->block_start) &&
- (em->block_len > SZ_128K && next->block_len > SZ_128K))
- ret = false;
-
+ goto out;
+ if (test_bit(EXTENT_FLAG_PREALLOC, &next->flags))
+ goto out;
+ /*
+ * If the next extent is at its max capacity, defragging the current extent
+ * makes no sense, as the total number of extents won't change.
+ */
+ if (next->len >= get_extent_max_capacity(em))
+ goto out;
+ ret = true;
+out:
free_extent_map(next);
return ret;
}
@@ -1189,8 +1366,10 @@ struct defrag_target_range {
static int defrag_collect_targets(struct btrfs_inode *inode,
u64 start, u64 len, u32 extent_thresh,
u64 newer_than, bool do_compress,
- bool locked, struct list_head *target_list)
+ bool locked, struct list_head *target_list,
+ u64 *last_scanned_ret)
{
+ bool last_is_target = false;
u64 cur = start;
int ret = 0;
@@ -1200,7 +1379,9 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
bool next_mergeable = true;
u64 range_len;
- em = defrag_lookup_extent(&inode->vfs_inode, cur, locked);
+ last_is_target = false;
+ em = defrag_lookup_extent(&inode->vfs_inode, cur,
+ newer_than, locked);
if (!em)
break;
@@ -1213,6 +1394,10 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
if (em->generation < newer_than)
goto next;
+ /* This em is under writeback, no need to defrag */
+ if (em->generation == (u64)-1)
+ goto next;
+
/*
* Our start offset might be in the middle of an existing extent
* map, so take that into account.
@@ -1253,6 +1438,13 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
if (range_len >= extent_thresh)
goto next;
+ /*
+ * Skip extents already at their max capacity; this is mostly for
+ * compressed extents, whose max capacity is only 128K.
+ */
+ if (em->len >= get_extent_max_capacity(em))
+ goto next;
+
next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
locked);
if (!next_mergeable) {
@@ -1271,6 +1463,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
}
add:
+ last_is_target = true;
range_len = min(extent_map_end(em), start + len) - cur;
/*
* This one is a good target, check if it can be merged into
@@ -1314,6 +1507,17 @@ next:
kfree(entry);
}
}
+ if (!ret && last_scanned_ret) {
+ /*
+ * If the last extent is not a target, the caller can skip to
+ * the end of that extent.
+ * Otherwise, we can only go to the end of the specified range.
+ */
+ if (!last_is_target)
+ *last_scanned_ret = max(cur, *last_scanned_ret);
+ else
+ *last_scanned_ret = max(start + len, *last_scanned_ret);
+ }
return ret;
}
@@ -1372,7 +1576,8 @@ static int defrag_one_locked_target(struct btrfs_inode *inode,
}
static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
- u32 extent_thresh, u64 newer_than, bool do_compress)
+ u32 extent_thresh, u64 newer_than, bool do_compress,
+ u64 *last_scanned_ret)
{
struct extent_state *cached_state = NULL;
struct defrag_target_range *entry;
@@ -1418,7 +1623,7 @@ static int defrag_one_range(struct btrfs_inode *inode, u64 start, u32 len,
*/
ret = defrag_collect_targets(inode, start, len, extent_thresh,
newer_than, do_compress, true,
- &target_list);
+ &target_list, last_scanned_ret);
if (ret < 0)
goto unlock_extent;
@@ -1453,7 +1658,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
u64 start, u32 len, u32 extent_thresh,
u64 newer_than, bool do_compress,
unsigned long *sectors_defragged,
- unsigned long max_sectors)
+ unsigned long max_sectors,
+ u64 *last_scanned_ret)
{
const u32 sectorsize = inode->root->fs_info->sectorsize;
struct defrag_target_range *entry;
@@ -1464,7 +1670,7 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
ret = defrag_collect_targets(inode, start, len, extent_thresh,
newer_than, do_compress, false,
- &target_list);
+ &target_list, NULL);
if (ret < 0)
goto out;
@@ -1481,6 +1687,15 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
range_len = min_t(u32, range_len,
(max_sectors - *sectors_defragged) * sectorsize);
+ /*
+ * If defrag_one_range() has updated last_scanned_ret,
+ * our range may already be invalid (e.g. hole punched).
+ * Skip if our range is before last_scanned_ret, as there is
+ * no need to defrag the range anymore.
+ */
+ if (entry->start + range_len <= *last_scanned_ret)
+ continue;
+
if (ra)
page_cache_sync_readahead(inode->vfs_inode.i_mapping,
ra, NULL, entry->start >> PAGE_SHIFT,
@@ -1493,7 +1708,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
* accounting.
*/
ret = defrag_one_range(inode, entry->start, range_len,
- extent_thresh, newer_than, do_compress);
+ extent_thresh, newer_than, do_compress,
+ last_scanned_ret);
if (ret < 0)
break;
*sectors_defragged += range_len >>
@@ -1504,6 +1720,8 @@ out:
list_del_init(&entry->list);
kfree(entry);
}
+ if (ret >= 0)
+ *last_scanned_ret = max(*last_scanned_ret, start + len);
return ret;
}
@@ -1589,6 +1807,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
while (cur < last_byte) {
const unsigned long prev_sectors_defragged = sectors_defragged;
+ u64 last_scanned = cur;
u64 cluster_end;
/* The cluster size 256K should always be page aligned */
@@ -1618,8 +1837,8 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
BTRFS_I(inode)->defrag_compress = compress_type;
ret = defrag_one_cluster(BTRFS_I(inode), ra, cur,
cluster_end + 1 - cur, extent_thresh,
- newer_than, do_compress,
- &sectors_defragged, max_to_defrag);
+ newer_than, do_compress, &sectors_defragged,
+ max_to_defrag, &last_scanned);
if (sectors_defragged > prev_sectors_defragged)
balance_dirty_pages_ratelimited(inode->i_mapping);
@@ -1627,11 +1846,12 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
btrfs_inode_unlock(inode, 0);
if (ret < 0)
break;
- cur = cluster_end + 1;
+ cur = max(cluster_end + 1, last_scanned);
if (ret > 0) {
ret = 0;
break;
}
+ cond_resched();
}
if (ra_allocated)
@@ -3354,7 +3574,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct block_device *bdev = NULL;
fmode_t mode;
int ret;
- bool cancel;
+ bool cancel = false;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
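
A recurring rule in the defrag changes above is that an extent is only a target if it is both below the extent threshold and below the maximum size an extent of its kind can reach, since compressed extents cap out much earlier. A stand-alone sketch of that rule; the constants are stand-ins for BTRFS_MAX_EXTENT_SIZE and BTRFS_MAX_COMPRESSED, and the struct is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_UNCOMPRESSED	(128u * 1024 * 1024)	/* stand-in for BTRFS_MAX_EXTENT_SIZE */
#define MAX_COMPRESSED		(128u * 1024)		/* stand-in for BTRFS_MAX_COMPRESSED */

struct extent {
	uint64_t len;
	bool compressed;
};

static uint64_t max_capacity(const struct extent *em)
{
	return em->compressed ? MAX_COMPRESSED : MAX_UNCOMPRESSED;
}

static bool is_defrag_target(const struct extent *em, uint64_t extent_thresh)
{
	if (em->len >= extent_thresh)
		return false;	/* already large enough */
	if (em->len >= max_capacity(em))
		return false;	/* cannot grow any further anyway */
	return true;
}

int main(void)
{
	struct extent small = { .len = 64 * 1024, .compressed = true };
	struct extent full  = { .len = 128 * 1024, .compressed = true };

	/* prints 1, 0: a full 128K compressed extent is never worth defragging */
	printf("64K compressed: %d, 128K compressed: %d\n",
	       is_defrag_target(&small, 256 * 1024),
	       is_defrag_target(&full, 256 * 1024));
	return 0;
}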
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 0fb90cbe7669..e6e28a9c7987 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -380,6 +380,17 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
kunmap(cur_page);
cur_in += LZO_LEN;
+ if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
+ /*
+ * seg_len shouldn't be larger than what we have allocated
+ * for workspace->cbuf
+ */
+ btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
+ seg_len);
+ ret = -EIO;
+ goto out;
+ }
+
/* Copy the compressed segment payload into workspace */
copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);
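
The lzo.c hunk above is a classic bounds check: an on-disk length field is validated against the size of the buffer it will be copied into before it is used. The same defensive pattern in a stand-alone sketch; names and sizes are illustrative, not the btrfs LZO code:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WORKSPACE_MAX 4096	/* whatever was actually allocated for the buffer */

static int copy_segment(uint8_t *dst, const uint8_t *src, uint32_t seg_len)
{
	if (seg_len > WORKSPACE_MAX) {
		fprintf(stderr, "unexpectedly large segment len %u\n", seg_len);
		return -EIO;	/* corrupt or hostile input, refuse to copy */
	}
	memcpy(dst, src, seg_len);
	return 0;
}

int main(void)
{
	static uint8_t ws[WORKSPACE_MAX], blob[8192];

	/* prints ok=0, rejected=-5 */
	printf("ok=%d, rejected=%d\n",
	       copy_segment(ws, blob, 100),
	       copy_segment(ws, blob, 8000));
	return 0;
}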
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8928275823a1..f12dc687350c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1185,9 +1185,24 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans = NULL;
int ret = 0;
+ /*
+ * We need to have subvol_sem write locked, to prevent races between
+ * concurrent tasks trying to disable quotas, because we will unlock
+ * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ */
+ lockdep_assert_held_write(&fs_info->subvol_sem);
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+
+ /*
+ * Request qgroup rescan worker to complete and wait for it. This wait
+ * must be done before transaction start for quota disable since it may
+ * deadlock with transaction by the qgroup rescan worker.
+ */
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
@@ -1205,14 +1220,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
goto out;
}
if (!fs_info->quota_root)
goto out;
- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
@@ -3383,6 +3397,9 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
ret = -EINVAL;
+ } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+ /* Quota disable is in progress */
+ ret = -EBUSY;
}
if (ret) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d8ccb62aa7d2..201eb2628aea 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4999,6 +4999,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
+ btrfs_err(fs_info,
+ "send: IO error at offset %llu for inode %llu root %llu",
+ page_offset(page), sctx->cur_ino,
+ sctx->send_root->root_key.objectid);
put_page(page);
ret = -EIO;
break;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 03de89b45f27..c3cfdfd8de9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1981,16 +1981,24 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
/*
- * We use writeback_inodes_sb here because if we used
+ * We use try_to_writeback_inodes_sb() here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
* Currently are holding the fs freeze lock, if we do an async flush
* we'll do btrfs_join_transaction() and deadlock because we need to
* wait for the fs freeze lock. Using the direct flushing we benefit
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
+ *
+ * Note that try_to_writeback_inodes_sb() will only trigger writeback
+ * if it can read lock sb->s_umount. It will always be able to lock it,
+ * except when the filesystem is being unmounted or being frozen, but in
+ * those cases sync_filesystem() is called, which results in calling
+ * writeback_inodes_sb() while holding a write lock on sb->s_umount.
+ * Note that we don't call writeback_inodes_sb() directly, because it
+ * will emit a warning if sb->s_umount is not locked.
*/
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+ try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
return 0;
}
@@ -2000,6 +2008,27 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
+/*
+ * Add the pending snapshot associated with the given transaction handle to its
+ * transaction. This must be called after the transaction commit has started
+ * and while holding fs_info->trans_lock.
+ * This serves to guarantee a caller of btrfs_commit_transaction() that it can
+ * safely free the pending snapshot pointer in case btrfs_commit_transaction()
+ * returns an error.
+ */
+static void add_pending_snapshot(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_transaction *cur_trans = trans->transaction;
+
+ if (!trans->pending_snapshot)
+ return;
+
+ lockdep_assert_held(&trans->fs_info->trans_lock);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
+
+ list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
+}
+
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -2073,6 +2102,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+ add_pending_snapshot(trans);
+
spin_unlock(&fs_info->trans_lock);
refcount_inc(&cur_trans->use_count);
@@ -2163,6 +2194,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&fs_info->trans_lock);
+ add_pending_snapshot(trans);
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 1852ed9de7fd..9402d8d94484 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -123,6 +123,8 @@ struct btrfs_trans_handle {
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ /* Set by a task that wants to create a snapshot. */
+ struct btrfs_pending_snapshot *pending_snapshot;
refcount_t use_count;
unsigned int type;
/*
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 72e1c942197d..9fd145f1c4bc 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -965,6 +965,7 @@ static int check_dev_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
struct btrfs_dev_item *ditem;
+ const u32 item_size = btrfs_item_size(leaf, slot);
if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
dev_item_err(leaf, slot,
@@ -972,6 +973,13 @@ static int check_dev_item(struct extent_buffer *leaf,
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
+
+ if (unlikely(item_size != sizeof(*ditem))) {
+ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*ditem));
+ return -EUCLEAN;
+ }
+
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
dev_item_err(leaf, slot,
@@ -1007,6 +1015,7 @@ static int check_inode_item(struct extent_buffer *leaf,
struct btrfs_inode_item *iitem;
u64 super_gen = btrfs_super_generation(fs_info->super_copy);
u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
+ const u32 item_size = btrfs_item_size(leaf, slot);
u32 mode;
int ret;
u32 flags;
@@ -1016,6 +1025,12 @@ static int check_inode_item(struct extent_buffer *leaf,
if (unlikely(ret < 0))
return ret;
+ if (unlikely(item_size != sizeof(*iitem))) {
+ generic_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*iitem));
+ return -EUCLEAN;
+ }
+
iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
/* Here we use super block generation + 1 to handle log tree */
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c1ddbe800897..3ee014c06b82 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3414,6 +3414,29 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
if (log->node) {
ret = walk_log_tree(trans, log, &wc);
if (ret) {
+ /*
+ * We weren't able to traverse the entire log tree; the
+ * typical scenario is getting an -EIO when reading an
+ * extent buffer of the tree, due to a previous writeback
+ * failure on it.
+ */
+ set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
+ &log->fs_info->fs_state);
+
+ /*
+ * Some extent buffers of the log tree may still be dirty
+ * and not yet written back to storage, because we may
+ * have updates to a log tree without syncing a log tree,
+ * such as during rename and link operations. So flush
+ * them out and wait for their writeback to complete, so
+ * that we properly cleanup their state and pages.
+ */
+ btrfs_write_marked_extents(log->fs_info,
+ &log->dirty_log_pages,
+ EXTENT_DIRTY | EXTENT_NEW);
+ btrfs_wait_tree_log_extents(log,
+ EXTENT_DIRTY | EXTENT_NEW);
+
if (trans)
btrfs_abort_transaction(trans, ret);
else
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index 51c968cd00a6..ae93cee9d25d 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -254,7 +254,7 @@ static bool cachefiles_shorten_object(struct cachefiles_object *object,
ret = cachefiles_inject_write_error();
if (ret == 0)
ret = vfs_fallocate(file, FALLOC_FL_ZERO_RANGE,
- new_size, dio_size);
+ new_size, dio_size - new_size);
if (ret < 0) {
trace_cachefiles_io_error(object, file_inode(file), ret,
cachefiles_trace_fallocate_error);
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 04eb52736990..753986ea1583 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -192,6 +192,64 @@ presubmission_error:
}
/*
+ * Query the occupancy of the cache in a region, returning where the next chunk
+ * of data starts and how long it is.
+ */
+static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len)
+{
+ struct cachefiles_object *object;
+ struct file *file;
+ loff_t off, off2;
+
+ *_data_start = -1;
+ *_data_len = 0;
+
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ return -ENOBUFS;
+
+ object = cachefiles_cres_object(cres);
+ file = cachefiles_cres_file(cres);
+ granularity = max_t(size_t, object->volume->cache->bsize, granularity);
+
+ _enter("%pD,%li,%llx,%zx/%llx",
+ file, file_inode(file)->i_ino, start, len,
+ i_size_read(file_inode(file)));
+
+ off = cachefiles_inject_read_error();
+ if (off == 0)
+ off = vfs_llseek(file, start, SEEK_DATA);
+ if (off == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off < 0 && off >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+ if (round_up(off, granularity) >= start + len)
+ return -ENODATA; /* No data in range */
+
+ off2 = cachefiles_inject_read_error();
+ if (off2 == 0)
+ off2 = vfs_llseek(file, off, SEEK_HOLE);
+ if (off2 == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+
+ /* Round away partial blocks */
+ off = round_up(off, granularity);
+ off2 = round_down(off2, granularity);
+ if (off2 <= off)
+ return -ENODATA;
+
+ *_data_start = off;
+ if (off2 > start + len)
+ *_data_len = len;
+ else
+ *_data_len = off2 - off;
+ return 0;
+}
+
+/*
* Handle completion of a write to the cache.
*/
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
@@ -545,6 +603,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.write = cachefiles_write,
.prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write,
+ .query_occupancy = cachefiles_query_occupancy,
};
/*
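
cachefiles_query_occupancy() above probes the backing file with SEEK_DATA/SEEK_HOLE and rounds the result to the cache block size. A user-space approximation of the same probe using plain lseek(); it uses a 1-byte granularity for the demo and is only a sketch of the idea, not the cachefiles code:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int query_occupancy(int fd, off_t start, off_t granularity,
			   off_t *data_start, off_t *data_len)
{
	off_t d = lseek(fd, start, SEEK_DATA);
	if (d < 0)
		return errno == ENXIO ? -ENODATA : -errno;	/* ENXIO: beyond EOF */

	off_t h = lseek(fd, d, SEEK_HOLE);
	if (h < 0)
		return -errno;

	/* round away partial blocks, as the cache only stores whole blocks */
	d = (d + granularity - 1) / granularity * granularity;
	h = h / granularity * granularity;
	if (h <= d)
		return -ENODATA;

	*data_start = d;
	*data_len = h - d;
	return 0;
}

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);
	off_t ds, dl;

	if (fd >= 0 && query_occupancy(fd, 0, 1, &ds, &dl) == 0)
		printf("data at %lld for %lld bytes\n",
		       (long long)ds, (long long)dl);
	return 0;
}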
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index 463ebe34892b..cdce1609c5c2 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -396,11 +396,11 @@ static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const ch
switch (state) {
case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
- cifs_reconnect(swnreg->tcon->ses->server, true);
+ cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
break;
case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
- cifs_reconnect(swnreg->tcon->ses->server, true);
+ cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
break;
case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
@@ -498,7 +498,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
goto unlock;
}
- cifs_reconnect(tcon->ses->server, false);
+ cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, false);
unlock:
mutex_unlock(&tcon->ses->server->srv_mutex);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index ee3aab3dd4ac..bf861fef2f0c 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -949,6 +949,9 @@ static void populate_new_aces(char *nacl_base,
pnntace = (struct cifs_ace *) (nacl_base + nsize);
nsize += setup_special_mode_ACE(pnntace, nmode);
num_aces++;
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += setup_authusers_ACE(pnntace);
+ num_aces++;
goto set_size;
}
@@ -1297,7 +1300,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
if (uid_valid(uid)) { /* chown */
uid_t id;
- nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+ nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!nowner_sid_ptr) {
rc = -ENOMEM;
@@ -1326,7 +1329,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
}
if (gid_valid(gid)) { /* chgrp */
gid_t id;
- ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+ ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!ngroup_sid_ptr) {
rc = -ENOMEM;
@@ -1613,7 +1616,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
nsecdesclen = secdesclen;
if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
if (mode_from_sid)
- nsecdesclen += sizeof(struct cifs_ace);
+ nsecdesclen += 2 * sizeof(struct cifs_ace);
else /* cifsacl */
nsecdesclen += 5 * sizeof(struct cifs_ace);
} else { /* chown */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 199edac0cb59..082c21478686 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -919,6 +919,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
out_super:
deactivate_locked_super(sb);
+ return root;
out:
if (cifs_sb) {
kfree(cifs_sb->prepath);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 11a22a30ee14..053cb449eb16 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -162,7 +162,7 @@ static void cifs_resolve_server(struct work_struct *work)
mutex_unlock(&server->srv_mutex);
}
-/**
+/*
* Mark all sessions and tcons for reconnect.
*
* @server needs to be previously set to CifsNeedReconnect.
@@ -175,11 +175,6 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
struct TCP_Server_Info *pserver;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct mid_q_entry *mid, *nmid;
- struct list_head retry_list;
-
- server->maxBuf = 0;
- server->max_read = 0;
/*
* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
@@ -219,6 +214,16 @@ next_session:
spin_unlock(&ses->chan_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
+}
+
+static void
+cifs_abort_connection(struct TCP_Server_Info *server)
+{
+ struct mid_q_entry *mid, *nmid;
+ struct list_head retry_list;
+
+ server->maxBuf = 0;
+ server->max_read = 0;
/* do not want to be sending data on a socket we are freeing */
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
@@ -310,6 +315,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+ cifs_abort_connection(server);
+
do {
try_to_freeze();
mutex_lock(&server->srv_mutex);
@@ -434,6 +441,8 @@ reconnect_dfs_server(struct TCP_Server_Info *server,
cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+ cifs_abort_connection(server);
+
do {
try_to_freeze();
mutex_lock(&server->srv_mutex);
@@ -639,6 +648,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_reconnect(server, false);
return -ECONNABORTED;
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -1831,13 +1841,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
int i;
for (i = 1; i < chan_count; i++) {
- /*
- * note: for now, we're okay accessing ses->chans
- * without chan_lock. But when chans can go away, we'll
- * need to introduce ref counting to make sure that chan
- * is not freed from under us.
- */
+ spin_unlock(&ses->chan_lock);
cifs_put_tcp_session(ses->chans[i].server, 0);
+ spin_lock(&ses->chan_lock);
ses->chans[i].server = NULL;
}
}
@@ -1981,6 +1987,19 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
}
+ ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
+ if (!ctx->workstation_name) {
+ cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
+ rc = -ENOMEM;
+ kfree(ctx->username);
+ ctx->username = NULL;
+ kfree_sensitive(ctx->password);
+ ctx->password = NULL;
+ kfree(ctx->domainname);
+ ctx->domainname = NULL;
+ goto out_key_put;
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
@@ -2331,10 +2350,19 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
- } else {
+ } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
+ (strcmp(ses->server->vals->version_string,
+ SMB3ANY_VERSION_STRING) == 0) ||
+ (strcmp(ses->server->vals->version_string,
+ SMBDEFAULT_VERSION_STRING) == 0)) {
cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
rc = -EOPNOTSUPP;
goto out_fail;
+ } else {
+ cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+ "disabled but required for POSIX extensions\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
}
}
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index dd9643751671..831f42458bf6 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1355,7 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
}
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_reconnect(tcon->ses->server, true);
+ cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, true);
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 59334be9ed3b..e7af802dcfa6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4269,8 +4269,6 @@ cifs_readv_complete(struct work_struct *work)
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
- lru_cache_add(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes)) {
flush_dcache_page(page);
@@ -4278,12 +4276,12 @@ cifs_readv_complete(struct work_struct *work)
} else
SetPageError(page);
- unlock_page(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
+ unlock_page(page);
+
got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
put_page(page);
@@ -4340,7 +4338,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
* fill them until the writes are flushed.
*/
zero_user(page, 0, PAGE_SIZE);
- lru_cache_add(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
@@ -4350,7 +4347,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
continue;
} else {
/* no need to hold page hostage */
- lru_cache_add(page);
unlock_page(page);
put_page(page);
rdata->pages[i] = NULL;
@@ -4393,92 +4389,20 @@ cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
return readpages_fill_pages(server, rdata, iter, iter->count);
}
-static int
-readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
- unsigned int rsize, struct list_head *tmplist,
- unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
-{
- struct page *page, *tpage;
- unsigned int expected_index;
- int rc;
- gfp_t gfp = readahead_gfp_mask(mapping);
-
- INIT_LIST_HEAD(tmplist);
-
- page = lru_to_page(page_list);
-
- /*
- * Lock the page and put it in the cache. Since no one else
- * should have access to this page, we're safe to simply set
- * PG_locked without checking it first.
- */
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping,
- page->index, gfp);
-
- /* give up if we can't stick it in the cache */
- if (rc) {
- __ClearPageLocked(page);
- return rc;
- }
-
- /* move first page to the tmplist */
- *offset = (loff_t)page->index << PAGE_SHIFT;
- *bytes = PAGE_SIZE;
- *nr_pages = 1;
- list_move_tail(&page->lru, tmplist);
-
- /* now try and add more pages onto the request */
- expected_index = page->index + 1;
- list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
- /* discontinuity ? */
- if (page->index != expected_index)
- break;
-
- /* would this page push the read over the rsize? */
- if (*bytes + PAGE_SIZE > rsize)
- break;
-
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
- if (rc) {
- __ClearPageLocked(page);
- break;
- }
- list_move_tail(&page->lru, tmplist);
- (*bytes) += PAGE_SIZE;
- expected_index++;
- (*nr_pages)++;
- }
- return rc;
-}
-
-static int cifs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *page_list, unsigned num_pages)
+static void cifs_readahead(struct readahead_control *ractl)
{
int rc;
- int err = 0;
- struct list_head tmplist;
- struct cifsFileInfo *open_file = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifsFileInfo *open_file = ractl->file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
struct TCP_Server_Info *server;
pid_t pid;
- unsigned int xid;
+ unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
+ pgoff_t next_cached = ULONG_MAX;
+ bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
+ cifs_inode_cookie(ractl->mapping->host)->cache_priv;
+ bool check_cache = caching;
xid = get_xid();
- /*
- * Reads as many pages as possible from fscache. Returns -ENOBUFS
- * immediately if the cookie is negative
- *
- * After this point, every page in the list might have PG_fscache set,
- * so we will need to clean that up off of every page we don't use.
- */
- rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
- &num_pages);
- if (rc == 0) {
- free_xid(xid);
- return rc;
- }
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -4489,39 +4413,73 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
- __func__, file, mapping, num_pages);
+ __func__, ractl->file, ractl->mapping, readahead_count(ractl));
/*
- * Start with the page at end of list and move it to private
- * list. Do the same with any following pages until we hit
- * the rsize limit, hit an index discontinuity, or run out of
- * pages. Issue the async read and then start the loop again
- * until the list is empty.
- *
- * Note that list order is important. The page_list is in
- * the order of declining indexes. When we put the pages in
- * the rdata->pages, then we want them in increasing order.
+ * Chop the readahead request up into rsize-sized read requests.
*/
- while (!list_empty(page_list) && !err) {
- unsigned int i, nr_pages, bytes, rsize;
- loff_t offset;
- struct page *page, *tpage;
+ while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
+ unsigned int i, got, rsize;
+ struct page *page;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
+ pgoff_t index = readahead_index(ractl) + last_batch_size;
+
+ /*
+ * Find out if we have anything cached in the range of
+ * interest, and if so, where the next chunk of cached data is.
+ */
+ if (caching) {
+ if (check_cache) {
+ rc = cifs_fscache_query_occupancy(
+ ractl->mapping->host, index, nr_pages,
+ &next_cached, &cache_nr_pages);
+ if (rc < 0)
+ caching = false;
+ check_cache = false;
+ }
+
+ if (index == next_cached) {
+ /*
+ * TODO: Send a whole batch of pages to be read
+ * by the cache.
+ */
+ page = readahead_page(ractl);
+ last_batch_size = 1 << thp_order(page);
+ if (cifs_readpage_from_fscache(ractl->mapping->host,
+ page) < 0) {
+ /*
+ * TODO: Deal with cache read failure
+ * here, but for the moment, delegate
+ * that to readpage.
+ */
+ caching = false;
+ }
+ unlock_page(page);
+ next_cached++;
+ cache_nr_pages--;
+ if (cache_nr_pages == 0)
+ check_cache = true;
+ continue;
+ }
+ }
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
+ if (rc) {
+ if (rc == -EAGAIN)
+ continue;
break;
+ }
}
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&rsize, credits);
if (rc)
break;
+ nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
+ nr_pages = min_t(size_t, nr_pages, next_cached - index);
/*
* Give up immediately if rsize is too small to read an entire
@@ -4529,16 +4487,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* reach this point however since we set ra_pages to 0 when the
* rsize is smaller than a cache page.
*/
- if (unlikely(rsize < PAGE_SIZE)) {
- add_credits_and_wake_if(server, credits, 0);
- free_xid(xid);
- return 0;
- }
-
- nr_pages = 0;
- err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
- &nr_pages, &offset, &bytes);
- if (!nr_pages) {
+ if (unlikely(!nr_pages)) {
add_credits_and_wake_if(server, credits, 0);
break;
}
@@ -4546,36 +4495,31 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- lru_cache_add(page);
- unlock_page(page);
- put_page(page);
- }
- rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->server = server;
- rdata->mapping = mapping;
- rdata->offset = offset;
- rdata->bytes = bytes;
- rdata->pid = pid;
- rdata->pagesz = PAGE_SIZE;
- rdata->tailsz = PAGE_SIZE;
+ got = __readahead_batch(ractl, rdata->pages, nr_pages);
+ if (got != nr_pages) {
+ pr_warn("__readahead_batch() returned %u/%u\n",
+ got, nr_pages);
+ nr_pages = got;
+ }
+
+ rdata->nr_pages = nr_pages;
+ rdata->bytes = readahead_batch_length(ractl);
+ rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->server = server;
+ rdata->mapping = ractl->mapping;
+ rdata->offset = readahead_pos(ractl);
+ rdata->pid = pid;
+ rdata->pagesz = PAGE_SIZE;
+ rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
- rdata->credits = credits_on_stack;
-
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- rdata->pages[rdata->nr_pages++] = page;
- }
+ rdata->credits = credits_on_stack;
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
-
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
@@ -4587,7 +4531,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
add_credits_and_wake_if(server, &rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
- lru_cache_add(page);
unlock_page(page);
put_page(page);
}
@@ -4597,10 +4540,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
kref_put(&rdata->refcount, cifs_readdata_release);
+ last_batch_size = nr_pages;
}
free_xid(xid);
- return rc;
}
/*
@@ -4924,7 +4867,7 @@ oplock_break_done:
* In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
* so this method should never be called.
*
- * Direct IO is not yet supported in the cached mode.
+ * Direct IO is not yet supported in the cached mode.
*/
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
@@ -5006,7 +4949,7 @@ static int cifs_set_page_dirty(struct page *page)
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
- .readpages = cifs_readpages,
+ .readahead = cifs_readahead,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
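
The conversion from ->readpages to ->readahead above chops the readahead window into rsize-sized network reads while peeling off pages the cache already holds. A toy user-space model of that batching decision; page_is_cached(), RSIZE and the sample page set are all illustrative:

#include <stdbool.h>
#include <stdio.h>

#define PAGE	1
#define RSIZE	4	/* pages per network read in this model */

static bool page_is_cached(unsigned long index)
{
	return index >= 6 && index < 9;		/* pretend pages 6..8 are cached */
}

static void readahead(unsigned long start, unsigned long nr_pages)
{
	unsigned long index = start, end = start + nr_pages;

	while (index < end) {
		if (page_is_cached(index)) {
			printf("page %lu: read from cache\n", index);
			index += PAGE;
			continue;
		}
		/* batch uncached pages up to RSIZE or the next cached page */
		unsigned long batch = 0;
		while (index + batch < end && batch < RSIZE &&
		       !page_is_cached(index + batch))
			batch++;
		printf("pages %lu-%lu: issue one network read\n",
		       index, index + batch - 1);
		index += batch;
	}
}

int main(void)
{
	readahead(0, 16);
	return 0;
}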
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 7ec35f3f0a5f..a92e9eec521f 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -149,7 +149,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("echo_interval", Opt_echo_interval),
fsparam_u32("max_credits", Opt_max_credits),
fsparam_u32("handletimeout", Opt_handletimeout),
- fsparam_u32("snapshot", Opt_snapshot),
+ fsparam_u64("snapshot", Opt_snapshot),
fsparam_u32("max_channels", Opt_max_channels),
/* Mount options which take string value */
@@ -1078,7 +1078,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->echo_interval = result.uint_32;
break;
case Opt_snapshot:
- ctx->snapshot_time = result.uint_32;
+ ctx->snapshot_time = result.uint_64;
break;
case Opt_max_credits:
if (result.uint_32 < 20 || result.uint_32 > 60000) {
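
The fs_context.c fix above parses snapshot= as a 64-bit value; SMB snapshot timestamps are 100ns-granularity values far larger than 32 bits, so the old u32 parse silently truncated them. A quick illustration (the sample value is made up, but representative in magnitude):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t snapshot_time = 132514819560000000ULL;	/* FILETIME-style 100ns units */
	uint32_t truncated = (uint32_t)snapshot_time;

	printf("wanted %" PRIu64 ", u32 parse stored %" PRIu32 "\n",
	       snapshot_time, truncated);
	return 0;
}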
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index efaac4d5ff55..33af72e0ac0c 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -134,37 +134,127 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
}
}
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+ if (ops)
+ ops->end_operation(cres);
+}
+
/*
- * Retrieve a page from FS-Cache
+ * Fallback page reading interface.
*/
-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
- cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
- __func__, CIFS_I(inode)->fscache, page, inode);
- return -ENOBUFS; // Needs conversion to using netfslib
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ int ret;
+
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
+ NULL, NULL);
+ fscache_end_operation(&cres);
+ return ret;
}
/*
- * Retrieve a set of pages from FS-Cache
+ * Fallback page writing interface.
*/
-int __cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+ bool no_space_allocated_yet)
{
- cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
- __func__, CIFS_I(inode)->fscache, *nr_pages, inode);
- return -ENOBUFS; // Needs conversion to using netfslib
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ loff_t start = page_offset(page);
+ size_t len = PAGE_SIZE;
+ int ret;
+
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+ ret = fscache_begin_write_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+ no_space_allocated_yet);
+ if (ret == 0)
+ ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
+ fscache_end_operation(&cres);
+ return ret;
}
-void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+/*
+ * Retrieve a page from FS-Cache
+ */
+int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ int ret;
- WARN_ON(!cifsi->fscache);
+ cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
+ __func__, cifs_inode_cookie(inode), page, inode);
+ ret = fscache_fallback_read_page(inode, page);
+ if (ret < 0)
+ return ret;
+
+ /* Read completed synchronously */
+ SetPageUptodate(page);
+ return 0;
+}
+
+void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+{
cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
- __func__, cifsi->fscache, page, inode);
+ __func__, cifs_inode_cookie(inode), page, inode);
+
+ fscache_fallback_write_page(inode, page, true);
+}
+
+/*
+ * Query the cache occupancy.
+ */
+int __cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
+{
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ loff_t start, data_start;
+ size_t len, data_len;
+ int ret;
- // Needs conversion to using netfslib
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ start = first * PAGE_SIZE;
+ len = nr_pages * PAGE_SIZE;
+ ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
+ &data_start, &data_len);
+ if (ret == 0) {
+ *_data_first = data_start / PAGE_SIZE;
+ *_data_nr_pages = len / PAGE_SIZE;
+ }
+
+ fscache_end_operation(&cres);
+ return ret;
}
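
The new __cifs_fscache_query_occupancy() helper above converts a page-indexed request into the byte range the cache layer works in and then translates the cache's answer back into page units. A minimal userspace sketch of that conversion follows; PAGE_SIZE, the variable names and the sample values are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL   /* illustrative; the real value is per-architecture */

int main(void)
{
	uint64_t first = 3, nr_pages = 5;       /* page-indexed request */
	uint64_t start = first * PAGE_SIZE;     /* byte offset handed to the cache */
	uint64_t len   = nr_pages * PAGE_SIZE;  /* byte length handed to the cache */

	/* pretend the cache reports data over the whole queried range */
	uint64_t data_start = start;
	uint64_t data_len   = len;

	printf("data_first=%llu data_nr_pages=%llu\n",
	       (unsigned long long)(data_start / PAGE_SIZE),
	       (unsigned long long)(data_len / PAGE_SIZE));
	return 0;
}
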
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index c6ca49ac33d4..55129908e2c1 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -9,6 +9,7 @@
#ifndef _CIFS_FSCACHE_H
#define _CIFS_FSCACHE_H
+#include <linux/swap.h>
#include <linux/fscache.h>
#include "cifsglob.h"
@@ -58,14 +59,6 @@ void cifs_fscache_fill_coherency(struct inode *inode,
}
-extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
-extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
-extern int __cifs_readpages_from_fscache(struct inode *,
- struct address_space *,
- struct list_head *,
- unsigned *);
-extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
-
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
{
return CIFS_I(inode)->fscache;
@@ -80,33 +73,52 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags
i_size_read(inode), flags);
}
-static inline int cifs_readpage_from_fscache(struct inode *inode,
- struct page *page)
-{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpage_from_fscache(inode, page);
+extern int __cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages);
- return -ENOBUFS;
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
+{
+ if (!cifs_inode_cookie(inode))
+ return -ENOBUFS;
+ return __cifs_fscache_query_occupancy(inode, first, nr_pages,
+ _data_first, _data_nr_pages);
}
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
+extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
+
+
+static inline int cifs_readpage_from_fscache(struct inode *inode,
+ struct page *page)
{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpages_from_fscache(inode, mapping, pages,
- nr_pages);
+ if (cifs_inode_cookie(inode))
+ return __cifs_readpage_from_fscache(inode, page);
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page)
{
- if (PageFsCache(page))
+ if (cifs_inode_cookie(inode))
__cifs_readpage_to_fscache(inode, page);
}
+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ if (PageFsCache(page)) {
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ wait_on_page_fscache(page);
+ fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
+ }
+ return true;
+}
+
#else /* CONFIG_CIFS_FSCACHE */
static inline
void cifs_fscache_fill_coherency(struct inode *inode,
@@ -123,22 +135,29 @@ static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool upd
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
{
+ *_data_first = ULONG_MAX;
+ *_data_nr_pages = 0;
return -ENOBUFS;
}
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static inline int
+cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
return -ENOBUFS;
}
-static inline void cifs_readpage_to_fscache(struct inode *inode,
- struct page *page) {}
+static inline
+void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
+
+static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ return true; /* May release page */
+}
#endif /* CONFIG_CIFS_FSCACHE */
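
The cifs_fscache_release_page() helper added above only lets a page go once outstanding cache state has been dealt with, and refuses when the caller cannot sleep (kswapd) or may not re-enter the filesystem (no __GFP_FS). Below is a small userspace sketch of that decision; the flag names are made up to stand in for the kernel ones, and the actual waiting and release bookkeeping is only hinted at in a comment.

#include <stdio.h>
#include <stdbool.h>

#define GFP_FS      0x1u   /* stand-in: caller may re-enter the filesystem */
#define PG_FSCACHE  0x2u   /* stand-in: page still has cache state attached */

static bool may_release_page(unsigned int page_flags, unsigned int gfp, bool is_kswapd)
{
	if (page_flags & PG_FSCACHE) {
		if (is_kswapd || !(gfp & GFP_FS))
			return false;   /* cannot wait here, keep the page for now */
		/* ...otherwise wait for the cache, note the release, fall through... */
	}
	return true;                    /* safe to release */
}

int main(void)
{
	printf("%d %d %d\n",
	       may_release_page(PG_FSCACHE, 0, false),        /* no GFP_FS -> 0 */
	       may_release_page(PG_FSCACHE, GFP_FS, true),    /* kswapd    -> 0 */
	       may_release_page(PG_FSCACHE, GFP_FS, false));  /* may wait  -> 1 */
	return 0;
}
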
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7d8b3ceb2af3..60d853c92f6a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -83,6 +83,7 @@ static void cifs_set_ops(struct inode *inode)
static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
+ struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
@@ -113,6 +114,9 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
__func__, cifs_i->uniqueid);
set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
+ /* Invalidate fscache cookie */
+ cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
+ fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
}
/*
@@ -2261,8 +2265,6 @@ cifs_dentry_needs_reval(struct dentry *dentry)
int
cifs_invalidate_mapping(struct inode *inode)
{
- struct cifs_fscache_inode_coherency_data cd;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
int rc = 0;
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
@@ -2272,8 +2274,6 @@ cifs_invalidate_mapping(struct inode *inode)
__func__, inode);
}
- cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
- fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dc3b16d1be09..32f478c7a66d 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -127,11 +127,6 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
struct cifs_server_iface *ifaces = NULL;
size_t iface_count;
- if (ses->server->dialect < SMB30_PROT_ID) {
- cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
- return 0;
- }
-
spin_lock(&ses->chan_lock);
new_chan_count = old_chan_count = ses->chan_count;
@@ -145,6 +140,12 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
return 0;
}
+ if (ses->server->dialect < SMB30_PROT_ID) {
+ spin_unlock(&ses->chan_lock);
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ return 0;
+ }
+
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
ses->chan_max = 1;
spin_unlock(&ses->chan_lock);
@@ -713,7 +714,11 @@ static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
else
sz += sizeof(__le16);
- sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
+ if (ses->workstation_name)
+ sz += sizeof(__le16) * strnlen(ses->workstation_name,
+ CIFS_MAX_WORKSTATION_LEN);
+ else
+ sz += sizeof(__le16);
return sz;
}
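
The size_of_ntlmssp_blob() change above guards against a NULL workstation_name: a present name contributes one __le16 per character, bounded by CIFS_MAX_WORKSTATION_LEN, while a missing name still reserves a single __le16. A hedged userspace sketch of the same computation; the constant, function name and sample strings are illustrative, not the CIFS identifiers.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_WORKSTATION_LEN 15   /* illustrative bound, not the CIFS constant */

static size_t workstation_field_size(const char *name)
{
	if (name)
		return sizeof(uint16_t) * strnlen(name, MAX_WORKSTATION_LEN);
	return sizeof(uint16_t);     /* a missing name still reserves one unit */
}

int main(void)
{
	printf("%zu %zu\n",
	       workstation_field_size("BUILDHOST"),
	       workstation_field_size(NULL));
	return 0;
}
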
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 8272c91e15ef..b2fb7bd11936 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -228,9 +228,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
spin_unlock(&GlobalMid_Lock);
if (reconnect) {
- spin_lock(&cifs_tcp_ses_lock);
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, false);
}
return mid;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 8540f7c13eae..a4c3e027cca2 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -430,10 +430,7 @@ unmask:
* be taken as the remainder of this one. We need to kill the
* socket so the server throws away the partial SMB
*/
- spin_lock(&cifs_tcp_ses_lock);
- if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, false);
trace_smb3_partial_send_reconnect(server->CurrentMid,
server->conn_id, server->hostname);
}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 7d8b72d67c80..9d486fbbfbbd 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -175,11 +175,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_CIFS_NTSD_FULL:
aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP |
CIFS_ACL_DACL |
CIFS_ACL_SACL);
break;
case XATTR_CIFS_NTSD:
aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP |
CIFS_ACL_DACL);
break;
case XATTR_CIFS_ACL:
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index d3cd2a94d1e8..d1f9d2632202 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -34,6 +34,14 @@
*/
DEFINE_SPINLOCK(configfs_dirent_lock);
+/*
+ * All of link_obj/unlink_obj/link_group/unlink_group require that
+ * subsys->su_mutex is held.
+ * But parent configfs_subsystem is NULL when config_item is root.
+ * Use this mutex when config_item is root.
+ */
+static DEFINE_MUTEX(configfs_subsystem_mutex);
+
static void configfs_d_iput(struct dentry * dentry,
struct inode * inode)
{
@@ -1859,7 +1867,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
sd = root->d_fsdata;
+ mutex_lock(&configfs_subsystem_mutex);
link_group(to_config_group(sd->s_element), group);
+ mutex_unlock(&configfs_subsystem_mutex);
inode_lock_nested(d_inode(root), I_MUTEX_PARENT);
@@ -1884,7 +1894,9 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
inode_unlock(d_inode(root));
if (err) {
+ mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
+ mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
put_fragment(frag);
@@ -1931,7 +1943,9 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
dput(dentry);
+ mutex_lock(&configfs_subsystem_mutex);
unlink_group(group);
+ mutex_unlock(&configfs_subsystem_mutex);
configfs_release_fs();
}
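
The configfs change above introduces configfs_subsystem_mutex because root-level groups have no parent whose su_mutex could serialize link_group()/unlink_group(). The following is a userspace analogue of that pattern, assuming pthreads and illustrative names rather than the kernel implementation; build with -pthread.

#include <pthread.h>
#include <stdio.h>

struct entry { struct entry *next; const char *name; };

static pthread_mutex_t root_registry_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct entry *root_list;

static void register_root_entry(struct entry *e)
{
	pthread_mutex_lock(&root_registry_mutex);
	e->next = root_list;                    /* link_group() analogue */
	root_list = e;
	pthread_mutex_unlock(&root_registry_mutex);
}

static void unregister_root_entry(struct entry *e)
{
	pthread_mutex_lock(&root_registry_mutex);
	for (struct entry **p = &root_list; *p; p = &(*p)->next) {
		if (*p == e) {                  /* unlink_group() analogue */
			*p = e->next;
			break;
		}
	}
	pthread_mutex_unlock(&root_registry_mutex);
}

int main(void)
{
	struct entry a = { .next = NULL, .name = "subsys-a" };

	register_root_entry(&a);
	unregister_root_entry(&a);
	puts("registered and unregistered under the global mutex");
	return 0;
}

The same reasoning applies whenever top-level registration and unregistration would otherwise race without a common lock to serialize them.
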
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index fa7ddb7ad980..226a57c57ee6 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -252,12 +252,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
return ret;
iomap->offset = map.m_la;
- if (flags & IOMAP_DAX) {
+ if (flags & IOMAP_DAX)
iomap->dax_dev = mdev.m_daxdev;
- iomap->offset += mdev.m_dax_part_off;
- } else {
+ else
iomap->bdev = mdev.m_bdev;
- }
iomap->length = map.m_llen;
iomap->flags = 0;
iomap->private = NULL;
@@ -284,6 +282,8 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
+ if (flags & IOMAP_DAX)
+ iomap->addr += mdev.m_dax_part_off;
}
return 0;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index b8272fb95fd6..5aa2cf2c2f80 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -325,7 +325,7 @@ struct erofs_inode {
unsigned char z_algorithmtype[2];
unsigned char z_logical_clusterbits;
unsigned long z_tailextent_headlcn;
- unsigned int z_idataoff;
+ erofs_off_t z_idataoff;
unsigned short z_idata_size;
};
#endif /* CONFIG_EROFS_FS_ZIP */
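
Widening z_idataoff from unsigned int to erofs_off_t avoids truncating byte offsets on large images, assuming erofs_off_t is a 64-bit type. A tiny userspace sketch of the truncation the old field width could cause; the values are illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t idataoff = 5ULL * 1024 * 1024 * 1024;   /* an offset past 4 GiB */
	unsigned int narrow = (unsigned int)idataoff;    /* old 32-bit field: truncates */
	uint64_t wide = idataoff;                        /* widened field: preserved */

	printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
	return 0;
}
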
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 498b7666efe8..423bc1a61da5 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
return false;
}
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
- bool sync, int bios)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
- /* wake up the caller thread for sync decompression */
- if (sync) {
- unsigned long flags;
-
- spin_lock_irqsave(&io->u.wait.lock, flags);
- if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
- return;
- }
-
- if (atomic_add_return(bios, &io->pending_bios))
- return;
- /* Use workqueue and sync decompression for atomic contexts only */
- if (in_atomic() || irqs_disabled()) {
- queue_work(z_erofs_workqueue, &io->u.work);
- /* enable sync decompression for readahead */
- if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
- sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
- return;
- }
- z_erofs_decompressqueue_work(&io->u.work);
-}
-
static bool z_erofs_page_is_invalidated(struct page *page)
{
return !page->mapping && !z_erofs_is_shortlived_page(page);
}
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
- tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
- struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
- blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
-
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- if (err)
- SetPageError(page);
-
- if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
- if (!err)
- SetPageUptodate(page);
- unlock_page(page);
- }
- }
- z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
- bio_put(bio);
-}
-
static int z_erofs_decompress_pcluster(struct super_block *sb,
struct z_erofs_pcluster *pcl,
struct page **pagepool)
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
kvfree(bgq);
}
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ bool sync, int bios)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+ /* wake up the caller thread for sync decompression */
+ if (sync) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&io->u.wait.lock, flags);
+ if (!atomic_add_return(bios, &io->pending_bios))
+ wake_up_locked(&io->u.wait);
+ spin_unlock_irqrestore(&io->u.wait.lock, flags);
+ return;
+ }
+
+ if (atomic_add_return(bios, &io->pending_bios))
+ return;
+ /* Use workqueue and sync decompression for atomic contexts only */
+ if (in_atomic() || irqs_disabled()) {
+ queue_work(z_erofs_workqueue, &io->u.work);
+ /* enable sync decompression for readahead */
+ if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+ return;
+ }
+ z_erofs_decompressqueue_work(&io->u.work);
+}
+
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unsigned int nr,
struct page **pagepool,
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
qtail[JQ_BYPASS] = &pcl->next;
}
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+ tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+ struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+ blk_status_t err = bio->bi_status;
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+
+ DBG_BUGON(PageUptodate(page));
+ DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+ if (err)
+ SetPageError(page);
+
+ if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+ if (!err)
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ }
+ z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+ bio_put(bio);
+}
+
static void z_erofs_submit_queue(struct super_block *sb,
struct z_erofs_decompress_frontend *f,
struct page **pagepool,
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 18d7fd1a5064..361b1d6e4bf9 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -630,6 +630,13 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if (endoff >= m.clusterofs) {
m.headtype = m.type;
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ /*
+ * For ztailpacking files, in order to inline data more
+ * effectively, special EOF lclusters are now supported
+ * which can have three parts at most.
+ */
+ if (ztailpacking && end > inode->i_size)
+ end = inode->i_size;
break;
}
/* m.lcn should be >= 1 if endoff < m.clusterofs */
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 5a35768d6149..57e82e25f8e2 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -139,7 +139,7 @@ fail:
/*
* Inode operation get_posix_acl().
*
- * inode->i_mutex: don't care
+ * inode->i_rwsem: don't care
*/
struct posix_acl *
ext4_get_acl(struct inode *inode, int type, bool rcu)
@@ -183,7 +183,7 @@ ext4_get_acl(struct inode *inode, int type, bool rcu)
/*
* Set the access or default ACL of an inode.
*
- * inode->i_mutex: down unless called from ext4_new_inode
+ * inode->i_rwsem: down unless called from ext4_new_inode
*/
static int
__ext4_set_acl(handle_t *handle, struct inode *inode, int type,
@@ -271,8 +271,8 @@ out_stop:
/*
* Initialize the ACLs of a new inode. Called from ext4_new_inode.
*
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
+ * dir->i_rwsem: down
+ * inode->i_rwsem: up (access to inode is still exclusive)
*/
int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 71a3cdceaa03..bcd3b9bf8069 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1028,7 +1028,7 @@ struct ext4_inode_info {
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_mutex even when reading would cause contention
+ * data. Taking i_rwsem even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
@@ -1750,6 +1750,7 @@ struct ext4_sb_info {
spinlock_t s_fc_lock;
struct buffer_head *s_fc_bh;
struct ext4_fc_stats s_fc_stats;
+ tid_t s_fc_ineligible_tid;
#ifdef CONFIG_EXT4_DEBUG
int s_fc_debug_max_replay;
#endif
@@ -1795,10 +1796,7 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
enum {
EXT4_MF_MNTDIR_SAMPLED,
EXT4_MF_FS_ABORTED, /* Fatal error detected */
- EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
- EXT4_MF_FC_COMMITTING /* File system underoing a fast
- * commit.
- */
+ EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
};
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
@@ -2485,7 +2483,7 @@ struct ext4_filename {
#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_str crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct fscrypt_str cf_name;
#endif
};
@@ -2721,7 +2719,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern int ext4_fname_setup_ci_filename(struct inode *dir,
const struct qstr *iname,
struct ext4_filename *fname);
@@ -2754,7 +2752,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir,
ext4_fname_from_fscrypt_name(fname, &name);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, iname, fname);
#endif
return err;
@@ -2773,7 +2771,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir,
ext4_fname_from_fscrypt_name(fname, &name);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
#endif
return err;
@@ -2790,7 +2788,7 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname)
fname->usr_fname = NULL;
fname->disk_name.name = NULL;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
kfree(fname->cf_name.name);
fname->cf_name.name = NULL;
#endif
@@ -2806,7 +2804,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir,
fname->disk_name.name = (unsigned char *) iname->name;
fname->disk_name.len = iname->len;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, iname, fname);
#endif
@@ -2822,7 +2820,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir,
static inline void ext4_fname_free_filename(struct ext4_filename *fname)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
kfree(fname->cf_name.name);
fname->cf_name.name = NULL;
#endif
@@ -2926,7 +2924,7 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
struct dentry *dentry);
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle);
void ext4_fc_start_update(struct inode *inode);
void ext4_fc_stop_update(struct inode *inode);
void ext4_fc_del(struct inode *inode);
@@ -2935,6 +2933,9 @@ void ext4_fc_replay_cleanup(struct super_block *sb);
int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
int __init ext4_fc_init_dentry_cache(void);
void ext4_fc_destroy_dentry_cache(void);
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ int len, int replay);
/* mballoc.c */
extern const struct seq_operations ext4_mb_seq_groups_ops;
@@ -3407,7 +3408,7 @@ do { \
#define EXT4_FREECLUSTERS_WATERMARK 0
#endif
-/* Update i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_disksize. Requires i_rwsem to avoid races with truncate */
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
{
WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
@@ -3418,7 +3419,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
up_write(&EXT4_I(inode)->i_data_sem);
}
-/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_size, i_disksize. Requires i_rwsem to avoid races with truncate */
static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
{
int changed = 0;
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 0e4fa644df01..db2ae4a2b38d 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -491,7 +491,7 @@ static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
/*
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
- * i_mutex for direct I/O reads. This only works for extent-based
+ * i_rwsem for direct I/O reads. This only works for extent-based
* files, and it doesn't work if data journaling is enabled, since the
* dioread_nolock code uses b_private to pass information back to the
* I/O completion handler, and this conflicts with the jbd's use of
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 74c91da585d7..c0f3f83e0c1b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -97,7 +97,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
ext4_discard_preallocations(inode, 0);
@@ -4572,7 +4572,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
/* Preallocate the range including the unaligned edges */
@@ -4738,7 +4738,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
goto out;
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
@@ -5334,7 +5334,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode, 0);
@@ -5474,7 +5474,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
@@ -5571,7 +5571,7 @@ out_mutex:
* stuff such as page-cache locking consistency, bh mapping consistency or
* extent's data copying must be performed by caller.
* Locking:
- * i_mutex is held for both inodes
+ * i_rwsem is held for both inodes
* i_data_sem is locked for write for both inodes
* Assumptions:
* All pages from requested range are locked for both inodes
@@ -6091,11 +6091,15 @@ int ext4_ext_clear_bb(struct inode *inode)
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, 0);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ 0, path[j].p_block, 1, 1);
}
ext4_ext_drop_refs(path);
kfree(path);
}
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ map.m_lblk, map.m_pblk, map.m_len, 1);
}
cur = cur + map.m_len;
}
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 5ae8026a0c56..7964ee34e322 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -300,18 +300,32 @@ restart:
}
/*
- * Mark file system as fast commit ineligible. This means that next commit
- * operation would result in a full jbd2 commit.
+ * Mark the file system as fast commit ineligible, and record the latest
+ * ineligible transaction tid. This means that, up to the recorded
+ * transaction, commit operations will result in a full jbd2 commit.
*/
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason)
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ tid_t tid;
if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
return;
ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (handle && !IS_ERR(handle))
+ tid = handle->h_transaction->t_tid;
+ else {
+ read_lock(&sbi->s_journal->j_state_lock);
+ tid = sbi->s_journal->j_running_transaction ?
+ sbi->s_journal->j_running_transaction->t_tid : 0;
+ read_unlock(&sbi->s_journal->j_state_lock);
+ }
+ spin_lock(&sbi->s_fc_lock);
+ if (sbi->s_fc_ineligible_tid < tid)
+ sbi->s_fc_ineligible_tid = tid;
+ spin_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
}
@@ -361,7 +375,8 @@ static int ext4_fc_track_template(
spin_lock(&sbi->s_fc_lock);
if (list_empty(&EXT4_I(inode)->i_fc_list))
list_add_tail(&EXT4_I(inode)->i_fc_list,
- (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING)) ?
+ (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
&sbi->s_fc_q[FC_Q_STAGING] :
&sbi->s_fc_q[FC_Q_MAIN]);
spin_unlock(&sbi->s_fc_lock);
@@ -387,7 +402,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
mutex_unlock(&ei->i_fc_lock);
node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
if (!node) {
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
mutex_lock(&ei->i_fc_lock);
return -ENOMEM;
}
@@ -400,7 +415,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
if (!node->fcd_name.name) {
kmem_cache_free(ext4_fc_dentry_cachep, node);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_NOMEM);
+ EXT4_FC_REASON_NOMEM, NULL);
mutex_lock(&ei->i_fc_lock);
return -ENOMEM;
}
@@ -414,7 +429,8 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
node->fcd_name.len = dentry->d_name.len;
spin_lock(&sbi->s_fc_lock);
- if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING))
+ if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
list_add_tail(&node->fcd_list,
&sbi->s_fc_dentry_q[FC_Q_STAGING]);
else
@@ -502,7 +518,7 @@ void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
if (ext4_should_journal_data(inode)) {
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_INODE_JOURNAL_DATA);
+ EXT4_FC_REASON_INODE_JOURNAL_DATA, handle);
return;
}
@@ -879,7 +895,6 @@ static int ext4_fc_submit_inode_data_all(journal_t *journal)
int ret = 0;
spin_lock(&sbi->s_fc_lock);
- ext4_set_mount_flag(sb, EXT4_MF_FC_COMMITTING);
list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
while (atomic_read(&ei->i_fc_updates)) {
@@ -1179,7 +1194,7 @@ fallback:
* Fast commit cleanup routine. This is called after every fast commit and
* full commit. full is true if we are called after a full commit.
*/
-static void ext4_fc_cleanup(journal_t *journal, int full)
+static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1197,7 +1212,8 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- ext4_fc_reset_inode(&iter->vfs_inode);
+ if (iter->i_sync_tid <= tid)
+ ext4_fc_reset_inode(&iter->vfs_inode);
/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
smp_mb();
#if (BITS_PER_LONG < 64)
@@ -1226,8 +1242,10 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
&sbi->s_fc_q[FC_Q_MAIN]);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (tid >= sbi->s_fc_ineligible_tid) {
+ sbi->s_fc_ineligible_tid = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ }
if (full)
sbi->s_fc_bytes = 0;
@@ -1392,14 +1410,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
if (state->fc_modified_inodes[i] == ino)
return 0;
if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
- state->fc_modified_inodes_size +=
- EXT4_FC_REPLAY_REALLOC_INCREMENT;
state->fc_modified_inodes = krealloc(
- state->fc_modified_inodes, sizeof(int) *
- state->fc_modified_inodes_size,
- GFP_KERNEL);
+ state->fc_modified_inodes,
+ sizeof(int) * (state->fc_modified_inodes_size +
+ EXT4_FC_REPLAY_REALLOC_INCREMENT),
+ GFP_KERNEL);
if (!state->fc_modified_inodes)
return -ENOMEM;
+ state->fc_modified_inodes_size +=
+ EXT4_FC_REPLAY_REALLOC_INCREMENT;
}
state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
return 0;
@@ -1431,7 +1450,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
}
inode = NULL;
- ext4_fc_record_modified_inode(sb, ino);
+ ret = ext4_fc_record_modified_inode(sb, ino);
+ if (ret)
+ goto out;
raw_fc_inode = (struct ext4_inode *)
(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
@@ -1563,16 +1584,23 @@ out:
}
/*
- * Record physical disk regions which are in use as per fast commit area. Our
- * simple replay phase allocator excludes these regions from allocation.
+ * Record physical disk regions which are in use as per fast commit area,
+ * and used by inodes during replay phase. Our simple replay phase
+ * allocator excludes these regions from allocation.
*/
-static int ext4_fc_record_regions(struct super_block *sb, int ino,
- ext4_lblk_t lblk, ext4_fsblk_t pblk, int len)
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
{
struct ext4_fc_replay_state *state;
struct ext4_fc_alloc_region *region;
state = &EXT4_SB(sb)->s_fc_replay_state;
+ /*
+ * During the replay phase, fc_regions_valid may not match
+ * fc_regions_used; bring it up to date when recording new regions.
+ */
+ if (replay && state->fc_regions_used != state->fc_regions_valid)
+ state->fc_regions_used = state->fc_regions_valid;
if (state->fc_regions_used == state->fc_regions_size) {
state->fc_regions_size +=
EXT4_FC_REPLAY_REALLOC_INCREMENT;
@@ -1590,6 +1618,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
region->pblk = pblk;
region->len = len;
+ if (replay)
+ state->fc_regions_valid++;
+
return 0;
}
@@ -1621,6 +1652,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
start = le32_to_cpu(ex->ee_block);
start_pblk = ext4_ext_pblock(ex);
@@ -1638,18 +1671,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
map.m_pblk = 0;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret == 0) {
/* Range is not mapped */
path = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path)) {
- iput(inode);
- return 0;
- }
+ if (IS_ERR(path))
+ goto out;
memset(&newex, 0, sizeof(newex));
newex.ee_block = cpu_to_le32(cur);
ext4_ext_store_pblock(
@@ -1663,10 +1692,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
up_write((&EXT4_I(inode)->i_data_sem));
ext4_ext_drop_refs(path);
kfree(path);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
goto next;
}
@@ -1679,10 +1706,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex),
start_pblk + cur - start);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* Mark the old blocks as free since they aren't used
* anymore. We maintain an array of all the modified
@@ -1702,10 +1727,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
ext4_ext_is_unwritten(ex), map.m_pblk);
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex), map.m_pblk);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* We may have split the extent tree while toggling the state.
* Try to shrink the extent tree now.
@@ -1717,6 +1740,7 @@ next:
}
ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
sb->s_blocksize_bits);
+out:
iput(inode);
return 0;
}
@@ -1746,6 +1770,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
inode->i_ino, le32_to_cpu(lrange.fc_lblk),
@@ -1755,10 +1781,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
map.m_len = remaining;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret > 0) {
remaining -= ret;
cur += ret;
@@ -1770,18 +1794,17 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
}
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_ext_remove_space(inode, lrange.fc_lblk,
- lrange.fc_lblk + lrange.fc_len - 1);
+ ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
+ le32_to_cpu(lrange.fc_lblk) +
+ le32_to_cpu(lrange.fc_len) - 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
ext4_ext_replay_shrink_inode(inode,
i_size_read(inode) >> sb->s_blocksize_bits);
ext4_mark_inode_dirty(NULL, inode);
+out:
iput(inode);
-
return 0;
}
@@ -1937,7 +1960,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
ret = ext4_fc_record_regions(sb,
le32_to_cpu(ext.fc_ino),
le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
- ext4_ext_get_actual_len(ex));
+ ext4_ext_get_actual_len(ex), 0);
if (ret < 0)
break;
ret = JBD2_FC_REPLAY_CONTINUE;
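
Among the fast-commit fixes above, ext4_fc_record_modified_inode() now bumps fc_modified_inodes_size only after the krealloc() has succeeded, so a failed reallocation leaves the recorded capacity consistent with the buffer still in place. A userspace sketch of that ordering follows; the names are illustrative and a temporary pointer is used for clarity, which is not exactly how the kernel helper is written.

#include <stdio.h>
#include <stdlib.h>

#define REALLOC_INCREMENT 4   /* illustrative, not the ext4 constant */

static int record_ino(int **list, int *used, int *size, int ino)
{
	if (*used == *size) {
		int *grown = realloc(*list, sizeof(int) * (*size + REALLOC_INCREMENT));

		if (!grown)
			return -1;            /* *list and *size remain consistent */
		*list = grown;
		*size += REALLOC_INCREMENT;   /* only grown after success */
	}
	(*list)[(*used)++] = ino;
	return 0;
}

int main(void)
{
	int *inodes = NULL, used = 0, size = 0;

	for (int ino = 100; ino < 110; ino++)
		if (record_ino(&inodes, &used, &size, ino))
			return 1;
	printf("recorded %d inodes, capacity %d\n", used, size);
	free(inodes);
	return 0;
}
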
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index f34f4176c1e7..147b5241dd94 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -290,7 +290,7 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
struct dx_hash_info *hinfo)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
const struct unicode_map *um = dir->i_sb->s_encoding;
int r, dlen;
unsigned char *buff;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 89efa78ed4b2..07a8c75b65ed 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
ext4_discard_preallocations(inode, 0);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 635bcf68a67e..e42941803605 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -911,7 +911,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct page **pagep,
void **fsdata)
{
- int ret, inline_size;
+ int ret;
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
@@ -928,14 +928,9 @@ retry_journal:
goto out;
}
- inline_size = ext4_get_max_inline_size(inode);
-
- ret = -ENOSPC;
- if (inline_size >= pos + len) {
- ret = ext4_prepare_inline_data(handle, inode, pos + len);
- if (ret && ret != -ENOSPC)
- goto out_journal;
- }
+ ret = ext4_prepare_inline_data(handle, inode, pos + len);
+ if (ret && ret != -ENOSPC)
+ goto out_journal;
/*
* We cannot recurse into the filesystem as the transaction
@@ -1133,7 +1128,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc,
void *buf, int inline_size)
{
- ext4_create_inline_data(handle, inode, inline_size);
+ int ret;
+
+ ret = ext4_create_inline_data(handle, inode, inline_size);
+ if (ret) {
+ ext4_msg(inode->i_sb, KERN_EMERG,
+ "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
+ inode->i_ino, ret);
+ return;
+ }
ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5f79d265d06a..01c9e4f743ba 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -338,7 +338,7 @@ stop_handle:
return;
no_delete:
if (!list_empty(&EXT4_I(inode)->i_fc_list))
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
@@ -1224,7 +1224,7 @@ retry_journal:
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
+ * i_size_read because we hold i_rwsem.
*
* Add inode to orphan list in case we crash before
* truncate finishes
@@ -3979,7 +3979,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
/*
@@ -4129,7 +4129,7 @@ int ext4_truncate(struct inode *inode)
/*
* There is a possibility that we're either freeing the inode
* or it's a completely new inode. In those cases we might not
- * have i_mutex locked because it's not necessary.
+ * have i_rwsem locked because it's not necessary.
*/
if (!(inode->i_state & (I_NEW|I_FREEING)))
WARN_ON(!inode_is_locked(inode));
@@ -5271,7 +5271,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
* transaction are already on disk (truncate waits for pages under
* writeback).
*
- * Called with inode->i_mutex down.
+ * Called with inode->i_rwsem down.
*/
int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr)
@@ -5983,7 +5983,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
return PTR_ERR(handle);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+ EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bbbedf27b71c..a8022c2c6a58 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -411,7 +411,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
err = -EINVAL;
goto err_out;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
/* Protect extent tree against block allocations via delalloc */
ext4_double_down_write_data_sem(inode, inode_bl);
@@ -1373,7 +1373,7 @@ mext_out:
err = ext4_resize_fs(sb, n_blocks_count);
if (EXT4_SB(sb)->s_journal) {
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9f86dd947032..67ac95c4cd9b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -5753,7 +5753,8 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
struct super_block *sb = ar->inode->i_sb;
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i = sb->s_blocksize;
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_grpblk_t i = 0;
ext4_fsblk_t goal, block;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
@@ -5775,19 +5776,26 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
ext4_get_group_no_and_offset(sb,
max(ext4_group_first_block_no(sb, group), goal),
NULL, &blkoff);
- i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
+ while (1) {
+ i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
blkoff);
+ if (i >= max)
+ break;
+ if (ext4_fc_replay_check_excluded(sb,
+ ext4_group_first_block_no(sb, group) + i)) {
+ blkoff = i + 1;
+ } else
+ break;
+ }
brelse(bitmap_bh);
- if (i >= sb->s_blocksize)
- continue;
- if (ext4_fc_replay_check_excluded(sb,
- ext4_group_first_block_no(sb, group) + i))
- continue;
- break;
+ if (i < max)
+ break;
}
- if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
+ if (group >= ext4_get_groups_count(sb) || i >= max) {
+ *errp = -ENOSPC;
return 0;
+ }
block = ext4_group_first_block_no(sb, group) + i;
ext4_mb_mark_bb(sb, block, 1, 1);
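
The ext4_mb_new_blocks_simple() rework above bounds the bitmap scan by EXT4_CLUSTERS_PER_GROUP() rather than the block size, and keeps scanning past bits excluded by the fast-commit replay check instead of giving up on the whole group. A userspace sketch of that scan loop, with a stand-in bitmap helper and exclusion predicate; all names and sizes are illustrative.

#include <stdio.h>
#include <stdbool.h>

#define GROUP_BITS 64   /* stand-in for EXT4_CLUSTERS_PER_GROUP() */

static int find_next_zero_bit(const unsigned char *map, int max, int start)
{
	for (int i = start; i < max; i++)
		if (!(map[i / 8] & (1u << (i % 8))))
			return i;
	return max;
}

static bool replay_excluded(int bit)
{
	return bit == 3 || bit == 4;   /* stand-in for the replay exclusion check */
}

int main(void)
{
	unsigned char bitmap[GROUP_BITS / 8] = { 0x07 };   /* bits 0-2 already in use */
	int i = 0;

	while (1) {
		i = find_next_zero_bit(bitmap, GROUP_BITS, i);
		if (i >= GROUP_BITS)
			break;          /* group exhausted, caller moves on */
		if (replay_excluded(i))
			i++;            /* skip the excluded bit, keep scanning */
		else
			break;          /* usable free bit found */
	}
	printf("picked bit %d\n", i);
	return 0;
}
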
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index ff8916e1d38e..7a5353a8cfd7 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -485,7 +485,7 @@ int ext4_ext_migrate(struct inode *inode)
* when we add extents we extent the journal
*/
/*
- * Even though we take i_mutex we can still cause block
+ * Even though we take i_rwsem we can still cause block
* allocation via mmap write to holes. If we have allocated
* new blocks we fail migrate. New block allocation will
* clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 52c9bd154122..8cf0a924a49b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1317,7 +1317,7 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
dx_set_count(entries, count + 1);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for. If quick is set, assume the name being looked up
@@ -1428,7 +1428,7 @@ static bool ext4_match(struct inode *parent,
f.crypto_buf = fname->crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
(!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
if (fname->cf_name.name) {
@@ -1800,7 +1800,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
}
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (!inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
@@ -2308,7 +2308,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (sb_has_strict_encoding(sb) && IS_CASEFOLDED(dir) &&
sb->s_encoding && utf8_validate(sb->s_encoding, &dentry->d_name))
return -EINVAL;
@@ -3126,7 +3126,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
ext4_fc_track_unlink(handle, dentry);
retval = ext4_mark_inode_dirty(handle, dir);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
* invalidating the dentries here, alongside with returning the
@@ -3231,7 +3231,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry));
if (!retval)
ext4_fc_track_unlink(handle, dentry);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
* invalidating the dentries here, alongside with returning the
@@ -3889,7 +3889,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
* dirents in directories.
*/
ext4_fc_mark_ineligible(old.inode->i_sb,
- EXT4_FC_REASON_RENAME_DIR);
+ EXT4_FC_REASON_RENAME_DIR, handle);
} else {
if (new.inode)
ext4_fc_track_unlink(handle, new.dentry);
@@ -4049,7 +4049,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(retval))
goto end_rename;
ext4_fc_mark_ineligible(new.inode->i_sb,
- EXT4_FC_REASON_CROSS_RENAME);
+ EXT4_FC_REASON_CROSS_RENAME, handle);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index 53adc8f570a3..7de0612eb42d 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -93,7 +93,7 @@ static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
* At filesystem recovery time, we walk this list deleting unlinked
* inodes and truncating linked inodes in ext4_orphan_cleanup().
*
- * Orphan list manipulation functions must be called under i_mutex unless
+ * Orphan list manipulation functions must be called under i_rwsem unless
* we are just creating the inode or deleting it.
*/
int ext4_orphan_add(handle_t *handle, struct inode *inode)
@@ -119,7 +119,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
/*
* Orphan handling is only valid for files with data blocks
* being truncated, or files being unlinked. Note that we either
- * hold i_mutex, or the inode can not be referenced from outside,
+ * hold i_rwsem, or the inode can not be referenced from outside,
* so i_nlink should not be bumped due to race
*/
ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index eee0d9ebfa6c..c5021ca0a28a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1301,7 +1301,7 @@ static void ext4_put_super(struct super_block *sb)
kfree(sbi->s_blockgroup_lock);
fs_put_dax(sbi->s_daxdev);
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
kfree(sbi);
@@ -1961,7 +1961,7 @@ static const struct mount_opts {
{Opt_err, 0, 0}
};
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
static const struct ext4_sb_encodings {
__u16 magic;
char *name;
@@ -3606,7 +3606,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#ifndef CONFIG_UNICODE
+#if !IS_ENABLED(CONFIG_UNICODE)
if (ext4_has_feature_casefold(sb)) {
ext4_msg(sb, KERN_ERR,
"Filesystem with casefold feature cannot be "
@@ -4610,7 +4610,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
if (err < 0)
goto failed_mount;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
const struct ext4_sb_encodings *encoding_info;
struct unicode_map *encoding;
@@ -5082,7 +5082,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
sbi->s_fc_bytes = 0;
ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
+ sbi->s_fc_ineligible_tid = 0;
spin_lock_init(&sbi->s_fc_lock);
memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
sbi->s_fc_replay_state.fc_regions = NULL;
@@ -5514,7 +5514,7 @@ failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
@@ -5540,7 +5540,7 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
sbi = ext4_alloc_sbi(sb);
if (!sbi)
- ret = -ENOMEM;
+ return -ENOMEM;
fc->s_fs_info = sbi;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index f61e65ae27d8..d233c24ea342 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -309,7 +309,7 @@ EXT4_ATTR_FEATURE(meta_bg_resize);
EXT4_ATTR_FEATURE(encryption);
EXT4_ATTR_FEATURE(test_dummy_encryption_v2);
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
EXT4_ATTR_FEATURE(casefold);
#endif
#ifdef CONFIG_FS_VERITY
@@ -317,7 +317,7 @@ EXT4_ATTR_FEATURE(verity);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
EXT4_ATTR_FEATURE(fast_commit);
-#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
+#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
EXT4_ATTR_FEATURE(encrypted_casefold);
#endif
@@ -329,7 +329,7 @@ static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(encryption),
ATTR_LIST(test_dummy_encryption_v2),
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(casefold),
#endif
#ifdef CONFIG_FS_VERITY
@@ -337,7 +337,7 @@ static struct attribute *ext4_feat_attrs[] = {
#endif
ATTR_LIST(metadata_csum_seed),
ATTR_LIST(fast_commit),
-#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
+#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
ATTR_LIST(encrypted_casefold),
#endif
NULL,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 1e0fc1ed845b..042325349098 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -2408,7 +2408,7 @@ retry_inode:
if (IS_SYNC(inode))
ext4_handle_sync(handle);
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
cleanup:
brelse(is.iloc.bh);
@@ -2486,7 +2486,7 @@ retry:
if (error == 0)
error = error2;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, NULL);
return error;
}
@@ -2920,7 +2920,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
error);
goto cleanup;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
}
error = 0;
cleanup:
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 1820e9c106f7..166f08623362 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -16,7 +16,7 @@
#include "xattr.h"
#include <trace/events/f2fs.h>
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -79,7 +79,7 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de)
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = dir->i_sb;
if (IS_CASEFOLDED(dir)) {
@@ -174,7 +174,7 @@ void f2fs_free_filename(struct f2fs_filename *fname)
kfree(fname->crypto_buf.name);
fname->crypto_buf.name = NULL;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (fname->cf_name.name) {
kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
fname->cf_name.name = NULL;
@@ -208,7 +208,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
return f2fs_find_target_dentry(&d, fname, max_slots);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for.
@@ -266,7 +266,7 @@ static inline int f2fs_match_name(const struct inode *dir,
{
struct fscrypt_name f;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (fname->cf_name.name) {
struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index eb22fa91c2b2..68b44015514f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -488,7 +488,7 @@ struct f2fs_filename {
*/
struct fscrypt_str crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* For casefolded directories: the casefolded name, but it's left NULL
* if the original name is not valid Unicode, if the directory is both
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index e3beac546c63..3cb1e7a24740 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -105,7 +105,7 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
return;
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (IS_CASEFOLDED(dir)) {
/*
* If the casefolded name is provided, hash it instead of the
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a728a0af9ce0..5f213f05556d 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -561,7 +561,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out_iput;
}
out_splice:
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (!inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
@@ -622,7 +622,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
goto fail;
}
f2fs_delete_entry(de, page, dir, inode);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want avoid
* invalidating the dentries here, alongside with returning the
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 9683c80ff8c2..79773d322c47 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -46,7 +46,7 @@
static struct kmem_cache *fsync_entry_slab;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -149,7 +149,7 @@ static int init_recovered_filename(const struct inode *dir,
if (err)
return err;
f2fs_hash_filename(dir, fname);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* Case-sensitive match is fine for recovery */
kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
fname->cf_name.name = NULL;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 76e6a3df9aba..baefd398ec1a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -257,7 +257,7 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
va_end(args);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
__u16 magic;
char *name;
@@ -1259,7 +1259,7 @@ default_check:
return -EINVAL;
}
#endif
-#ifndef CONFIG_UNICODE
+#if !IS_ENABLED(CONFIG_UNICODE)
if (f2fs_sb_has_casefold(sbi)) {
f2fs_err(sbi,
"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
@@ -1619,7 +1619,7 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_iostat(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
kvfree(sbi->write_io[i]);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
kfree(sbi);
@@ -3903,7 +3903,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
const struct f2fs_sb_encodings *encoding_info;
struct unicode_map *encoding;
@@ -4458,7 +4458,7 @@ free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
kvfree(sbi->write_io[i]);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
sb->s_encoding = NULL;
#endif
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index df406c16b2eb..8ac506671245 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -201,7 +201,7 @@ static ssize_t unusable_show(struct f2fs_attr *a,
static ssize_t encoding_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = sbi->sb;
if (f2fs_sb_has_casefold(sbi))
@@ -778,7 +778,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption);
F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
F2FS_FEATURE_RO_ATTR(encrypted_casefold);
#endif
#endif /* CONFIG_FS_ENCRYPTION */
@@ -797,7 +797,7 @@ F2FS_FEATURE_RO_ATTR(lost_found);
F2FS_FEATURE_RO_ATTR(verity);
#endif
F2FS_FEATURE_RO_ATTR(sb_checksum);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
F2FS_FEATURE_RO_ATTR(casefold);
#endif
F2FS_FEATURE_RO_ATTR(readonly);
@@ -910,7 +910,7 @@ static struct attribute *f2fs_feat_attrs[] = {
#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
ATTR_LIST(test_dummy_encryption_v2),
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(encrypted_casefold),
#endif
#endif /* CONFIG_FS_ENCRYPTION */
@@ -929,7 +929,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(verity),
#endif
ATTR_LIST(sb_checksum),
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(casefold),
#endif
ATTR_LIST(readonly),
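
A note on the recurring #ifdef change: the f2fs hunks above (and the libfs.c ones later in this diff) all switch from "#ifdef CONFIG_UNICODE" to "#if IS_ENABLED(CONFIG_UNICODE)". That is needed once CONFIG_UNICODE becomes tristate (see the fs/unicode/Kconfig hunk further down): in a =m build only CONFIG_UNICODE_MODULE is defined, so a plain #ifdef would quietly compile the casefolding paths out. The standalone program below mirrors the preprocessor trick from include/linux/kconfig.h in userspace; it is only an illustration, and the CONFIG_UNICODE_MODULE define simply simulates a =m configuration.

/* Userspace sketch of the kernel's IS_ENABLED() machinery
 * (simplified from include/linux/kconfig.h). */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)              ___is_defined(x)
#define ___is_defined(val)           ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option)  __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

/* Pretend CONFIG_UNICODE=m: only the _MODULE symbol is defined to 1. */
#define CONFIG_UNICODE_MODULE 1

int main(void)
{
	printf("builtin=%d module=%d enabled=%d\n",
	       IS_BUILTIN(CONFIG_UNICODE), IS_MODULE(CONFIG_UNICODE),
	       IS_ENABLED(CONFIG_UNICODE));
	return 0;
}

With CONFIG_UNICODE=m this prints "builtin=0 module=1 enabled=1", which is why the converted checks keep the casefolding code when unicode is built as a module.
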
diff --git a/fs/file_table.c b/fs/file_table.c
index 57edef16dce4..7d2e692b66a9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -27,6 +27,7 @@
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
+#include <linux/kmemleak.h>
#include <linux/atomic.h>
@@ -119,6 +120,11 @@ static struct ctl_table fs_stat_sysctls[] = {
static int __init init_fs_stat_sysctls(void)
{
register_sysctl_init("fs", fs_stat_sysctls);
+ if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+ struct ctl_table_header *hdr;
+ hdr = register_sysctl_mount_point("fs/binfmt_misc");
+ kmemleak_not_leak(hdr);
+ }
return 0;
}
fs_initcall(init_fs_stat_sysctls);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 3e718cfc19a7..8c39a8571b1f 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -704,10 +704,11 @@ static int gfs2_release(struct inode *inode, struct file *file)
kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_rs_active(&ip->i_res))
- gfs2_rs_delete(ip, &inode->i_writecount);
- if (file->f_mode & FMODE_WRITE)
+ if (file->f_mode & FMODE_WRITE) {
+ if (gfs2_rs_active(&ip->i_res))
+ gfs2_rs_delete(ip, &inode->i_writecount);
gfs2_qa_put(ip);
+ }
return 0;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b7ab8430333c..6b23399eaee0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -301,9 +301,6 @@ void gfs2_glock_queue_put(struct gfs2_glock *gl)
void gfs2_glock_put(struct gfs2_glock *gl)
{
- /* last put could call sleepable dlm api */
- might_sleep();
-
if (lockref_put_or_lock(&gl->gl_lockref))
return;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2e04f718319d..4715980e9015 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4567,6 +4567,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
} else {
list_add_tail(&buf->list, &(*head)->list);
}
+ cond_resched();
}
return i ? i : -ENOMEM;
@@ -5228,7 +5229,6 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&msg.msg_iter);
ret = sock_recvmsg(sock, &msg, flags);
-out_free:
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock)
return -EAGAIN;
@@ -5236,9 +5236,9 @@ out_free:
ret = -EINTR;
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+out_free:
req_set_fail(req);
}
-
__io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
return 0;
}
@@ -7694,7 +7694,7 @@ static int io_run_task_work_sig(void)
/* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
- signed long *timeout)
+ ktime_t timeout)
{
int ret;
@@ -7706,8 +7706,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
if (test_bit(0, &ctx->check_cq_overflow))
return 1;
- *timeout = schedule_timeout(*timeout);
- return !*timeout ? -ETIME : 1;
+ if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+ return -ETIME;
+ return 1;
}
/*
@@ -7720,7 +7721,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
{
struct io_wait_queue iowq;
struct io_rings *rings = ctx->rings;
- signed long timeout = MAX_SCHEDULE_TIMEOUT;
+ ktime_t timeout = KTIME_MAX;
int ret;
do {
@@ -7736,7 +7737,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (get_timespec64(&ts, uts))
return -EFAULT;
- timeout = timespec64_to_jiffies(&ts);
+ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
}
if (sig) {
@@ -7768,7 +7769,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
}
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+ ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
finish_wait(&ctx->cq_wait, &iowq.wq);
cond_resched();
} while (ret > 0);
@@ -7925,7 +7926,15 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
ret = wait_for_completion_interruptible(&data->done);
if (!ret) {
mutex_lock(&ctx->uring_lock);
- break;
+ if (atomic_read(&data->refs) > 0) {
+ /*
+ * it has been revived by another thread while
+ * we were unlocked
+ */
+ mutex_unlock(&ctx->uring_lock);
+ } else {
+ break;
+ }
}
atomic_inc(&data->refs);
@@ -8933,10 +8942,9 @@ static void io_mem_free(void *ptr)
static void *io_mem_alloc(size_t size)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
- __GFP_NORETRY | __GFP_ACCOUNT;
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
- return (void *) __get_free_pages(gfp_flags, get_order(size));
+ return (void *) __get_free_pages(gfp, get_order(size));
}
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
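
For reference, the io_cqring_wait() change above replaces a relative jiffies budget (repeatedly shrunk by schedule_timeout()) with a single absolute ktime_t deadline: the user-supplied timespec is converted to nanoseconds and added to the current time once, and schedule_hrtimeout() is then called in absolute mode. A minimal userspace analogue of that relative-to-absolute conversion is sketched below; the choice of CLOCK_MONOTONIC is an assumption made for the illustration.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t timespec_to_ns(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * 1000000000ull + (uint64_t)ts->tv_nsec;
}

int main(void)
{
	struct timespec now, rel = { .tv_sec = 2, .tv_nsec = 500000000 };
	uint64_t deadline;

	clock_gettime(CLOCK_MONOTONIC, &now);
	/* absolute deadline = "now" + caller-supplied relative timeout */
	deadline = timespec_to_ns(&now) + timespec_to_ns(&rel);
	printf("wait until %llu ns on the monotonic clock\n",
	       (unsigned long long)deadline);
	return 0;
}
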
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c938bbad075e..6c51a75d0be6 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -21,6 +21,8 @@
#include "../internal.h"
+#define IOEND_BATCH_SIZE 4096
+
/*
* Structure allocated for each folio when block size < folio size
* to track sub-folio uptodate status and I/O completions.
@@ -1039,7 +1041,7 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
* state, release holds on bios, and finally free up memory. Do not use the
* ioend after this.
*/
-static void
+static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
struct inode *inode = ioend->io_inode;
@@ -1048,6 +1050,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
u64 start = bio->bi_iter.bi_sector;
loff_t offset = ioend->io_offset;
bool quiet = bio_flagged(bio, BIO_QUIET);
+ u32 folio_count = 0;
for (bio = &ioend->io_inline_bio; bio; bio = next) {
struct folio_iter fi;
@@ -1062,9 +1065,11 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
next = bio->bi_private;
/* walk all folios in bio, ending page IO on them */
- bio_for_each_folio_all(fi, bio)
+ bio_for_each_folio_all(fi, bio) {
iomap_finish_folio_write(inode, fi.folio, fi.length,
error);
+ folio_count++;
+ }
bio_put(bio);
}
/* The ioend has been freed by bio_put() */
@@ -1074,20 +1079,36 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
"%s: writeback error on inode %lu, offset %lld, sector %llu",
inode->i_sb->s_id, inode->i_ino, offset, start);
}
+ return folio_count;
}
+/*
+ * Ioend completion routine for merged bios. This can only be called from task
+ * contexts as merged ioends can be of unbound length. Hence we have to break up
+ * the writeback completions into manageable chunks to avoid long scheduler
+ * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
+ * good batch processing throughput without creating adverse scheduler latency
+ * conditions.
+ */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
struct list_head tmp;
+ u32 completions;
+
+ might_sleep();
list_replace_init(&ioend->io_list, &tmp);
- iomap_finish_ioend(ioend, error);
+ completions = iomap_finish_ioend(ioend, error);
while (!list_empty(&tmp)) {
+ if (completions > IOEND_BATCH_SIZE * 8) {
+ cond_resched();
+ completions = 0;
+ }
ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
list_del_init(&ioend->io_list);
- iomap_finish_ioend(ioend, error);
+ completions += iomap_finish_ioend(ioend, error);
}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
@@ -1108,6 +1129,18 @@ iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
return false;
if (ioend->io_offset + ioend->io_size != next->io_offset)
return false;
+ /*
+ * Do not merge physically discontiguous ioends. The filesystem
+ * completion functions will have to iterate the physical
+ * discontiguities even if we merge the ioends at a logical level, so
+ * we don't gain anything by merging physical discontiguities here.
+ *
+ * We cannot use bio->bi_iter.bi_sector here as it is modified during
+ * submission so does not point to the start sector of the bio at
+ * completion.
+ */
+ if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
+ return false;
return true;
}
@@ -1209,8 +1242,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
ioend->io_flags = wpc->iomap.flags;
ioend->io_inode = inode;
ioend->io_size = 0;
+ ioend->io_folios = 0;
ioend->io_offset = offset;
ioend->io_bio = bio;
+ ioend->io_sector = sector;
return ioend;
}
@@ -1251,6 +1286,13 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
return false;
if (sector != bio_end_sector(wpc->ioend->io_bio))
return false;
+ /*
+ * Limit ioend bio chain lengths to minimise IO completion latency. This
+ * also prevents long tight loops ending page writeback on all the
+ * folios in the ioend.
+ */
+ if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
+ return false;
return true;
}
@@ -1335,6 +1377,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
&submit_list);
count++;
}
+ if (count)
+ wpc->ioend->io_folios++;
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
WARN_ON_ONCE(!folio_test_locked(folio));
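
The comment added above iomap_finish_ioends() explains the new batching: iomap_finish_ioend() now returns how many folios it completed, and the merged-ioend loop calls cond_resched() once roughly IOEND_BATCH_SIZE * 8 folios have been processed. A rough userspace sketch of that budget-and-yield pattern follows; sched_yield() stands in for cond_resched() and all numbers are illustrative only.

#include <sched.h>
#include <stdio.h>

#define BATCH_LIMIT (4096 * 8)	/* mirrors IOEND_BATCH_SIZE * 8 above */

/* Pretend each "ioend" completes some number of folios. */
static unsigned int finish_one_ioend(void)
{
	return 17;
}

int main(void)
{
	unsigned int completions = 0;

	for (int i = 0; i < 100000; i++) {
		if (completions > BATCH_LIMIT) {
			sched_yield();	/* kernel code uses cond_resched() */
			completions = 0;
		}
		completions += finish_one_ioend();
	}
	printf("all ioends finished without hogging the CPU\n");
	return 0;
}
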
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 3cc4ab2ba7f4..5b9408e3b370 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -484,22 +484,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
stats.run.rs_locked);
- spin_lock(&commit_transaction->t_handle_lock);
- while (atomic_read(&commit_transaction->t_updates)) {
- DEFINE_WAIT(wait);
+ // waits for any t_updates to finish
+ jbd2_journal_wait_updates(journal);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&commit_transaction->t_updates)) {
- spin_unlock(&commit_transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- write_lock(&journal->j_state_lock);
- spin_lock(&commit_transaction->t_handle_lock);
- }
- finish_wait(&journal->j_wait_updates, &wait);
- }
- spin_unlock(&commit_transaction->t_handle_lock);
commit_transaction->t_state = T_SWITCH;
write_unlock(&journal->j_state_lock);
@@ -817,7 +804,7 @@ start_journal_io:
commit_transaction->t_state = T_COMMIT_DFLUSH;
write_unlock(&journal->j_state_lock);
- /*
+ /*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
* the commit record
@@ -1170,7 +1157,7 @@ restart_loop:
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 1);
+ journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
trace_jbd2_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: commit %d complete, head %d\n",
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index bf108d4493d2..c2cf74b01ddb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -771,7 +771,7 @@ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
jbd2_journal_unlock_updates(journal);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 0);
+ journal->j_fc_cleanup_callback(journal, 0, tid);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
@@ -1287,6 +1287,8 @@ static int jbd2_min_tag_size(void)
/**
* jbd2_journal_shrink_scan()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Scan the checkpointed buffer on the checkpoint list and release the
* journal_head.
@@ -1312,6 +1314,8 @@ static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
/**
* jbd2_journal_shrink_count()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Count the number of checkpoint buffers on the checkpoint list.
*/
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6a3caedd2285..8e2f8275a253 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -449,7 +449,7 @@ repeat:
}
/* OK, account for the buffers that this operation expects to
- * use and add the handle to the running transaction.
+ * use and add the handle to the running transaction.
*/
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
@@ -836,6 +836,35 @@ int jbd2_journal_restart(handle_t *handle, int nblocks)
}
EXPORT_SYMBOL(jbd2_journal_restart);
+/*
+ * Waits for any outstanding t_updates to finish.
+ * This is called with write j_state_lock held.
+ */
+void jbd2_journal_wait_updates(journal_t *journal)
+{
+ transaction_t *commit_transaction = journal->j_running_transaction;
+
+ if (!commit_transaction)
+ return;
+
+ spin_lock(&commit_transaction->t_handle_lock);
+ while (atomic_read(&commit_transaction->t_updates)) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&journal->j_wait_updates, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&commit_transaction->t_updates)) {
+ spin_unlock(&commit_transaction->t_handle_lock);
+ write_unlock(&journal->j_state_lock);
+ schedule();
+ write_lock(&journal->j_state_lock);
+ spin_lock(&commit_transaction->t_handle_lock);
+ }
+ finish_wait(&journal->j_wait_updates, &wait);
+ }
+ spin_unlock(&commit_transaction->t_handle_lock);
+}
+
/**
* jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
@@ -863,27 +892,9 @@ void jbd2_journal_lock_updates(journal_t *journal)
write_lock(&journal->j_state_lock);
}
- /* Wait until there are no running updates */
- while (1) {
- transaction_t *transaction = journal->j_running_transaction;
-
- if (!transaction)
- break;
+ /* Wait until there are no running t_updates */
+ jbd2_journal_wait_updates(journal);
- spin_lock(&transaction->t_handle_lock);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&transaction->t_updates)) {
- spin_unlock(&transaction->t_handle_lock);
- finish_wait(&journal->j_wait_updates, &wait);
- break;
- }
- spin_unlock(&transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- finish_wait(&journal->j_wait_updates, &wait);
- write_lock(&journal->j_state_lock);
- }
write_unlock(&journal->j_state_lock);
/*
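
The two deleted wait loops above (in jbd2_journal_commit_transaction() and jbd2_journal_lock_updates()) are now both served by the new jbd2_journal_wait_updates() helper: sleep until the running transaction's t_updates count drains to zero, re-checking the count after every wakeup. The pthread program below captures the same shape in userspace, with a condition variable in place of j_wait_updates and one mutex in place of the journal locks; it is an analogy for the wait pattern, not a model of the jbd2 locking rules. Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int t_updates = 3;	/* pretend three handles are still running */

static void *handle_finishes(void *arg)
{
	(void)arg;
	usleep(1000);
	pthread_mutex_lock(&lock);
	if (--t_updates == 0)
		pthread_cond_signal(&drained);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wait_updates(void)
{
	pthread_mutex_lock(&lock);
	while (t_updates)		/* always re-check after waking up */
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid[3];

	for (int i = 0; i < 3; i++)
		pthread_create(&tid[i], NULL, handle_finishes, NULL);
	wait_updates();
	for (int i = 0; i < 3; i++)
		pthread_join(tid[i], NULL);
	printf("all updates finished\n");
	return 0;
}
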
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index dc3d061edda9..911444d21267 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -29,6 +29,7 @@
#include "mgmt/user_config.h"
#include "crypto_ctx.h"
#include "transport_ipc.h"
+#include "../smbfs_common/arc4.h"
/*
* Fixed format data defining GSS header and fixed string
@@ -336,6 +337,29 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
nt_len - CIFS_ENCPWD_SIZE,
domain_name, conn->ntlmssp.cryptkey);
kfree(domain_name);
+
+ /* The recovered secondary session key */
+ if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) {
+ struct arc4_ctx *ctx_arc4;
+ unsigned int sess_key_off, sess_key_len;
+
+ sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset);
+ sess_key_len = le16_to_cpu(authblob->SessionKey.Length);
+
+ if (blob_len < (u64)sess_key_off + sess_key_len)
+ return -EINVAL;
+
+ ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+ if (!ctx_arc4)
+ return -ENOMEM;
+
+ cifs_arc4_setkey(ctx_arc4, sess->sess_key,
+ SMB2_NTLMV2_SESSKEY_SIZE);
+ cifs_arc4_crypt(ctx_arc4, sess->sess_key,
+ (char *)authblob + sess_key_off, sess_key_len);
+ kfree_sensitive(ctx_arc4);
+ }
+
return ret;
}
@@ -408,6 +432,9 @@ ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
(cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+ if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+
chgblob->NegotiateFlags = cpu_to_le32(flags);
len = strlen(ksmbd_netbios_name());
name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 1866c81c5c99..67e8e28e3fc3 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -2688,7 +2688,7 @@ int smb2_open(struct ksmbd_work *work)
(struct create_posix *)context;
if (le16_to_cpu(context->DataOffset) +
le32_to_cpu(context->DataLength) <
- sizeof(struct create_posix)) {
+ sizeof(struct create_posix) - 4) {
rc = -EINVAL;
goto err_out1;
}
@@ -3422,9 +3422,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
goto free_conv_name;
}
- struct_sz = readdir_info_level_struct_sz(info_level);
- next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
- KSMBD_DIR_INFO_ALIGNMENT);
+ struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
+ next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
+ d_info->last_entry_off_align = next_entry_offset - struct_sz;
if (next_entry_offset > d_info->out_buf_len) {
d_info->out_buf_len = 0;
@@ -3976,6 +3976,7 @@ int smb2_query_dir(struct ksmbd_work *work)
((struct file_directory_info *)
((char *)rsp->Buffer + d_info.last_entry_offset))
->NextEntryOffset = 0;
+ d_info.data_count -= d_info.last_entry_off_align;
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
@@ -6126,13 +6127,26 @@ static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
__le16 ChannelInfoOffset,
__le16 ChannelInfoLength)
{
+ unsigned int i, ch_count;
+
if (work->conn->dialect == SMB30_PROT_ID &&
Channel != SMB2_CHANNEL_RDMA_V1)
return -EINVAL;
- if (ChannelInfoOffset == 0 ||
- le16_to_cpu(ChannelInfoLength) < sizeof(*desc))
+ ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc);
+ if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) {
+ for (i = 0; i < ch_count; i++) {
+ pr_info("RDMA r/w request %#x: token %#x, length %#x\n",
+ i,
+ le32_to_cpu(desc[i].token),
+ le32_to_cpu(desc[i].length));
+ }
+ }
+ if (ch_count != 1) {
+ ksmbd_debug(RDMA, "RDMA multiple buffer descriptors %d are not supported yet\n",
+ ch_count);
return -EINVAL;
+ }
work->need_invalidate_rkey =
(Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
@@ -6185,9 +6199,15 @@ int smb2_read(struct ksmbd_work *work)
if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
req->Channel == SMB2_CHANNEL_RDMA_V1) {
+ unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
+
+ if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
+ err = -EINVAL;
+ goto out;
+ }
err = smb2_set_remote_key_for_rdma(work,
(struct smb2_buffer_desc_v1 *)
- &req->Buffer[0],
+ ((char *)req + ch_offset),
req->Channel,
req->ReadChannelInfoOffset,
req->ReadChannelInfoLength);
@@ -6428,11 +6448,16 @@ int smb2_write(struct ksmbd_work *work)
if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
- if (req->Length != 0 || req->DataOffset != 0)
- return -EINVAL;
+ unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
+
+ if (req->Length != 0 || req->DataOffset != 0 ||
+ ch_offset < offsetof(struct smb2_write_req, Buffer)) {
+ err = -EINVAL;
+ goto out;
+ }
err = smb2_set_remote_key_for_rdma(work,
(struct smb2_buffer_desc_v1 *)
- &req->Buffer[0],
+ ((char *)req + ch_offset),
req->Channel,
req->WriteChannelInfoOffset,
req->WriteChannelInfoLength);
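
The smb2_read() and smb2_write() hunks above stop trusting the client-supplied channel-info offset: it must not point back into the fixed portion of the request, which is what the offsetof(struct smb2_read_req, Buffer) comparison (and its write-side twin) enforces before the buffer descriptor is dereferenced. The sketch below shows the same offsetof-based check against an invented request layout, so the struct and its 72-byte fixed header are assumptions for illustration, not the real SMB2 wire format.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_req {
	uint8_t hdr[72];	/* stand-in for the fixed request header */
	uint8_t Buffer[];	/* variable-length part follows */
};

static int validate_ch_offset(size_t ch_offset)
{
	if (ch_offset < offsetof(struct fake_req, Buffer))
		return -1;	/* would alias the fixed header: reject */
	return 0;
}

int main(void)
{
	printf("offset 16 -> %d, offset 80 -> %d\n",
	       validate_ch_offset(16), validate_ch_offset(80));
	return 0;
}
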
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index ef7f42b0290a..9a7e211dbf4f 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -308,14 +308,17 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
for (i = 0; i < 2; i++) {
struct kstat kstat;
struct ksmbd_kstat ksmbd_kstat;
+ struct dentry *dentry;
if (!dir->dot_dotdot[i]) { /* fill dot entry info */
if (i == 0) {
d_info->name = ".";
d_info->name_len = 1;
+ dentry = dir->filp->f_path.dentry;
} else {
d_info->name = "..";
d_info->name_len = 2;
+ dentry = dir->filp->f_path.dentry->d_parent;
}
if (!match_pattern(d_info->name, d_info->name_len,
@@ -327,7 +330,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
ksmbd_kstat.kstat = &kstat;
ksmbd_vfs_fill_dentry_attrs(work,
user_ns,
- dir->filp->f_path.dentry->d_parent,
+ dentry,
&ksmbd_kstat);
rc = fn(conn, info_level, d_info, &ksmbd_kstat);
if (rc)
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 3c1ec1ac0b27..ba5a22bc2e6d 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -80,7 +80,7 @@ static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 8192;
-static int smb_direct_max_read_write_size = 1048512;
+static int smb_direct_max_read_write_size = 524224;
static int smb_direct_max_outstanding_rw_ops = 8;
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index adf94a4f22fa..8c37aaf936ab 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -47,6 +47,7 @@ struct ksmbd_dir_info {
int last_entry_offset;
bool hide_dot_file;
int flags;
+ int last_entry_off_align;
};
struct ksmbd_readdir_data {
diff --git a/fs/libfs.c b/fs/libfs.c
index ba7438ab9371..974125270a42 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1379,7 +1379,7 @@ bool is_empty_dir_inode(struct inode *inode)
(inode->i_op == &empty_dir_inode_operations);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* Determine if the name of a dentry should be casefolded.
*
@@ -1473,7 +1473,7 @@ static const struct dentry_operations generic_encrypted_dentry_ops = {
};
#endif
-#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
+#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE)
static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
@@ -1508,10 +1508,10 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
#ifdef CONFIG_FS_ENCRYPTION
bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
bool needs_ci_ops = dentry->d_sb->s_encoding;
#endif
-#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
+#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE)
if (needs_encrypt_ops && needs_ci_ops) {
d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
return;
@@ -1523,7 +1523,7 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
return;
}
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (needs_ci_ops) {
d_set_d_op(dentry, &generic_ci_dentry_ops);
return;
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index cb3a7512c33e..0a22a2faf552 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -179,19 +179,21 @@ nlm_delete_file(struct nlm_file *file)
static int nlm_unlock_files(struct nlm_file *file)
{
struct file_lock lock;
- struct file *f;
+ locks_init_lock(&lock);
lock.fl_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
- for (f = file->f_file[0]; f <= file->f_file[1]; f++) {
- if (f && vfs_lock_file(f, F_SETLK, &lock, NULL) < 0) {
- pr_warn("lockd: unlock failure in %s:%d\n",
- __FILE__, __LINE__);
- return 1;
- }
- }
+ if (file->f_file[O_RDONLY] &&
+ vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
+ goto out_err;
+ if (file->f_file[O_WRONLY] &&
+ vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL))
+ goto out_err;
return 0;
+out_err:
+ pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__);
+ return 1;
}
/*
diff --git a/fs/namespace.c b/fs/namespace.c
index 40b994a29e90..de6fae84f1a1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -469,6 +469,24 @@ void mnt_drop_write_file(struct file *file)
}
EXPORT_SYMBOL(mnt_drop_write_file);
+/**
+ * mnt_hold_writers - prevent write access to the given mount
+ * @mnt: mnt to prevent write access to
+ *
+ * Prevents write access to @mnt if there are no active writers for @mnt.
+ * This function needs to be called and return successfully before changing
+ * properties of @mnt that need to remain stable for callers with write access
+ * to @mnt.
+ *
+ * After this function has been called successfully, callers must pair it with
+ * a call to mnt_unhold_writers() in order to stop preventing write access to
+ * @mnt.
+ *
+ * Context: This function expects lock_mount_hash() to be held serializing
+ * setting MNT_WRITE_HOLD.
+ * Return: On success 0 is returned.
+ * On error, -EBUSY is returned.
+ */
static inline int mnt_hold_writers(struct mount *mnt)
{
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -500,6 +518,18 @@ static inline int mnt_hold_writers(struct mount *mnt)
return 0;
}
+/**
+ * mnt_unhold_writers - stop preventing write access to the given mount
+ * @mnt: mnt to stop preventing write access to
+ *
+ * Stop preventing write access to @mnt allowing callers to gain write access
+ * to @mnt again.
+ *
+ * This function can only be called after a successful call to
+ * mnt_hold_writers().
+ *
+ * Context: This function expects lock_mount_hash() to be held.
+ */
static inline void mnt_unhold_writers(struct mount *mnt)
{
/*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index f18e80fda9bf..d1f34229e11a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -177,6 +177,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
INIT_LIST_HEAD(&clp->cl_superblocks);
clp->cl_rpcclient = ERR_PTR(-EINVAL);
+ clp->cl_flags = cl_init->init_flags;
clp->cl_proto = cl_init->proto;
clp->cl_nconnect = cl_init->nconnect;
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
@@ -423,7 +424,6 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- new->cl_flags = cl_init->init_flags;
return rpc_ops->init_client(new, cl_init);
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 848f3b8fb821..75cb1cbe4cde 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -80,6 +80,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
ctx->dir_cookie = 0;
ctx->dup_cookie = 0;
ctx->page_index = 0;
+ ctx->eof = false;
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
@@ -168,6 +169,7 @@ struct nfs_readdir_descriptor {
unsigned int cache_entry_index;
signed char duped;
bool plus;
+ bool eob;
bool eof;
};
@@ -867,7 +869,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
status = nfs_readdir_page_filler(desc, entry, pages, pglen,
arrays, narrays);
- } while (!status && nfs_readdir_page_needs_filling(page));
+ } while (!status && nfs_readdir_page_needs_filling(page) &&
+ page_mapping(page));
nfs_readdir_free_pages(pages, array_size);
out:
@@ -988,7 +991,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->name, ent->name_len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
- desc->eof = true;
+ desc->eob = true;
break;
}
memcpy(desc->verf, verf, sizeof(desc->verf));
@@ -1004,7 +1007,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
desc->duped = 1;
}
if (array->page_is_eof)
- desc->eof = true;
+ desc->eof = !desc->eob;
kunmap(desc->page);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
@@ -1041,12 +1044,13 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
goto out;
desc->page_index = 0;
+ desc->cache_entry_index = 0;
desc->last_cookie = desc->dir_cookie;
desc->duped = 0;
status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz);
- for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
+ for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
desc->page = arrays[i];
nfs_do_filldir(desc, verf);
}
@@ -1105,9 +1109,15 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->duped = dir_ctx->duped;
page_index = dir_ctx->page_index;
desc->attr_gencount = dir_ctx->attr_gencount;
+ desc->eof = dir_ctx->eof;
memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
spin_unlock(&file->f_lock);
+ if (desc->eof) {
+ res = 0;
+ goto out_free;
+ }
+
if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) &&
list_is_singular(&nfsi->open_files))
invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1);
@@ -1141,7 +1151,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
nfs_do_filldir(desc, nfsi->cookieverf);
nfs_readdir_page_unlock_and_put_cached(desc);
- } while (!desc->eof);
+ } while (!desc->eob && !desc->eof);
spin_lock(&file->f_lock);
dir_ctx->dir_cookie = desc->dir_cookie;
@@ -1149,9 +1159,10 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
dir_ctx->duped = desc->duped;
dir_ctx->attr_gencount = desc->attr_gencount;
dir_ctx->page_index = desc->page_index;
+ dir_ctx->eof = desc->eof;
memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
spin_unlock(&file->f_lock);
-
+out_free:
kfree(desc);
out:
@@ -1193,6 +1204,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
if (offset == 0)
memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf));
dir_ctx->duped = 0;
+ dir_ctx->eof = false;
}
spin_unlock(&filp->f_lock);
return offset;
@@ -1998,14 +2010,14 @@ no_open:
if (!res) {
inode = d_inode(dentry);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
- !S_ISDIR(inode->i_mode))
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
res = ERR_PTR(-ENOTDIR);
else if (inode && S_ISREG(inode->i_mode))
res = ERR_PTR(-EOPENSTALE);
} else if (!IS_ERR(res)) {
inode = d_inode(res);
if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
- !S_ISDIR(inode->i_mode)) {
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
dput(res);
res = ERR_PTR(-ENOTDIR);
} else if (inode && S_ISREG(inode->i_mode)) {
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index a918c3a834b6..d96baa4450e3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -853,12 +853,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
}
/* Flush out writes to the server in order to update c/mtime. */
- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
- S_ISREG(inode->i_mode)) {
- err = filemap_write_and_wait(inode->i_mapping);
- if (err)
- goto out;
- }
+ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
+ S_ISREG(inode->i_mode))
+ filemap_write_and_wait(inode->i_mapping);
/*
* We may force a getattr if the user cares about atime.
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b18f31b2c9e7..0e0db6c27619 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1229,8 +1229,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
- NFS_INO_REVAL_PAGECACHE;
+ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
}
nfsi->attrtimeo_timestamp = jiffies;
@@ -8032,7 +8031,8 @@ static int _nfs41_proc_get_locations(struct nfs_server *server,
/**
* nfs4_proc_get_locations - discover locations for a migrated FSID
- * @inode: inode on FSID that is migrating
+ * @server: pointer to nfs_server to process
+ * @fhandle: pointer to the kernel NFS client file handle
* @locations: result of query
* @page: buffer
* @cred: credential to use for this operation
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 8ef53f6726ec..936eebd4c56d 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -150,13 +150,17 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
unsigned int len;
int v;
- argp->count = min_t(u32, argp->count, max_blocksize);
-
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long long) argp->offset);
+ argp->count = min_t(u32, argp->count, max_blocksize);
+ if (argp->offset > (u64)OFFSET_MAX)
+ argp->offset = (u64)OFFSET_MAX;
+ if (argp->offset + argp->count > (u64)OFFSET_MAX)
+ argp->count = (u64)OFFSET_MAX - argp->offset;
+
v = 0;
len = argp->count;
resp->pages = rqstp->rq_next_page;
@@ -199,6 +203,11 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
(unsigned long long) argp->offset,
argp->stable? " stable" : "");
+ resp->status = nfserr_fbig;
+ if (argp->offset > (u64)OFFSET_MAX ||
+ argp->offset + argp->len > (u64)OFFSET_MAX)
+ return rpc_success;
+
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
nvecs = svc_fill_write_vector(rqstp, &argp->payload);
@@ -651,15 +660,9 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
argp->count,
(unsigned long long) argp->offset);
- if (argp->offset > NFS_OFFSET_MAX) {
- resp->status = nfserr_inval;
- goto out;
- }
-
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_commit(rqstp, &resp->fh, argp->offset,
argp->count, resp->verf);
-out:
return rpc_success;
}
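
The READ hunks above, and the matching NFSv4 read change later in this diff, clamp the client-supplied range instead of rejecting it: the unsigned 64-bit offset is capped at OFFSET_MAX and the count is trimmed so that offset + count cannot pass it (the write paths instead return nfserr_fbig for such ranges). The arithmetic in isolation looks like the sketch below, taking OFFSET_MAX to be the largest signed 64-bit value, as it is for the kernel's loff_t.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX ((uint64_t)INT64_MAX)

static void clamp_range(uint64_t *offset, uint32_t *count)
{
	if (*offset > OFFSET_MAX)
		*offset = OFFSET_MAX;
	if (*offset + *count > OFFSET_MAX)
		*count = (uint32_t)(OFFSET_MAX - *offset);
}

int main(void)
{
	uint64_t off = OFFSET_MAX - 10;
	uint32_t cnt = 100;

	clamp_range(&off, &cnt);
	printf("offset=%" PRIu64 " count=%" PRIu32 "\n", off, cnt);
	return 0;
}
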
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 7c45ba4db61b..0293b8d65f10 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -254,7 +254,7 @@ svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (xdr_stream_decode_u64(xdr, &newsize) < 0)
return false;
iap->ia_valid |= ATTR_SIZE;
- iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX);
+ iap->ia_size = newsize;
}
if (xdr_stream_decode_u32(xdr, &set_it) < 0)
return false;
@@ -1060,7 +1060,7 @@ svcxdr_encode_entry3_common(struct nfsd3_readdirres *resp, const char *name,
return false;
/* cookie */
resp->cookie_offset = dirlist->len;
- if (xdr_stream_encode_u64(xdr, NFS_OFFSET_MAX) < 0)
+ if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0)
return false;
return true;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index ed1ee25647be..b207c76a873f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -782,12 +782,16 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
read->rd_nf = NULL;
- if (read->rd_offset >= OFFSET_MAX)
- return nfserr_inval;
trace_nfsd_read_start(rqstp, &cstate->current_fh,
read->rd_offset, read->rd_length);
+ read->rd_length = min_t(u32, read->rd_length, svc_max_payload(rqstp));
+ if (read->rd_offset > (u64)OFFSET_MAX)
+ read->rd_offset = (u64)OFFSET_MAX;
+ if (read->rd_offset + read->rd_length > (u64)OFFSET_MAX)
+ read->rd_length = (u64)OFFSET_MAX - read->rd_offset;
+
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
@@ -1018,8 +1022,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
unsigned long cnt;
int nvecs;
- if (write->wr_offset >= OFFSET_MAX)
- return nfserr_inval;
+ if (write->wr_offset > (u64)OFFSET_MAX ||
+ write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
+ return nfserr_fbig;
cnt = write->wr_buflen;
trace_nfsd_write_start(rqstp, &cstate->current_fh,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 72900b89cf84..32063733443d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4130,8 +4130,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
status = nfserr_clid_inuse;
if (client_has_state(old)
&& !same_creds(&unconf->cl_cred,
- &old->cl_cred))
+ &old->cl_cred)) {
+ old = NULL;
goto out;
+ }
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 899de438e529..714a3a3bd50c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3495,7 +3495,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
p = xdr_reserve_space(xdr, 3*4 + namlen);
if (!p)
goto fail;
- p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
+ p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
@@ -3986,10 +3986,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
}
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
if (file->f_op->splice_read &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
@@ -4826,10 +4824,8 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr_resource;
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
count = maxcount;
eof = read->rd_offset >= i_size_read(file_inode(file));
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index c4cf56327843..5889db66409d 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -306,14 +306,14 @@ TRACE_EVENT(nfsd_export_update,
DECLARE_EVENT_CLASS(nfsd_io_class,
TP_PROTO(struct svc_rqst *rqstp,
struct svc_fh *fhp,
- loff_t offset,
- unsigned long len),
+ u64 offset,
+ u32 len),
TP_ARGS(rqstp, fhp, offset, len),
TP_STRUCT__entry(
__field(u32, xid)
__field(u32, fh_hash)
- __field(loff_t, offset)
- __field(unsigned long, len)
+ __field(u64, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
@@ -321,7 +321,7 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
__entry->offset = offset;
__entry->len = len;
),
- TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu",
+ TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u",
__entry->xid, __entry->fh_hash,
__entry->offset, __entry->len)
)
@@ -330,8 +330,8 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
TP_PROTO(struct svc_rqst *rqstp, \
struct svc_fh *fhp, \
- loff_t offset, \
- unsigned long len), \
+ u64 offset, \
+ u32 len), \
TP_ARGS(rqstp, fhp, offset, len))
DEFINE_NFSD_IO_EVENT(read_start);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 99c2b9dfbb10..91600e71be19 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -435,6 +435,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
.ia_size = iap->ia_size,
};
+ host_err = -EFBIG;
+ if (iap->ia_size < 0)
+ goto out_unlock;
+
host_err = notify_change(&init_user_ns, dentry, &size_attr, NULL);
if (host_err)
goto out_unlock;
@@ -1110,42 +1114,61 @@ out:
}
#ifdef CONFIG_NFSD_V3
-/*
- * Commit all pending writes to stable storage.
+/**
+ * nfsd_commit - Commit pending writes to stable storage
+ * @rqstp: RPC request being processed
+ * @fhp: NFS filehandle
+ * @offset: raw offset from beginning of file
+ * @count: raw count of bytes to sync
+ * @verf: filled in with the server's current write verifier
*
- * Note: we only guarantee that data that lies within the range specified
- * by the 'offset' and 'count' parameters will be synced.
+ * Note: we guarantee that data that lies within the range specified
+ * by the 'offset' and 'count' parameters will be synced. The server
+ * is permitted to sync data that lies outside this range at the
+ * same time.
*
* Unfortunately we cannot lock the file to make sure we return full WCC
* data to the client, as locking happens lower down in the filesystem.
+ *
+ * Return values:
+ * An nfsstat value in network byte order.
*/
__be32
-nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
- loff_t offset, unsigned long count, __be32 *verf)
+nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+ u32 count, __be32 *verf)
{
+ u64 maxbytes;
+ loff_t start, end;
struct nfsd_net *nn;
struct nfsd_file *nf;
- loff_t end = LLONG_MAX;
- __be32 err = nfserr_inval;
-
- if (offset < 0)
- goto out;
- if (count != 0) {
- end = offset + (loff_t)count - 1;
- if (end < offset)
- goto out;
- }
+ __be32 err;
err = nfsd_file_acquire(rqstp, fhp,
NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &nf);
if (err)
goto out;
+
+ /*
+ * Convert the client-provided (offset, count) range to a
+ * (start, end) range. If the client-provided range falls
+ * outside the maximum file size of the underlying FS,
+ * clamp the sync range appropriately.
+ */
+ start = 0;
+ end = LLONG_MAX;
+ maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
+ if (offset < maxbytes) {
+ start = offset;
+ if (count && (offset + count - 1 < maxbytes))
+ end = offset + count - 1;
+ }
+
nn = net_generic(nf->nf_net, nfsd_net_id);
if (EX_ISSYNC(fhp->fh_export)) {
errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
int err2;
- err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+ err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
switch (err2) {
case 0:
nfsd_copy_write_verifier(verf, nn);
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 9f56dcb22ff7..2c43d10e3cab 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -74,8 +74,8 @@ __be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
struct svc_fh *res, int createmode,
u32 *verifier, bool *truncp, bool *created);
-__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
- loff_t, unsigned long, __be32 *verf);
+__be32 nfsd_commit(struct svc_rqst *rqst, struct svc_fh *fhp,
+ u64 offset, u32 count, __be32 *verf);
#endif /* CONFIG_NFSD_V3 */
#ifdef CONFIG_NFSD_V4
__be32 nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 1026f67b1d1e..2ff6bd85ba8f 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -701,9 +701,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
if (fanotify_is_perm_event(event->mask))
FANOTIFY_PERM(event)->fd = fd;
- if (f)
- fd_install(fd, f);
-
if (info_mode) {
ret = copy_info_records_to_user(event, info, info_mode, pidfd,
buf, count);
@@ -711,6 +708,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
goto out_close_fd;
}
+ if (f)
+ fd_install(fd, f);
+
return metadata.event_len;
out_close_fd:
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index b193d08a3dc3..e040970408d4 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -145,7 +145,7 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
if (err == -ENOTTY || err == -EINVAL)
return 0;
pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
- old, err);
+ old->dentry, err);
return err;
}
@@ -157,7 +157,9 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
*/
if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
err = ovl_set_protattr(inode, new->dentry, &oldfa);
- if (err)
+ if (err == -EPERM)
+ pr_warn_once("copying fileattr: no xattr on upper\n");
+ else if (err)
return err;
}
@@ -167,8 +169,16 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
err = ovl_real_fileattr_get(new, &newfa);
if (err) {
+ /*
+ * Returning an error if upper doesn't support fileattr will
+ * result in a regression, so revert to the old behavior.
+ */
+ if (err == -ENOTTY || err == -EINVAL) {
+ pr_warn_once("copying fileattr: no support on upper\n");
+ return 0;
+ }
pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
- new, err);
+ new->dentry, err);
return err;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 18f8c3acbb85..f46060eb91b5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -309,7 +309,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
name = arch_vma_name(vma);
if (!name) {
- const char *anon_name;
+ struct anon_vma_name *anon_name;
if (!mm) {
name = "[vdso]";
@@ -327,10 +327,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
goto done;
}
- anon_name = vma_anon_name(vma);
+ anon_name = anon_vma_name(vma);
if (anon_name) {
seq_pad(m, ' ');
- seq_printf(m, "[anon:%s]", anon_name);
+ seq_printf(m, "[anon:%s]", anon_name->name);
}
}
@@ -440,7 +440,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
}
static void smaps_account(struct mem_size_stats *mss, struct page *page,
- bool compound, bool young, bool dirty, bool locked)
+ bool compound, bool young, bool dirty, bool locked,
+ bool migration)
{
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
@@ -467,8 +468,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
* page_count(page) == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page mapped with PTE it would elevate
* page_count().
+ *
+ * The page_mapcount() is called to get a snapshot of the mapcount.
+ * Without holding the page lock this snapshot can be slightly wrong as
+ * we cannot always read the mapcount atomically. It is not safe to
+ * call page_mapcount() even with PTL held if the page is not mapped,
+ * especially for migration entries. Treat regular migration entries
+ * as mapcount == 1.
*/
- if (page_count(page) == 1) {
+ if ((page_count(page) == 1) || migration) {
smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
locked, true);
return;
@@ -517,6 +525,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ bool migration = false;
if (pte_present(*pte)) {
page = vm_normal_page(vma, addr, *pte);
@@ -536,8 +545,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
- } else if (is_pfn_swap_entry(swpent))
+ } else if (is_pfn_swap_entry(swpent)) {
+ if (is_migration_entry(swpent))
+ migration = true;
page = pfn_swap_entry_to_page(swpent);
+ }
} else {
smaps_pte_hole_lookup(addr, walk);
return;
@@ -546,7 +558,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
+ locked, migration);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -557,6 +570,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ bool migration = false;
if (pmd_present(*pmd)) {
/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -564,8 +578,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
- if (is_migration_entry(entry))
+ if (is_migration_entry(entry)) {
+ migration = true;
page = pfn_swap_entry_to_page(entry);
+ }
}
if (IS_ERR_OR_NULL(page))
return;
@@ -577,7 +593,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
/* pass */;
else
mss->file_thp += HPAGE_PMD_SIZE;
- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+
+ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
+ locked, migration);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -1378,6 +1396,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
{
u64 frame = 0, flags = 0;
struct page *page = NULL;
+ bool migration = false;
if (pte_present(pte)) {
if (pm->show_pfn)
@@ -1399,13 +1418,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
+ migration = is_migration_entry(entry);
if (is_pfn_swap_entry(entry))
page = pfn_swap_entry_to_page(entry);
}
if (page && !PageAnon(page))
flags |= PM_FILE;
- if (page && page_mapcount(page) == 1)
+ if (page && !migration && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
@@ -1421,8 +1441,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
spinlock_t *ptl;
pte_t *pte, *orig_pte;
int err = 0;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bool migration = false;
+
ptl = pmd_trans_huge_lock(pmdp, vma);
if (ptl) {
u64 flags = 0, frame = 0;
@@ -1461,11 +1482,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pmd_swp_uffd_wp(pmd))
flags |= PM_UFFD_WP;
VM_BUG_ON(!is_pmd_migration_entry(pmd));
+ migration = is_migration_entry(entry);
page = pfn_swap_entry_to_page(entry);
}
#endif
- if (page && page_mapcount(page) == 1)
+ if (page && !migration && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
for (; addr != end; addr += PAGE_SIZE) {
@@ -1575,7 +1597,8 @@ static const struct mm_walk_ops pagemap_ops = {
* Bits 5-54 swap offset if swapped
* Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
* Bit 56 page exclusively mapped
- * Bits 57-60 zero
+ * Bit 57 pte is uffd-wp write-protected
+ * Bits 58-60 zero
* Bit 61 page is file-page or shared-anon
* Bit 62 page swapped
* Bit 63 page present
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 22d904bde6ab..a74aef99bd3d 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -690,9 +690,14 @@ int dquot_quota_sync(struct super_block *sb, int type)
/* This is not very clever (and fast) but currently I don't know about
* any other simple way of getting quota data to disk and we must get
* them there for userspace to be visible... */
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
- sync_blockdev(sb->s_bdev);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
+ ret = sync_blockdev(sb->s_bdev);
+ if (ret)
+ return ret;
/*
* Now when everything is written we can discard the pagecache so
diff --git a/fs/super.c b/fs/super.c
index 7af820ba5ad5..f1d4a193602d 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1616,11 +1616,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb)
percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}
-static void sb_freeze_unlock(struct super_block *sb)
+static void sb_freeze_unlock(struct super_block *sb, int level)
{
- int level;
-
- for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+ for (level--; level >= 0; level--)
percpu_up_write(sb->s_writers.rw_sem + level);
}
@@ -1691,7 +1689,14 @@ int freeze_super(struct super_block *sb)
sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
/* All writers are done so after syncing there won't be dirty data */
- sync_filesystem(sb);
+ ret = sync_filesystem(sb);
+ if (ret) {
+ sb->s_writers.frozen = SB_UNFROZEN;
+ sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
+ wake_up(&sb->s_writers.wait_unfrozen);
+ deactivate_locked_super(sb);
+ return ret;
+ }
/* Now wait for internal filesystem counter */
sb->s_writers.frozen = SB_FREEZE_FS;
@@ -1703,7 +1708,7 @@ int freeze_super(struct super_block *sb)
printk(KERN_ERR
"VFS:Filesystem freeze failed\n");
sb->s_writers.frozen = SB_UNFROZEN;
- sb_freeze_unlock(sb);
+ sb_freeze_unlock(sb, SB_FREEZE_FS);
wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
return ret;
@@ -1748,7 +1753,7 @@ static int thaw_super_locked(struct super_block *sb)
}
sb->s_writers.frozen = SB_UNFROZEN;
- sb_freeze_unlock(sb);
+ sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
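
freeze_super() above can now bail out while only the SB_FREEZE_WRITE and SB_FREEZE_PAGEFAULT levels are held (when sync_filesystem() fails), so sb_freeze_unlock() gains a level argument and releases only the levels that were actually taken, newest first. The same unwind shape in plain pthreads is sketched below, with an rwlock array standing in for sb->s_writers.rw_sem; build with -pthread.

#include <pthread.h>
#include <stdio.h>

#define SB_FREEZE_LEVELS 3

static pthread_rwlock_t rw_sem[SB_FREEZE_LEVELS];

/* Release every level below "level", in reverse order of acquisition. */
static void freeze_unlock(int level)
{
	for (level--; level >= 0; level--)
		pthread_rwlock_unlock(&rw_sem[level]);
}

int main(void)
{
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		pthread_rwlock_init(&rw_sem[i], NULL);

	/* Take the first two levels, then back out as if syncing had failed. */
	pthread_rwlock_wrlock(&rw_sem[0]);
	pthread_rwlock_wrlock(&rw_sem[1]);
	freeze_unlock(2);
	printf("released levels 1 and 0 only\n");
	return 0;
}
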
diff --git a/fs/sync.c b/fs/sync.c
index 3ce8e2137f31..c7690016453e 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -29,7 +29,7 @@
*/
int sync_filesystem(struct super_block *sb)
{
- int ret;
+ int ret = 0;
/*
* We need to be protected against the filesystem going from
@@ -52,15 +52,21 @@ int sync_filesystem(struct super_block *sb)
* at a time.
*/
writeback_inodes_sb(sb, WB_REASON_SYNC);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 0);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 0);
+ if (ret)
+ return ret;
+ }
ret = sync_blockdev_nowait(sb->s_bdev);
- if (ret < 0)
+ if (ret)
return ret;
sync_inodes_sb(sb);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
return sync_blockdev(sb->s_bdev);
}
EXPORT_SYMBOL(sync_filesystem);
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index bafc02bf8220..de7252715b12 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -264,7 +264,6 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
if (!gid_valid(gid))
return -EINVAL;
opts->gid = gid;
- set_gid(tracefs_mount->mnt_root, gid);
break;
case Opt_mode:
if (match_octal(&args[0], &option))
@@ -291,7 +290,9 @@ static int tracefs_apply_options(struct super_block *sb)
inode->i_mode |= opts->mode;
inode->i_uid = opts->uid;
- inode->i_gid = opts->gid;
+
+ /* Set all the group ids to the mount option */
+ set_gid(sb->s_root, opts->gid);
return 0;
}
diff --git a/fs/unicode/Kconfig b/fs/unicode/Kconfig
index 610d7bc05d6e..da786a687fdc 100644
--- a/fs/unicode/Kconfig
+++ b/fs/unicode/Kconfig
@@ -3,21 +3,13 @@
# UTF-8 normalization
#
config UNICODE
- bool "UTF-8 normalization and casefolding support"
+ tristate "UTF-8 normalization and casefolding support"
help
Say Y here to enable UTF-8 NFD normalization and NFD+CF casefolding
- support.
-
-config UNICODE_UTF8_DATA
- tristate "UTF-8 normalization and casefolding tables"
- depends on UNICODE
- default UNICODE
- help
- This contains a large table of case foldings, which can be loaded as
- a separate module if you say M here. To be on the safe side stick
- to the default of Y. Saying N here makes no sense, if you do not want
- utf8 casefolding support, disable CONFIG_UNICODE instead.
+ support. If you say M here the large table of case foldings will
+ be a separate loadable module that gets requested only when a file
+ system actually uses it.
config UNICODE_NORMALIZATION_SELFTEST
tristate "Test UTF-8 normalization support"
- depends on UNICODE_UTF8_DATA
+ depends on UNICODE
diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile
index 2f9d9188852b..0cc87423de82 100644
--- a/fs/unicode/Makefile
+++ b/fs/unicode/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_UNICODE) += unicode.o
+ifneq ($(CONFIG_UNICODE),)
+obj-y += unicode.o
+endif
+obj-$(CONFIG_UNICODE) += utf8data.o
obj-$(CONFIG_UNICODE_NORMALIZATION_SELFTEST) += utf8-selftest.o
-obj-$(CONFIG_UNICODE_UTF8_DATA) += utf8data.o
unicode-y := utf8-norm.o utf8-core.o
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e26b10132d47..8e03b3d3f5fa 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -878,7 +878,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
new_flags, vma->anon_vma,
vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
- NULL_VM_UFFD_CTX, vma_anon_name(vma));
+ NULL_VM_UFFD_CTX, anon_vma_name(vma));
if (prev)
vma = prev;
else
@@ -1438,7 +1438,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
((struct vm_userfaultfd_ctx){ ctx }),
- vma_anon_name(vma));
+ anon_vma_name(vma));
if (prev) {
vma = prev;
goto next;
@@ -1615,7 +1615,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
- NULL_VM_UFFD_CTX, vma_anon_name(vma));
+ NULL_VM_UFFD_CTX, anon_vma_name(vma));
if (prev) {
vma = prev;
goto next;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 2705f91bdd0d..9d6a67c7d227 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -136,7 +136,20 @@ done:
memalloc_nofs_restore(nofs_flag);
}
-/* Finish all pending io completions. */
+/*
+ * Finish all pending IO completions that require transactional modifications.
+ *
+ * We try to merge physical and logically contiguous ioends before completion to
+ * minimise the number of transactions we need to perform during IO completion.
+ * Both unwritten extent conversion and COW remapping need to iterate and modify
+ * one physical extent at a time, so we gain nothing by merging physically
+ * discontiguous extents here.
+ *
+ * The ioend chain length that we can be processing here is largely unbound in
+ * length and we may have to perform significant amounts of work on each ioend
+ * to complete it. Hence we have to be careful about holding the CPU for too
+ * long in this loop.
+ */
void
xfs_end_io(
struct work_struct *work)
@@ -157,6 +170,7 @@ xfs_end_io(
list_del_init(&ioend->io_list);
iomap_ioend_try_merge(ioend, &tmp);
xfs_end_ioend(ioend);
+ cond_resched();
}
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index d4a387d3d0ce..eb2e387ba528 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -850,9 +850,6 @@ xfs_alloc_file_space(
rblocks = 0;
}
- /*
- * Allocate and setup the transaction.
- */
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
dblocks, rblocks, false, &tp);
if (error)
@@ -869,9 +866,9 @@ xfs_alloc_file_space(
if (error)
goto error;
- /*
- * Complete the transaction
- */
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 22ad207bedf4..5bddb1e9e0b3 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -66,40 +66,6 @@ xfs_is_falloc_aligned(
return !((pos | len) & mask);
}
-int
-xfs_update_prealloc_flags(
- struct xfs_inode *ip,
- enum xfs_prealloc_flags flags)
-{
- struct xfs_trans *tp;
- int error;
-
- error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
- 0, 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- if (!(flags & XFS_PREALLOC_INVISIBLE)) {
- VFS_I(ip)->i_mode &= ~S_ISUID;
- if (VFS_I(ip)->i_mode & S_IXGRP)
- VFS_I(ip)->i_mode &= ~S_ISGID;
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
-
- if (flags & XFS_PREALLOC_SET)
- ip->i_diflags |= XFS_DIFLAG_PREALLOC;
- if (flags & XFS_PREALLOC_CLEAR)
- ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (flags & XFS_PREALLOC_SYNC)
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp);
-}
-
/*
* Fsync operations on directories are much simpler than on regular files,
* as there is no file data to flush, and thus also no need for explicit
@@ -895,6 +861,21 @@ xfs_break_layouts(
return error;
}
+/* Does this file, inode, or mount want synchronous writes? */
+static inline bool xfs_file_sync_writes(struct file *filp)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(filp));
+
+ if (xfs_has_wsync(ip->i_mount))
+ return true;
+ if (filp->f_flags & (__O_SYNC | O_DSYNC))
+ return true;
+ if (IS_SYNC(file_inode(filp)))
+ return true;
+
+ return false;
+}
+
#define XFS_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
@@ -910,7 +891,6 @@ xfs_file_fallocate(
struct inode *inode = file_inode(file);
struct xfs_inode *ip = XFS_I(inode);
long error;
- enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
loff_t new_size = 0;
bool do_file_insert = false;
@@ -955,6 +935,10 @@ xfs_file_fallocate(
goto out_unlock;
}
+ error = file_modified(file);
+ if (error)
+ goto out_unlock;
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
@@ -1004,8 +988,6 @@ xfs_file_fallocate(
}
do_file_insert = true;
} else {
- flags |= XFS_PREALLOC_SET;
-
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
@@ -1057,13 +1039,6 @@ xfs_file_fallocate(
}
}
- if (file->f_flags & O_DSYNC)
- flags |= XFS_PREALLOC_SYNC;
-
- error = xfs_update_prealloc_flags(ip, flags);
- if (error)
- goto out_unlock;
-
/* Change file size if needed */
if (new_size) {
struct iattr iattr;
@@ -1082,8 +1057,14 @@ xfs_file_fallocate(
* leave shifted extents past EOF and hence losing access to
* the data that is contained within them.
*/
- if (do_file_insert)
+ if (do_file_insert) {
error = xfs_insert_file_space(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ }
+
+ if (xfs_file_sync_writes(file))
+ error = xfs_log_force_inode(ip);
out_unlock:
xfs_iunlock(ip, iolock);
@@ -1115,21 +1096,6 @@ xfs_file_fadvise(
return ret;
}
-/* Does this file, inode, or mount want synchronous writes? */
-static inline bool xfs_file_sync_writes(struct file *filp)
-{
- struct xfs_inode *ip = XFS_I(file_inode(filp));
-
- if (xfs_has_wsync(ip->i_mount))
- return true;
- if (filp->f_flags & (__O_SYNC | O_DSYNC))
- return true;
- if (IS_SYNC(file_inode(filp)))
- return true;
-
- return false;
-}
-
STATIC loff_t
xfs_file_remap_range(
struct file *file_in,
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index c447bf04205a..b7e8f14d9fca 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -462,15 +462,6 @@ xfs_itruncate_extents(
}
/* from xfs_file.c */
-enum xfs_prealloc_flags {
- XFS_PREALLOC_SET = (1 << 1),
- XFS_PREALLOC_CLEAR = (1 << 2),
- XFS_PREALLOC_SYNC = (1 << 3),
- XFS_PREALLOC_INVISIBLE = (1 << 4),
-};
-
-int xfs_update_prealloc_flags(struct xfs_inode *ip,
- enum xfs_prealloc_flags flags);
int xfs_break_layouts(struct inode *inode, uint *iolock,
enum layout_break_reason reason);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 03a6198c97f6..2515fe8299e1 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1464,7 +1464,7 @@ xfs_ioc_getbmap(
if (bmx.bmv_count < 2)
return -EINVAL;
- if (bmx.bmv_count > ULONG_MAX / recsize)
+ if (bmx.bmv_count >= INT_MAX / recsize)
return -ENOMEM;
buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index d6334abbc0b3..4abe17312c2b 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -71,6 +71,40 @@ xfs_fs_get_uuid(
}
/*
+ * We cannot use file based VFS helpers such as file_modified() to update
+ * inode state as we modify the data/metadata in the inode here. Hence we have
+ * to open code the timestamp updates and SUID/SGID stripping. We also need
+ * to set the inode prealloc flag to ensure that the extents we allocate are not
+ * removed if the inode is reclaimed from memory before xfs_fs_commit_blocks()
+ * is called by the client to indicate that data has been written and the file
+ * size can be extended.
+ */
+static int
+xfs_fs_map_update_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
+ 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ VFS_I(ip)->i_mode &= ~S_ISUID;
+ if (VFS_I(ip)->i_mode & S_IXGRP)
+ VFS_I(ip)->i_mode &= ~S_ISGID;
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return xfs_trans_commit(tp);
+}
+
+/*
* Get a layout for the pNFS client.
*/
int
@@ -164,10 +198,12 @@ xfs_fs_map_blocks(
* that the blocks allocated and handed out to the client are
* guaranteed to be present even after a server crash.
*/
- error = xfs_update_prealloc_flags(ip,
- XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
+ error = xfs_fs_map_update_inode(ip);
+ if (!error)
+ error = xfs_log_force_inode(ip);
if (error)
goto out_unlock;
+
} else {
xfs_iunlock(ip, lock_flags);
}
@@ -255,7 +291,7 @@ xfs_fs_commit_blocks(
length = end - start;
if (!length)
continue;
-
+
/*
* Make sure reads through the pagecache see the new data.
*/
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index e8f37bdc8354..d84714e4e46a 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -735,6 +735,7 @@ xfs_fs_sync_fs(
int wait)
{
struct xfs_mount *mp = XFS_M(sb);
+ int error;
trace_xfs_fs_sync_fs(mp, __return_address);
@@ -744,7 +745,10 @@ xfs_fs_sync_fs(
if (!wait)
return 0;
- xfs_log_force(mp, XFS_LOG_SYNC);
+ error = xfs_log_force(mp, XFS_LOG_SYNC);
+ if (error)
+ return error;
+
if (laptop_mode) {
/*
* The disk must be active because we're syncing.
@@ -1749,6 +1753,11 @@ xfs_remount_ro(
};
int error;
+ /* Flush all the dirty data to disk. */
+ error = sync_filesystem(mp->m_super);
+ if (error)
+ return error;
+
/*
* Cancel background eofb scanning so it cannot race with the final
* log force+buftarg wait and deadlock the remount.
@@ -1827,8 +1836,6 @@ xfs_fs_reconfigure(
if (error)
return error;
- sync_filesystem(mp->m_super);
-
/* inode32 -> inode64 */
if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
index d39cfa0d333e..52363eee2b20 100644
--- a/include/crypto/internal/blake2s.h
+++ b/include/crypto/internal/blake2s.h
@@ -24,14 +24,11 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state)
state->f[0] = -1;
}
-typedef void (*blake2s_compress_t)(struct blake2s_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
/* Helper functions for BLAKE2s shared by the library and shash APIs */
-static inline void __blake2s_update(struct blake2s_state *state,
- const u8 *in, size_t inlen,
- blake2s_compress_t compress)
+static __always_inline void
+__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
+ bool force_generic)
{
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
@@ -39,7 +36,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
return;
if (inlen > fill) {
memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ if (force_generic)
+ blake2s_compress_generic(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
state->buflen = 0;
in += fill;
inlen -= fill;
@@ -47,7 +49,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
if (inlen > BLAKE2S_BLOCK_SIZE) {
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
/* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ if (force_generic)
+ blake2s_compress_generic(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
}
@@ -55,13 +62,16 @@ static inline void __blake2s_update(struct blake2s_state *state,
state->buflen += inlen;
}
-static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
- blake2s_compress_t compress)
+static __always_inline void
+__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
{
blake2s_set_lastblock(state);
memset(state->buf + state->buflen, 0,
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
+ if (force_generic)
+ blake2s_compress_generic(state, state->buf, 1, state->buflen);
+ else
+ blake2s_compress(state, state->buf, 1, state->buflen);
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
memcpy(out, state->h, state->outlen);
}
@@ -99,20 +109,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc)
static inline int crypto_blake2s_update(struct shash_desc *desc,
const u8 *in, unsigned int inlen,
- blake2s_compress_t compress)
+ bool force_generic)
{
struct blake2s_state *state = shash_desc_ctx(desc);
- __blake2s_update(state, in, inlen, compress);
+ __blake2s_update(state, in, inlen, force_generic);
return 0;
}
static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
- blake2s_compress_t compress)
+ bool force_generic)
{
struct blake2s_state *state = shash_desc_ctx(desc);
- __blake2s_final(state, out, compress);
+ __blake2s_final(state, out, force_generic);
return 0;
}
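With the compress-function pointer replaced by a force_generic flag, callers of these helpers are expected to pass a constant boolean. A hedged sketch of how an architecture's shash glue might wrap them (the function names below are assumptions, not the actual arch code):

static int blake2s_update_arch(struct shash_desc *desc,
			       const u8 *in, unsigned int inlen)
{
	/* false: blake2s_compress() may use the optimised implementation */
	return crypto_blake2s_update(desc, in, inlen, false);
}

static int blake2s_final_arch(struct shash_desc *desc, u8 *out)
{
	return crypto_blake2s_final(desc, out, false);
}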
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 7d57063b8a65..29ff6b895848 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -84,17 +84,10 @@
#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-/* iva clocks */
-#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
/* dss clocks */
#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
/* l3init clocks */
#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
@@ -267,10 +260,17 @@
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 199e47e97d64..21292b5bbb55 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -324,12 +324,12 @@ enum {
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device SATA settings log:*/
ATA_LOG_DEVSLP_OFFSET = 0x30,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f35aea98bc35..16b47035e4b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -748,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+
+void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK
/*
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index fa517ae604ad..d0ad379d1e62 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -209,11 +209,9 @@ static inline bool map_value_has_timer(const struct bpf_map *map)
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
if (unlikely(map_value_has_spin_lock(map)))
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
if (unlikely(map_value_has_timer(map)))
- *(struct bpf_timer *)(dst + map->timer_off) =
- (struct bpf_timer){};
+ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}
/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
@@ -224,7 +222,8 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
if (unlikely(map_value_has_spin_lock(map))) {
s_off = map->spin_lock_off;
s_sz = sizeof(struct bpf_spin_lock);
- } else if (unlikely(map_value_has_timer(map))) {
+ }
+ if (unlikely(map_value_has_timer(map))) {
t_off = map->timer_off;
t_sz = sizeof(struct bpf_timer);
}
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 6a89ea410e43..edf62eaa6285 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -35,6 +35,7 @@
#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index ff99ce094cfa..e7f2fb2fc207 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -383,6 +383,10 @@ struct ceph_connection_v2_info {
struct ceph_gcm_nonce in_gcm_nonce;
struct ceph_gcm_nonce out_gcm_nonce;
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
struct page **out_enc_pages;
int out_enc_page_cnt;
int out_enc_resid;
@@ -457,6 +461,7 @@ struct ceph_connection {
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 429dcebe2b99..0f7fd205ab7e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#define __stringify_label(n) #n
-#define __annotate_reachable(c) ({ \
- asm volatile(__stringify_label(c) ":\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t" : : "i" (c)); \
-})
-#define annotate_reachable() __annotate_reachable(__COUNTER__)
-
#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
@@ -133,24 +125,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b - .\n\t" \
".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")
#else
-#define annotate_reachable()
#define annotate_unreachable()
+# define ASM_REACHABLE
#define __annotate_jump_table
#endif
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
#ifndef unreachable
# define unreachable() do { \
annotate_unreachable(); \
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1ab29e61b078..3522a272b74d 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -382,6 +382,9 @@ struct cpufreq_driver {
int (*suspend)(struct cpufreq_policy *policy);
int (*resume)(struct cpufreq_policy *policy);
+ /* Will be called after the driver is fully initialized */
+ void (*ready)(struct cpufreq_policy *policy);
+
struct freq_attr **attr;
/* platform specific boost support code */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index dca2b1355bb1..6150d11a607e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -62,6 +62,14 @@
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
+ * This is a hint to the DMA-mapping subsystem that the device is expected
+ * to overwrite the entire mapped size, thus the caller does not require any
+ * of the previous buffer contents to be preserved. This allows
+ * bounce-buffering implementations to optimise DMA_FROM_DEVICE transfers.
+ */
+#define DMA_ATTR_OVERWRITE (1UL << 10)
+
+/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
* be given to a device to use as a DMA source or target. It is specific to a
* given device and there may be a translation between the CPU physical address
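A hedged example of how a driver might use the new attribute when mapping a receive buffer it will overwrite completely, so a bounce-buffering backend such as swiotlb may skip preserving the old contents (the helper name and variables are illustrative):

static dma_addr_t map_rx_buffer(struct device *dev, void *rx_buf, size_t rx_len)
{
	return dma_map_single_attrs(dev, rx_buf, rx_len, DMA_FROM_DEVICE,
				    DMA_ATTR_OVERWRITE);
}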
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3da95842b207..02f362c661c8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -262,7 +262,7 @@ struct fb_ops {
/* Draws a rectangle */
void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
- /* Copy data from area to another. Obsolete. */
+ /* Copy data from one area to another */
void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
/* Draws a image to the display */
void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f3daaea16554..e2d892b201b0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1483,7 +1483,7 @@ struct super_block {
#ifdef CONFIG_FS_VERITY
const struct fsverity_operations *s_vop;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct unicode_map *s_encoding;
__u16 s_encoding_flags;
#endif
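The #ifdef above becomes IS_ENABLED() because CONFIG_UNICODE is now a tristate: with CONFIG_UNICODE=m only CONFIG_UNICODE_MODULE is defined, so a plain #ifdef would wrongly drop the field. A minimal illustration (the helper name is an assumption, not part of the patch):

#if IS_ENABLED(CONFIG_UNICODE)		/* true for both =y and =m */
static inline bool my_sb_has_encoding(const struct super_block *sb)
{
	return !!sb->s_encoding;
}
#else
static inline bool my_sb_has_encoding(const struct super_block *sb)
{
	return false;
}
#endif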
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index f565a8938836..fe2e0179ed51 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1262,6 +1262,7 @@ struct hv_device {
struct vmbus_channel *channel;
struct kset *channels_kset;
struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8420fe504927..2be4dd7e90a9 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -46,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index b55bd49e55f5..97a3a2edb585 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -263,9 +263,11 @@ struct iomap_ioend {
struct list_head io_list; /* next ioend in chain */
u16 io_type;
u16 io_flags; /* IOMAP_F_* */
+ u32 io_folios; /* folios added to ioend */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
loff_t io_offset; /* offset in the file */
+ sector_t io_sector; /* start sector of ioend */
struct bio *io_bio; /* bio being built */
struct bio io_inline_bio; /* MUST BE LAST! */
};
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index fd933c45281a..9c3ada74ffb1 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -594,7 +594,7 @@ struct transaction_s
*/
unsigned long t_log_start;
- /*
+ /*
* Number of buffers on the t_buffers list [j_list_lock, no locks
* needed for jbd2 thread]
*/
@@ -1295,7 +1295,7 @@ struct journal_s
* Clean-up after fast commit or full commit. JBD2 calls this function
* after every commit operation.
*/
- void (*j_fc_cleanup_callback)(struct journal_s *journal, int);
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
/**
* @j_fc_replay_callback:
@@ -1419,9 +1419,7 @@ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1486,9 +1484,6 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
extern int __init jbd2_journal_init_transaction_cache(void);
@@ -1543,6 +1538,8 @@ extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1774,8 +1771,6 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
/* JBD uses a CRC32 checksum */
#define JBD_MAX_CHECKSUM_SIZE 4
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 4b5e3679a72c..f49e64222628 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -17,6 +17,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
+extern unsigned long kfence_sample_interval;
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 06912d6b39d0..f11039944c08 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,7 +29,9 @@
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
+#include <linux/ftrace.h>
#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
@@ -368,8 +370,11 @@ struct kvm_vcpu {
u64 last_used_slot_gen;
};
-/* must be called with irqs disabled */
-static __always_inline void guest_enter_irqoff(void)
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
{
/*
* This is running in ioctl context so it's safe to assume that it's the
@@ -378,7 +383,18 @@ static __always_inline void guest_enter_irqoff(void)
instrumentation_begin();
vtime_account_guest_enter();
instrumentation_end();
+}
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
/*
* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
@@ -394,16 +410,79 @@ static __always_inline void guest_enter_irqoff(void)
}
}
-static __always_inline void guest_exit_irqoff(void)
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
{
context_tracking_guest_exit();
+}
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
instrumentation_begin();
/* Flush the guest cputime we spent on the guest */
vtime_account_guest_exit();
instrumentation_end();
}
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
static inline void guest_exit(void)
{
unsigned long flags;
@@ -413,6 +492,33 @@ static inline void guest_exit(void)
local_irq_restore(flags);
}
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
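The split helpers above impose a strict nesting order: timing accounting brackets the state fixups, which in turn bracket the non-instrumentable low-level entry. An illustrative (not architecture-specific) vcpu run sequence; arch_vcpu_run() and enter_guest_lowlevel() are assumptions:

static int arch_vcpu_run(struct kvm_vcpu *vcpu)
{
	int exit_reason;

	local_irq_disable();
	guest_timing_enter_irqoff();		/* start guest time accounting */

	guest_state_enter_irqoff();		/* RCU/lockdep/tracing fixups */
	exit_reason = enter_guest_lowlevel(vcpu); /* must be non-instrumentable */
	guest_state_exit_irqoff();

	guest_timing_exit_irqoff();		/* stop guest time accounting */
	local_irq_enable();

	return exit_reason;
}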
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 605756f645be..7f99b4d78822 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -380,6 +380,7 @@ enum {
ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
+ ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b72d75141e12..0abbd685703b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -219,7 +219,7 @@ struct obj_cgroup {
struct mem_cgroup *memcg;
atomic_t nr_charged_bytes;
union {
- struct list_head list;
+ struct list_head list; /* protected by objcg_lock */
struct rcu_head rcu;
};
};
@@ -315,7 +315,8 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
struct obj_cgroup __rcu *objcg;
- struct list_head objcg_list; /* list of inherited objcgs */
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
#endif
MEMCG_PADDING(_pad2_);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 213cc569b192..5744a3fc4716 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2626,7 +2626,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx, const char *);
+ struct mempolicy *, struct vm_userfaultfd_ctx, struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
@@ -3372,11 +3372,12 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
#ifdef CONFIG_ANON_VMA_NAME
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in, const char *name);
+ unsigned long len_in,
+ struct anon_vma_name *anon_name);
#else
static inline int
madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in, const char *name) {
+ unsigned long len_in, struct anon_vma_name *anon_name) {
return 0;
}
#endif
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index b725839dfe71..cf90b1fa2c60 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -140,50 +140,91 @@ static __always_inline void del_page_from_lru_list(struct page *page,
#ifdef CONFIG_ANON_VMA_NAME
/*
- * mmap_lock should be read-locked when calling vma_anon_name() and while using
- * the returned pointer.
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
*/
-extern const char *vma_anon_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
+extern void anon_vma_name_free(struct kref *kref);
-/*
- * mmap_lock should be read-locked for orig_vma->vm_mm.
- * mmap_lock should be write-locked for new_vma->vm_mm or new_vma should be
- * isolated.
- */
-extern void dup_vma_anon_name(struct vm_area_struct *orig_vma,
- struct vm_area_struct *new_vma);
+/* mmap_lock should be read-locked */
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
+{
+ if (anon_name)
+ kref_get(&anon_name->kref);
+}
-/*
- * mmap_lock should be write-locked or vma should have been isolated under
- * write-locked mmap_lock protection.
- */
-extern void free_vma_anon_name(struct vm_area_struct *vma);
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
+{
+ if (anon_name)
+ kref_put(&anon_name->kref, anon_vma_name_free);
+}
-/* mmap_lock should be read-locked */
-static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
- const char *name)
+static inline
+struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
+{
+ /* Prevent anon_name refcount saturation early on */
+ if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
+ anon_vma_name_get(anon_name);
+ return anon_name;
+ }
+ return anon_vma_name_alloc(anon_name->name);
+}
+
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma)
+{
+ struct anon_vma_name *anon_name = anon_vma_name(orig_vma);
+
+ if (anon_name)
+ new_vma->anon_name = anon_vma_name_reuse(anon_name);
+}
+
+static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
- const char *vma_name = vma_anon_name(vma);
+ /*
+ * Not using anon_vma_name because it generates a warning if mmap_lock
+ * is not held, which might be the case here.
+ */
+ if (!vma->vm_file)
+ anon_vma_name_put(vma->anon_name);
+}
- /* either both NULL, or pointers to same string */
- if (vma_name == name)
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
+{
+ if (anon_name1 == anon_name2)
return true;
- return name && vma_name && !strcmp(name, vma_name);
+ return anon_name1 && anon_name2 &&
+ !strcmp(anon_name1->name, anon_name2->name);
}
+
#else /* CONFIG_ANON_VMA_NAME */
-static inline const char *vma_anon_name(struct vm_area_struct *vma)
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
return NULL;
}
-static inline void dup_vma_anon_name(struct vm_area_struct *orig_vma,
- struct vm_area_struct *new_vma) {}
-static inline void free_vma_anon_name(struct vm_area_struct *vma) {}
-static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
- const char *name)
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+
+static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
+static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
+static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma) {}
+static inline void free_anon_vma_name(struct vm_area_struct *vma) {}
+
+static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
+ struct anon_vma_name *anon_name2)
{
return true;
}
+
#endif /* CONFIG_ANON_VMA_NAME */
static inline void init_tlb_flush_pending(struct mm_struct *mm)
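The comments above spell out the locking contract: anon_vma_name() is only valid under mmap_lock, and a reference must be taken before the lock is dropped. A hedged usage sketch (the surrounding function is illustrative, not from the patch):

static void report_vma_name(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct anon_vma_name *anon_name;

	mmap_read_lock(mm);
	anon_name = anon_vma_name(vma);	/* valid only while the lock is held */
	anon_vma_name_get(anon_name);	/* pin it past the unlock; NULL-safe */
	mmap_read_unlock(mm);

	if (anon_name)
		pr_info("vma name: %s\n", anon_name->name);
	anon_vma_name_put(anon_name);	/* NULL-safe */
}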
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5140e5feb486..0f549870da6a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -416,7 +416,10 @@ struct vm_area_struct {
struct rb_node rb;
unsigned long rb_subtree_last;
} shared;
- /* Serialized by mmap_sem. */
+ /*
+ * Serialized by mmap_sem. Never use directly because it is
+ * valid only when vm_file is NULL. Use anon_vma_name instead.
+ */
struct anon_vma_name *anon_name;
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e490b84732d1..8b5a314db167 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2158,7 +2158,7 @@ struct net_device {
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
diff --git a/include/linux/netfilter_netdev.h b/include/linux/netfilter_netdev.h
index b4dd96e4dc8d..e6487a691136 100644
--- a/include/linux/netfilter_netdev.h
+++ b/include/linux/netfilter_netdev.h
@@ -101,7 +101,11 @@ static inline struct sk_buff *nf_hook_egress(struct sk_buff *skb, int *rc,
nf_hook_state_init(&state, NF_NETDEV_EGRESS,
NFPROTO_NETDEV, dev, NULL, NULL,
dev_net(dev), NULL);
+
+ /* nf assumes rcu_read_lock, not just read_lock_bh */
+ rcu_read_lock();
ret = nf_hook_slow(skb, &state, e, 0);
+ rcu_read_unlock();
if (ret == 1) {
return skb;
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index b46c39d98bbd..614f22213e21 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -244,6 +244,13 @@ struct netfs_cache_ops {
int (*prepare_write)(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size,
bool no_space_allocated_yet);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
struct readahead_control;
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 0dc7ad38a0da..b06375e88e58 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -36,14 +36,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 02aa49323d1d..68f81d8d36de 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -107,6 +107,7 @@ struct nfs_open_dir_context {
__u64 dup_cookie;
pgoff_t page_index;
signed char duped;
+ bool eof;
};
/*
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..75470159a194 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -12,6 +12,7 @@
#define NVME_TCP_DISC_PORT 8009
#define NVME_TCP_ADMIN_CCSZ SZ_8K
#define NVME_TCP_DIGEST_LENGTH 4
+#define NVME_TCP_MIN_MAXH2CDATA 4096
enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 98efb7b5660d..c9a3ac9efeaa 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -70,7 +70,8 @@ struct nvmem_keepout {
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
- * @wp-gpio: Write protect pin
+ * @wp-gpio: Write protect pin
+ * @ignore_wp: Write Protect pin is managed by the provider.
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
@@ -92,6 +93,7 @@ struct nvmem_config {
enum nvmem_type type;
bool read_only;
bool root_only;
+ bool ignore_wp;
struct device_node *of_node;
bool no_of_node;
nvmem_reg_read_t reg_read;
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 38cace1da7b6..01e16c7696ec 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -26,6 +26,9 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
@@ -100,6 +103,16 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
__page_table_check_pud_set(mm, addr, pudp, pud);
}
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
#else
static inline void page_table_check_alloc(struct page *page, unsigned int order)
@@ -143,5 +156,11 @@ static inline void page_table_check_pud_set(struct mm_struct *mm,
{
}
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
#endif /* CONFIG_PAGE_TABLE_CHECK */
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index bc8713a76e03..f4f4077b97aa 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -62,6 +62,7 @@ static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
+#define pte_index pte_index
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index c35f3962dc4f..373003ace639 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -308,6 +308,11 @@ static inline bool rfkill_blocked(struct rfkill *rfkill)
return false;
}
+static inline bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+
static inline enum rfkill_type rfkill_find_type(const char *name)
{
return RFKILL_TYPE_ALL;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f5b2be39a78c..75ba8aa60248 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1680,7 +1680,6 @@ extern struct pid *cad_pid;
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index b9198a1b3a84..e84e54d1b490 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -54,8 +54,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p,
- struct kernel_clone_args *kargs);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 37bde99b74af..5b6193fd8bd9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -660,8 +660,7 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
- __alloc_size(1);
+extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3e8ecdebe601..300273ff40cc 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -497,14 +497,14 @@ extern void ksys_sync_helper(void);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 70c069aef02c..dcea51fb60e2 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -699,6 +699,8 @@ event_triggers_post_call(struct trace_event_file *file,
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file);
+
/**
* trace_trigger_soft_disabled - do triggers and test if soft disabled
* @file: The file pointer of the event to test
@@ -708,20 +710,20 @@ bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
* triggers that require testing the fields, it will return true,
* otherwise false.
*/
-static inline bool
+static __always_inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
unsigned long eflags = file->flags;
- if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
- if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
- event_triggers_call(file, NULL, NULL, NULL);
- if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
- return true;
- if (eflags & EVENT_FILE_FL_PID_FILTER)
- return trace_event_ignore_this_pid(file);
- }
- return false;
+ if (likely(!(eflags & (EVENT_FILE_FL_TRIGGER_MODE |
+ EVENT_FILE_FL_SOFT_DISABLED |
+ EVENT_FILE_FL_PID_FILTER))))
+ return false;
+
+ if (likely(eflags & EVENT_FILE_FL_TRIGGER_COND))
+ return false;
+
+ return __trace_trigger_soft_disabled(file);
}
#ifdef CONFIG_BPF_EVENTS
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index e7ce719838b5..59940e230b78 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -109,8 +109,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 526e49589197..8221af1811df 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -239,6 +239,7 @@ typedef struct ax25_dev {
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_dama_info dama;
#endif
+ refcount_t refcount;
} ax25_dev;
typedef struct ax25_cb {
@@ -293,6 +294,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
}
}
+static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+{
+ refcount_inc(&ax25_dev->refcount);
+}
+
+static inline void ax25_dev_put(ax25_dev *ax25_dev)
+{
+ if (refcount_dec_and_test(&ax25_dev->refcount)) {
+ kfree(ax25_dev);
+ }
+}
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 4b3d0b16c185..a647e5fabdbd 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -506,8 +506,7 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
tmp = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom);
if (IS_ERR(tmp)) {
- kfree_skb(skb);
- return tmp;
+ return skb;
}
len -= tmp->len;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 586f69d084a2..e336e9c1dda4 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -1489,6 +1489,14 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
/* Extended advertising support */
#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789:
+ *
+ * C24: Mandatory if the LE Controller supports Connection State and either
+ * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported
+ */
+#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \
+ ext_adv_capable(dev))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 38785d48baff..184105d68294 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -262,7 +262,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
struct bond_3ad_stats stats;
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+ atomic_t agg_select_timer; /* Timer to select aggregator after all adapters' handshakes */
u16 aggregator_identifier;
};
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 5218041e5c8f..79c67f14c448 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -22,7 +22,7 @@
#include <asm/checksum.h>
#ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-static inline
+static __always_inline
__wsum csum_and_copy_from_user (const void __user *src, void *dst,
int len)
{
@@ -33,7 +33,7 @@ __wsum csum_and_copy_from_user (const void __user *src, void *dst,
#endif
#ifndef HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user
+static __always_inline __wsum csum_and_copy_to_user
(const void *src, void __user *dst, int len)
{
__wsum sum = csum_partial(src, len, ~0U);
@@ -45,7 +45,7 @@ static __inline__ __wsum csum_and_copy_to_user
#endif
#ifndef _HAVE_ARCH_CSUM_AND_COPY
-static inline __wsum
+static __always_inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
memcpy(dst, src, len);
@@ -54,7 +54,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len)
#endif
#ifndef HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
u32 res = (__force u32)csum;
res += (__force u32)addend;
@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
}
#endif
-static inline __wsum csum_sub(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
{
return csum_add(csum, ~addend);
}
-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
{
u16 res = (__force u16)csum;
@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
return (__force __sum16)(res + (res < (__force u16)addend));
}
-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
{
return csum16_add(csum, ~addend);
}
-static inline __wsum csum_shift(__wsum sum, int offset)
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
{
/* rotate sum to align it with a 16b boundary */
if (offset & 1)
@@ -88,42 +88,43 @@ static inline __wsum csum_shift(__wsum sum, int offset)
return sum;
}
-static inline __wsum
+static __always_inline __wsum
csum_block_add(__wsum csum, __wsum csum2, int offset)
{
return csum_add(csum, csum_shift(csum2, offset));
}
-static inline __wsum
+static __always_inline __wsum
csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
{
return csum_block_add(csum, csum2, offset);
}
-static inline __wsum
+static __always_inline __wsum
csum_block_sub(__wsum csum, __wsum csum2, int offset)
{
return csum_block_add(csum, ~csum2, offset);
}
-static inline __wsum csum_unfold(__sum16 n)
+static __always_inline __wsum csum_unfold(__sum16 n)
{
return (__force __wsum)n;
}
-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+static __always_inline
+__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
{
return csum_partial(buff, len, sum);
}
#define CSUM_MANGLED_0 ((__force __sum16)0xffff)
-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
{
*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
}
-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
{
__wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
@@ -136,11 +137,16 @@ static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
* m : old value of a 16bit field
* m' : new value of a 16bit field
*/
-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
{
*sum = ~csum16_add(csum16_sub(~(*sum), old), new);
}
+static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
+{
+ *csum = csum_add(csum_sub(*csum, old), new);
+}
+
struct sk_buff;
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, bool pseudohdr);
@@ -150,16 +156,16 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
__wsum diff, bool pseudohdr);
-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
- __be16 from, __be16 to,
- bool pseudohdr)
+static __always_inline
+void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+ __be16 from, __be16 to, bool pseudohdr)
{
inet_proto_csum_replace4(sum, skb, (__force __be32)from,
(__force __be32)to, pseudohdr);
}
-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
- int start, int offset)
+static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+ int start, int offset)
{
__sum16 *psum = (__sum16 *)(ptr + offset);
__wsum delta;
@@ -175,12 +181,12 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
return delta;
}
-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
*psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}
-static inline __wsum wsum_negate(__wsum val)
+static __always_inline __wsum wsum_negate(__wsum val)
{
return (__force __wsum)-((__force u32)val);
}
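The new csum_replace() helper mirrors csum_replace2()/csum_replace4() for full 32-bit one's-complement sums. A small illustrative wrapper (the function and variable names are assumptions):

static void fixup_sum_for_rewrite(__wsum *running_sum, __be32 old_word,
				  __be32 new_word)
{
	csum_replace(running_sum, (__force __wsum)old_word,
		     (__force __wsum)new_word);
}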
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 57b3e4e7413b..85a5ba3772f5 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -1187,6 +1187,7 @@ void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 14efa0ded75d..adab27ba1ecb 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -123,8 +123,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+ /* Unclone the dst cache if there is one */
+ if (new_md->u.tun_info.dst_cache.cache) {
+ int ret;
+
+ ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+ if (ret) {
+ metadata_dst_free(new_md);
+ return ERR_PTR(ret);
+ }
+ }
+#endif
+
skb_dst_drop(skb);
- dst_hold(&new_md->dst);
skb_dst_set(skb, &new_md->dst);
return new_md;
}
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 40ae8f1b18e5..2048bc8748cb 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -190,14 +190,16 @@ struct fib6_info {
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
fib6_destroying:1,
- offload:1,
- trap:1,
- offload_failed:1,
- unused:1;
+ unused:4;
struct rcu_head rcu;
struct nexthop *nh;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 3afcb128e064..92eec13d1693 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -393,17 +393,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
kfree_rcu(opt, rcu);
}
+#if IS_ENABLED(CONFIG_IPV6)
struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
return NULL;
}
+#endif
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 53cb8de0e589..47ffb360ddfa 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -475,9 +475,9 @@ int igmp6_late_init(void);
void igmp6_cleanup(void);
void igmp6_late_cleanup(void);
-int igmp6_event_query(struct sk_buff *skb);
+void igmp6_event_query(struct sk_buff *skb);
-int igmp6_event_report(struct sk_buff *skb);
+void igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 937389e04c8e..87419f7f5421 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -350,7 +350,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
-int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ const bool immediate_ok);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
@@ -460,17 +461,24 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
#define neigh_hold(n) refcount_inc(&(n)->refcnt)
-static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
+ struct sk_buff *skb,
+ const bool immediate_ok)
{
unsigned long now = jiffies;
-
+
if (READ_ONCE(neigh->used) != now)
WRITE_ONCE(neigh->used, now);
- if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
- return __neigh_event_send(neigh, skb);
+ if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
+ return __neigh_event_send(neigh, skb, immediate_ok);
return 0;
}
+static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ return neigh_event_send_probe(neigh, skb, true);
+}
+
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
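
neigh_event_send() keeps its old behaviour (immediate_ok = true); the new neigh_event_send_probe() lets a transmit path defer the immediate probe. A hedged sketch of a caller that opts out of the immediate probe (neigh and skb are assumed to be in scope):

	/* Sketch only: ask the stack not to fire the unicast probe right away. */
	if (!neigh_event_send_probe(neigh, skb, false)) {
		/* neighbour state permits transmission; hand the skb onwards */
	}
	/* non-zero return: the skb was queued or dropped pending resolution */
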
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index a3647fadf1cc..bd59e950f4d6 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -96,6 +96,7 @@ enum flow_offload_xmit_type {
FLOW_OFFLOAD_XMIT_NEIGH,
FLOW_OFFLOAD_XMIT_XFRM,
FLOW_OFFLOAD_XMIT_DIRECT,
+ FLOW_OFFLOAD_XMIT_TC,
};
#define NF_FLOW_TABLE_ENCAP_MAX 2
@@ -127,7 +128,7 @@ struct flow_offload_tuple {
struct { } __hash;
u8 dir:2,
- xmit_type:2,
+ xmit_type:3,
encap_num:2,
in_vlan_ingress:2;
u16 mtu;
@@ -142,6 +143,9 @@ struct flow_offload_tuple {
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
} out;
+ struct {
+ u32 iifidx;
+ } tc;
};
};
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 9eed51e920e8..980daa6e1e3a 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -37,7 +37,7 @@ void nf_register_queue_handler(const struct nf_queue_handler *qh);
void nf_unregister_queue_handler(void);
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
void nf_queue_entry_free(struct nf_queue_entry *entry);
static inline void init_hashrandom(u32 *jhash_initval)
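
Since nf_queue_entry_get_refs() now returns bool, callers are expected to check whether the references could actually be taken. A hypothetical caller sketch:

	/* Sketch only: the grab can now fail, e.g. when a device is going away. */
	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;	/* assumed error handling */
	}
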
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index eaf55da9a205..c4c0861deac1 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -905,9 +905,9 @@ struct nft_expr_ops {
int (*offload)(struct nft_offload_ctx *ctx,
struct nft_flow_rule *flow,
const struct nft_expr *expr);
+ bool (*offload_action)(const struct nft_expr *expr);
void (*offload_stats)(struct nft_expr *expr,
const struct flow_stats *stats);
- u32 offload_flags;
const struct nft_expr_type *type;
void *data;
};
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index f9d95ff82df8..797147843958 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -67,8 +67,6 @@ struct nft_flow_rule {
struct flow_rule *rule;
};
-#define NFT_OFFLOAD_F_ACTION (1 << 0)
-
void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
enum flow_dissector_key_id addr_type);
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index a4b550380316..6bd7e5a85ce7 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -77,9 +77,10 @@ struct netns_ipv6 {
spinlock_t fib6_gc_lock;
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
#ifdef CONFIG_IPV6_SUBTREES
unsigned int fib6_routes_require_src;
#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index ff9b508d9c5f..50aecd28b355 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -507,7 +507,7 @@ struct sock {
#endif
u16 sk_tsflags;
u8 sk_shutdown;
- u32 sk_tskey;
+ atomic_t sk_tskey;
atomic_t sk_zckey;
u8 sk_clockid;
@@ -2667,7 +2667,7 @@ static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
__sock_tx_timestamp(tsflags, tx_flags);
if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
- *tskey = sk->sk_tskey++;
+ *tskey = atomic_inc_return(&sk->sk_tskey) - 1;
}
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
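
Because sk_tskey is now an atomic_t, any other reader or writer of the field would move from plain accesses to the atomic helpers. A hedged sketch of the equivalent conversions (sk and val are assumed):

	u32 key;

	key = atomic_read(&sk->sk_tskey);		/* was: key = sk->sk_tskey;   */
	atomic_set(&sk->sk_tskey, val);			/* was: sk->sk_tskey = val;   */
	key = atomic_inc_return(&sk->sk_tskey) - 1;	/* was: key = sk->sk_tskey++; */
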
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fdb41e8bb626..76aa6f11a540 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1568,7 +1568,6 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x);
-u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
@@ -1681,14 +1680,15 @@ int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
const struct xfrm_migrate *m, int num_bundles,
const struct xfrm_kmaddress *k,
const struct xfrm_encap_tmpl *encap);
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+ u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m,
struct xfrm_encap_tmpl *encap);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_bundles,
struct xfrm_kmaddress *k, struct net *net,
- struct xfrm_encap_tmpl *encap);
+ struct xfrm_encap_tmpl *encap, u32 if_id);
#endif
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
index 90ae8d191f1a..bae490cac0aa 100644
--- a/include/soc/fsl/dpaa2-fd.h
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -7,7 +7,8 @@
#ifndef __FSL_DPAA2_FD_H
#define __FSL_DPAA2_FD_H
-#include <linux/kernel.h>
+#include <linux/byteorder/generic.h>
+#include <linux/types.h>
/**
* DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
diff --git a/include/soc/fsl/qe/immap_qe.h b/include/soc/fsl/qe/immap_qe.h
index 7614fee532f1..edd601f53f5d 100644
--- a/include/soc/fsl/qe/immap_qe.h
+++ b/include/soc/fsl/qe/immap_qe.h
@@ -13,7 +13,8 @@
#define _ASM_POWERPC_IMMAP_QE_H
#ifdef __KERNEL__
-#include <linux/kernel.h>
+#include <linux/types.h>
+
#include <asm/io.h>
#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */
diff --git a/include/soc/fsl/qe/qe_tdm.h b/include/soc/fsl/qe/qe_tdm.h
index b6febe225071..43ea830cfe1f 100644
--- a/include/soc/fsl/qe/qe_tdm.h
+++ b/include/soc/fsl/qe/qe_tdm.h
@@ -10,8 +10,8 @@
#ifndef _QE_TDM_H_
#define _QE_TDM_H_
-#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
@@ -19,6 +19,8 @@
#include <soc/fsl/qe/ucc.h>
#include <soc/fsl/qe/ucc_fast.h>
+struct device_node;
+
/* SI RAM entries */
#define SIR_LAST 0x0001
#define SIR_BYTE 0x0002
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index 9696a5b9b5d1..ad60b87a3c69 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -10,7 +10,7 @@
#ifndef __UCC_FAST_H__
#define __UCC_FAST_H__
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
index 11a216e4e919..7548ce8a202d 100644
--- a/include/soc/fsl/qe/ucc_slow.h
+++ b/include/soc/fsl/qe/ucc_slow.h
@@ -11,7 +11,7 @@
#ifndef __UCC_SLOW_H__
#define __UCC_SLOW_H__
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 9b187d86e1bd..36da42cd0774 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -617,6 +617,7 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream);
/**
* snd_pcm_stream_lock_irqsave - Lock the PCM stream
@@ -636,6 +637,20 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
/**
+ * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with
+ * the single-depth lockdep subclass.
+ */
+#define snd_pcm_stream_lock_irqsave_nested(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \
+ } while (0)
+
+/**
* snd_pcm_group_for_each_entry - iterate over the linked substreams
* @s: the iterator
* @substream: the substream
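
A hedged usage sketch for the nested variant added above, following the same pattern documented for snd_pcm_stream_lock_irqsave(); the second substream is an assumption for illustration:

	unsigned long flags;

	/* e.g. lock a second, linked substream while another stream lock is held */
	snd_pcm_stream_lock_irqsave_nested(other_substream, flags);
	/* ... touch other_substream's runtime state ... */
	snd_pcm_stream_unlock_irqrestore(other_substream, flags);
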
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 225ec87d4f22..7989d9483ea7 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -278,7 +278,8 @@
#define KEY_PAUSECD 201
#define KEY_PROG3 202
#define KEY_PROG4 203
-#define KEY_DASHBOARD 204 /* AL Dashboard */
+#define KEY_ALL_APPLICATIONS 204 /* AC Desktop Show All Applications */
+#define KEY_DASHBOARD KEY_ALL_APPLICATIONS
#define KEY_SUSPEND 205
#define KEY_CLOSE 206 /* AC Close */
#define KEY_PLAY 207
@@ -612,6 +613,7 @@
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */
+#define KEY_DICTATE 0x24a /* Start or Stop Voice Dictation Session (HUTRR99) */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b46bcdb0cab1..507ee1f2aa96 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_GPA_BITS 207
#define KVM_CAP_XSAVE2 208
#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1624,9 +1625,6 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-/* Available with KVM_CAP_XSAVE2 */
-#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
-
struct kvm_s390_pv_sec_parm {
__u64 origin;
__u64 length;
@@ -2048,4 +2046,7 @@ struct kvm_stats_desc {
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
#endif /* __LINUX_KVM_H */
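
KVM_GET_XSAVE2 is only re-ordered here to sit with the other 0xcX ioctls; its use is unchanged. A hedged userspace sketch, assuming vm_fd and vcpu_fd are open KVM file descriptors and that KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) reports the required buffer size as documented:

	/* Sketch only: query the buffer size, then fetch the extended state. */
	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
	struct kvm_xsave *xsave;

	if (size < (int)sizeof(struct kvm_xsave))
		return;				/* fall back to plain KVM_GET_XSAVE */

	xsave = calloc(1, size);
	if (xsave && !ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave)) {
		/* xsave->region now holds the vCPU's XSAVE data */
	}
	free(xsave);
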
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 4b3395082d15..26071021e986 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -106,7 +106,7 @@ enum ip_conntrack_status {
IPS_NAT_CLASH = IPS_UNTRACKED,
#endif
- /* Conntrack got a helper explicitly attached via CT target. */
+ /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 1b65042ab1db..82858b697c05 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -465,6 +465,8 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
* siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
*/
__u64 sig_data;
};
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index c7008d87f1a4..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -84,12 +84,11 @@ struct smc_diag_conninfo {
/* SMC_DIAG_LINKINFO */
struct smc_diag_linkinfo {
- __u8 link_id; /* link identifier */
- __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */
- __u8 ibport; /* RDMA device port number */
- __u8 gid[40]; /* local GID */
- __u8 peer_gid[40]; /* peer GID */
- __aligned_u64 net_cookie; /* RDMA device net namespace */
+ __u8 link_id; /* link identifier */
+ __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */
+ __u8 ibport; /* RDMA device port number */
+ __u8 gid[40]; /* local GID */
+ __u8 peer_gid[40]; /* peer GID */
};
struct smc_diag_lgrinfo {
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 4e29d7851890..65e13a099b1a 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -511,6 +511,12 @@ struct xfrm_user_offload {
int ifindex;
__u8 flags;
};
+/* This flag was exposed without any kernel code that supports it.
+ * Unfortunately, strongswan has code that sets this flag,
+ * which makes it impossible to reuse this bit.
+ *
+ * So leave it here to make sure that it won't be reused by mistake.
+ */
#define XFRM_OFFLOAD_IPV6 1
#define XFRM_OFFLOAD_INBOUND 2
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index ef0cafe295b2..2d3e5df39a59 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -56,8 +56,10 @@
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 9ac5515b9bc2..7a7145395c09 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -47,7 +47,13 @@ struct ioctl_gntdev_grant_ref {
/*
* Inserts the grant references into the mapping table of an instance
* of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
+ * until mmap() is called with @index as the offset. @index should be
+ * considered opaque to userspace, with one exception: if no grant
+ * references have ever been inserted into the mapping table of this
+ * instance, @index will be set to 0. This is necessary to use gntdev
+ * with userspace APIs that expect a file descriptor that can be
+ * mmap()'d at offset 0, such as Wayland. If @count is set to 0, this
+ * ioctl will fail.
*/
#define IOCTL_GNTDEV_MAP_GRANT_REF \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index bbee8c6a349d..4dc45a51c044 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -1,6 +1,4 @@
/******************************************************************************
- * evtchn.h
- *
* Interface to /dev/xen/xenbus_backend.
*
* Copyright (c) 2011 Bastian Blank <waldi@debian.org>
diff --git a/ipc/sem.c b/ipc/sem.c
index 6693daf4fe11..0dbdb98fdf2d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1964,6 +1964,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
*/
un = lookup_undo(ulp, semid);
if (un) {
+ spin_unlock(&ulp->lock);
kvfree(new);
goto success;
}
@@ -1976,9 +1977,8 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
ipc_assert_locked_object(&sma->sem_perm);
list_add(&new->list_id, &sma->list_id);
un = new;
-
-success:
spin_unlock(&ulp->lock);
+success:
sem_unlock(sma, -1);
out:
return un;
diff --git a/kernel/async.c b/kernel/async.c
index b8d7a663497f..b2c4ba5686ee 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
- /* mark that this task has queued an async job, used by module init */
- current->flags |= PF_USED_ASYNC;
-
/* schedule for execution */
queue_work_node(node, system_unbound_wq, &entry->work);
diff --git a/kernel/audit.c b/kernel/audit.c
index e4bbe2c70c26..7690c29d4ee4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -541,20 +541,22 @@ static void kauditd_printk_skb(struct sk_buff *skb)
/**
* kauditd_rehold_skb - Handle an audit record send failure in the hold queue
* @skb: audit record
+ * @error: error code (unused)
*
* Description:
* This should only be used by the kauditd_thread when it fails to flush the
* hold queue.
*/
-static void kauditd_rehold_skb(struct sk_buff *skb)
+static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
{
- /* put the record back in the queue at the same place */
- skb_queue_head(&audit_hold_queue, skb);
+ /* put the record back in the queue */
+ skb_queue_tail(&audit_hold_queue, skb);
}
/**
* kauditd_hold_skb - Queue an audit record, waiting for auditd
* @skb: audit record
+ * @error: error code
*
* Description:
* Queue the audit record, waiting for an instance of auditd. When this
@@ -564,19 +566,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
* and queue it, if we have room. If we want to hold on to the record, but we
* don't have room, record a record lost message.
*/
-static void kauditd_hold_skb(struct sk_buff *skb)
+static void kauditd_hold_skb(struct sk_buff *skb, int error)
{
/* at this point it is uncertain if we will ever send this to auditd so
* try to send the message via printk before we go any further */
kauditd_printk_skb(skb);
/* can we just silently drop the message? */
- if (!audit_default) {
- kfree_skb(skb);
- return;
+ if (!audit_default)
+ goto drop;
+
+ /* the hold queue is only for when the daemon goes away completely,
+ * not -EAGAIN failures; if we are in a -EAGAIN state requeue the
+ * record on the retry queue unless it's full, in which case drop it
+ */
+ if (error == -EAGAIN) {
+ if (!audit_backlog_limit ||
+ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
+ skb_queue_tail(&audit_retry_queue, skb);
+ return;
+ }
+ audit_log_lost("kauditd retry queue overflow");
+ goto drop;
}
- /* if we have room, queue the message */
+ /* if we have room in the hold queue, queue the message */
if (!audit_backlog_limit ||
skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
skb_queue_tail(&audit_hold_queue, skb);
@@ -585,24 +599,32 @@ static void kauditd_hold_skb(struct sk_buff *skb)
/* we have no other options - drop the message */
audit_log_lost("kauditd hold queue overflow");
+drop:
kfree_skb(skb);
}
/**
* kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
* @skb: audit record
+ * @error: error code (unused)
*
* Description:
* Not as serious as kauditd_hold_skb() as we still have a connected auditd,
* but for some reason we are having problems sending it audit records so
* queue the given record and attempt to resend.
*/
-static void kauditd_retry_skb(struct sk_buff *skb)
+static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
{
- /* NOTE: because records should only live in the retry queue for a
- * short period of time, before either being sent or moved to the hold
- * queue, we don't currently enforce a limit on this queue */
- skb_queue_tail(&audit_retry_queue, skb);
+ if (!audit_backlog_limit ||
+ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
+ skb_queue_tail(&audit_retry_queue, skb);
+ return;
+ }
+
+ /* we have to drop the record, send it via printk as a last effort */
+ kauditd_printk_skb(skb);
+ audit_log_lost("kauditd retry queue overflow");
+ kfree_skb(skb);
}
/**
@@ -640,7 +662,7 @@ static void auditd_reset(const struct auditd_connection *ac)
/* flush the retry queue to the hold queue, but don't touch the main
* queue since we need to process that normally for multicast */
while ((skb = skb_dequeue(&audit_retry_queue)))
- kauditd_hold_skb(skb);
+ kauditd_hold_skb(skb, -ECONNREFUSED);
}
/**
@@ -714,16 +736,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
struct sk_buff_head *queue,
unsigned int retry_limit,
void (*skb_hook)(struct sk_buff *skb),
- void (*err_hook)(struct sk_buff *skb))
+ void (*err_hook)(struct sk_buff *skb, int error))
{
int rc = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_tail;
unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
- while ((skb = skb_dequeue(queue))) {
+ skb_tail = skb_peek_tail(queue);
+ while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
/* call the skb_hook for each skb we touch */
if (skb_hook)
(*skb_hook)(skb);
@@ -731,7 +755,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
/* can we send to anyone via unicast? */
if (!sk) {
if (err_hook)
- (*err_hook)(skb);
+ (*err_hook)(skb, -ECONNREFUSED);
continue;
}
@@ -745,7 +769,7 @@ retry:
rc == -ECONNREFUSED || rc == -EPERM) {
sk = NULL;
if (err_hook)
- (*err_hook)(skb);
+ (*err_hook)(skb, rc);
if (rc == -EAGAIN)
rc = 0;
/* continue to drain the queue */
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index fce5d43a933f..a83928cbdcb7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -185,7 +185,7 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
case AUDITSC_EXECVE:
return mask & AUDIT_PERM_EXEC;
case AUDITSC_OPENAT2:
- return mask & ACC_MODE((u32)((struct open_how *)ctx->argv[2])->flags);
+ return mask & ACC_MODE((u32)ctx->openat2.flags);
default:
return 0;
}
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 06062370c3b8..9e4ecc990647 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -207,7 +207,7 @@ BTF_ID(func, bpf_lsm_socket_socketpair)
BTF_ID(func, bpf_lsm_syslog)
BTF_ID(func, bpf_lsm_task_alloc)
-BTF_ID(func, bpf_lsm_task_getsecid_subj)
+BTF_ID(func, bpf_lsm_current_getsecid_subj)
BTF_ID(func, bpf_lsm_task_getsecid_obj)
BTF_ID(func, bpf_lsm_task_prctl)
BTF_ID(func, bpf_lsm_task_setscheduler)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index e16dafeb2450..3e23b3fa79ff 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5688,7 +5688,8 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
}
if (check_ptr_off_reg(env, reg, regno))
return -EINVAL;
- } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
+ } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
+ (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
const struct btf_type *reg_ref_t;
const struct btf *reg_btf;
const char *reg_ref_tname;
@@ -5706,7 +5707,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
reg_ref_id = reg->btf_id;
} else {
reg_btf = btf_vmlinux;
- reg_ref_id = *reg2btf_ids[reg->type];
+ reg_ref_id = *reg2btf_ids[base_type(reg->type)];
}
reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 01cfdf40c838..55c084251fab 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
@@ -1075,6 +1076,7 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
void *key;
u32 idx;
+ BTF_TYPE_EMIT(struct bpf_timer);
callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
if (!callback_fn)
goto out;
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 638d7fd7b375..710ba9de12ce 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -104,7 +104,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
}
rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
- VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
+ VM_MAP | VM_USERMAP, PAGE_KERNEL);
if (rb) {
kmemleak_not_leak(pages);
rb->pages = pages;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index fa4505f9b611..ca70fe6fba38 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1355,6 +1355,7 @@ int generic_map_delete_batch(struct bpf_map *map,
maybe_wait_bpf_programs(map);
if (err)
break;
+ cond_resched();
}
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
err = -EFAULT;
@@ -1412,6 +1413,7 @@ int generic_map_update_batch(struct bpf_map *map,
if (err)
break;
+ cond_resched();
}
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
@@ -1509,6 +1511,7 @@ int generic_map_lookup_batch(struct bpf_map *map,
swap(prev_key, key);
retry = MAP_LOOKUP_RETRIES;
cp++;
+ cond_resched();
}
if (err == -EFAULT)
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 4b6974a195c1..5e7edf913060 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -550,11 +550,12 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
static void notrace inc_misses_counter(struct bpf_prog *prog)
{
struct bpf_prog_stats *stats;
+ unsigned int flags;
stats = this_cpu_ptr(prog->stats);
- u64_stats_update_begin(&stats->syncp);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
u64_stats_inc(&stats->misses);
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
}
/* The logic is similar to bpf_prog_run(), but with an explicit
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 41e0837a5a0b..afc6c0e9c966 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -546,9 +546,19 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off)
{
struct cgroup *cgrp;
+ struct cgroup_file_ctx *ctx;
BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+ /*
+ * The release agent gets called with all capabilities,
+ * so require capabilities to set the release agent.
+ */
+ ctx = of->priv;
+ if ((ctx->ns->user_ns != &init_user_ns) ||
+ !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
return -ENODEV;
@@ -954,6 +964,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
/* Specifying two release agents is forbidden */
if (ctx->release_agent)
return invalfc(fc, "release_agent respecified");
+ /*
+ * The release agent gets called with all capabilities,
+ * so require capabilities to set the release agent.
+ */
+ if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
+ return invalfc(fc, "Setting release_agent not allowed");
ctx->release_agent = param->string;
param->string = NULL;
break;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 9d05c3ca2d5e..a557eea7166f 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6166,6 +6166,20 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs)
if (ret)
goto err;
+ /*
+ * Spawning a task directly into a cgroup works by passing a file
+ * descriptor to the target cgroup directory. This can even be an O_PATH
+ * file descriptor. But it can never be a cgroup.procs file descriptor.
+ * This was done on purpose so spawning into a cgroup could be
+ * conceptualized as an atomic
+ *
+ * fd = openat(dfd_cgroup, "cgroup.procs", ...);
+ * write(fd, <child-pid>, ...);
+ *
+ * sequence, i.e. it's a shorthand for the caller opening and writing
+ * cgroup.procs of the cgroup indicated by @dfd_cgroup. This allows us
+ * to always use the caller's credentials.
+ */
ret = cgroup_attach_permissions(cset->dfl_cgrp, dst_cgrp, sb,
!(kargs->flags & CLONE_THREAD),
current->nsproxy->cgroup_ns);
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index dc653ab26e50..5de18448016c 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -591,6 +591,35 @@ static inline void free_cpuset(struct cpuset *cs)
}
/*
+ * validate_change_legacy() - Validate conditions specific to legacy (v1)
+ * behavior.
+ */
+static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
+{
+ struct cgroup_subsys_state *css;
+ struct cpuset *c, *par;
+ int ret;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* Each of our child cpusets must be a subset of us */
+ ret = -EBUSY;
+ cpuset_for_each_child(c, css, cur)
+ if (!is_cpuset_subset(c, trial))
+ goto out;
+
+ /* On legacy hierarchy, we must be a subset of our parent cpuset. */
+ ret = -EACCES;
+ par = parent_cs(cur);
+ if (par && !is_cpuset_subset(trial, par))
+ goto out;
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
* validate_change() - Used to validate that any proposed cpuset change
* follows the structural rules for cpusets.
*
@@ -614,20 +643,21 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
struct cgroup_subsys_state *css;
struct cpuset *c, *par;
- int ret;
-
- /* The checks don't apply to root cpuset */
- if (cur == &top_cpuset)
- return 0;
+ int ret = 0;
rcu_read_lock();
- par = parent_cs(cur);
- /* On legacy hierarchy, we must be a subset of our parent cpuset. */
- ret = -EACCES;
- if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
+ if (!is_in_v2_mode())
+ ret = validate_change_legacy(cur, trial);
+ if (ret)
+ goto out;
+
+ /* Remaining checks don't apply to root cpuset */
+ if (cur == &top_cpuset)
goto out;
+ par = parent_cs(cur);
+
/*
* If either I or some sibling (!= me) is exclusive, we can't
* overlap
@@ -1175,9 +1205,7 @@ enum subparts_cmd {
*
* Because of the implicit cpu exclusive nature of a partition root,
* cpumask changes that violates the cpu exclusivity rule will not be
- * permitted when checked by validate_change(). The validate_change()
- * function will also prevent any changes to the cpu list if it is not
- * a superset of children's cpu lists.
+ * permitted when checked by validate_change().
*/
static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
struct cpumask *newmask,
@@ -1522,10 +1550,15 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
struct cpuset *sibling;
struct cgroup_subsys_state *pos_css;
+ percpu_rwsem_assert_held(&cpuset_rwsem);
+
/*
* Check all its siblings and call update_cpumasks_hier()
* if their use_parent_ecpus flag is set in order for them
* to use the right effective_cpus value.
+ *
+ * The update_cpumasks_hier() function may sleep. So we have to
+ * release the RCU read lock before calling it.
*/
rcu_read_lock();
cpuset_for_each_child(sibling, pos_css, parent) {
@@ -1533,8 +1566,13 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
continue;
if (!sibling->use_parent_ecpus)
continue;
+ if (!css_tryget_online(&sibling->css))
+ continue;
+ rcu_read_unlock();
update_cpumasks_hier(sibling, tmp);
+ rcu_read_lock();
+ css_put(&sibling->css);
}
rcu_read_unlock();
}
@@ -1607,8 +1645,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* Make sure that subparts_cpus is a subset of cpus_allowed.
*/
if (cs->nr_subparts_cpus) {
- cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
- cs->cpus_allowed);
+ cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
}
spin_unlock_irq(&callback_lock);
@@ -2252,6 +2289,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
cgroup_taskset_first(tset, &css);
cs = css_cs(css);
+ cpus_read_lock();
percpu_down_write(&cpuset_rwsem);
guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
@@ -2305,6 +2343,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
wake_up(&cpuset_attach_wq);
percpu_up_write(&cpuset_rwsem);
+ cpus_read_unlock();
}
/* The various types of files and directories in a cpuset file system */
@@ -3485,8 +3524,8 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
return cs;
}
-/**
- * cpuset_node_allowed - Can we allocate on a memory node?
+/*
+ * __cpuset_node_allowed - Can we allocate on a memory node?
* @node: is this an allowed node?
* @gfp_mask: memory allocation flags
*
@@ -3657,8 +3696,8 @@ void cpuset_print_current_mems_allowed(void)
int cpuset_memory_pressure_enabled __read_mostly;
-/**
- * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
+/*
+ * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
*
* Keep a running average of the rate of synchronous (direct)
* page reclaim efforts initiated by tasks in each cpuset.
@@ -3673,7 +3712,7 @@ int cpuset_memory_pressure_enabled __read_mostly;
* "memory_pressure". Value displayed is an integer
* representing the recent rate of entry into the synchronous
* (direct) page reclaim by any task attached to the cpuset.
- **/
+ */
void __cpuset_memory_pressure_bump(void)
{
diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config
index e9ffb0cc1eec..07df6d93c4df 100644
--- a/kernel/configs/debug.config
+++ b/kernel/configs/debug.config
@@ -16,7 +16,7 @@ CONFIG_SYMBOLIC_ERRNAME=y
#
# Compile-time checks and compiler options
#
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_FRAME_WARN=2048
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
diff --git a/kernel/cred.c b/kernel/cred.c
index 473d17c431f3..933155c96922 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -665,21 +665,16 @@ EXPORT_SYMBOL(cred_fscmp);
int set_cred_ucounts(struct cred *new)
{
- struct task_struct *task = current;
- const struct cred *old = task->real_cred;
struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
- if (new->user == old->user && new->user_ns == old->user_ns)
- return 0;
-
/*
* This optimization is needed because alloc_ucounts() uses locks
* for table lookups.
*/
- if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
+ if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
return 0;
- if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
+ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
return -EAGAIN;
new->ucounts = new_ucounts;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index f1e7ea160b43..bfc56cb21705 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -628,7 +628,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
tlb_addr = slot_addr(mem->start, index) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+ (!(attrs & DMA_ATTR_OVERWRITE) || dir == DMA_TO_DEVICE ||
+ dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 76c754e45d01..6859229497b1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -839,7 +839,7 @@ static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
*/
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
- struct perf_cpu_context *cpuctx;
+ struct perf_cpu_context *cpuctx, *tmp;
struct list_head *list;
unsigned long flags;
@@ -850,7 +850,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
local_irq_save(flags);
list = this_cpu_ptr(&cgrp_cpuctx_list);
- list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
+ list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
@@ -3238,6 +3238,15 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
return err;
}
+/*
+ * Copy event-type-independent attributes that may be modified.
+ */
+static void perf_event_modify_copy_attr(struct perf_event_attr *to,
+ const struct perf_event_attr *from)
+{
+ to->sig_data = from->sig_data;
+}
+
static int perf_event_modify_attr(struct perf_event *event,
struct perf_event_attr *attr)
{
@@ -3260,10 +3269,17 @@ static int perf_event_modify_attr(struct perf_event *event,
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
+ /*
+ * Event-type-independent attributes must be copied before event-type
+ * modification, which will validate that final attributes match the
+ * source attributes after all relevant attributes have been copied.
+ */
+ perf_event_modify_copy_attr(&event->attr, attr);
err = func(event, attr);
if (err)
goto out;
list_for_each_entry(child, &event->child_list, child_list) {
+ perf_event_modify_copy_attr(&child->attr, attr);
err = func(child, attr);
if (err)
goto out;
diff --git a/kernel/fork.c b/kernel/fork.c
index d75a528f7b21..f1e89007f228 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -366,14 +366,14 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
*new = data_race(*orig);
INIT_LIST_HEAD(&new->anon_vma_chain);
new->vm_next = new->vm_prev = NULL;
- dup_vma_anon_name(orig, new);
+ dup_anon_vma_name(orig, new);
}
return new;
}
void vm_area_free(struct vm_area_struct *vma)
{
- free_vma_anon_name(vma);
+ free_anon_vma_name(vma);
kmem_cache_free(vm_area_cachep, vma);
}
@@ -2021,18 +2021,18 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
+ retval = copy_creds(p, clone_flags);
+ if (retval < 0)
+ goto bad_fork_free;
+
retval = -EAGAIN;
if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
if (p->real_cred->user != INIT_USER &&
!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
- goto bad_fork_free;
+ goto bad_fork_cleanup_count;
}
current->flags &= ~PF_NPROC_EXCEEDED;
- retval = copy_creds(p, clone_flags);
- if (retval < 0)
- goto bad_fork_free;
-
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
@@ -2267,6 +2267,17 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_put_pidfd;
/*
+ * Now that the cgroups are pinned, re-clone the parent cgroup and put
+ * the new task on the correct runqueue. All this *before* the task
+ * becomes visible.
+ *
+ * This isn't part of ->can_fork() because while the re-cloning is
+ * cgroup specific, it unconditionally needs to place the task on a
+ * runqueue.
+ */
+ sched_cgroup_fork(p, args);
+
+ /*
* From this point on we must avoid any synchronous user-space
* communication until we take the tasklist-lock. In particular, we do
* not want user-space to be able to predict the process start-time by
@@ -2323,10 +2334,6 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cancel_cgroup;
}
- /* past the last point of failure */
- if (pidfile)
- fd_install(pidfd, pidfile);
-
init_task_pid_links(p);
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -2375,8 +2382,11 @@ static __latent_entropy struct task_struct *copy_process(
syscall_tracepoint_update(p);
write_unlock_irq(&tasklist_lock);
+ if (pidfile)
+ fd_install(pidfd, pidfile);
+
proc_fork_connector(p);
- sched_post_fork(p, args);
+ sched_post_fork(p);
cgroup_post_fork(p, args);
perf_event_fork(p);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4a882f83aeb9..f8a0212189ca 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3462,7 +3462,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
u16 chain_hlock = chain_hlocks[chain->base + i];
unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
- return lock_classes + class_idx - 1;
+ return lock_classes + class_idx;
}
/*
@@ -3530,7 +3530,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
hlock_id = chain_hlocks[chain->base + i];
chain_key = print_chain_key_iteration(hlock_id, chain_key);
- print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
+ print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
printk("\n");
}
}
diff --git a/kernel/module.c b/kernel/module.c
index 24dab046e16c..46a5c2ed1928 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3725,12 +3725,6 @@ static noinline int do_init_module(struct module *mod)
}
freeinit->module_init = mod->init_layout.base;
- /*
- * We want to find out whether @mod uses async during init. Clear
- * PF_USED_ASYNC. async_schedule*() will set it.
- */
- current->flags &= ~PF_USED_ASYNC;
-
do_mod_ctors(mod);
/* Start the module */
if (mod->init != NULL)
@@ -3756,22 +3750,13 @@ static noinline int do_init_module(struct module *mod)
/*
* We need to finish all async code before the module init sequence
- * is done. This has potential to deadlock. For example, a newly
- * detected block device can trigger request_module() of the
- * default iosched from async probing task. Once userland helper
- * reaches here, async_synchronize_full() will wait on the async
- * task waiting on request_module() and deadlock.
- *
- * This deadlock is avoided by perfomring async_synchronize_full()
- * iff module init queued any async jobs. This isn't a full
- * solution as it will deadlock the same if module loading from
- * async jobs nests more than once; however, due to the various
- * constraints, this hack seems to be the best option for now.
- * Please refer to the following thread for details.
+ * is done. This has potential to deadlock if synchronous module
+ * loading is requested from async (which is not allowed!).
*
- * http://thread.gmane.org/gmane.linux.kernel/1420814
+ * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
+ * request_module() from async workers") for more details.
*/
- if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
+ if (!mod->async_probe_requested)
async_synchronize_full();
ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
diff --git a/kernel/module_decompress.c b/kernel/module_decompress.c
index b01c69c2ff99..ffef98a20320 100644
--- a/kernel/module_decompress.c
+++ b/kernel/module_decompress.c
@@ -250,6 +250,7 @@ void module_decompress_cleanup(struct load_info *info)
info->max_pages = info->used_pages = 0;
}
+#ifdef CONFIG_SYSFS
static ssize_t compression_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -269,3 +270,4 @@ static int __init module_decompress_sysfs_init(void)
return 0;
}
late_initcall(module_decompress_sysfs_init);
+#endif
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 44169f3081fd..7e646079fbeb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -504,7 +504,10 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
- return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
+ if (!pm_wakeup_irq())
+ return -ENODATA;
+
+ return sprintf(buf, "%u\n", pm_wakeup_irq());
}
power_attr_ro(pm_wakeup_irq);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index b7e7798637b8..11b570fcf049 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -134,7 +134,7 @@ int freeze_processes(void)
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
- pm_wakeup_clear(true);
+ pm_wakeup_clear(0);
pr_info("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 80cc1f0f502b..6fcdee7e87a5 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -136,8 +136,6 @@ static void s2idle_loop(void)
break;
}
- pm_wakeup_clear(false);
-
s2idle_enter();
}
diff --git a/kernel/printk/sysctl.c b/kernel/printk/sysctl.c
index 653ae04aab7f..c228343eeb97 100644
--- a/kernel/printk/sysctl.c
+++ b/kernel/printk/sysctl.c
@@ -12,7 +12,7 @@
static const int ten_thousand = 10000;
static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
if (write && !capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 848eaa0efe0e..9745613d531c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4424,6 +4424,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
init_entity_runnable_average(&p->se);
+
#ifdef CONFIG_SCHED_INFO
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -4439,18 +4440,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
return 0;
}
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
- struct task_group *tg;
-#endif
+ /*
+ * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+ * required yet, but lockdep gets upset if rules are violated.
+ */
raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_CGROUP_SCHED
- tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
- struct task_group, css);
- p->sched_task_group = autogroup_task_group(p, tg);
+ if (1) {
+ struct task_group *tg;
+ tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+ struct task_group, css);
+ tg = autogroup_task_group(p, tg);
+ p->sched_task_group = tg;
+ }
#endif
rseq_migrate(p);
/*
@@ -4461,7 +4467,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+void sched_post_fork(struct task_struct *p)
+{
uclamp_post_fork(p);
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 4d8f44a17727..db10e73d06e0 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -29,6 +29,9 @@
#include <linux/syscalls.h>
#include <linux/sysctl.h>
+/* Not exposed in headers: strictly internal use only. */
+#define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
+
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
@@ -1010,6 +1013,7 @@ static void __secure_computing_strict(int this_syscall)
#ifdef SECCOMP_DEBUG
dump_stack();
#endif
+ current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
do_exit(SIGKILL);
}
@@ -1261,6 +1265,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
case SECCOMP_RET_KILL_THREAD:
case SECCOMP_RET_KILL_PROCESS:
default:
+ current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGSYS, action, true);
/* Dump core only if this is the last remaining thread. */
if (action != SECCOMP_RET_KILL_THREAD ||
@@ -1309,6 +1314,11 @@ int __secure_computing(const struct seccomp_data *sd)
return 0;
case SECCOMP_MODE_FILTER:
return __seccomp_filter(this_syscall, sd, false);
+ /* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
+ case SECCOMP_MODE_DEAD:
+ WARN_ON_ONCE(1);
+ do_exit(SIGKILL);
+ return -1;
default:
BUG();
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 38602738866e..9b04631acde8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1342,9 +1342,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
}
/*
* Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
- * debugging to leave init killable.
+ * debugging to leave init killable. But HANDLER_EXIT is always fatal.
*/
- if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
+ if (action->sa.sa_handler == SIG_DFL &&
+ (!t->ptrace || (handler == HANDLER_EXIT)))
t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = send_signal(sig, info, t, PIDTYPE_PID);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index 66b8af394e58..ddb5a7f48d69 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -70,7 +70,7 @@ late_initcall(stackleak_sysctls_init);
#define skip_erasing() false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
-asmlinkage void notrace stackleak_erase(void)
+asmlinkage void noinstr stackleak_erase(void)
{
/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
unsigned long kstack_ptr = current->lowest_stack;
@@ -124,9 +124,8 @@ asmlinkage void notrace stackleak_erase(void)
/* Reset the 'lowest_stack' value for the next syscall */
current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
}
-NOKPROBE_SYMBOL(stackleak_erase);
-void __used __no_caller_saved_registers notrace stackleak_track_stack(void)
+void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
{
unsigned long sp = current_stack_pointer;
diff --git a/kernel/sys.c b/kernel/sys.c
index ecc4cf019242..5b0e172c4d47 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
@@ -472,6 +473,16 @@ static int set_user(struct cred *new)
if (!new_user)
return -EAGAIN;
+ free_uid(new->user);
+ new->user = new_user;
+ return 0;
+}
+
+static void flag_nproc_exceeded(struct cred *new)
+{
+ if (new->ucounts == current_ucounts())
+ return;
+
/*
* We don't fail in case of NPROC limit excess here because too many
* poorly written programs don't check set*uid() return code, assuming
@@ -480,15 +491,10 @@ static int set_user(struct cred *new)
* failure to the execve() stage.
*/
if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
- new_user != INIT_USER &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
+ new->user != INIT_USER)
current->flags |= PF_NPROC_EXCEEDED;
else
current->flags &= ~PF_NPROC_EXCEEDED;
-
- free_uid(new->user);
- new->user = new_user;
- return 0;
}
/*
@@ -563,6 +569,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -625,6 +632,7 @@ long __sys_setuid(uid_t uid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -704,6 +712,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -2278,15 +2287,16 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr,
{
struct mm_struct *mm = current->mm;
const char __user *uname;
- char *name, *pch;
+ struct anon_vma_name *anon_name = NULL;
int error;
switch (opt) {
case PR_SET_VMA_ANON_NAME:
uname = (const char __user *)arg;
if (uname) {
- name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
+ char *name, *pch;
+ name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
if (IS_ERR(name))
return PTR_ERR(name);
@@ -2296,15 +2306,18 @@ static int prctl_set_vma(unsigned long opt, unsigned long addr,
return -EINVAL;
}
}
- } else {
- /* Reset the name */
- name = NULL;
+ /* anon_vma has its own copy */
+ anon_name = anon_vma_name_alloc(name);
+ kfree(name);
+ if (!anon_name)
+ return -ENOMEM;
+
}
mmap_write_lock(mm);
- error = madvise_set_anon_name(mm, addr, size, name);
+ error = madvise_set_anon_name(mm, addr, size, anon_name);
mmap_write_unlock(mm);
- kfree(name);
+ anon_vma_name_put(anon_name);
break;
default:
error = -EINVAL;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index af68a67179b4..21dea90eaa93 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -310,10 +310,20 @@ record_it:
local_irq_restore(flags);
}
-static void blk_trace_free(struct blk_trace *bt)
+static void blk_trace_free(struct request_queue *q, struct blk_trace *bt)
{
relay_close(bt->rchan);
- debugfs_remove(bt->dir);
+
+ /*
+ * If 'bt->dir' is not set, then both 'dropped' and 'msg' are created
+ * under 'q->debugfs_dir', thus lookup and remove them.
+ */
+ if (!bt->dir) {
+ debugfs_remove(debugfs_lookup("dropped", q->debugfs_dir));
+ debugfs_remove(debugfs_lookup("msg", q->debugfs_dir));
+ } else {
+ debugfs_remove(bt->dir);
+ }
free_percpu(bt->sequence);
free_percpu(bt->msg_data);
kfree(bt);
@@ -335,10 +345,10 @@ static void put_probe_ref(void)
mutex_unlock(&blk_probe_mutex);
}
-static void blk_trace_cleanup(struct blk_trace *bt)
+static void blk_trace_cleanup(struct request_queue *q, struct blk_trace *bt)
{
synchronize_rcu();
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
put_probe_ref();
}
@@ -352,7 +362,7 @@ static int __blk_trace_remove(struct request_queue *q)
return -EINVAL;
if (bt->trace_state != Blktrace_running)
- blk_trace_cleanup(bt);
+ blk_trace_cleanup(q, bt);
return 0;
}
@@ -572,7 +582,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
ret = 0;
err:
if (ret)
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
return ret;
}
@@ -1616,7 +1626,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
put_probe_ref();
synchronize_rcu();
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
return 0;
}
@@ -1647,7 +1657,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
return 0;
free_bt:
- blk_trace_free(bt);
+ blk_trace_free(q, bt);
return ret;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f9feb197b2da..a4b462b6f944 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -7191,7 +7191,6 @@ static int __init ftrace_nodyn_init(void)
core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
-static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
# define ftrace_startup_sysctl() do { } while (0)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c860f582b078..eb44418574f9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,7 +235,7 @@ static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static int __init set_trace_boot_options(char *str)
{
strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
- return 0;
+ return 1;
}
__setup("trace_options=", set_trace_boot_options);
@@ -246,12 +246,16 @@ static int __init set_trace_boot_clock(char *str)
{
strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
trace_boot_clock = trace_boot_clock_buf;
- return 0;
+ return 1;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
+ /* Ignore the "tp_printk_stop_on_boot" param */
+ if (*str == '_')
+ return 0;
+
if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
tracepoint_printk = 1;
return 1;
@@ -1470,10 +1474,12 @@ static int __init set_buf_size(char *str)
if (!str)
return 0;
buf_size = memparse(str, &str);
- /* nr_entries can not be zero */
- if (buf_size == 0)
- return 0;
- trace_buf_size = buf_size;
+ /*
+ * nr_entries can not be zero and the startup
+ * tests require some buffer space. Therefore
+ * ensure we have at least 4096 bytes of buffer.
+ */
+ trace_buf_size = max(4096UL, buf_size);
return 1;
}
__setup("trace_buf_size=", set_buf_size);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d038ddbf1bea..c5b09c31e077 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -136,7 +136,6 @@ struct kprobe_trace_entry_head {
struct eprobe_trace_entry_head {
struct trace_entry ent;
- unsigned int type;
};
struct kretprobe_trace_entry_head {
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index 191db32dec46..541aa13581b9 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -242,7 +242,6 @@ static int trace_eprobe_tp_arg_update(struct trace_eprobe *ep, int i)
static int eprobe_event_define_fields(struct trace_event_call *event_call)
{
- int ret;
struct eprobe_trace_entry_head field;
struct trace_probe *tp;
@@ -250,8 +249,6 @@ static int eprobe_event_define_fields(struct trace_event_call *event_call)
if (WARN_ON_ONCE(!tp))
return -ENOENT;
- DEFINE_FIELD(unsigned int, type, FIELD_STRING_TYPE, 0);
-
return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
}
@@ -270,7 +267,9 @@ print_eprobe_event(struct trace_iterator *iter, int flags,
struct trace_event_call *pevent;
struct trace_event *probed_event;
struct trace_seq *s = &iter->seq;
+ struct trace_eprobe *ep;
struct trace_probe *tp;
+ unsigned int type;
field = (struct eprobe_trace_entry_head *)iter->ent;
tp = trace_probe_primary_from_call(
@@ -278,15 +277,18 @@ print_eprobe_event(struct trace_iterator *iter, int flags,
if (WARN_ON_ONCE(!tp))
goto out;
+ ep = container_of(tp, struct trace_eprobe, tp);
+ type = ep->event->event.type;
+
trace_seq_printf(s, "%s: (", trace_probe_name(tp));
- probed_event = ftrace_find_event(field->type);
+ probed_event = ftrace_find_event(type);
if (probed_event) {
pevent = container_of(probed_event, struct trace_event_call, event);
trace_seq_printf(s, "%s.%s", pevent->class->system,
trace_event_name(pevent));
} else {
- trace_seq_printf(s, "%u", field->type);
+ trace_seq_printf(s, "%u", type);
}
trace_seq_putc(s, ')');
@@ -498,10 +500,6 @@ __eprobe_trace_func(struct eprobe_data *edata, void *rec)
return;
entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
- if (edata->ep->event)
- entry->type = edata->ep->event->event.type;
- else
- entry->type = 0;
store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);
trace_event_buffer_commit(&fbuffer);
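With the per-entry type field gone, print_eprobe_event() walks from the embedded trace_probe back to its enclosing trace_eprobe via container_of() and reads the attached event's type from there. A minimal userspace sketch of that recover-the-outer-object pattern (the struct names are invented for illustration, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel macro: recover the outer object from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };

struct outer {
	unsigned int type;
	struct inner member;	/* embedded, like trace_probe inside trace_eprobe */
};

int main(void)
{
	struct outer o = { .type = 42, .member = { .id = 7 } };
	struct inner *ip = &o.member;	/* only the inner pointer is handed around */
	struct outer *op = container_of(ip, struct outer, member);

	printf("type=%u id=%d\n", op->type, ip->id);	/* prints type=42 id=7 */
	return 0;
}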
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index ada87bfb5bb8..dc7f733b4cb3 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2289,9 +2289,9 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
/*
* For backward compatibility, if field_name
* was "cpu", then we treat this the same as
- * common_cpu.
+ * common_cpu. This also works for "CPU".
*/
- if (strcmp(field_name, "cpu") == 0) {
+ if (field && field->filter_type == FILTER_CPU) {
*flags |= HIST_FIELD_FL_CPU;
} else {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
@@ -4832,7 +4832,7 @@ static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
cmp_fn = tracing_map_cmp_none;
- else if (!field)
+ else if (!field || hist_field->flags & HIST_FIELD_FL_CPU)
cmp_fn = tracing_map_cmp_num(hist_field->size,
hist_field->is_signed);
else if (is_string_field(field))
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d00fee705f9c..7eb9d04f1c2e 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -84,6 +84,20 @@ event_triggers_call(struct trace_event_file *file,
}
EXPORT_SYMBOL_GPL(event_triggers_call);
+bool __trace_trigger_soft_disabled(struct trace_event_file *file)
+{
+ unsigned long eflags = file->flags;
+
+ if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
+ event_triggers_call(file, NULL, NULL, NULL);
+ if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
+ return true;
+ if (eflags & EVENT_FILE_FL_PID_FILTER)
+ return trace_event_ignore_this_pid(file);
+ return false;
+}
+EXPORT_SYMBOL_GPL(__trace_trigger_soft_disabled);
+
/**
* event_triggers_post_call - Call 'post_triggers' for a trace event
* @file: The trace_event_file associated with the event
@@ -1295,6 +1309,16 @@ traceon_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (tracer_tracing_is_on(file->tr))
+ return;
+
+ tracer_tracing_on(file->tr);
+ return;
+ }
+
if (tracing_is_on())
return;
@@ -1306,8 +1330,15 @@ traceon_count_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- if (tracing_is_on())
- return;
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (tracer_tracing_is_on(file->tr))
+ return;
+ } else {
+ if (tracing_is_on())
+ return;
+ }
if (!data->count)
return;
@@ -1315,7 +1346,10 @@ traceon_count_trigger(struct event_trigger_data *data,
if (data->count != -1)
(data->count)--;
- tracing_on();
+ if (file)
+ tracer_tracing_on(file->tr);
+ else
+ tracing_on();
}
static void
@@ -1323,6 +1357,16 @@ traceoff_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (!tracer_tracing_is_on(file->tr))
+ return;
+
+ tracer_tracing_off(file->tr);
+ return;
+ }
+
if (!tracing_is_on())
return;
@@ -1334,8 +1378,15 @@ traceoff_count_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- if (!tracing_is_on())
- return;
+ struct trace_event_file *file = data->private_data;
+
+ if (file) {
+ if (!tracer_tracing_is_on(file->tr))
+ return;
+ } else {
+ if (!tracing_is_on())
+ return;
+ }
if (!data->count)
return;
@@ -1343,7 +1394,10 @@ traceoff_count_trigger(struct event_trigger_data *data,
if (data->count != -1)
(data->count)--;
- tracing_off();
+ if (file)
+ tracer_tracing_off(file->tr);
+ else
+ tracing_off();
}
static int
@@ -1540,7 +1594,12 @@ stacktrace_trigger(struct event_trigger_data *data,
struct trace_buffer *buffer, void *rec,
struct ring_buffer_event *event)
{
- trace_dump_stack(STACK_SKIP);
+ struct trace_event_file *file = data->private_data;
+
+ if (file)
+ __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP);
+ else
+ trace_dump_stack(STACK_SKIP);
}
static void
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 508f14af4f2c..b62fd785b599 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -32,7 +32,7 @@ static int __init set_kprobe_boot_events(char *str)
strlcpy(kprobe_boot_events_buf, str, COMMAND_LINE_SIZE);
disable_tracing_selftest("running kprobe events");
- return 0;
+ return 1;
}
__setup("kprobe_event=", set_kprobe_boot_events);
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 870a08da5b48..cfddb30e65ab 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1437,6 +1437,37 @@ static struct cpumask osnoise_cpumask;
static struct cpumask save_cpumask;
/*
+ * osnoise_sleep - sleep until the next period
+ */
+static void osnoise_sleep(void)
+{
+ u64 interval;
+ ktime_t wake_time;
+
+ mutex_lock(&interface_lock);
+ interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
+ mutex_unlock(&interface_lock);
+
+ /*
+ * differently from hwlat_detector, the osnoise tracer can run
+ * without a pause because preemption is on.
+ */
+ if (!interval) {
+ /* Let synchronize_rcu_tasks() make progress */
+ cond_resched_tasks_rcu_qs();
+ return;
+ }
+
+ wake_time = ktime_add_us(ktime_get(), interval);
+ __set_current_state(TASK_INTERRUPTIBLE);
+
+ while (schedule_hrtimeout_range(&wake_time, 0, HRTIMER_MODE_ABS)) {
+ if (kthread_should_stop())
+ break;
+ }
+}
+
+/*
* osnoise_main - The osnoise detection kernel thread
*
* Calls run_osnoise() function to measure the osnoise for the configured runtime,
@@ -1444,30 +1475,10 @@ static struct cpumask save_cpumask;
*/
static int osnoise_main(void *data)
{
- u64 interval;
while (!kthread_should_stop()) {
-
run_osnoise();
-
- mutex_lock(&interface_lock);
- interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
- mutex_unlock(&interface_lock);
-
- do_div(interval, USEC_PER_MSEC);
-
- /*
- * differently from hwlat_detector, the osnoise tracer can run
- * without a pause because preemption is on.
- */
- if (interval < 1) {
- /* Let synchronize_rcu_tasks() make progress */
- cond_resched_tasks_rcu_qs();
- continue;
- }
-
- if (msleep_interruptible(interval))
- break;
+ osnoise_sleep();
}
return 0;
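osnoise_sleep() converts the time left in the period into an absolute wake-up time and sleeps on an absolute hrtimer, instead of rounding the interval down to whole milliseconds for msleep_interruptible(). A rough userspace analogue of sleeping to an absolute monotonic deadline (plain POSIX, not the kernel interface):

#define _GNU_SOURCE
#include <stdint.h>
#include <time.h>

/* Sleep until "now + interval_us" on a monotonic clock.  Sleeping on an
 * absolute deadline keeps the period stable if the sleep is interrupted
 * and has to be restarted. */
static void sleep_until_us(uint64_t interval_us)
{
	struct timespec wake;

	clock_gettime(CLOCK_MONOTONIC, &wake);
	wake.tv_nsec += (long)(interval_us % 1000000) * 1000L;
	wake.tv_sec  += (time_t)(interval_us / 1000000) + wake.tv_nsec / 1000000000L;
	wake.tv_nsec %= 1000000000L;

	/* Retry if interrupted by a signal; the deadline stays the same. */
	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wake, NULL))
		;
}

int main(void)
{
	sleep_until_us(1500);	/* ~1.5 ms, finer than a millisecond-granular sleep */
	return 0;
}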
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 73d90179b51b..80863c6508e5 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -871,15 +871,15 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
switch (ptype) {
case PROBE_PRINT_NORMAL:
fmt = "(%lx)";
- arg = "REC->" FIELD_STRING_IP;
+ arg = ", REC->" FIELD_STRING_IP;
break;
case PROBE_PRINT_RETURN:
fmt = "(%lx <- %lx)";
- arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
+ arg = ", REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP;
break;
case PROBE_PRINT_EVENT:
- fmt = "(%u)";
- arg = "REC->" FIELD_STRING_TYPE;
+ fmt = "";
+ arg = "";
break;
default:
WARN_ON_ONCE(1);
@@ -903,7 +903,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len,
parg->type->fmt);
}
- pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg);
+ pos += snprintf(buf + pos, LEN_OR_ZERO, "\"%s", arg);
for (i = 0; i < tp->nr_args; i++) {
parg = tp->args + i;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 99e7a5df025e..92cc149af0fd 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -38,7 +38,6 @@
#define FIELD_STRING_IP "__probe_ip"
#define FIELD_STRING_RETIP "__probe_ret_ip"
#define FIELD_STRING_FUNC "__probe_func"
-#define FIELD_STRING_TYPE "__probe_type"
#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed) \
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index afd937a46496..abcadbe933bb 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -784,9 +784,7 @@ static struct fgraph_ops fgraph_ops __initdata = {
.retfunc = &trace_graph_return,
};
-#if defined(CONFIG_DYNAMIC_FTRACE) && \
- defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
-#define TEST_DIRECT_TRAMP
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
noinline __noclone static void trace_direct_tramp(void) { }
#endif
@@ -849,7 +847,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
goto out;
}
-#ifdef TEST_DIRECT_TRAMP
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 65b597431c86..06ea04d44685 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -350,7 +350,8 @@ bool is_ucounts_overlimit(struct ucounts *ucounts, enum ucount_type type, unsign
if (rlimit > LONG_MAX)
max = LONG_MAX;
for (iter = ucounts; iter; iter = iter->ns->ucounts) {
- if (get_ucounts_value(iter, type) > max)
+ long val = get_ucounts_value(iter, type);
+ if (val < 0 || val > max)
return true;
max = READ_ONCE(iter->ns->ucount_max[type]);
}
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 6b2e3ca7ee99..5481ba44a8d6 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -58,6 +58,18 @@ static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
cred->user_ns = user_ns;
}
+static unsigned long enforced_nproc_rlimit(void)
+{
+ unsigned long limit = RLIM_INFINITY;
+
+ /* Is RLIMIT_NPROC currently enforced? */
+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ||
+ (current_user_ns() != &init_user_ns))
+ limit = rlimit(RLIMIT_NPROC);
+
+ return limit;
+}
+
/*
* Create a new user namespace, deriving the creator from the user in the
* passed credentials, and replacing that user with the new root user for the
@@ -122,7 +134,7 @@ int create_user_ns(struct cred *new)
for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) {
ns->ucount_max[i] = INT_MAX;
}
- set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC));
+ set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_NPROC, enforced_nproc_rlimit());
set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
set_rlimit_ucount_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
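enforced_nproc_rlimit() leaves the new namespace's UCOUNT_RLIMIT_NPROC ceiling at RLIM_INFINITY when the creator is global root in the initial user namespace, since RLIMIT_NPROC is not enforced against that task anyway. A simplified userspace approximation of the decision that only looks at the effective uid (the kernel check additionally requires current_user_ns() == &init_user_ns):

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

/* Treat the limit as unenforced for root, otherwise take the caller's
 * RLIMIT_NPROC soft limit. */
static rlim_t effective_nproc_limit(void)
{
	struct rlimit rl;

	if (geteuid() == 0)
		return RLIM_INFINITY;

	if (getrlimit(RLIMIT_NPROC, &rl) != 0)
		return RLIM_INFINITY;

	return rl.rlim_cur;
}

int main(void)
{
	rlim_t lim = effective_nproc_limit();

	if (lim == RLIM_INFINITY)
		puts("RLIMIT_NPROC not enforced");
	else
		printf("RLIMIT_NPROC = %llu\n", (unsigned long long)lim);
	return 0;
}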
diff --git a/lib/Kconfig b/lib/Kconfig
index c80fde816a7e..9b5a692ce00c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -45,7 +45,6 @@ config BITREVERSE
config HAVE_ARCH_BITREVERSE
bool
default n
- depends on BITREVERSE
help
This option enables the use of hardware bit-reversal instructions on
architectures which support such operations.
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index 9364f79937b8..c71c09621c09 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -18,14 +18,14 @@
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- __blake2s_update(state, in, inlen, blake2s_compress);
+ __blake2s_update(state, in, inlen, false);
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- __blake2s_final(state, out, blake2s_compress);
+ __blake2s_final(state, out, false);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b0e0acdf96c1..6dd5330f7a99 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -414,6 +414,7 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
return 0;
buf->ops = &page_cache_pipe_buf_ops;
+ buf->flags = 0;
get_page(page);
buf->page = page;
buf->offset = offset;
@@ -577,6 +578,7 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
break;
buf->ops = &default_pipe_buf_ops;
+ buf->flags = 0;
buf->page = page;
buf->offset = 0;
buf->len = min_t(ssize_t, left, PAGE_SIZE);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 26a5c9007653..3b413f8c8a71 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -869,11 +869,14 @@ static void kmem_cache_invalid_free(struct kunit *test)
kmem_cache_destroy(cache);
}
+static void empty_cache_ctor(void *object) { }
+
static void kmem_cache_double_destroy(struct kunit *test)
{
struct kmem_cache *cache;
- cache = kmem_cache_create("test_cache", 200, 0, 0, NULL);
+ /* Provide a constructor to prevent cache merging. */
+ cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
kmem_cache_destroy(cache);
KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
index a7ac97c76762..db2abd9e415b 100644
--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ -171,6 +171,8 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args)
ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
pte = ptep_get(args->ptep);
WARN_ON(pte_young(pte));
+
+ ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
diff --git a/mm/gup.c b/mm/gup.c
index f0af462ac1e2..a9d4d724aef7 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -124,8 +124,8 @@ static inline struct page *try_get_compound_head(struct page *page, int refs)
* considered failure, and furthermore, a likely bug in the caller, so a warning
* is also emitted.
*/
-struct page *try_grab_compound_head(struct page *page,
- int refs, unsigned int flags)
+__maybe_unused struct page *try_grab_compound_head(struct page *page,
+ int refs, unsigned int flags)
{
if (flags & FOLL_GET)
return try_get_compound_head(page, refs);
@@ -208,10 +208,35 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
*/
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
- if (!(flags & (FOLL_GET | FOLL_PIN)))
- return true;
+ WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));
- return try_grab_compound_head(page, 1, flags);
+ if (flags & FOLL_GET)
+ return try_get_page(page);
+ else if (flags & FOLL_PIN) {
+ int refs = 1;
+
+ page = compound_head(page);
+
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
+
+ if (hpage_pincount_available(page))
+ hpage_pincount_add(page, 1);
+ else
+ refs = GUP_PIN_COUNTING_BIAS;
+
+ /*
+ * Similar to try_grab_compound_head(): even if using the
+ * hpage_pincount_add/_sub() routines, be sure to
+ * *also* increment the normal page refcount field at least
+ * once, so that the page really is pinned.
+ */
+ page_ref_add(page, refs);
+
+ mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
+ }
+
+ return true;
}
/**
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 61895cc01d09..f294db835f4b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4159,10 +4159,10 @@ static int __init hugepages_setup(char *s)
pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
return 0;
}
+ if (tmp >= nr_online_nodes)
+ goto invalid;
node = tmp;
p += count + 1;
- if (node < 0 || node >= nr_online_nodes)
- goto invalid;
/* Parse hugepages */
if (sscanf(p, "%lu%n", &tmp, &count) != 1)
goto invalid;
@@ -4851,14 +4851,13 @@ again:
}
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pte_t *src_pte)
+ unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
{
struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
- pte_t *dst_pte, pte;
spinlock_t *src_ptl, *dst_ptl;
+ pte_t pte;
- dst_pte = huge_pte_offset(mm, new_addr, huge_page_size(h));
dst_ptl = huge_pte_lock(h, mm, dst_pte);
src_ptl = huge_pte_lockptr(h, mm, src_pte);
@@ -4917,7 +4916,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
if (!dst_pte)
break;
- move_huge_pte(vma, old_addr, new_addr, src_pte);
+ move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
}
flush_tlb_range(vma, old_end - len, old_end);
mmu_notifier_invalidate_range_end(&range);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 5ad40e3add45..13128fa13062 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -47,7 +47,8 @@
static bool kfence_enabled __read_mostly;
-static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index a22b1af85577..50dbb815a2a8 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -268,13 +268,13 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
/*
* Especially for non-preemption kernels, ensure the allocation-gate
* timer can catch up: after @resched_after, every failed allocation
* attempt yields, to ensure the allocation-gate timer is scheduled.
*/
- resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
+ resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
do {
if (test_cache)
alloc = kmem_cache_alloc(test_cache, gfp);
@@ -608,7 +608,7 @@ static void test_gfpzero(struct kunit *test)
int i;
/* Skip if we think it'd take too long. */
- KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100);
+ KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
setup_test_cache(test, size, 0, NULL);
buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
@@ -739,7 +739,7 @@ static void test_memcache_alloc_bulk(struct kunit *test)
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
do {
void *objects[100];
int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 35f14d0a00a6..131492fd1148 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -16,6 +16,7 @@
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
+#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
@@ -1416,6 +1417,21 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
return 0;
}
+static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ spinlock_t *ptl;
+ pmd_t pmd;
+
+ mmap_assert_write_locked(mm);
+ ptl = pmd_lock(vma->vm_mm, pmdp);
+ pmd = pmdp_collapse_flush(vma, addr, pmdp);
+ spin_unlock(ptl);
+ mm_dec_nr_ptes(mm);
+ page_table_check_pte_clear_range(mm, addr, pmd);
+ pte_free(mm, pmd_pgtable(pmd));
+}
+
/**
* collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
* address haddr.
@@ -1433,7 +1449,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
struct vm_area_struct *vma = find_vma(mm, haddr);
struct page *hpage;
pte_t *start_pte, *pte;
- pmd_t *pmd, _pmd;
+ pmd_t *pmd;
spinlock_t *ptl;
int count = 0;
int i;
@@ -1509,12 +1525,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
}
/* step 4: collapse pmd */
- ptl = pmd_lock(vma->vm_mm, pmd);
- _pmd = pmdp_collapse_flush(vma, haddr, pmd);
- spin_unlock(ptl);
- mm_dec_nr_ptes(mm);
- pte_free(mm, pmd_pgtable(_pmd));
-
+ collapse_and_free_pmd(mm, vma, haddr, pmd);
drop_hpage:
unlock_page(hpage);
put_page(hpage);
@@ -1552,7 +1563,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long addr;
- pmd_t *pmd, _pmd;
+ pmd_t *pmd;
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1591,14 +1602,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* reverse order. Trylock is a way to avoid deadlock.
*/
if (mmap_write_trylock(mm)) {
- if (!khugepaged_test_exit(mm)) {
- spinlock_t *ptl = pmd_lock(mm, pmd);
- /* assume page table is clear */
- _pmd = pmdp_collapse_flush(vma, addr, pmd);
- spin_unlock(ptl);
- mm_dec_nr_ptes(mm);
- pte_free(mm, pmd_pgtable(_pmd));
- }
+ if (!khugepaged_test_exit(mm))
+ collapse_and_free_pmd(mm, vma, addr, pmd);
mmap_write_unlock(mm);
} else {
/* Try again later */
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index dc3758fdba68..7580baa76af1 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1410,7 +1410,8 @@ static void kmemleak_scan(void)
{
unsigned long flags;
struct kmemleak_object *object;
- int i;
+ struct zone *zone;
+ int __maybe_unused i;
int new_leaks = 0;
jiffies_last_scan = jiffies;
@@ -1450,9 +1451,9 @@ static void kmemleak_scan(void)
* Struct page scanning for each node.
*/
get_online_mems();
- for_each_online_node(i) {
- unsigned long start_pfn = node_start_pfn(i);
- unsigned long end_pfn = node_end_pfn(i);
+ for_each_populated_zone(zone) {
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone_end_pfn(zone);
unsigned long pfn;
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
@@ -1461,8 +1462,8 @@ static void kmemleak_scan(void)
if (!page)
continue;
- /* only scan pages belonging to this node */
- if (page_to_nid(page) != i)
+ /* only scan pages belonging to this zone */
+ if (page_zone(page) != zone)
continue;
/* only scan if page is in use */
if (page_count(page) == 0)
diff --git a/mm/madvise.c b/mm/madvise.c
index 5604064df464..38d0f515d548 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -65,7 +65,7 @@ static int madvise_need_mmap_write(int behavior)
}
#ifdef CONFIG_ANON_VMA_NAME
-static struct anon_vma_name *anon_vma_name_alloc(const char *name)
+struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
struct anon_vma_name *anon_name;
size_t count;
@@ -81,78 +81,48 @@ static struct anon_vma_name *anon_vma_name_alloc(const char *name)
return anon_name;
}
-static void vma_anon_name_free(struct kref *kref)
+void anon_vma_name_free(struct kref *kref)
{
struct anon_vma_name *anon_name =
container_of(kref, struct anon_vma_name, kref);
kfree(anon_name);
}
-static inline bool has_vma_anon_name(struct vm_area_struct *vma)
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
- return !vma->vm_file && vma->anon_name;
-}
-
-const char *vma_anon_name(struct vm_area_struct *vma)
-{
- if (!has_vma_anon_name(vma))
- return NULL;
-
mmap_assert_locked(vma->vm_mm);
- return vma->anon_name->name;
-}
-
-void dup_vma_anon_name(struct vm_area_struct *orig_vma,
- struct vm_area_struct *new_vma)
-{
- if (!has_vma_anon_name(orig_vma))
- return;
-
- kref_get(&orig_vma->anon_name->kref);
- new_vma->anon_name = orig_vma->anon_name;
-}
-
-void free_vma_anon_name(struct vm_area_struct *vma)
-{
- struct anon_vma_name *anon_name;
-
- if (!has_vma_anon_name(vma))
- return;
+ if (vma->vm_file)
+ return NULL;
- anon_name = vma->anon_name;
- vma->anon_name = NULL;
- kref_put(&anon_name->kref, vma_anon_name_free);
+ return vma->anon_name;
}
/* mmap_lock should be write-locked */
-static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
+static int replace_anon_vma_name(struct vm_area_struct *vma,
+ struct anon_vma_name *anon_name)
{
- const char *anon_name;
+ struct anon_vma_name *orig_name = anon_vma_name(vma);
- if (!name) {
- free_vma_anon_name(vma);
+ if (!anon_name) {
+ vma->anon_name = NULL;
+ anon_vma_name_put(orig_name);
return 0;
}
- anon_name = vma_anon_name(vma);
- if (anon_name) {
- /* Same name, nothing to do here */
- if (!strcmp(name, anon_name))
- return 0;
+ if (anon_vma_name_eq(orig_name, anon_name))
+ return 0;
- free_vma_anon_name(vma);
- }
- vma->anon_name = anon_vma_name_alloc(name);
- if (!vma->anon_name)
- return -ENOMEM;
+ vma->anon_name = anon_vma_name_reuse(anon_name);
+ anon_vma_name_put(orig_name);
return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
-static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
+static int replace_anon_vma_name(struct vm_area_struct *vma,
+ struct anon_vma_name *anon_name)
{
- if (name)
+ if (anon_name)
return -EINVAL;
return 0;
@@ -161,17 +131,19 @@ static int replace_vma_anon_name(struct vm_area_struct *vma, const char *name)
/*
* Update the vm_flags on region of a vma, splitting it or merging it as
* necessary. Must be called with mmap_sem held for writing;
+ * Caller should ensure anon_name stability by raising its refcount even when
+ * anon_name belongs to a valid vma because this function might free that vma.
*/
static int madvise_update_vma(struct vm_area_struct *vma,
struct vm_area_struct **prev, unsigned long start,
unsigned long end, unsigned long new_flags,
- const char *name)
+ struct anon_vma_name *anon_name)
{
struct mm_struct *mm = vma->vm_mm;
int error;
pgoff_t pgoff;
- if (new_flags == vma->vm_flags && is_same_vma_anon_name(vma, name)) {
+ if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
*prev = vma;
return 0;
}
@@ -179,7 +151,7 @@ static int madvise_update_vma(struct vm_area_struct *vma,
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
vma->vm_file, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, name);
+ vma->vm_userfaultfd_ctx, anon_name);
if (*prev) {
vma = *prev;
goto success;
@@ -209,7 +181,7 @@ success:
*/
vma->vm_flags = new_flags;
if (!vma->vm_file) {
- error = replace_vma_anon_name(vma, name);
+ error = replace_anon_vma_name(vma, anon_name);
if (error)
return error;
}
@@ -975,6 +947,7 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
unsigned long behavior)
{
int error;
+ struct anon_vma_name *anon_name;
unsigned long new_flags = vma->vm_flags;
switch (behavior) {
@@ -1040,8 +1013,11 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
break;
}
+ anon_name = anon_vma_name(vma);
+ anon_vma_name_get(anon_name);
error = madvise_update_vma(vma, prev, start, end, new_flags,
- vma_anon_name(vma));
+ anon_name);
+ anon_vma_name_put(anon_name);
out:
/*
@@ -1225,7 +1201,7 @@ int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
static int madvise_vma_anon_name(struct vm_area_struct *vma,
struct vm_area_struct **prev,
unsigned long start, unsigned long end,
- unsigned long name)
+ unsigned long anon_name)
{
int error;
@@ -1234,7 +1210,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma,
return -EBADF;
error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
- (const char *)name);
+ (struct anon_vma_name *)anon_name);
/*
* madvise() returns EAGAIN if kernel resources, such as
@@ -1246,7 +1222,7 @@ static int madvise_vma_anon_name(struct vm_area_struct *vma,
}
int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in, const char *name)
+ unsigned long len_in, struct anon_vma_name *anon_name)
{
unsigned long end;
unsigned long len;
@@ -1266,7 +1242,7 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
if (end == start)
return 0;
- return madvise_walk_vmas(mm, start, end, (unsigned long)name,
+ return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */
diff --git a/mm/memblock.c b/mm/memblock.c
index 1018e50566f3..b12a364f2766 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -366,14 +366,20 @@ void __init memblock_discard(void)
addr = __pa(memblock.reserved.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.reserved.max);
- memblock_free_late(addr, size);
+ if (memblock_reserved_in_slab)
+ kfree(memblock.reserved.regions);
+ else
+ memblock_free_late(addr, size);
}
if (memblock.memory.regions != memblock_memory_init_regions) {
addr = __pa(memblock.memory.regions);
size = PAGE_ALIGN(sizeof(struct memblock_region) *
memblock.memory.max);
- memblock_free_late(addr, size);
+ if (memblock_memory_in_slab)
+ kfree(memblock.memory.regions);
+ else
+ memblock_free_late(addr, size);
}
memblock_memory = NULL;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 09d342c7cbd0..36e9f38c919d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -254,7 +254,7 @@ struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
}
#ifdef CONFIG_MEMCG_KMEM
-extern spinlock_t css_set_lock;
+static DEFINE_SPINLOCK(objcg_lock);
bool mem_cgroup_kmem_disabled(void)
{
@@ -298,9 +298,9 @@ static void obj_cgroup_release(struct percpu_ref *ref)
if (nr_pages)
obj_cgroup_uncharge_pages(objcg, nr_pages);
- spin_lock_irqsave(&css_set_lock, flags);
+ spin_lock_irqsave(&objcg_lock, flags);
list_del(&objcg->list);
- spin_unlock_irqrestore(&css_set_lock, flags);
+ spin_unlock_irqrestore(&objcg_lock, flags);
percpu_ref_exit(ref);
kfree_rcu(objcg, rcu);
@@ -332,7 +332,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
- spin_lock_irq(&css_set_lock);
+ spin_lock_irq(&objcg_lock);
/* 1) Ready to reparent active objcg. */
list_add(&objcg->list, &memcg->objcg_list);
@@ -342,7 +342,7 @@ static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
/* 3) Move already reparented objcgs to the parent's list */
list_splice(&memcg->objcg_list, &parent->objcg_list);
- spin_unlock_irq(&css_set_lock);
+ spin_unlock_irq(&objcg_lock);
percpu_ref_kill(&objcg->refcnt);
}
diff --git a/mm/memfd.c b/mm/memfd.c
index 9f80f162791a..08f5f8304746 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -31,20 +31,28 @@
static void memfd_tag_pins(struct xa_state *xas)
{
struct page *page;
- unsigned int tagged = 0;
+ int latency = 0;
+ int cache_count;
lru_add_drain();
xas_lock_irq(xas);
xas_for_each(xas, page, ULONG_MAX) {
- if (xa_is_value(page))
- continue;
- page = find_subpage(page, xas->xa_index);
- if (page_count(page) - page_mapcount(page) > 1)
+ cache_count = 1;
+ if (!xa_is_value(page) &&
+ PageTransHuge(page) && !PageHuge(page))
+ cache_count = HPAGE_PMD_NR;
+
+ if (!xa_is_value(page) &&
+ page_count(page) - total_mapcount(page) != cache_count)
xas_set_mark(xas, MEMFD_TAG_PINNED);
+ if (cache_count != 1)
+ xas_set(xas, page->index + cache_count);
- if (++tagged % XA_CHECK_SCHED)
+ latency += cache_count;
+ if (latency < XA_CHECK_SCHED)
continue;
+ latency = 0;
xas_pause(xas);
xas_unlock_irq(xas);
@@ -73,7 +81,8 @@ static int memfd_wait_for_pins(struct address_space *mapping)
error = 0;
for (scan = 0; scan <= LAST_SCAN; scan++) {
- unsigned int tagged = 0;
+ int latency = 0;
+ int cache_count;
if (!xas_marked(&xas, MEMFD_TAG_PINNED))
break;
@@ -87,10 +96,14 @@ static int memfd_wait_for_pins(struct address_space *mapping)
xas_lock_irq(&xas);
xas_for_each_marked(&xas, page, ULONG_MAX, MEMFD_TAG_PINNED) {
bool clear = true;
- if (xa_is_value(page))
- continue;
- page = find_subpage(page, xas.xa_index);
- if (page_count(page) - page_mapcount(page) != 1) {
+
+ cache_count = 1;
+ if (!xa_is_value(page) &&
+ PageTransHuge(page) && !PageHuge(page))
+ cache_count = HPAGE_PMD_NR;
+
+ if (!xa_is_value(page) && cache_count !=
+ page_count(page) - total_mapcount(page)) {
/*
* On the last scan, we clean up all those tags
* we inserted; but make a note that we still
@@ -103,8 +116,11 @@ static int memfd_wait_for_pins(struct address_space *mapping)
}
if (clear)
xas_clear_mark(&xas, MEMFD_TAG_PINNED);
- if (++tagged % XA_CHECK_SCHED)
+
+ latency += cache_count;
+ if (latency < XA_CHECK_SCHED)
continue;
+ latency = 0;
xas_pause(&xas);
xas_unlock_irq(&xas);
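memfd_tag_pins() and memfd_wait_for_pins() now weight a transparent huge page as HPAGE_PMD_NR page-cache references and drop the xarray lock once the accumulated weight reaches XA_CHECK_SCHED, bounding lock hold time by work done rather than by entries visited. A generic userspace sketch of that weighted lock-batching pattern (the threshold and names are made up for illustration):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define CHECK_SCHED 64		/* arbitrary threshold standing in for XA_CHECK_SCHED */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Each item carries a weight: 1 for a small page, N for a compound one. */
static void process(const int *weights, int n)
{
	int latency = 0;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < n; i++) {
		/* ... examine item i under the lock ... */

		latency += weights[i];
		if (latency < CHECK_SCHED)
			continue;

		/* Bound the lock hold time: drop it, give others a chance, retake it. */
		latency = 0;
		pthread_mutex_unlock(&lock);
		sched_yield();
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int weights[] = { 1, 1, 512, 1, 512, 1 };	/* 512 ~ HPAGE_PMD_NR on x86-64 */

	process(weights, sizeof(weights) / sizeof(weights[0]));
	puts("done");
	return 0;
}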
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 028e8dd82b44..69284d3b5e53 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -814,7 +814,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff,
new_pol, vma->vm_userfaultfd_ctx,
- vma_anon_name(vma));
+ anon_vma_name(vma));
if (prev) {
vma = prev;
next = vma->vm_next;
diff --git a/mm/mlock.c b/mm/mlock.c
index 8f584eddd305..25934e7db3e1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -512,7 +512,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
vma->vm_file, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+ vma->vm_userfaultfd_ctx, anon_vma_name(vma));
if (*prev) {
vma = *prev;
goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index 1e8fdb0b51ed..f61a15474dd6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1031,7 +1031,7 @@ again:
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- const char *anon_name)
+ struct anon_vma_name *anon_name)
{
/*
* VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1049,7 +1049,7 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
return 0;
if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
return 0;
- if (!is_same_vma_anon_name(vma, anon_name))
+ if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
return 0;
return 1;
}
@@ -1084,7 +1084,7 @@ can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- const char *anon_name)
+ struct anon_vma_name *anon_name)
{
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
@@ -1106,7 +1106,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
struct anon_vma *anon_vma, struct file *file,
pgoff_t vm_pgoff,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- const char *anon_name)
+ struct anon_vma_name *anon_name)
{
if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
@@ -1167,7 +1167,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct anon_vma *anon_vma, struct file *file,
pgoff_t pgoff, struct mempolicy *policy,
struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
- const char *anon_name)
+ struct anon_vma_name *anon_name)
{
pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
struct vm_area_struct *area, *next;
@@ -3186,6 +3186,7 @@ void exit_mmap(struct mm_struct *mm)
vma = remove_vma(vma);
cond_resched();
}
+ mm->mmap = NULL;
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
}
@@ -3255,7 +3256,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
return NULL; /* should never get here */
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+ vma->vm_userfaultfd_ctx, anon_vma_name(vma));
if (new_vma) {
/*
* Source vma may have been merged into new_vma
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 0138dfcdb1d8..2887644fd150 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -94,7 +94,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
/* Also skip shared copy-on-write pages */
if (is_cow_mapping(vma->vm_flags) &&
- page_mapcount(page) != 1)
+ page_count(page) != 1)
continue;
/*
@@ -464,7 +464,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
*pprev = vma_merge(mm, *pprev, start, end, newflags,
vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
- vma->vm_userfaultfd_ctx, vma_anon_name(vma));
+ vma->vm_userfaultfd_ctx, anon_vma_name(vma));
if (*pprev) {
vma = *pprev;
VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 6a0ddda6b3c5..f67c4c70f17f 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -115,7 +115,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
* onlining - just onlined memory won't immediately be considered for
* allocation.
*/
- if (!isolated_page && PageBuddy(page)) {
+ if (!isolated_page) {
nr_pages = move_freepages_block(zone, page, migratetype, NULL);
__mod_zone_freepage_state(zone, nr_pages, migratetype);
}
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 7504e7caa2a1..3763bd077861 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -86,8 +86,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
{
struct page_ext *page_ext;
struct page *page;
+ unsigned long i;
bool anon;
- int i;
if (!pfn_valid(pfn))
return;
@@ -121,8 +121,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
{
struct page_ext *page_ext;
struct page *page;
+ unsigned long i;
bool anon;
- int i;
if (!pfn_valid(pfn))
return;
@@ -152,10 +152,10 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
void __page_table_check_zero(struct page *page, unsigned int order)
{
struct page_ext *page_ext = lookup_page_ext(page);
- int i;
+ unsigned long i;
BUG_ON(!page_ext);
- for (i = 0; i < (1 << order); i++) {
+ for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
BUG_ON(atomic_read(&ptc->anon_map_count));
@@ -206,17 +206,10 @@ EXPORT_SYMBOL(__page_table_check_pud_clear);
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- pte_t old_pte;
-
if (&init_mm == mm)
return;
- old_pte = *ptep;
- if (pte_user_accessible_page(old_pte)) {
- page_table_check_clear(mm, addr, pte_pfn(old_pte),
- PAGE_SIZE >> PAGE_SHIFT);
- }
-
+ __page_table_check_pte_clear(mm, addr, *ptep);
if (pte_user_accessible_page(pte)) {
page_table_check_set(mm, addr, pte_pfn(pte),
PAGE_SIZE >> PAGE_SHIFT,
@@ -228,17 +221,10 @@ EXPORT_SYMBOL(__page_table_check_pte_set);
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- pmd_t old_pmd;
-
if (&init_mm == mm)
return;
- old_pmd = *pmdp;
- if (pmd_user_accessible_page(old_pmd)) {
- page_table_check_clear(mm, addr, pmd_pfn(old_pmd),
- PMD_PAGE_SIZE >> PAGE_SHIFT);
- }
-
+ __page_table_check_pmd_clear(mm, addr, *pmdp);
if (pmd_user_accessible_page(pmd)) {
page_table_check_set(mm, addr, pmd_pfn(pmd),
PMD_PAGE_SIZE >> PAGE_SHIFT,
@@ -250,17 +236,10 @@ EXPORT_SYMBOL(__page_table_check_pmd_set);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- pud_t old_pud;
-
if (&init_mm == mm)
return;
- old_pud = *pudp;
- if (pud_user_accessible_page(old_pud)) {
- page_table_check_clear(mm, addr, pud_pfn(old_pud),
- PUD_PAGE_SIZE >> PAGE_SHIFT);
- }
-
+ __page_table_check_pud_clear(mm, addr, *pudp);
if (pud_user_accessible_page(pud)) {
page_table_check_set(mm, addr, pud_pfn(pud),
PUD_PAGE_SIZE >> PAGE_SHIFT,
@@ -268,3 +247,23 @@ void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
}
}
EXPORT_SYMBOL(__page_table_check_pud_set);
+
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (&init_mm == mm)
+ return;
+
+ if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
+ pte_t *ptep = pte_offset_map(&pmd, addr);
+ unsigned long i;
+
+ pte_unmap(ptep);
+ for (i = 0; i < PTRS_PER_PTE; i++) {
+ __page_table_check_pte_clear(mm, addr, *ptep);
+ addr += PAGE_SIZE;
+ ptep++;
+ }
+ }
+}
diff --git a/mm/util.c b/mm/util.c
index 7e43369064c8..d3102081add0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -587,8 +587,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
return ret;
/* Don't even allow crazy sizes */
- if (WARN_ON_ONCE(size > INT_MAX))
+ if (unlikely(size > INT_MAX)) {
+ WARN_ON_ONCE(!(flags & __GFP_NOWARN));
return NULL;
+ }
return __vmalloc_node(size, 1, flags, node,
__builtin_return_address(0));
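kvmalloc_node() still refuses requests above INT_MAX but now warns only when the caller did not pass __GFP_NOWARN. A tiny userspace sketch of that opt-out-of-warnings guard (MY_NOWARN and checked_alloc are invented names):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MY_NOWARN 0x1	/* stand-in for __GFP_NOWARN */

/* Refuse absurd sizes; only complain if the caller did not opt out of warnings. */
static void *checked_alloc(size_t size, unsigned int flags)
{
	if (size > INT_MAX) {
		if (!(flags & MY_NOWARN))
			fprintf(stderr, "checked_alloc: rejecting %zu bytes\n", size);
		return NULL;
	}
	return malloc(size);
}

int main(void)
{
	void *p = checked_alloc((size_t)INT_MAX + 1, MY_NOWARN);	/* silently refused */

	free(checked_alloc(64, 0));
	return p == NULL ? 0 : 1;
}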
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 090bfb605ecf..59b14e0d696c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1066,8 +1066,10 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
* forward progress (e.g. journalling workqueues or kthreads).
*/
if (!current_is_kswapd() &&
- current->flags & (PF_IO_WORKER|PF_KTHREAD))
+ current->flags & (PF_IO_WORKER|PF_KTHREAD)) {
+ cond_resched();
return;
+ }
/*
* These figures are pulled out of thin air.
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 1a705a4ef7fa..5eaf38875554 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -129,6 +129,7 @@ void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
+void vlan_dev_free_egress_priority(const struct net_device *dev);
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result,
size_t size);
@@ -139,7 +140,6 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
-void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 26d031a43cc1..d1902828a18a 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -622,7 +622,7 @@ static int vlan_dev_init(struct net_device *dev)
}
/* Note: this function might be called multiple times for the same device. */
-void vlan_dev_uninit(struct net_device *dev)
+void vlan_dev_free_egress_priority(const struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -636,6 +636,16 @@ void vlan_dev_uninit(struct net_device *dev)
}
}
+static void vlan_dev_uninit(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ vlan_dev_free_egress_priority(dev);
+
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put_track(vlan->real_dev, &vlan->dev_tracker);
+}
+
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -846,9 +856,6 @@ static void vlan_dev_free(struct net_device *dev)
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
-
- /* Get rid of the vlan's reference to real_dev */
- dev_put_track(vlan->real_dev, &vlan->dev_tracker);
}
void vlan_setup(struct net_device *dev)
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 0db85aeb119b..53b1955b027f 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -183,10 +183,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
- if (!err)
- err = register_vlan_dev(dev, extack);
if (err)
- vlan_dev_uninit(dev);
+ return err;
+ err = register_vlan_dev(dev, extack);
+ if (err)
+ vlan_dev_free_egress_priority(dev);
return err;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 02f43f3e2c56..d53cbb4e2503 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -77,6 +77,7 @@ static void ax25_kill_by_device(struct net_device *dev)
{
ax25_dev *ax25_dev;
ax25_cb *s;
+ struct sock *sk;
if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
return;
@@ -85,13 +86,17 @@ static void ax25_kill_by_device(struct net_device *dev)
again:
ax25_for_each(s, &ax25_list) {
if (s->ax25_dev == ax25_dev) {
+ sk = s->sk;
+ sock_hold(sk);
spin_unlock_bh(&ax25_list_lock);
- lock_sock(s->sk);
+ lock_sock(sk);
s->ax25_dev = NULL;
- release_sock(s->sk);
+ dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
+ ax25_dev_put(ax25_dev);
ax25_disconnect(s, ENETUNREACH);
+ release_sock(sk);
spin_lock_bh(&ax25_list_lock);
-
+ sock_put(sk);
/* The entry could have been deleted from the
* list meanwhile and thus the next pointer is
* no longer valid. Play it safe and restart
@@ -355,21 +360,25 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
return -EFAULT;
- if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
- return -ENODEV;
-
if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
return -EINVAL;
if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
return -EINVAL;
+ ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr);
+ if (!ax25_dev)
+ return -ENODEV;
+
digi.ndigi = ax25_ctl.digi_count;
for (k = 0; k < digi.ndigi; k++)
digi.calls[k] = ax25_ctl.digi_addr[k];
- if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
+ ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev);
+ if (!ax25) {
+ ax25_dev_put(ax25_dev);
return -ENOTCONN;
+ }
switch (ax25_ctl.cmd) {
case AX25_KILL:
@@ -436,6 +445,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
}
out_put:
+ ax25_dev_put(ax25_dev);
ax25_cb_put(ax25);
return ret;
@@ -1107,8 +1117,10 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
}
- if (ax25_dev != NULL)
+ if (ax25_dev) {
ax25_fillin_cb(ax25, ax25_dev);
+ dev_hold_track(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
+ }
done:
ax25_cb_add(ax25);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 256fadb94df3..d2a244e1c260 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -37,6 +37,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
res = ax25_dev;
+ ax25_dev_hold(ax25_dev);
}
spin_unlock_bh(&ax25_dev_lock);
@@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev)
return;
}
+ refcount_set(&ax25_dev->refcount, 1);
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
@@ -84,6 +86,7 @@ void ax25_dev_device_up(struct net_device *dev)
ax25_dev->next = ax25_dev_list;
ax25_dev_list = ax25_dev;
spin_unlock_bh(&ax25_dev_lock);
+ ax25_dev_hold(ax25_dev);
ax25_register_dev_sysctl(ax25_dev);
}
@@ -113,9 +116,10 @@ void ax25_dev_device_down(struct net_device *dev)
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
+ ax25_dev_put(ax25_dev);
dev->ax25_ptr = NULL;
dev_put_track(dev, &ax25_dev->dev_tracker);
- kfree(ax25_dev);
+ ax25_dev_put(ax25_dev);
return;
}
@@ -123,9 +127,10 @@ void ax25_dev_device_down(struct net_device *dev)
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
+ ax25_dev_put(ax25_dev);
dev->ax25_ptr = NULL;
dev_put_track(dev, &ax25_dev->dev_tracker);
- kfree(ax25_dev);
+ ax25_dev_put(ax25_dev);
return;
}
@@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev)
}
spin_unlock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;
+ ax25_dev_put(ax25_dev);
}
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
@@ -144,20 +150,32 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
switch (cmd) {
case SIOCAX25ADDFWD:
- if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL)
+ fwd_dev = ax25_addr_ax25dev(&fwd->port_to);
+ if (!fwd_dev) {
+ ax25_dev_put(ax25_dev);
return -EINVAL;
- if (ax25_dev->forward != NULL)
+ }
+ if (ax25_dev->forward) {
+ ax25_dev_put(fwd_dev);
+ ax25_dev_put(ax25_dev);
return -EINVAL;
+ }
ax25_dev->forward = fwd_dev->dev;
+ ax25_dev_put(fwd_dev);
+ ax25_dev_put(ax25_dev);
break;
case SIOCAX25DELFWD:
- if (ax25_dev->forward == NULL)
+ if (!ax25_dev->forward) {
+ ax25_dev_put(ax25_dev);
return -EINVAL;
+ }
ax25_dev->forward = NULL;
+ ax25_dev_put(ax25_dev);
break;
default:
+ ax25_dev_put(ax25_dev);
return -EINVAL;
}
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index d0b2e094bd55..9751207f7757 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -75,11 +75,13 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
ax25_dev *ax25_dev;
int i;
- if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
- return -EINVAL;
if (route->digi_count > AX25_MAX_DIGIS)
return -EINVAL;
+ ax25_dev = ax25_addr_ax25dev(&route->port_addr);
+ if (!ax25_dev)
+ return -EINVAL;
+
write_lock_bh(&ax25_route_lock);
ax25_rt = ax25_route_list;
@@ -91,6 +93,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
if (route->digi_count != 0) {
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
+ ax25_dev_put(ax25_dev);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
@@ -101,6 +104,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
}
}
write_unlock_bh(&ax25_route_lock);
+ ax25_dev_put(ax25_dev);
return 0;
}
ax25_rt = ax25_rt->next;
@@ -108,6 +112,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
+ ax25_dev_put(ax25_dev);
return -ENOMEM;
}
@@ -120,6 +125,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
write_unlock_bh(&ax25_route_lock);
kfree(ax25_rt);
+ ax25_dev_put(ax25_dev);
return -ENOMEM;
}
ax25_rt->digipeat->lastrepeat = -1;
@@ -132,6 +138,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
ax25_rt->next = ax25_route_list;
ax25_route_list = ax25_rt;
write_unlock_bh(&ax25_route_lock);
+ ax25_dev_put(ax25_dev);
return 0;
}
@@ -173,6 +180,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route)
}
}
write_unlock_bh(&ax25_route_lock);
+ ax25_dev_put(ax25_dev);
return 0;
}
@@ -215,6 +223,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option)
out:
write_unlock_bh(&ax25_route_lock);
+ ax25_dev_put(ax25_dev);
return err;
}
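Throughout the ax25 changes, ax25_addr_ax25dev() now returns the device with a reference already held, and every caller, including each early error return, balances it with ax25_dev_put(). A compact userspace sketch of the lookup-returns-a-held-reference idiom using C11 atomics (the obj/obj_lookup names are invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj {
	atomic_int refcount;
	char name[16];
};

static void obj_put(struct obj *o)
{
	if (o && atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);	/* last reference gone */
}

static struct obj *table[4];

/* Lookup returns with a reference held; the caller owns exactly one put(). */
static struct obj *obj_lookup(const char *name)
{
	for (int i = 0; i < 4; i++) {
		struct obj *o = table[i];

		if (o && strcmp(o->name, name) == 0) {
			atomic_fetch_add(&o->refcount, 1);
			return o;
		}
	}
	return NULL;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcount, 1);	/* the table's own reference */
	strcpy(o->name, "ax0");
	table[0] = o;

	struct obj *found = obj_lookup("ax0");
	if (!found)
		return 1;		/* nothing held, nothing to put */
	printf("found %s\n", found->name);
	obj_put(found);			/* drop the lookup reference on every path */

	table[0] = NULL;
	obj_put(o);			/* drop the table's reference */
	return 0;
}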
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 8a2b78f9c4b2..35fadb924849 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -149,22 +149,25 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
struct net *net = dev_net(net_dev);
struct net_device *parent_dev;
struct net *parent_net;
+ int iflink;
bool ret;
/* check if this is a batman-adv mesh interface */
if (batadv_softif_is_valid(net_dev))
return true;
- /* no more parents..stop recursion */
- if (dev_get_iflink(net_dev) == 0 ||
- dev_get_iflink(net_dev) == net_dev->ifindex)
+ iflink = dev_get_iflink(net_dev);
+ if (iflink == 0)
return false;
parent_net = batadv_getlink_net(net_dev, net);
+ /* iflink to itself, most likely physical device */
+ if (net == parent_net && iflink == net_dev->ifindex)
+ return false;
+
/* recurse over the parent device */
- parent_dev = __dev_get_by_index((struct net *)parent_net,
- dev_get_iflink(net_dev));
+ parent_dev = __dev_get_by_index((struct net *)parent_net, iflink);
/* if we got a NULL parent_dev there is something broken.. */
if (!parent_dev) {
pr_err("Cannot find parent device\n");
@@ -214,14 +217,15 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
struct net_device *real_netdev = NULL;
struct net *real_net;
struct net *net;
- int ifindex;
+ int iflink;
ASSERT_RTNL();
if (!netdev)
return NULL;
- if (netdev->ifindex == dev_get_iflink(netdev)) {
+ iflink = dev_get_iflink(netdev);
+ if (iflink == 0) {
dev_hold(netdev);
return netdev;
}
@@ -231,9 +235,16 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev)
goto out;
net = dev_net(hard_iface->soft_iface);
- ifindex = dev_get_iflink(netdev);
real_net = batadv_getlink_net(netdev, net);
- real_netdev = dev_get_by_index(real_net, ifindex);
+
+ /* iflink to itself, most likely physical device */
+ if (net == real_net && netdev->ifindex == iflink) {
+ real_netdev = netdev;
+ dev_hold(real_netdev);
+ goto out;
+ }
+
+ real_netdev = dev_get_by_index(real_net, iflink);
out:
batadv_hardif_put(hard_iface);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2b7bd3655b07..2882bc7d79d7 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2738,6 +2738,7 @@ void hci_release_dev(struct hci_dev *hdev)
hci_dev_unlock(hdev);
ida_simple_remove(&hci_index_ida, hdev->id);
+ kfree_skb(hdev->sent_cmd);
kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 0feb68f12545..9ba2a1a7d481 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -1841,6 +1841,7 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
struct bdaddr_list *b, *t;
u8 num_entries = 0;
bool pend_conn, pend_report;
+ u8 filter_policy;
int err;
/* Pause advertising if resolving list can be used as controllers are
@@ -1927,6 +1928,8 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
err = -EINVAL;
done:
+ filter_policy = err ? 0x00 : 0x01;
+
/* Enable address resolution when LL Privacy is enabled. */
err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
if (err)
@@ -1937,7 +1940,7 @@ done:
hci_resume_advertising_sync(hdev);
/* Select filter policy to use accept list */
- return err ? 0x00 : 0x01;
+ return filter_policy;
}
/* Returns true if an le connection is in the scanning state */
@@ -3262,10 +3265,10 @@ static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
events[0] |= 0x40; /* LE Data Length Change */
- /* If the controller supports LL Privacy feature, enable
- * the corresponding event.
+ /* If the controller supports LL Privacy feature or LE Extended Adv,
+ * enable the corresponding event.
*/
- if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
+ if (use_enhanced_conn_complete(hdev))
events[1] |= 0x02; /* LE Enhanced Connection Complete */
/* If the controller supports Extended Scanner Filter
@@ -4106,9 +4109,9 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hci_inquiry_cache_flush(hdev);
hci_pend_le_actions_clear(hdev);
hci_conn_hash_flush(hdev);
- hci_dev_unlock(hdev);
-
+ /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
smp_unregister(hdev);
+ hci_dev_unlock(hdev);
hci_sock_dev_event(hdev, HCI_DEV_DOWN);
@@ -5185,7 +5188,7 @@ int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
plen, data,
HCI_EV_LE_ENHANCED_CONN_COMPLETE,
- HCI_CMD_TIMEOUT, NULL);
+ conn->conn_timeout, NULL);
}
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
@@ -5270,9 +5273,18 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
cp.min_ce_len = cpu_to_le16(0x0000);
cp.max_ce_len = cpu_to_le16(0x0000);
+ /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
+ *
+ * If this event is unmasked and the HCI_LE_Connection_Complete event
+ * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
+ * sent when a new connection has been created.
+ */
err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
- sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE,
- HCI_CMD_TIMEOUT, NULL);
+ sizeof(cp), &cp,
+ use_enhanced_conn_complete(hdev) ?
+ HCI_EV_LE_ENHANCED_CONN_COMPLETE :
+ HCI_EV_LE_CONN_COMPLETE,
+ conn->conn_timeout, NULL);
done:
/* Re-enable advertising after the connection attempt is finished. */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 37087cf7dc5a..533cf60673a3 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1218,7 +1218,13 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
- struct mgmt_mode *cp = cmd->param;
+ struct mgmt_mode *cp;
+
+ /* Make sure cmd still outstanding. */
+ if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
+ return;
+
+ cp = cmd->param;
bt_dev_dbg(hdev, "err %d", err);
@@ -1242,7 +1248,7 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
mgmt_status(err));
}
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
}
static int set_powered_sync(struct hci_dev *hdev, void *data)
@@ -1281,7 +1287,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1290,6 +1296,9 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
mgmt_set_powered_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
failed:
hci_dev_unlock(hdev);
return err;
@@ -1383,6 +1392,10 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "err %d", err);
+ /* Make sure cmd still outstanding. */
+ if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+ return;
+
hci_dev_lock(hdev);
if (err) {
@@ -1402,7 +1415,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
hci_dev_unlock(hdev);
}
@@ -1511,7 +1524,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1538,6 +1551,9 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
mgmt_set_discoverable_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
failed:
hci_dev_unlock(hdev);
return err;
@@ -1550,6 +1566,10 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "err %d", err);
+ /* Make sure cmd still outstanding. */
+ if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+ return;
+
hci_dev_lock(hdev);
if (err) {
@@ -1562,7 +1582,9 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
new_settings(hdev, cmd->sk);
done:
- mgmt_pending_free(cmd);
+ if (cmd)
+ mgmt_pending_remove(cmd);
+
hci_dev_unlock(hdev);
}
@@ -1634,7 +1656,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1654,6 +1676,9 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
mgmt_set_connectable_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
failed:
hci_dev_unlock(hdev);
return err;
@@ -1774,6 +1799,10 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
u8 enable = cp->val;
bool changed;
+ /* Make sure cmd still outstanding. */
+ if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
+ return;
+
if (err) {
u8 mgmt_err = mgmt_status(err);
@@ -3321,6 +3350,9 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
bt_dev_dbg(hdev, "err %d", err);
+ if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
+ return;
+
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
status);
@@ -3493,6 +3525,9 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
struct sk_buff *skb = cmd->skb;
u8 status = mgmt_status(err);
+ if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
+ return;
+
if (!status) {
if (!skb)
status = MGMT_STATUS_FAILED;
@@ -3759,13 +3794,6 @@ static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
- if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id,
- MGMT_OP_SET_WIDEBAND_SPEECH,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
if (hdev_is_powered(hdev) &&
!!cp->val != hci_dev_test_flag(hdev,
HCI_WIDEBAND_SPEECH_ENABLED)) {
@@ -5036,12 +5064,6 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
if (!cmd)
err = -ENOMEM;
@@ -5261,11 +5283,16 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
+ if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
+ cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
+ cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
+ return;
+
bt_dev_dbg(hdev, "err %d", err);
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
cmd->param, 1);
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
DISCOVERY_FINDING);
@@ -5327,7 +5354,7 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
else
hdev->discovery.limited = false;
- cmd = mgmt_pending_new(sk, op, hdev, data, len);
+ cmd = mgmt_pending_add(sk, op, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -5336,7 +5363,7 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
start_discovery_complete);
if (err < 0) {
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
goto failed;
}
@@ -5430,7 +5457,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
+ cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
hdev, data, len);
if (!cmd) {
err = -ENOMEM;
@@ -5463,7 +5490,7 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
start_discovery_complete);
if (err < 0) {
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
goto failed;
}
@@ -5495,11 +5522,14 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_pending_cmd *cmd = data;
+ if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
+ return;
+
bt_dev_dbg(hdev, "err %d", err);
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
cmd->param, 1);
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
if (!err)
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
@@ -5535,7 +5565,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
+ cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
@@ -5544,7 +5574,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
stop_discovery_complete);
if (err < 0) {
- mgmt_pending_free(cmd);
+ mgmt_pending_remove(cmd);
goto unlock;
}
@@ -7474,6 +7504,9 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
u8 status = mgmt_status(err);
u16 eir_len;
+ if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
+ return;
+
if (!status) {
if (!skb)
status = MGMT_STATUS_FAILED;
@@ -7969,11 +8002,7 @@ static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
static bool adv_busy(struct hci_dev *hdev)
{
- return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
- pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
- pending_find(MGMT_OP_SET_LE, hdev) ||
- pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
- pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
+ return pending_find(MGMT_OP_SET_LE, hdev);
}
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
@@ -8563,9 +8592,7 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
- pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
- pending_find(MGMT_OP_SET_LE, hdev)) {
+ if (pending_find(MGMT_OP_SET_LE, hdev)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
MGMT_STATUS_BUSY);
goto unlock;
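The mgmt.c hunks above move several commands from mgmt_pending_new()/mgmt_pending_free() to mgmt_pending_add()/mgmt_pending_remove() and make each completion callback check that its command is still the one tracked for that opcode before touching it, so a command cancelled in the meantime is not completed (or freed) twice. A minimal userspace sketch of the "make sure cmd is still outstanding" shape, assuming a one-command-per-opcode table that only stands in for the mgmt pending list:

#include <stdio.h>
#include <stdlib.h>

#define OP_MAX 4

struct pending_cmd {
	int opcode;
	int param;
};

/* One outstanding command per opcode (illustrative only). */
static struct pending_cmd *pending[OP_MAX];

static struct pending_cmd *pending_add(int opcode, int param)
{
	struct pending_cmd *cmd = malloc(sizeof(*cmd));

	if (!cmd)
		return NULL;
	cmd->opcode = opcode;
	cmd->param = param;
	pending[opcode] = cmd;
	return cmd;
}

static struct pending_cmd *pending_find(int opcode)
{
	return pending[opcode];
}

static void pending_remove(int opcode)
{
	free(pending[opcode]);
	pending[opcode] = NULL;
}

static void complete_cmd(int opcode, struct pending_cmd *cmd, int err)
{
	/* Make sure cmd is still outstanding before dereferencing it. */
	if (cmd != pending_find(opcode))
		return;

	printf("opcode %d completed, param %d, err %d\n",
	       opcode, cmd->param, err);
	pending_remove(opcode);
}

int main(void)
{
	struct pending_cmd *cmd = pending_add(1, 42);

	complete_cmd(1, cmd, 0);	/* runs: cmd is still pending */
	complete_cmd(1, cmd, 0);	/* no-op: already removed */
	return 0;
}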
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index edee60bbc7b4..37eef2ce55ae 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -77,11 +77,12 @@ int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
{
struct hci_dev *hdev;
struct mgmt_hdr *hdr;
- int len = skb->len;
+ int len;
if (!skb)
return -EINVAL;
+ len = skb->len;
hdev = bt_cb(skb)->mgmt.hdev;
/* Time stamp */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index de2409889489..db4f2641d1cd 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -82,6 +82,9 @@ static void br_multicast_find_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
+static int br_mc_disabled_update(struct net_device *dev, bool value,
+ struct netlink_ext_ack *extack);
+
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
struct net_bridge_port_group_sg_key *sg_p)
@@ -1156,6 +1159,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
return mp;
if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+ br_mc_disabled_update(br->dev, false, NULL);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
return ERR_PTR(-E2BIG);
}
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index eba0efe64d05..fbf858ddec35 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -49,7 +49,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net,
{
struct sk_buff *nskb;
- nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook);
+ nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, NULL, hook);
if (!nskb)
return;
@@ -65,7 +65,7 @@ static void nft_reject_br_send_v4_unreach(struct net *net,
{
struct sk_buff *nskb;
- nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code);
+ nskb = nf_reject_skb_v4_unreach(net, oldskb, NULL, hook, code);
if (!nskb)
return;
@@ -81,7 +81,7 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net,
{
struct sk_buff *nskb;
- nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook);
+ nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, NULL, hook);
if (!nskb)
return;
@@ -98,7 +98,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
{
struct sk_buff *nskb;
- nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code);
+ nskb = nf_reject_skb_v6_unreach(net, oldskb, NULL, hook, code);
if (!nskb)
return;
diff --git a/net/can/isotp.c b/net/can/isotp.c
index 02cbcb2ecf0d..d2a430b6a13b 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -56,6 +56,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/wait.h>
#include <linux/uio.h>
@@ -145,6 +146,7 @@ struct isotp_sock {
struct tpcon rx, tx;
struct list_head notifier;
wait_queue_head_t wait;
+ spinlock_t rx_lock; /* protect single thread state machine */
};
static LIST_HEAD(isotp_notifier_list);
@@ -615,11 +617,17 @@ static void isotp_rcv(struct sk_buff *skb, void *data)
n_pci_type = cf->data[ae] & 0xF0;
+ /* Make sure the state changes and data structures stay consistent at
+ * CAN frame reception time. This locking is not needed in real world
+ * use cases but the inconsistency can be triggered with syzkaller.
+ */
+ spin_lock(&so->rx_lock);
+
if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
/* check rx/tx path half duplex expectations */
if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
(so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
- return;
+ goto out_unlock;
}
switch (n_pci_type) {
@@ -668,6 +676,9 @@ static void isotp_rcv(struct sk_buff *skb, void *data)
isotp_rcv_cf(sk, cf, ae, skb);
break;
}
+
+out_unlock:
+ spin_unlock(&so->rx_lock);
}
static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
@@ -876,7 +887,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (!size || size > MAX_MSG_LENGTH) {
err = -EINVAL;
- goto err_out;
+ goto err_out_drop;
}
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
@@ -886,24 +897,24 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
err = -EINVAL;
- goto err_out;
+ goto err_out_drop;
}
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
- goto err_out;
+ goto err_out_drop;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev) {
err = -ENXIO;
- goto err_out;
+ goto err_out_drop;
}
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
dev_put(dev);
- goto err_out;
+ goto err_out_drop;
}
can_skb_reserve(skb);
@@ -965,7 +976,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
- goto err_out;
+ goto err_out_drop;
}
if (wait_tx_done) {
@@ -978,6 +989,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return size;
+err_out_drop:
+ /* drop this PDU and unlock a potential wait queue */
+ old_state = ISOTP_IDLE;
err_out:
so->tx.state = old_state;
if (so->tx.state == ISOTP_IDLE)
@@ -1444,6 +1458,7 @@ static int isotp_init(struct sock *sk)
so->txtimer.function = isotp_tx_timer_handler;
init_waitqueue_head(&so->wait);
+ spin_lock_init(&so->rx_lock);
spin_lock(&isotp_notifier_lock);
list_add_tail(&so->notifier, &isotp_notifier_list);
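The isotp hunks above add so->rx_lock so that isotp_rcv() performs its state checks and transitions atomically per received frame, bailing out through a single unlock label, and add err_out_drop so a failed send resets the TX state instead of leaving a waiter stuck. A minimal userspace sketch of the lock-protected receive state machine, using a pthread spinlock and illustrative state names in place of the isotp internals:

#include <pthread.h>
#include <stdio.h>

static enum { IDLE, WAIT_DATA } rx_state = IDLE;
static pthread_spinlock_t rx_lock;

static void rcv_frame(int is_flow_control)
{
	pthread_spin_lock(&rx_lock);

	/* Half-duplex style check and state change done under one lock. */
	if (rx_state != IDLE && is_flow_control)
		goto out_unlock;

	rx_state = WAIT_DATA;

out_unlock:
	pthread_spin_unlock(&rx_lock);
}

int main(void)
{
	pthread_spin_init(&rx_lock, PTHREAD_PROCESS_PRIVATE);
	rcv_frame(0);
	printf("rx_state=%d\n", rx_state);
	pthread_spin_destroy(&rx_lock);
	return 0;
}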
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index a271688780a2..307ee1174a6e 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -2006,7 +2006,7 @@ struct j1939_session *j1939_tp_send(struct j1939_priv *priv,
/* set the end-packet for broadcast */
session->pkt.last = session->pkt.total;
- skcb->tskey = session->sk->sk_tskey++;
+ skcb->tskey = atomic_inc_return(&session->sk->sk_tskey) - 1;
session->tskey = skcb->tskey;
return session;
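The j1939 hunk above (and the related ip_output.c and sock.c hunks later in this patch) turns the sk_tskey update into an atomic fetch-and-add: atomic_inc_return(&key) - 1 hands out the pre-increment value, which is exactly what C11's atomic_fetch_add() returns. A minimal sketch in standard C atomics, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint tskey;

static unsigned int next_tskey(void)
{
	/* Returns the pre-increment value, like atomic_inc_return() - 1. */
	return atomic_fetch_add(&tskey, 1);
}

int main(void)
{
	printf("%u\n", next_tskey());	/* 0 */
	printf("%u\n", next_tskey());	/* 1 */
	printf("%u\n", next_tskey());	/* 2 */
	return 0;
}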
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index ecc400a0b7bb..4c6441536d55 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -246,6 +246,7 @@ enum {
Opt_cephx_sign_messages,
Opt_tcp_nodelay,
Opt_abort_on_full,
+ Opt_rxbounce,
};
enum {
@@ -295,6 +296,7 @@ static const struct fs_parameter_spec ceph_parameters[] = {
fsparam_u32 ("osdkeepalive", Opt_osdkeepalivetimeout),
fsparam_enum ("read_from_replica", Opt_read_from_replica,
ceph_param_read_from_replica),
+ fsparam_flag ("rxbounce", Opt_rxbounce),
fsparam_enum ("ms_mode", Opt_ms_mode,
ceph_param_ms_mode),
fsparam_string ("secret", Opt_secret),
@@ -584,6 +586,9 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
case Opt_abort_on_full:
opt->flags |= CEPH_OPT_ABORT_ON_FULL;
break;
+ case Opt_rxbounce:
+ opt->flags |= CEPH_OPT_RXBOUNCE;
+ break;
default:
BUG();
@@ -660,6 +665,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
seq_puts(m, "notcp_nodelay,");
if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
seq_puts(m, "abort_on_full,");
+ if (opt->flags & CEPH_OPT_RXBOUNCE)
+ seq_puts(m, "rxbounce,");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
seq_printf(m, "mount_timeout=%d,",
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 45eba2dcb67a..d3bb656308b4 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -515,6 +515,10 @@ static void ceph_con_reset_protocol(struct ceph_connection *con)
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
}
+ if (con->bounce_page) {
+ __free_page(con->bounce_page);
+ con->bounce_page = NULL;
+ }
if (ceph_msgr2(from_msgr(con->msgr)))
ceph_con_v2_reset_protocol(con);
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 2cb5ffdf071a..6b014eca3a13 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -992,8 +992,7 @@ static int read_partial_message_section(struct ceph_connection *con,
static int read_partial_msg_data(struct ceph_connection *con)
{
- struct ceph_msg *msg = con->in_msg;
- struct ceph_msg_data_cursor *cursor = &msg->cursor;
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
struct page *page;
size_t page_offset;
@@ -1001,9 +1000,6 @@ static int read_partial_msg_data(struct ceph_connection *con)
u32 crc = 0;
int ret;
- if (!msg->num_data_items)
- return -EIO;
-
if (do_datacrc)
crc = con->in_data_crc;
while (cursor->total_resid) {
@@ -1031,6 +1027,46 @@ static int read_partial_msg_data(struct ceph_connection *con)
return 1; /* must return > 0 to indicate success */
}
+static int read_partial_msg_data_bounce(struct ceph_connection *con)
+{
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
+ struct page *page;
+ size_t off, len;
+ u32 crc;
+ int ret;
+
+ if (unlikely(!con->bounce_page)) {
+ con->bounce_page = alloc_page(GFP_NOIO);
+ if (!con->bounce_page) {
+ pr_err("failed to allocate bounce page\n");
+ return -ENOMEM;
+ }
+ }
+
+ crc = con->in_data_crc;
+ while (cursor->total_resid) {
+ if (!cursor->resid) {
+ ceph_msg_data_advance(cursor, 0);
+ continue;
+ }
+
+ page = ceph_msg_data_next(cursor, &off, &len, NULL);
+ ret = ceph_tcp_recvpage(con->sock, con->bounce_page, 0, len);
+ if (ret <= 0) {
+ con->in_data_crc = crc;
+ return ret;
+ }
+
+ crc = crc32c(crc, page_address(con->bounce_page), ret);
+ memcpy_to_page(page, off, page_address(con->bounce_page), ret);
+
+ ceph_msg_data_advance(cursor, ret);
+ }
+ con->in_data_crc = crc;
+
+ return 1; /* must return > 0 to indicate success */
+}
+
/*
* read (part of) a message.
*/
@@ -1141,7 +1177,13 @@ static int read_partial_message(struct ceph_connection *con)
/* (page) data */
if (data_len) {
- ret = read_partial_msg_data(con);
+ if (!m->num_data_items)
+ return -EIO;
+
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
+ ret = read_partial_msg_data_bounce(con);
+ else
+ ret = read_partial_msg_data(con);
if (ret <= 0)
return ret;
}
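With the new rxbounce option, read_partial_msg_data_bounce() receives into a private bounce page, checksums that stable copy, and only then copies it into the destination page, so the CRC always matches the bytes that were actually delivered even if the destination (for example a page cache page) is being modified concurrently. A minimal userspace sketch of the same ordering, where a trivial additive sum stands in for the kernel's crc32c() and the buffer sizes are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define BOUNCE_SZ 4096

/* Trivial additive checksum standing in for crc32c(). */
static uint32_t checksum(uint32_t sum, const unsigned char *p, size_t len)
{
	while (len--)
		sum += *p++;
	return sum;
}

/* Receive into a private bounce buffer, checksum the stable copy,
 * then copy it into the caller's destination. Returns read() result. */
static ssize_t recv_bounced(int fd, unsigned char *dst, size_t len,
			    uint32_t *sum)
{
	static unsigned char bounce[BOUNCE_SZ];
	ssize_t ret;

	if (len > sizeof(bounce))
		len = sizeof(bounce);

	ret = read(fd, bounce, len);
	if (ret <= 0)
		return ret;

	*sum = checksum(*sum, bounce, (size_t)ret);
	memcpy(dst, bounce, (size_t)ret);
	return ret;
}

int main(void)
{
	unsigned char dst[BOUNCE_SZ];	/* overwritten each pass; demo only */
	uint32_t sum = 0;
	ssize_t n;

	while ((n = recv_bounced(0, dst, sizeof(dst), &sum)) > 0)
		;
	printf("checksum over stdin: %u\n", sum);
	return n < 0 ? 1 : 0;
}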
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index c4099b641b38..c81379f93ad5 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -57,8 +57,9 @@
#define IN_S_HANDLE_CONTROL_REMAINDER 3
#define IN_S_PREPARE_READ_DATA 4
#define IN_S_PREPARE_READ_DATA_CONT 5
-#define IN_S_HANDLE_EPILOGUE 6
-#define IN_S_FINISH_SKIP 7
+#define IN_S_PREPARE_READ_ENC_PAGE 6
+#define IN_S_HANDLE_EPILOGUE 7
+#define IN_S_FINISH_SKIP 8
#define OUT_S_QUEUE_DATA 1
#define OUT_S_QUEUE_DATA_CONT 2
@@ -1032,22 +1033,41 @@ static int decrypt_control_remainder(struct ceph_connection *con)
padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}
-static int decrypt_message(struct ceph_connection *con)
+static int decrypt_tail(struct ceph_connection *con)
{
+ struct sg_table enc_sgt = {};
struct sg_table sgt = {};
+ int tail_len;
int ret;
+ tail_len = tail_onwire_len(con->in_msg, true);
+ ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
+ con->v2.in_enc_page_cnt, 0, tail_len,
+ GFP_NOIO);
+ if (ret)
+ goto out;
+
ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
con->v2.in_buf, true);
if (ret)
goto out;
- ret = gcm_crypt(con, false, sgt.sgl, sgt.sgl,
- tail_onwire_len(con->in_msg, true));
+ dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
+ con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
+ ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
+ if (ret)
+ goto out;
+
+ WARN_ON(!con->v2.in_enc_page_cnt);
+ ceph_release_page_vector(con->v2.in_enc_pages,
+ con->v2.in_enc_page_cnt);
+ con->v2.in_enc_pages = NULL;
+ con->v2.in_enc_page_cnt = 0;
out:
sg_free_table(&sgt);
+ sg_free_table(&enc_sgt);
return ret;
}
@@ -1733,54 +1753,157 @@ static int prepare_read_control_remainder(struct ceph_connection *con)
return 0;
}
-static void prepare_read_data(struct ceph_connection *con)
+static int prepare_read_data(struct ceph_connection *con)
{
struct bio_vec bv;
- if (!con_secure(con))
- con->in_data_crc = -1;
+ con->in_data_crc = -1;
ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
data_len(con->in_msg));
get_bvec_at(&con->v2.in_cursor, &bv);
- set_in_bvec(con, &bv);
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ if (unlikely(!con->bounce_page)) {
+ con->bounce_page = alloc_page(GFP_NOIO);
+ if (!con->bounce_page) {
+ pr_err("failed to allocate bounce page\n");
+ return -ENOMEM;
+ }
+ }
+
+ bv.bv_page = con->bounce_page;
+ bv.bv_offset = 0;
+ set_in_bvec(con, &bv);
+ } else {
+ set_in_bvec(con, &bv);
+ }
con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
+ return 0;
}
static void prepare_read_data_cont(struct ceph_connection *con)
{
struct bio_vec bv;
- if (!con_secure(con))
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ con->in_data_crc = crc32c(con->in_data_crc,
+ page_address(con->bounce_page),
+ con->v2.in_bvec.bv_len);
+
+ get_bvec_at(&con->v2.in_cursor, &bv);
+ memcpy_to_page(bv.bv_page, bv.bv_offset,
+ page_address(con->bounce_page),
+ con->v2.in_bvec.bv_len);
+ } else {
con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
con->v2.in_bvec.bv_page,
con->v2.in_bvec.bv_offset,
con->v2.in_bvec.bv_len);
+ }
ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
if (con->v2.in_cursor.total_resid) {
get_bvec_at(&con->v2.in_cursor, &bv);
- set_in_bvec(con, &bv);
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ bv.bv_page = con->bounce_page;
+ bv.bv_offset = 0;
+ set_in_bvec(con, &bv);
+ } else {
+ set_in_bvec(con, &bv);
+ }
WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
return;
}
/*
- * We've read all data. Prepare to read data padding (if any)
- * and epilogue.
+ * We've read all data. Prepare to read epilogue.
*/
reset_in_kvecs(con);
- if (con_secure(con)) {
- if (need_padding(data_len(con->in_msg)))
- add_in_kvec(con, DATA_PAD(con->v2.in_buf),
- padding_len(data_len(con->in_msg)));
- add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_SECURE_LEN);
+ add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
+ con->v2.in_state = IN_S_HANDLE_EPILOGUE;
+}
+
+static int prepare_read_tail_plain(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->in_msg;
+
+ if (!front_len(msg) && !middle_len(msg)) {
+ WARN_ON(!data_len(msg));
+ return prepare_read_data(con);
+ }
+
+ reset_in_kvecs(con);
+ if (front_len(msg)) {
+ add_in_kvec(con, msg->front.iov_base, front_len(msg));
+ WARN_ON(msg->front.iov_len != front_len(msg));
+ }
+ if (middle_len(msg)) {
+ add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
+ WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
+ }
+
+ if (data_len(msg)) {
+ con->v2.in_state = IN_S_PREPARE_READ_DATA;
} else {
add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
+ con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
+ return 0;
+}
+
+static void prepare_read_enc_page(struct ceph_connection *con)
+{
+ struct bio_vec bv;
+
+ dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
+ con->v2.in_enc_resid);
+ WARN_ON(!con->v2.in_enc_resid);
+
+ bv.bv_page = con->v2.in_enc_pages[con->v2.in_enc_i];
+ bv.bv_offset = 0;
+ bv.bv_len = min(con->v2.in_enc_resid, (int)PAGE_SIZE);
+
+ set_in_bvec(con, &bv);
+ con->v2.in_enc_i++;
+ con->v2.in_enc_resid -= bv.bv_len;
+
+ if (con->v2.in_enc_resid) {
+ con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
+ return;
+ }
+
+ /*
+ * We are set to read the last piece of ciphertext (ending
+ * with epilogue) + auth tag.
+ */
+ WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
+static int prepare_read_tail_secure(struct ceph_connection *con)
+{
+ struct page **enc_pages;
+ int enc_page_cnt;
+ int tail_len;
+
+ tail_len = tail_onwire_len(con->in_msg, true);
+ WARN_ON(!tail_len);
+
+ enc_page_cnt = calc_pages_for(0, tail_len);
+ enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
+ if (IS_ERR(enc_pages))
+ return PTR_ERR(enc_pages);
+
+ WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
+ con->v2.in_enc_pages = enc_pages;
+ con->v2.in_enc_page_cnt = enc_page_cnt;
+ con->v2.in_enc_resid = tail_len;
+ con->v2.in_enc_i = 0;
+
+ prepare_read_enc_page(con);
+ return 0;
+}
+
static void __finish_skip(struct ceph_connection *con)
{
con->in_seq++;
@@ -2589,47 +2712,26 @@ static int __handle_control(struct ceph_connection *con, void *p)
}
msg = con->in_msg; /* set in process_message_header() */
- if (!front_len(msg) && !middle_len(msg)) {
- if (!data_len(msg))
- return process_message(con);
-
- prepare_read_data(con);
- return 0;
- }
-
- reset_in_kvecs(con);
if (front_len(msg)) {
WARN_ON(front_len(msg) > msg->front_alloc_len);
- add_in_kvec(con, msg->front.iov_base, front_len(msg));
msg->front.iov_len = front_len(msg);
-
- if (con_secure(con) && need_padding(front_len(msg)))
- add_in_kvec(con, FRONT_PAD(con->v2.in_buf),
- padding_len(front_len(msg)));
} else {
msg->front.iov_len = 0;
}
if (middle_len(msg)) {
WARN_ON(middle_len(msg) > msg->middle->alloc_len);
- add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
msg->middle->vec.iov_len = middle_len(msg);
-
- if (con_secure(con) && need_padding(middle_len(msg)))
- add_in_kvec(con, MIDDLE_PAD(con->v2.in_buf),
- padding_len(middle_len(msg)));
} else if (msg->middle) {
msg->middle->vec.iov_len = 0;
}
- if (data_len(msg)) {
- con->v2.in_state = IN_S_PREPARE_READ_DATA;
- } else {
- add_in_kvec(con, con->v2.in_buf,
- con_secure(con) ? CEPH_EPILOGUE_SECURE_LEN :
- CEPH_EPILOGUE_PLAIN_LEN);
- con->v2.in_state = IN_S_HANDLE_EPILOGUE;
- }
- return 0;
+ if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
+ return process_message(con);
+
+ if (con_secure(con))
+ return prepare_read_tail_secure(con);
+
+ return prepare_read_tail_plain(con);
}
static int handle_preamble(struct ceph_connection *con)
@@ -2717,7 +2819,7 @@ static int handle_epilogue(struct ceph_connection *con)
int ret;
if (con_secure(con)) {
- ret = decrypt_message(con);
+ ret = decrypt_tail(con);
if (ret) {
if (ret == -EBADMSG)
con->error_msg = "integrity error, bad epilogue auth tag";
@@ -2785,13 +2887,16 @@ static int populate_in_iter(struct ceph_connection *con)
ret = handle_control_remainder(con);
break;
case IN_S_PREPARE_READ_DATA:
- prepare_read_data(con);
- ret = 0;
+ ret = prepare_read_data(con);
break;
case IN_S_PREPARE_READ_DATA_CONT:
prepare_read_data_cont(con);
ret = 0;
break;
+ case IN_S_PREPARE_READ_ENC_PAGE:
+ prepare_read_enc_page(con);
+ ret = 0;
+ break;
case IN_S_HANDLE_EPILOGUE:
ret = handle_epilogue(con);
break;
@@ -3326,20 +3431,16 @@ void ceph_con_v2_revoke(struct ceph_connection *con)
static void revoke_at_prepare_read_data(struct ceph_connection *con)
{
- int remaining; /* data + [data padding] + epilogue */
+ int remaining;
int resid;
+ WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid);
- if (con_secure(con))
- remaining = padded_len(data_len(con->in_msg)) +
- CEPH_EPILOGUE_SECURE_LEN;
- else
- remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
-
+ remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
remaining);
con->v2.in_iter.count -= resid;
@@ -3350,8 +3451,9 @@ static void revoke_at_prepare_read_data(struct ceph_connection *con)
static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
{
int recved, resid; /* current piece of data */
- int remaining; /* [data padding] + epilogue */
+ int remaining;
+ WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
@@ -3363,12 +3465,7 @@ static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
ceph_msg_data_advance(&con->v2.in_cursor, recved);
WARN_ON(resid > con->v2.in_cursor.total_resid);
- if (con_secure(con))
- remaining = padding_len(data_len(con->in_msg)) +
- CEPH_EPILOGUE_SECURE_LEN;
- else
- remaining = CEPH_EPILOGUE_PLAIN_LEN;
-
+ remaining = CEPH_EPILOGUE_PLAIN_LEN;
dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
con->v2.in_cursor.total_resid, remaining);
con->v2.in_iter.count -= resid;
@@ -3376,11 +3473,26 @@ static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
con->v2.in_state = IN_S_FINISH_SKIP;
}
+static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
+{
+ int resid; /* current enc page (not necessarily data) */
+
+ WARN_ON(!con_secure(con));
+ WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
+ resid = iov_iter_count(&con->v2.in_iter);
+ WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
+
+ dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
+ con->v2.in_enc_resid);
+ con->v2.in_iter.count -= resid;
+ set_in_skip(con, resid + con->v2.in_enc_resid);
+ con->v2.in_state = IN_S_FINISH_SKIP;
+}
+
static void revoke_at_handle_epilogue(struct ceph_connection *con)
{
int resid;
- WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid);
@@ -3399,6 +3511,9 @@ void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
case IN_S_PREPARE_READ_DATA_CONT:
revoke_at_prepare_read_data_cont(con);
break;
+ case IN_S_PREPARE_READ_ENC_PAGE:
+ revoke_at_prepare_read_enc_page(con);
+ break;
case IN_S_HANDLE_EPILOGUE:
revoke_at_handle_epilogue(con);
break;
@@ -3432,6 +3547,13 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
clear_out_sign_kvecs(con);
free_conn_bufs(con);
+ if (con->v2.in_enc_pages) {
+ WARN_ON(!con->v2.in_enc_page_cnt);
+ ceph_release_page_vector(con->v2.in_enc_pages,
+ con->v2.in_enc_page_cnt);
+ con->v2.in_enc_pages = NULL;
+ con->v2.in_enc_page_cnt = 0;
+ }
if (con->v2.out_enc_pages) {
WARN_ON(!con->v2.out_enc_page_cnt);
ceph_release_page_vector(con->v2.out_enc_pages,
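For secure connections, the messenger_v2 hunks above read the whole onwire tail (front, middle, data, padding, epilogue and auth tag) as ciphertext into a dedicated page vector via the new IN_S_PREPARE_READ_ENC_PAGE state, and only decrypt it in one go in decrypt_tail(). Each pass of prepare_read_enc_page() exposes at most one page and advances the running index and residual. A minimal sketch of that chunking, with illustrative types rather than the ceph_connection fields:

#include <stdio.h>

#define PAGE_SIZE 4096

struct enc_walk {
	int page_cnt;	/* would size the page vector */
	int i;		/* next page to hand out */
	int resid;	/* ciphertext bytes still to read */
};

/* Returns the length to read into page walk->i, 0 when done. */
static int next_enc_chunk(struct enc_walk *walk)
{
	int len;

	if (!walk->resid)
		return 0;

	len = walk->resid < PAGE_SIZE ? walk->resid : PAGE_SIZE;
	walk->i++;
	walk->resid -= len;
	return len;
}

int main(void)
{
	struct enc_walk walk = { .page_cnt = 3, .i = 0, .resid = 9000 };
	int len;

	while ((len = next_enc_chunk(&walk)))
		printf("page %d: %d bytes\n", walk.i - 1, len);
	return 0;
}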
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 7b288a121a41..d5dc6be2522c 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -283,13 +283,17 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi,
rcu_read_lock();
list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+ struct net_device *dev;
+
/*
* only add a note to our monitor buffer if:
* 1) this is the dev we received on
* 2) its after the last_rx delta
* 3) our rx_dropped count has gone up
*/
- if ((new_stat->dev == napi->dev) &&
+ /* Paired with WRITE_ONCE() in dropmon_net_event() */
+ dev = READ_ONCE(new_stat->dev);
+ if ((dev == napi->dev) &&
(time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
(napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
trace_drop_common(NULL, NULL);
@@ -1576,7 +1580,10 @@ static int dropmon_net_event(struct notifier_block *ev_block,
mutex_lock(&net_dm_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
- new_stat->dev = NULL;
+
+ /* Paired with READ_ONCE() in trace_napi_poll_hit() */
+ WRITE_ONCE(new_stat->dev, NULL);
+
if (trace_state == TRACE_OFF) {
list_del_rcu(&new_stat->list);
kfree_rcu(new_stat, rcu);
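The drop_monitor hunks above pair a READ_ONCE() in the RCU-protected reader with a WRITE_ONCE() in the notifier that clears new_stat->dev, so the reader performs a single well-defined load and keeps using its local copy. A minimal userspace analogue using C11 relaxed atomics in place of READ_ONCE()/WRITE_ONCE(); the structure names are illustrative:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct device {
	int ifindex;
};

struct stat_entry {
	_Atomic(struct device *) dev;
	unsigned long drops;
};

static void reader(struct stat_entry *stat, struct device *rx_dev)
{
	/* One load, equivalent in spirit to READ_ONCE(stat->dev). */
	struct device *dev = atomic_load_explicit(&stat->dev,
						  memory_order_relaxed);

	if (dev == rx_dev)
		stat->drops++;
}

static void unregister_dev(struct stat_entry *stat)
{
	/* Paired with the reader's load, like WRITE_ONCE(stat->dev, NULL). */
	atomic_store_explicit(&stat->dev, NULL, memory_order_relaxed);
}

int main(void)
{
	struct device d = { .ifindex = 1 };
	struct stat_entry s = { .drops = 0 };

	atomic_store_explicit(&s.dev, &d, memory_order_relaxed);
	reader(&s, &d);
	unregister_dev(&s);
	reader(&s, &d);	/* sees NULL, does not count */
	printf("drops=%lu\n", s.drops);
	return 0;
}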
diff --git a/net/core/filter.c b/net/core/filter.c
index 4603b7cd3cd1..9eb785842258 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2710,6 +2710,9 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
if (unlikely(flags))
return -EINVAL;
+ if (unlikely(len == 0))
+ return 0;
+
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6c2016f7f3d1..ec0bf737b076 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1133,7 +1133,8 @@ out:
neigh_release(neigh);
}
-int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ const bool immediate_ok)
{
int rc;
bool immediate_probe = false;
@@ -1154,12 +1155,17 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
atomic_set(&neigh->probes,
NEIGH_VAR(neigh->parms, UCAST_PROBES));
neigh_del_timer(neigh);
- neigh->nud_state = NUD_INCOMPLETE;
+ neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = now;
- next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
- HZ/100);
+ if (!immediate_ok) {
+ next = now + 1;
+ } else {
+ immediate_probe = true;
+ next = now + max(NEIGH_VAR(neigh->parms,
+ RETRANS_TIME),
+ HZ / 100);
+ }
neigh_add_timer(neigh, next);
- immediate_probe = true;
} else {
neigh->nud_state = NUD_FAILED;
neigh->updated = jiffies;
@@ -1571,7 +1577,7 @@ static void neigh_managed_work(struct work_struct *work)
write_lock_bh(&tbl->lock);
list_for_each_entry(neigh, &tbl->managed_list, managed_list)
- neigh_event_send(neigh, NULL);
+ neigh_event_send_probe(neigh, NULL, false);
queue_delayed_work(system_power_efficient_wq, &tbl->managed_work,
NEIGH_VAR(&tbl->parms, DELAY_PROBE_TIME));
write_unlock_bh(&tbl->lock);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 53ea262ecafd..fbddf966206b 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -213,7 +213,7 @@ static ssize_t speed_show(struct device *dev,
if (!rtnl_trylock())
return restart_syscall();
- if (netif_running(netdev)) {
+ if (netif_running(netdev) && netif_device_present(netdev)) {
struct ethtool_link_ksettings cmd;
if (!__ethtool_get_link_ksettings(netdev, &cmd))
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e476403231f0..2fb8eb6791e8 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1699,6 +1699,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
+ struct Qdisc *qdisc;
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -1716,6 +1717,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
+ qdisc = rtnl_dereference(dev->qdisc);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
nla_put_u8(skb, IFLA_OPERSTATE,
@@ -1735,8 +1737,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
#endif
put_master_ifindex(skb, dev) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
- (dev->qdisc &&
- nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+ (qdisc &&
+ nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
nla_put_ifalias(skb, dev) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
atomic_read(&dev->carrier_up_count) +
@@ -3275,8 +3277,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
unsigned char name_assign_type = NET_NAME_USER;
struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
- const struct rtnl_link_ops *m_ops = NULL;
- struct net_device *master_dev = NULL;
+ const struct rtnl_link_ops *m_ops;
+ struct net_device *master_dev;
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
struct nlattr *tb[IFLA_MAX + 1];
@@ -3314,6 +3316,8 @@ replay:
else
dev = NULL;
+ master_dev = NULL;
+ m_ops = NULL;
if (dev) {
master_dev = netdev_master_upper_dev_get(dev);
if (master_dev)
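The __rtnl_newlink() hunk above moves the master_dev and m_ops initialization below the replay: label, so a second pass through the lookup does not reuse pointers derived from a device found on the first pass. A minimal sketch of resetting per-attempt state inside the retry loop; the lookup and names are illustrative only:

#include <stdio.h>

struct dev { const char *master; };

static struct dev *lookup(int attempt)
{
	static struct dev with_master = { .master = "br0" };

	return attempt == 0 ? &with_master : NULL;	/* gone on retry */
}

int main(void)
{
	const char *master;
	struct dev *d;
	int attempt = 0;

replay:
	/* Reset per-attempt state here, as __rtnl_newlink() now does. */
	master = NULL;

	d = lookup(attempt);
	if (d)
		master = d->master;

	printf("attempt %d: master=%s\n", attempt, master ? master : "none");
	if (attempt++ == 0)
		goto replay;
	return 0;
}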
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0118f0afaa4f..ea51e23e9247 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -681,7 +681,7 @@ exit:
* while trying to recycle fragments on __skb_frag_unref() we need
* to make one SKB responsible for triggering the recycle path.
* So disable the recycling bit if an SKB is cloned and we have
- * additional references to to the fragmented part of the SKB.
+ * additional references to the fragmented part of the SKB.
* Eventually the last SKB will have the recycling bit set and it's
* dataref set to 0, which will trigger the recycling
*/
@@ -2276,7 +2276,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Free pulled out fragments. */
while ((list = skb_shinfo(skb)->frag_list) != insp) {
skb_shinfo(skb)->frag_list = list->next;
- kfree_skb(list);
+ consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
@@ -3876,6 +3876,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
list_skb = list_skb->next;
err = 0;
+ delta_truesize += nskb->truesize;
if (skb_shared(nskb)) {
tmp = skb_clone(nskb, GFP_ATOMIC);
if (tmp) {
@@ -3900,7 +3901,6 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
tail = nskb;
delta_len += nskb->len;
- delta_truesize += nskb->truesize;
skb_push(nskb, -skb_network_offset(nskb) + offset);
@@ -4730,7 +4730,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk_is_tcp(sk))
- serr->ee.ee_data -= sk->sk_tskey;
+ serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
}
err = sock_queue_err_skb(sk, skb);
@@ -6105,7 +6105,7 @@ static int pskb_carve_frag_list(struct sk_buff *skb,
/* Free pulled out fragments. */
while ((list = shinfo->frag_list) != insp) {
shinfo->frag_list = list->next;
- kfree_skb(list);
+ consume_skb(list);
}
/* And insert new clone at head. */
if (clone) {
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 8eb671c827f9..929a2b096b04 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1153,7 +1153,7 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
struct sk_psock *psock;
struct bpf_prog *prog;
int ret = __SK_DROP;
- int len = skb->len;
+ int len = orig_len;
/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
skb = skb_clone(skb, GFP_ATOMIC);
diff --git a/net/core/sock.c b/net/core/sock.c
index 4ff806d71921..6eb174805bf0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -879,9 +879,9 @@ int sock_set_timestamping(struct sock *sk, int optname,
if ((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))
return -EINVAL;
- sk->sk_tskey = tcp_sk(sk)->snd_una;
+ atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
} else {
- sk->sk_tskey = 0;
+ atomic_set(&sk->sk_tskey, 0);
}
}
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index b441ab330fd3..dc4fb699b56c 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -2073,8 +2073,52 @@ u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
}
EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
+static void dcbnl_flush_dev(struct net_device *dev)
+{
+ struct dcb_app_type *itr, *tmp;
+
+ spin_lock_bh(&dcb_lock);
+
+ list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
+ if (itr->ifindex == dev->ifindex) {
+ list_del(&itr->list);
+ kfree(itr);
+ }
+ }
+
+ spin_unlock_bh(&dcb_lock);
+}
+
+static int dcbnl_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ if (!dev->dcbnl_ops)
+ return NOTIFY_DONE;
+
+ dcbnl_flush_dev(dev);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block dcbnl_nb __read_mostly = {
+ .notifier_call = dcbnl_netdevice_event,
+};
+
static int __init dcbnl_init(void)
{
+ int err;
+
+ err = register_netdevice_notifier(&dcbnl_nb);
+ if (err)
+ return err;
+
rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
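The dcbnl hunk above registers a netdevice notifier and, on NETDEV_UNREGISTER, drops every dcb_app_list entry that belongs to the departing device, so stale per-ifindex state does not linger after the device is gone. A minimal sketch of that flush-by-ifindex shape against an illustrative fixed-size table rather than the kernel's locked list:

#include <stdio.h>

#define MAX_APPS 8

struct app_entry {
	int in_use;
	int ifindex;
	int priority;
};

static struct app_entry app_table[MAX_APPS];

static void flush_dev(int ifindex)
{
	int i;

	/* Drop every entry that belongs to the departing device. */
	for (i = 0; i < MAX_APPS; i++)
		if (app_table[i].in_use && app_table[i].ifindex == ifindex)
			app_table[i].in_use = 0;
}

static int netdevice_event(int event, int ifindex)
{
	if (event != 1 /* illustrative UNREGISTER */)
		return 0;
	flush_dev(ifindex);
	return 1;
}

int main(void)
{
	app_table[0] = (struct app_entry){ .in_use = 1, .ifindex = 3, .priority = 5 };
	app_table[1] = (struct app_entry){ .in_use = 1, .ifindex = 4, .priority = 2 };
	netdevice_event(1, 3);
	printf("entry0 in_use=%d entry1 in_use=%d\n",
	       app_table[0].in_use, app_table[1].in_use);
	return 0;
}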
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index d9d0d227092c..c43f7446a75d 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -349,6 +349,7 @@ void dsa_flush_workqueue(void)
{
flush_workqueue(dsa_owq);
}
+EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 3d21521453fe..b4e67758e104 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -1261,7 +1261,7 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
if (err)
- return err;
+ goto out_unwind_tagger;
err = dsa_tree_bind_tag_proto(dst, tag_ops);
if (err)
@@ -1718,7 +1718,6 @@ EXPORT_SYMBOL_GPL(dsa_unregister_switch);
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *master, *slave_dev;
- LIST_HEAD(unregister_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
@@ -1729,25 +1728,13 @@ void dsa_switch_shutdown(struct dsa_switch *ds)
slave_dev = dp->slave;
netdev_upper_dev_unlink(master, slave_dev);
- /* Just unlinking ourselves as uppers of the master is not
- * sufficient. When the master net device unregisters, that will
- * also call dev_close, which we will catch as NETDEV_GOING_DOWN
- * and trigger a dev_close on our own devices (dsa_slave_close).
- * In turn, that will call dev_mc_unsync on the master's net
- * device. If the master is also a DSA switch port, this will
- * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
- * its own master. Lockdep will complain about the fact that
- * all cascaded masters have the same dsa_master_addr_list_lock_key,
- * which it normally would not do if the cascaded masters would
- * be in a proper upper/lower relationship, which we've just
- * destroyed.
- * To suppress the lockdep warnings, let's actually unregister
- * the DSA slave interfaces too, to avoid the nonsensical
- * multicast address list synchronization on shutdown.
- */
- unregister_netdevice_queue(slave_dev, &unregister_list);
}
- unregister_netdevice_many(&unregister_list);
+
+ /* Disconnect from further netdevice notifiers on the master,
+ * since netdev_uses_dsa() will now return false.
+ */
+ dsa_switch_for_each_cpu_port(dp, ds)
+ dp->master->dsa_ptr = NULL;
rtnl_unlock();
mutex_unlock(&dsa2_mutex);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 760306f0012f..23c79e91ac67 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -147,7 +147,6 @@ void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
-void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 2199104ca7df..880f910b23a9 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -260,11 +260,16 @@ static void dsa_netdev_ops_set(struct net_device *dev,
dev->dsa_ptr->netdev_ops = ops;
}
+/* Keep the master always promiscuous if the tagging protocol requires that
+ * (garbles MAC DA) or if it doesn't support unicast filtering, case in which
+ * it would revert to promiscuous mode as soon as we call dev_uc_add() on it
+ * anyway.
+ */
static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
{
const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
- if (!ops->promisc_on_master)
+ if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master)
return;
ASSERT_RTNL();
diff --git a/net/dsa/port.c b/net/dsa/port.c
index bd78192e0e47..1a40c52f5a42 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -395,10 +395,17 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
.tree_index = dp->ds->dst->index,
.sw_index = dp->ds->index,
.port = dp->index,
- .bridge = *dp->bridge,
};
int err;
+ /* If the port could not be offloaded to begin with, then
+ * there is nothing to do.
+ */
+ if (!dp->bridge)
+ return;
+
+ info.bridge = *dp->bridge;
+
/* Here the port is already unbridged. Reflect the current configuration
* so that drivers can program their chips accordingly.
*/
@@ -781,9 +788,15 @@ int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
struct dsa_port *cpu_dp = dp->cpu_dp;
int err;
- err = dev_uc_add(cpu_dp->master, addr);
- if (err)
- return err;
+ /* Avoid a call to __dev_set_promiscuity() on the master, which
+ * requires rtnl_lock(), since we can't guarantee that is held here,
+ * and we can't take it either.
+ */
+ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_add(cpu_dp->master, addr);
+ if (err)
+ return err;
+ }
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_ADD, &info);
}
@@ -800,9 +813,11 @@ int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
struct dsa_port *cpu_dp = dp->cpu_dp;
int err;
- err = dev_uc_del(cpu_dp->master, addr);
- if (err)
- return err;
+ if (cpu_dp->master->priv_flags & IFF_UNICAST_FLT) {
+ err = dev_uc_del(cpu_dp->master, addr);
+ if (err)
+ return err;
+ }
return dsa_port_notify(dp, DSA_NOTIFIER_HOST_FDB_DEL, &info);
}
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index cb548188f813..98d7d7120bab 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -77,7 +77,6 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
{
- __be16 *lan9303_tag;
u16 lan9303_tag1;
unsigned int source_port;
@@ -87,14 +86,15 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
return NULL;
}
- lan9303_tag = dsa_etype_header_pos_rx(skb);
-
- if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
- dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
- return NULL;
+ if (skb_vlan_tag_present(skb)) {
+ lan9303_tag1 = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ skb_push_rcsum(skb, ETH_HLEN);
+ __skb_vlan_pop(skb, &lan9303_tag1);
+ skb_pull_rcsum(skb, ETH_HLEN);
}
- lan9303_tag1 = ntohs(lan9303_tag[1]);
source_port = lan9303_tag1 & 0x3;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
@@ -103,13 +103,6 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
return NULL;
}
- /* remove the special VLAN tag between the MAC addresses
- * and the current ethertype field.
- */
- skb_pull_rcsum(skb, 2 + 2);
-
- dsa_strip_etype_header(skb, LAN9303_TAG_LEN);
-
if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
dsa_default_offload_fwd_mark(skb);
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 277124f206e0..e0b072aecf0f 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -1441,7 +1441,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid,
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
- return -1;
+ return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
@@ -1634,7 +1634,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid,
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
- return -1;
+ return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
@@ -1812,7 +1812,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid,
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
- return -1;
+ return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
@@ -1988,7 +1988,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid,
hdr = nl802154hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
- return -1;
+ return -ENOBUFS;
if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9c465bac1eb0..72fde2888ad2 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1376,8 +1376,11 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
}
ops = rcu_dereference(inet_offloads[proto]);
- if (likely(ops && ops->callbacks.gso_segment))
+ if (likely(ops && ops->callbacks.gso_segment)) {
segs = ops->callbacks.gso_segment(skb, features);
+ if (!segs)
+ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+ }
if (IS_ERR_OR_NULL(segs))
goto out;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 851f542928a3..e1b1d080e908 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -671,7 +671,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4d61ddd8a0ec..85117b45216d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -436,6 +436,9 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
if (net->ipv4.fib_has_custom_local_routes ||
fib4_has_custom_rules(net))
goto full_check;
+ /* A source address that belongs to this netns (container) is
+ * regarded as a martian source; the same address owned by a
+ * different container on the same host is not.
+ */
if (inet_lookup_ifaddr_rcu(net, src))
return -EINVAL;
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index e184bcb19943..78e40ea42e58 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -16,10 +16,9 @@ struct fib_alias {
u8 fa_slen;
u32 tb_id;
s16 fa_default;
- u8 offload:1,
- trap:1,
- offload_failed:1,
- unused:5;
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
struct rcu_head rcu;
};
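The fib_lookup.h hunk above turns the single-bit flags into separate u8 fields because the fib_trie.c and fib_semantics.c hunks below now access them with READ_ONCE()/WRITE_ONCE(), which needs individually addressable fields; adjacent bitfields also share one memory location, so concurrent updates to different bits would race. A minimal sketch contrasting the two layouts; the unsigned char bitfields are a common compiler extension standing in for the kernel's u8:

#include <stdio.h>

struct flags_bitfield {
	unsigned char offload:1, trap:1, offload_failed:1, unused:5;
};

struct flags_bytes {
	unsigned char offload;
	unsigned char trap;
	unsigned char offload_failed;
};

int main(void)
{
	/* The bitfields pack into one shared byte; the byte-per-flag
	 * layout lets each flag be loaded and stored on its own. */
	printf("bitfield layout: %zu bytes, byte-per-flag layout: %zu bytes\n",
	       sizeof(struct flags_bitfield), sizeof(struct flags_bytes));
	return 0;
}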
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b4589861b84c..2dd375f7407b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -525,9 +525,9 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
fri.dst_len = dst_len;
fri.tos = fa->fa_tos;
fri.type = fa->fa_type;
- fri.offload = fa->offload;
- fri.trap = fa->trap;
- fri.offload_failed = fa->offload_failed;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 8060524f4256..f7f74d5c14da 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1047,19 +1047,23 @@ void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri)
if (!fa_match)
goto out;
- if (fa_match->offload == fri->offload && fa_match->trap == fri->trap &&
- fa_match->offload_failed == fri->offload_failed)
+ /* These are paired with the WRITE_ONCE() happening in this function.
+ * The reason is that we are only protected by RCU at this point.
+ */
+ if (READ_ONCE(fa_match->offload) == fri->offload &&
+ READ_ONCE(fa_match->trap) == fri->trap &&
+ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
- fa_match->offload = fri->offload;
- fa_match->trap = fri->trap;
+ WRITE_ONCE(fa_match->offload, fri->offload);
+ WRITE_ONCE(fa_match->trap, fri->trap);
/* 2 means send notifications only if offload_failed was changed. */
if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
- fa_match->offload_failed == fri->offload_failed)
+ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
- fa_match->offload_failed = fri->offload_failed;
+ WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
if (!net->ipv4.sysctl_fib_notify_on_flag_change)
goto out;
@@ -2297,9 +2301,9 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
fri.dst_len = KEYLENGTH - fa->fa_slen;
fri.tos = fa->fa_tos;
fri.type = fa->fa_type;
- fri.offload = fa->offload;
- fri.trap = fa->trap;
- fri.offload_failed = fa->offload_failed;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 139cec29ed06..7911916a480b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -991,7 +991,7 @@ static int __ip_append_data(struct sock *sk,
if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
- tskey = sk->sk_tskey++;
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 07274619b9ea..29bbe2b08ae9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -256,7 +256,9 @@ static int __net_init ipmr_rules_init(struct net *net)
return 0;
err2:
+ rtnl_lock();
ipmr_free_table(mrt);
+ rtnl_unlock();
err1:
fib_rules_unregister(ops);
return err;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 67087f95579f..aab384126f61 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -58,10 +58,6 @@ config NF_TABLES_ARP
endif # NF_TABLES
-config NF_FLOW_TABLE_IPV4
- tristate
- select NF_FLOW_TABLE_INET
-
config NF_DUP_IPV4
tristate "Netfilter IPv4 packet duplication to alternate destination"
depends on !NF_CONNTRACK || NF_CONNTRACK
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index bcf7bc71cb56..3ee947557b88 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -172,16 +172,22 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
struct sock *sk = NULL;
struct inet_sock *isk;
struct hlist_nulls_node *hnode;
- int dif = skb->dev->ifindex;
+ int dif, sdif;
if (skb->protocol == htons(ETH_P_IP)) {
+ dif = inet_iif(skb);
+ sdif = inet_sdif(skb);
pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
(int)ident, &ip_hdr(skb)->daddr, dif);
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6)) {
+ dif = inet6_iif(skb);
+ sdif = inet6_sdif(skb);
pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
(int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
+ } else {
+ return NULL;
}
read_lock_bh(&ping_table.lock);
@@ -221,7 +227,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
}
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != inet_sdif(skb))
+ sk->sk_bound_dev_if != sdif)
continue;
sock_hold(sk);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ff6f91cdb6c4..f33ad1f383b6 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3395,8 +3395,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fa->fa_tos == fri.tos &&
fa->fa_info == res.fi &&
fa->fa_type == fri.type) {
- fri.offload = fa->offload;
- fri.trap = fa->trap;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
break;
}
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 78e81465f5f3..28ff2a820f7c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -937,6 +937,22 @@ void tcp_remove_empty_skb(struct sock *sk)
}
}
+/* skb changing from pure zc to mixed, must charge zc */
+static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
+{
+ if (unlikely(skb_zcopy_pure(skb))) {
+ u32 extra = skb->truesize -
+ SKB_TRUESIZE(skb_end_offset(skb));
+
+ if (!sk_wmem_schedule(sk, extra))
+ return -ENOMEM;
+
+ sk_mem_charge(sk, extra);
+ skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+ }
+ return 0;
+}
+
static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
struct page *page, int offset, size_t *size)
{
@@ -972,7 +988,7 @@ new_segment:
tcp_mark_push(tp, skb);
goto new_segment;
}
- if (!sk_wmem_schedule(sk, copy))
+ if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
return NULL;
if (can_coalesce) {
@@ -1320,16 +1336,8 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
- /* skb changing from pure zc to mixed, must charge zc */
- if (unlikely(skb_zcopy_pure(skb))) {
- if (!sk_wmem_schedule(sk, skb->data_len))
- goto wait_for_space;
-
- sk_mem_charge(sk, skb->data_len);
- skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
- }
-
- if (!sk_wmem_schedule(sk, copy))
+ if (tcp_downgrade_zcopy_pure(sk, skb) ||
+ !sk_wmem_schedule(sk, copy))
goto wait_for_space;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
@@ -1676,11 +1684,13 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
if (!copied)
copied = used;
break;
- } else if (used <= len) {
- seq += used;
- copied += used;
- offset += used;
}
+ if (WARN_ON_ONCE(used > len))
+ used = len;
+ seq += used;
+ copied += used;
+ offset += used;
+
/* If recv_actor drops the lock (e.g. TCP splice
* receive) the skb pointer might be invalid when
* getting here: tcp_collapse might have deleted it
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index dc49a3d551eb..bfe4112e000c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1660,6 +1660,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
(mss != tcp_skb_seglen(skb)))
goto out;
+ if (!tcp_skb_can_collapse(prev, skb))
+ goto out;
len = skb->len;
pcount = tcp_skb_pcount(skb);
if (tcp_skb_shift(prev, skb, pcount, len))
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index b91003538d87..bc3a043a5d5c 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -846,7 +846,7 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
list_for_each_entry(node, &info->shared->devices, list)
if (node->dev == dev)
break;
- if (node->dev != dev)
+ if (list_entry_is_head(node, &info->shared->devices, list))
return;
list_del(&node->list);
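The udp_tunnel_nic change above is an instance of a classic search-loop pitfall: after the loop, test whether the iterator ran off the end instead of dereferencing whatever it ended up pointing at. A minimal, self-contained C sketch of the same bug class, using a plain array instead of the kernel's intrusive list (values are illustrative); list_entry_is_head() is the list_for_each_entry() equivalent of the "i == 3" test below.

/*
 * Illustrative sketch, not part of the patch.
 */
#include <stdio.h>

int main(void)
{
        int devs[3] = { 10, 20, 30 };
        int wanted = 99;
        int i;

        for (i = 0; i < 3; i++)
                if (devs[i] == wanted)
                        break;

        /* buggy pattern: "if (devs[i] != wanted)" would read devs[3] out of bounds */
        if (i == 3)
                printf("device not in the list, nothing to unregister\n");
        return 0;
}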
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f927c199a93c..f908e2fd30b2 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1839,8 +1839,8 @@ out:
}
EXPORT_SYMBOL(ipv6_dev_get_saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags)
+static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+ u32 banned_flags)
{
struct inet6_ifaddr *ifp;
int err = -EADDRNOTAVAIL;
@@ -3732,6 +3732,7 @@ static int addrconf_ifdown(struct net_device *dev, bool unregister)
struct inet6_dev *idev;
struct inet6_ifaddr *ifa, *tmp;
bool keep_addr = false;
+ bool was_ready;
int state, i;
ASSERT_RTNL();
@@ -3797,7 +3798,10 @@ restart:
addrconf_del_rs_timer(idev);
- /* Step 2: clear flags for stateless addrconf */
+ /* Step 2: clear flags for stateless addrconf, repeated down
+ * detection
+ */
+ was_ready = idev->if_flags & IF_READY;
if (!unregister)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
@@ -3871,7 +3875,7 @@ restart:
if (unregister) {
ipv6_ac_destroy_dev(idev);
ipv6_mc_destroy_dev(idev);
- } else {
+ } else if (was_ready) {
ipv6_mc_down(idev);
}
@@ -4998,6 +5002,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
goto error;
+ spin_lock_bh(&ifa->lock);
if (!((ifa->flags&IFA_F_PERMANENT) &&
(ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
@@ -5019,6 +5024,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
+ spin_unlock_bh(&ifa->lock);
if (!ipv6_addr_any(&ifa->peer_addr)) {
if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 8bb2c407b46b..7591160edce1 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -707,7 +707,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
u32 padto;
- padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+ padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
if (skb->len < padto)
esp.tfclen = padto - skb->len;
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index aa673a6a7e43..ceb85c67ce39 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -450,8 +450,10 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -EINVAL;
goto done;
}
- if (fl_shared_exclusive(fl) || fl->opt)
+ if (fl_shared_exclusive(fl) || fl->opt) {
+ WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
+ }
return fl;
done:
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b29e9ba5e113..5f577e21459b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -114,6 +114,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->callbacks.gso_segment(skb, features);
+ if (!segs)
+ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
}
if (IS_ERR_OR_NULL(segs))
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2995f8d89e7e..4788f6b37053 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1408,8 +1408,6 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
if (np->frag_size)
mtu = np->frag_size;
}
- if (mtu < IPV6_MIN_MTU)
- return -EINVAL;
cork->base.fragsize = mtu;
cork->base.gso_size = ipc6->gso_size;
cork->base.tx_flags = 0;
@@ -1465,14 +1463,12 @@ static int __ip6_append_data(struct sock *sk,
if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
- tskey = sk->sk_tskey++;
+ tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
(opt ? opt->opt_nflen : 0);
- maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
- sizeof(struct frag_hdr);
headersize = sizeof(struct ipv6hdr) +
(opt ? opt->opt_flen + opt->opt_nflen : 0) +
@@ -1480,6 +1476,13 @@ static int __ip6_append_data(struct sock *sk,
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
+ if (mtu < fragheaderlen ||
+ ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
+ goto emsgsize;
+
+ maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+ sizeof(struct frag_hdr);
+
/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
* the first fragment
*/
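A small userspace sketch of the arithmetic the new __ip6_append_data() guard protects against, now that ip6_setup_cork() no longer rejects tiny MTUs. All sizes below are hypothetical stand-ins (fragheaderlen is normally the IPv6 header plus netfilter header and options).

/*
 * Illustrative userspace sketch, not kernel code.
 */
#include <stdio.h>

int main(void)
{
        unsigned int mtu = 40;           /* hypothetical tiny cork MTU      */
        unsigned int fragheaderlen = 48; /* larger than the MTU             */
        unsigned int frag_hdr_len = 8;   /* sizeof(struct frag_hdr)         */
        unsigned int maxfraglen;

        /* old code computed this unconditionally: mtu - fragheaderlen wraps
         * around and the result is garbage (here smaller than the headers
         * themselves)
         */
        maxfraglen = ((mtu - fragheaderlen) & ~7u) + fragheaderlen -
                     frag_hdr_len;
        printf("unchecked maxfraglen = %u\n", maxfraglen);

        /* the added guard fails the request with EMSGSIZE instead */
        if (mtu < fragheaderlen ||
            ((mtu - fragheaderlen) & ~7u) + fragheaderlen < frag_hdr_len)
                printf("guard triggers -> emsgsize\n");
        return 0;
}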
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 7cf73e60e619..8a2db926b5eb 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -243,7 +243,9 @@ static int __net_init ip6mr_rules_init(struct net *net)
return 0;
err2:
+ rtnl_lock();
ip6mr_free_table(mrt);
+ rtnl_unlock();
err1:
fib_rules_unregister(ops);
return err;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bed8155508c8..909f937befd7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1371,27 +1371,23 @@ static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
}
/* called with rcu_read_lock() */
-int igmp6_event_query(struct sk_buff *skb)
+void igmp6_event_query(struct sk_buff *skb)
{
struct inet6_dev *idev = __in6_dev_get(skb->dev);
- if (!idev)
- return -EINVAL;
-
- if (idev->dead) {
- kfree_skb(skb);
- return -ENODEV;
- }
+ if (!idev || idev->dead)
+ goto out;
spin_lock_bh(&idev->mc_query_lock);
if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
__skb_queue_tail(&idev->mc_query_queue, skb);
if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
in6_dev_hold(idev);
+ skb = NULL;
}
spin_unlock_bh(&idev->mc_query_lock);
-
- return 0;
+out:
+ kfree_skb(skb);
}
static void __mld_query_work(struct sk_buff *skb)
@@ -1542,27 +1538,23 @@ static void mld_query_work(struct work_struct *work)
}
/* called with rcu_read_lock() */
-int igmp6_event_report(struct sk_buff *skb)
+void igmp6_event_report(struct sk_buff *skb)
{
struct inet6_dev *idev = __in6_dev_get(skb->dev);
- if (!idev)
- return -EINVAL;
-
- if (idev->dead) {
- kfree_skb(skb);
- return -ENODEV;
- }
+ if (!idev || idev->dead)
+ goto out;
spin_lock_bh(&idev->mc_report_lock);
if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
__skb_queue_tail(&idev->mc_report_queue, skb);
if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
in6_dev_hold(idev);
+ skb = NULL;
}
spin_unlock_bh(&idev->mc_report_lock);
-
- return 0;
+out:
+ kfree_skb(skb);
}
static void __mld_report_work(struct sk_buff *skb)
@@ -1759,7 +1751,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
skb_reserve(skb, hlen);
skb_tailroom_reserve(skb, mtu, tlen);
- if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
* use unspecified address as the source address
* when a valid link-local address is not available.
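The igmp6_event_query()/igmp6_event_report() rewrite above follows a common ownership pattern: NULL the pointer once the buffer has been handed off, and free whatever is still owned at a single exit label (kfree_skb(NULL), like free(NULL), is a no-op). A standalone sketch of that pattern, where queue_push() and the malloc'ed buffer are stand-ins rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool queue_push(void *buf)
{
        (void)buf;
        return false;   /* pretend the queue is full: caller keeps ownership */
}

static void event_input(void *buf, bool dead)
{
        if (!buf || dead)
                goto out;

        if (queue_push(buf))
                buf = NULL;     /* queued: ownership moved to the worker */
out:
        free(buf);              /* frees only what we still own */
}

int main(void)
{
        event_input(malloc(64), false);
        puts("unqueued buffer freed at the single exit point");
        return 0;
}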
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 97d3d1b36dbc..0ba62f4868f9 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -47,10 +47,6 @@ config NFT_FIB_IPV6
endif # NF_TABLES_IPV6
endif # NF_TABLES
-config NF_FLOW_TABLE_IPV6
- tristate
- select NF_FLOW_TABLE_INET
-
config NF_DUP_IPV6
tristate "Netfilter IPv6 packet duplication to alternate destination"
depends on !NF_CONNTRACK || NF_CONNTRACK
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index b85383606df7..b8d6dc9aeeb6 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -28,9 +28,6 @@ obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o
-# flow table support
-obj-$(CONFIG_NF_FLOW_TABLE_IPV6) += nf_flow_table_ipv6.o
-
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ /dev/null
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f4884cda13b9..ea1cf414a92e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5753,11 +5753,11 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
}
if (!dst) {
- if (rt->offload)
+ if (READ_ONCE(rt->offload))
rtm->rtm_flags |= RTM_F_OFFLOAD;
- if (rt->trap)
+ if (READ_ONCE(rt->trap))
rtm->rtm_flags |= RTM_F_TRAP;
- if (rt->offload_failed)
+ if (READ_ONCE(rt->offload_failed))
rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
}
@@ -6215,19 +6215,20 @@ void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
struct sk_buff *skb;
int err;
- if (f6i->offload == offload && f6i->trap == trap &&
- f6i->offload_failed == offload_failed)
+ if (READ_ONCE(f6i->offload) == offload &&
+ READ_ONCE(f6i->trap) == trap &&
+ READ_ONCE(f6i->offload_failed) == offload_failed)
return;
- f6i->offload = offload;
- f6i->trap = trap;
+ WRITE_ONCE(f6i->offload, offload);
+ WRITE_ONCE(f6i->trap, trap);
/* 2 means send notifications only if offload_failed was changed. */
if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
- f6i->offload_failed == offload_failed)
+ READ_ONCE(f6i->offload_failed) == offload_failed)
return;
- f6i->offload_failed = offload_failed;
+ WRITE_ONCE(f6i->offload_failed, offload_failed);
if (!rcu_access_pointer(f6i->fib6_node))
/* The route was removed from the tree, do not send
diff --git a/net/key/af_key.c b/net/key/af_key.c
index de24a7d474df..9bf52a09b5ff 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2623,7 +2623,7 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
}
return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i,
- kma ? &k : NULL, net, NULL);
+ kma ? &k : NULL, net, NULL, 0);
out:
return err;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 74a878f213d3..1deb3d874a4b 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -626,6 +626,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
}
+ if (test_sta_flag(sta, WLAN_STA_MFP) &&
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
+ ht_dbg(sdata,
+ "MFP STA not authorized - deny BA session request %pM tid %d\n",
+ sta->sta.addr, tid);
+ return -EINVAL;
+ }
+
/*
* 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
* member of an IBSS, and has no other existing Block Ack agreement
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 330ea62231fa..e87bccaab561 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -376,7 +376,7 @@ struct ieee80211_mgd_auth_data {
u8 key[WLAN_KEY_LEN_WEP104];
u8 key_len, key_idx;
- bool done;
+ bool done, waiting;
bool peer_confirmed;
bool timeout_started;
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1eeabdf10052..744842c4513b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -37,6 +37,7 @@
#define IEEE80211_AUTH_TIMEOUT_SAE (HZ * 2)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
+#define IEEE80211_AUTH_WAIT_SAE_RETRY (HZ * 2)
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
@@ -666,7 +667,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
ieee80211_ie_build_he_6ghz_cap(sdata, skb);
}
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -686,6 +687,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_prep_tx_info info = {};
+ int ret;
/* we know it's writable, cast away the const */
if (assoc_data->ie_len)
@@ -699,7 +701,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
- return;
+ return -EINVAL;
}
chan = chanctx_conf->def.chan;
rcu_read_unlock();
@@ -750,7 +752,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
(iftd ? iftd->vendor_elems.len : 0),
GFP_KERNEL);
if (!skb)
- return;
+ return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -1031,15 +1033,22 @@ skip_rates:
skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
}
- if (assoc_data->fils_kek_len &&
- fils_encrypt_assoc_req(skb, assoc_data) < 0) {
- dev_kfree_skb(skb);
- return;
+ if (assoc_data->fils_kek_len) {
+ ret = fils_encrypt_assoc_req(skb, assoc_data);
+ if (ret < 0) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
pos = skb_tail_pointer(skb);
kfree(ifmgd->assoc_req_ies);
ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
+ if (!ifmgd->assoc_req_ies) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
ifmgd->assoc_req_ies_len = pos - ie_start;
drv_mgd_prepare_tx(local, sdata, &info);
@@ -1049,6 +1058,8 @@ skip_rates:
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_tx_skb(sdata, skb);
+
+ return 0;
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -3001,8 +3012,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
(status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED ||
(auth_transaction == 1 &&
(status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT ||
- status_code == WLAN_STATUS_SAE_PK))))
+ status_code == WLAN_STATUS_SAE_PK)))) {
+ /* waiting for userspace now */
+ ifmgd->auth_data->waiting = true;
+ ifmgd->auth_data->timeout =
+ jiffies + IEEE80211_AUTH_WAIT_SAE_RETRY;
+ ifmgd->auth_data->timeout_started = true;
+ run_again(sdata, ifmgd->auth_data->timeout);
goto notify_driver;
+ }
sdata_info(sdata, "%pM denied authentication (status %d)\n",
mgmt->sa, status_code);
@@ -4497,6 +4515,7 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
struct ieee80211_local *local = sdata->local;
+ int ret;
sdata_assert_lock(sdata);
@@ -4517,7 +4536,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata, "associate with %pM (try %d/%d)\n",
assoc_data->bss->bssid, assoc_data->tries,
IEEE80211_ASSOC_MAX_TRIES);
- ieee80211_send_assoc(sdata);
+ ret = ieee80211_send_assoc(sdata);
+ if (ret)
+ return ret;
if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
@@ -4590,10 +4611,10 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
if (ifmgd->auth_data && ifmgd->auth_data->timeout_started &&
time_after(jiffies, ifmgd->auth_data->timeout)) {
- if (ifmgd->auth_data->done) {
+ if (ifmgd->auth_data->done || ifmgd->auth_data->waiting) {
/*
- * ok ... we waited for assoc but userspace didn't,
- * so let's just kill the auth data
+ * ok ... we waited for assoc or continuation but
+ * userspace didn't do it, so kill the auth data
*/
ieee80211_destroy_auth_data(sdata, false);
} else if (ieee80211_auth(sdata)) {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 93680af62c47..48d9553dafe3 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2607,7 +2607,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
* address, so that the authenticator (e.g. hostapd) will see
* the frame, but bridge won't forward it anywhere else. Note
* that due to earlier filtering, the only other address can
- * be the PAE group address.
+ * be the PAE group address, unless the hardware allowed them
+ * through in 802.3 offloaded mode.
*/
if (unlikely(skb->protocol == sdata->control_port_protocol &&
!ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
@@ -2922,13 +2923,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
- ac = ieee80211_select_queue_80211(sdata, skb, hdr);
+ ac = ieee802_1d_to_ac[skb->priority];
q = sdata->vif.hw_queue[ac];
if (ieee80211_queue_stopped(&local->hw, q)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
return RX_DROP_MONITOR;
}
- skb_set_queue_mapping(skb, q);
+ skb_set_queue_mapping(skb, ac);
if (!--mesh_hdr->ttl) {
if (!is_multicast_ether_addr(hdr->addr1))
@@ -4514,12 +4515,7 @@ static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
/* deliver to local stack */
skb->protocol = eth_type_trans(skb, fast_rx->dev);
- memset(skb->cb, 0, sizeof(skb->cb));
- if (rx->list)
- list_add_tail(&skb->list, rx->list);
- else
- netif_receive_skb(skb);
-
+ ieee80211_deliver_skb_to_local_stack(skb, rx);
}
static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 8d9f4ff3e285..e52cef750500 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -412,13 +412,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
* this function.
*/
rc = mctp_key_add(key, msk);
- if (rc)
+ if (rc) {
kfree(key);
+ } else {
+ trace_mctp_key_acquire(key);
- trace_mctp_key_acquire(key);
-
- /* we don't need to release key->lock on exit */
- mctp_key_unref(key);
+ /* we don't need to release key->lock on exit */
+ mctp_key_unref(key);
+ }
key = NULL;
} else {
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 48f75a56f4ae..d6fdc5782d33 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1607,6 +1607,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mpls_dev *mdev;
unsigned int flags;
+ int err;
if (event == NETDEV_REGISTER) {
mdev = mpls_add_dev(dev);
@@ -1621,7 +1622,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_OK;
switch (event) {
- int err;
case NETDEV_DOWN:
err = mpls_ifdown(dev, event);
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 3240b72271a7..7558802a1435 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -35,12 +35,14 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD),
+ SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP),
SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX),
SNMP_MIB_ITEM("MPJoinPortSynAckRx", MPTCP_MIB_JOINPORTSYNACKRX),
SNMP_MIB_ITEM("MPJoinPortAckRx", MPTCP_MIB_JOINPORTACKRX),
SNMP_MIB_ITEM("MismatchPortSynRx", MPTCP_MIB_MISMATCHPORTSYNRX),
SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX),
SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
+ SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP),
SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX),
SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index ecd3d8b117e0..2966fcb6548b 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -28,12 +28,14 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */
MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */
MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */
+ MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */
MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */
MPTCP_MIB_JOINPORTSYNACKRX, /* Received a SYNACK MP_JOIN with a different port-number */
MPTCP_MIB_JOINPORTACKRX, /* Received an ACK MP_JOIN with a different port-number */
MPTCP_MIB_MISMATCHPORTSYNRX, /* Received a SYN MP_JOIN with a mismatched port-number */
MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */
MPTCP_MIB_RMADDR, /* Received RM_ADDR */
+ MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */
MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */
MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 696b2c4613a7..7bea318ac5f2 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -213,6 +213,8 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
mptcp_pm_add_addr_send_ack(msk);
} else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
pm->remote = *addr;
+ } else {
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
}
spin_unlock_bh(&pm->lock);
@@ -253,8 +255,10 @@ void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
mptcp_event_addr_removed(msk, rm_list->ids[i]);
spin_lock_bh(&pm->lock);
- mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
- pm->rm_list_rx = *rm_list;
+ if (mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED))
+ pm->rm_list_rx = *rm_list;
+ else
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_RMADDRDROP);
spin_unlock_bh(&pm->lock);
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 782b1d452269..4b5d795383cd 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -546,6 +546,16 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
local = select_signal_address(pernet, msk);
+ /* due to racing events on both ends we can reach here while
+ * previous add address is still running: if we invoke now
+ * mptcp_pm_announce_addr(), that will fail and the
+ * corresponding id will be marked as used.
+ * Instead let the PM machinery reschedule us when the
+ * current address announce will be completed.
+ */
+ if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
+ return;
+
if (local) {
if (mptcp_pm_alloc_anno_list(msk, local)) {
__clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
@@ -650,6 +660,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
unsigned int subflows_max;
+ bool reset_port = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -659,15 +670,19 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
msk->pm.add_addr_accepted, add_addr_accept_max,
msk->pm.remote.family);
- if (lookup_subflow_by_daddr(&msk->conn_list, &msk->pm.remote))
+ remote = msk->pm.remote;
+ if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
goto add_addr_echo;
+ /* pick id 0 port, if none is provided the remote address */
+ if (!remote.port) {
+ reset_port = true;
+ remote.port = sk->sk_dport;
+ }
+
/* connect to the specified remote address, using whatever
* local address the routing configuration will pick.
*/
- remote = msk->pm.remote;
- if (!remote.port)
- remote.port = sk->sk_dport;
nr = fill_local_addresses_vec(msk, addrs);
msk->pm.add_addr_accepted++;
@@ -680,8 +695,12 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
__mptcp_subflow_connect(sk, &addrs[i], &remote);
spin_lock_bh(&msk->pm.lock);
+ /* be sure to echo exactly the received address */
+ if (reset_port)
+ remote.port = 0;
+
add_addr_echo:
- mptcp_pm_announce_addr(msk, &msk->pm.remote, true);
+ mptcp_pm_announce_addr(msk, &remote, true);
mptcp_pm_nl_addr_send_ack(msk);
}
@@ -925,6 +944,7 @@ out:
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
struct mptcp_pm_addr_entry *entry)
{
+ int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
struct mptcp_sock *msk;
struct socket *ssock;
@@ -949,8 +969,11 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
}
mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
- err = kernel_bind(ssock, (struct sockaddr *)&addr,
- sizeof(struct sockaddr_in));
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (entry->addr.family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+#endif
+ err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
if (err) {
pr_warn("kernel_bind error, err=%d", err);
goto out;
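The kernel_bind() fix above boils down to passing an address length that matches the socket family. A userspace sketch of the same point with the standard sockets API; the loopback address and port 0 are arbitrary, and the first bind fails because an AF_INET6 bind rejects a sockaddr_in-sized length.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        struct sockaddr_storage addr;
        struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)&addr;
        socklen_t addrlen = sizeof(struct sockaddr_in);
        int fd = socket(AF_INET6, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;

        memset(&addr, 0, sizeof(addr));
        a6->sin6_family = AF_INET6;
        a6->sin6_addr = in6addr_loopback;
        a6->sin6_port = htons(0);

        /* the bug: AF_INET-sized length for an AF_INET6 socket */
        if (bind(fd, (struct sockaddr *)&addr, addrlen) < 0)
                perror("bind with sockaddr_in length");

        /* the fix: pick the length from the address family */
        addrlen = sizeof(struct sockaddr_in6);
        if (bind(fd, (struct sockaddr *)&addr, addrlen) == 0)
                printf("bind with sockaddr_in6 length: ok\n");

        close(fd);
        return 0;
}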
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index f60f01b14fac..1c72f25f083e 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -466,9 +466,12 @@ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
static void mptcp_set_datafin_timeout(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 retransmits;
- mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX,
- TCP_RTO_MIN << icsk->icsk_retransmits);
+ retransmits = min_t(u32, icsk->icsk_retransmits,
+ ilog2(TCP_RTO_MAX / TCP_RTO_MIN));
+
+ mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits;
}
static void __mptcp_set_timeout(struct sock *sk, long tout)
@@ -3294,6 +3297,17 @@ static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
return 0;
delta = msk->write_seq - v;
+ if (__mptcp_check_fallback(msk) && msk->first) {
+ struct tcp_sock *tp = tcp_sk(msk->first);
+
+ /* the first subflow is disconnected after close - see
+ * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
+ * so ignore that status, too.
+ */
+ if (!((1 << msk->first->sk_state) &
+ (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
+ delta += READ_ONCE(tp->write_seq) - tp->snd_una;
+ }
if (delta > INT_MAX)
delta = INT_MAX;
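A runnable sketch of the mptcp_set_datafin_timeout() change: clamp the shift count before shifting, instead of shifting first and clamping a result that may already have overflowed. HZ and the RTO constants mirror common kernel values but are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define HZ              1000u
#define TCP_RTO_MIN     (HZ / 5)        /* 200 ms in jiffies */
#define TCP_RTO_MAX     (120 * HZ)      /* 120 s in jiffies  */

static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        uint32_t retransmits = 40;      /* large enough to overflow a shift */
        uint32_t limit = ilog2_u32(TCP_RTO_MAX / TCP_RTO_MIN);
        uint32_t capped = retransmits;

        /* old approach: TCP_RTO_MIN << 40 overflows before min() runs */

        /* new approach: bound the shift count first */
        if (capped > limit)
                capped = limit;
        printf("timer_ival = %u jiffies (shift capped at %u)\n",
               TCP_RTO_MIN << capped, limit);
        return 0;
}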
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 354cb472f386..8a77a3fd69bc 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -428,14 +428,15 @@ static int __nf_register_net_hook(struct net *net, int pf,
p = nf_entry_dereference(*pp);
new_hooks = nf_hook_entries_grow(p, reg);
- if (!IS_ERR(new_hooks))
+ if (!IS_ERR(new_hooks)) {
+ hooks_validate(new_hooks);
rcu_assign_pointer(*pp, new_hooks);
+ }
mutex_unlock(&nf_hook_mutex);
if (IS_ERR(new_hooks))
return PTR_ERR(new_hooks);
- hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
if (nf_ingress_hook(reg, pf))
net_inc_ingress_queue();
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ac438370f94a..7032402ffd33 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2311,7 +2311,8 @@ ctnetlink_create_conntrack(struct net *net,
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
- /* not in hash table yet so not strictly necessary */
+ /* disable helper auto-assignment for this entry */
+ ct->status |= IPS_HELPER;
RCU_INIT_POINTER(help->helper, helper);
}
} else {
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 2394238d01c9..5a936334b517 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -489,6 +489,15 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
pr_debug("Setting vtag %x for dir %d\n",
ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
+
+ /* don't renew timeout on init retransmit so
+ * port reuse by client or NAT middlebox cannot
+ * keep entry alive indefinitely (incl. nat info).
+ */
+ if (new_state == SCTP_CONNTRACK_CLOSED &&
+ old_state == SCTP_CONNTRACK_CLOSED &&
+ nf_ct_is_confirmed(ct))
+ ignore = true;
}
ct->proto.sctp.state = new_state;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index af5115e127cf..d1582b888c0d 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -446,6 +446,32 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
+static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+ struct ip_ct_tcp_state *receiver,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct tcphdr *tcph,
+ u32 end, u32 win)
+{
+ /* SYN-ACK in reply to a SYN
+ * or SYN from reply direction in simultaneous open.
+ */
+ sender->td_end =
+ sender->td_maxend = end;
+ sender->td_maxwin = (win == 0 ? 1 : win);
+
+ tcp_options(skb, dataoff, tcph, sender);
+ /* RFC 1323:
+ * Both sides must send the Window Scale option
+ * to enable window scaling in either direction.
+ */
+ if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+ receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
+ sender->td_scale = 0;
+ receiver->td_scale = 0;
+ }
+}
+
static bool tcp_in_window(struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned int index,
@@ -499,24 +525,9 @@ static bool tcp_in_window(struct nf_conn *ct,
* Initialize sender data.
*/
if (tcph->syn) {
- /*
- * SYN-ACK in reply to a SYN
- * or SYN from reply direction in simultaneous open.
- */
- sender->td_end =
- sender->td_maxend = end;
- sender->td_maxwin = (win == 0 ? 1 : win);
-
- tcp_options(skb, dataoff, tcph, sender);
- /*
- * RFC 1323:
- * Both sides must send the Window Scale option
- * to enable window scaling in either direction.
- */
- if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
- && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
- sender->td_scale =
- receiver->td_scale = 0;
+ tcp_init_sender(sender, receiver,
+ skb, dataoff, tcph,
+ end, win);
if (!tcph->ack)
/* Simultaneous open */
return true;
@@ -560,6 +571,18 @@ static bool tcp_in_window(struct nf_conn *ct,
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
+ } else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
+ state->state == TCP_CONNTRACK_SYN_SENT) {
+ /* Retransmitted syn-ack, or syn (simultaneous open).
+ *
+ * Re-init state for this direction, just like for the first
+ * syn(-ack) reply, it might differ in seq, ack or tcp options.
+ */
+ tcp_init_sender(sender, receiver,
+ skb, dataoff, tcph,
+ end, win);
+ if (!tcph->ack)
+ return true;
}
if (!(tcph->ack)) {
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index b561e0a44a45..fc4265acd9c4 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -110,7 +110,11 @@ static int nf_flow_rule_match(struct nf_flow_match *match,
nf_flow_rule_lwt_match(match, tun_info);
}
- key->meta.ingress_ifindex = tuple->iifidx;
+ if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
+ key->meta.ingress_ifindex = tuple->tc.iifidx;
+ else
+ key->meta.ingress_ifindex = tuple->iifidx;
+
mask->meta.ingress_ifindex = 0xffffffff;
if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 6d12afabfe8a..63d1516816b1 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -46,6 +46,15 @@ void nf_unregister_queue_handler(void)
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
+static void nf_queue_sock_put(struct sock *sk)
+{
+#ifdef CONFIG_INET
+ sock_gen_put(sk);
+#else
+ sock_put(sk);
+#endif
+}
+
static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
@@ -54,7 +63,7 @@ static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
dev_put(state->in);
dev_put(state->out);
if (state->sk)
- sock_put(state->sk);
+ nf_queue_sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dev_put(entry->physin);
@@ -87,19 +96,21 @@ static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
}
/* Bump dev refs so they don't vanish while packet is out */
-void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
+ if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
+ return false;
+
dev_hold(state->in);
dev_hold(state->out);
- if (state->sk)
- sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
dev_hold(entry->physin);
dev_hold(entry->physout);
#endif
+ return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
@@ -169,6 +180,18 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
break;
}
+ if (skb_sk_is_prefetched(skb)) {
+ struct sock *sk = skb->sk;
+
+ if (!sk_is_refcounted(sk)) {
+ if (!refcount_inc_not_zero(&sk->sk_refcnt))
+ return -ENOTCONN;
+
+ /* drop refcount on skb_orphan */
+ skb->destructor = sock_edemux;
+ }
+ }
+
entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
if (!entry)
return -ENOMEM;
@@ -187,7 +210,10 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
__nf_queue_entry_init_physdevs(entry);
- nf_queue_entry_get_refs(entry);
+ if (!nf_queue_entry_get_refs(entry)) {
+ kfree(entry);
+ return -ENOTCONN;
+ }
switch (entry->state.pf) {
case AF_INET:
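nf_queue_entry_get_refs() now takes the socket reference with refcount_inc_not_zero(), so a socket that is already being released is not resurrected. A standalone model of that primitive using C11 atomics; this is a sketch of the idea, not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
        atomic_uint refcnt;
};

/* mirrors refcount_inc_not_zero(): fail instead of reviving a dead object */
static bool get_ref_not_zero(struct obj *o)
{
        unsigned int old = atomic_load(&o->refcnt);

        do {
                if (old == 0)
                        return false;   /* object already being freed */
        } while (!atomic_compare_exchange_weak(&o->refcnt, &old, old + 1));

        return true;
}

int main(void)
{
        struct obj live = { .refcnt = 1 };
        struct obj dead = { .refcnt = 0 };

        printf("live: %d, dead: %d\n",
               get_ref_not_zero(&live), get_ref_not_zero(&dead));
        return 0;
}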
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index cf454f8ca2b0..c86748b3873b 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2011,7 +2011,6 @@ static void nft_last_rule(struct nft_rule_blob *blob, const void *ptr)
prule = (struct nft_rule_dp *)ptr;
prule->is_last = 1;
- ptr += offsetof(struct nft_rule_dp, data);
/* blob size does not include the trailer rule */
}
@@ -4503,7 +4502,7 @@ static void nft_set_catchall_destroy(const struct nft_ctx *ctx,
list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
list_del_rcu(&catchall->list);
nft_set_elem_destroy(set, catchall->elem, true);
- kfree_rcu(catchall);
+ kfree_rcu(catchall, rcu);
}
}
@@ -5670,7 +5669,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
if (catchall->elem == elem->priv) {
list_del_rcu(&catchall->list);
- kfree_rcu(catchall);
+ kfree_rcu(catchall, rcu);
break;
}
}
@@ -6552,12 +6551,15 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
{
struct nft_object *newobj;
struct nft_trans *trans;
- int err;
+ int err = -ENOMEM;
+
+ if (!try_module_get(type->owner))
+ return -ENOENT;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
sizeof(struct nft_trans_obj));
if (!trans)
- return -ENOMEM;
+ goto err_trans;
newobj = nft_obj_init(ctx, type, attr);
if (IS_ERR(newobj)) {
@@ -6574,6 +6576,8 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
err_free_trans:
kfree(trans);
+err_trans:
+ module_put(type->owner);
return err;
}
@@ -8186,7 +8190,7 @@ static void nft_obj_commit_update(struct nft_trans *trans)
if (obj->ops->update)
obj->ops->update(obj, newobj);
- kfree(newobj);
+ nft_obj_destroy(&trans->ctx, newobj);
}
static void nft_commit_release(struct nft_trans *trans)
@@ -8977,7 +8981,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
break;
case NFT_MSG_NEWOBJ:
if (nft_trans_obj_update(trans)) {
- kfree(nft_trans_obj_newobj(trans));
+ nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
nft_trans_destroy(trans);
} else {
trans->ctx.table->use--;
@@ -9637,10 +9641,13 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain);
static void __nft_release_hook(struct net *net, struct nft_table *table)
{
+ struct nft_flowtable *flowtable;
struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list)
nf_tables_unregister_hook(net, table, chain);
+ list_for_each_entry(flowtable, &table->flowtables, list)
+ nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list);
}
static void __nft_release_hooks(struct net *net)
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 9656c1646222..2d36952b1392 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -94,7 +94,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
expr = nft_expr_first(rule);
while (nft_expr_more(rule, expr)) {
- if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
+ if (expr->ops->offload_action &&
+ expr->ops->offload_action(expr))
num_actions++;
expr = nft_expr_next(expr);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index ea2d9c2a44cf..64a6acb6aeae 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -710,9 +710,15 @@ static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
- if (entry)
- nf_queue_entry_get_refs(entry);
- return entry;
+
+ if (!entry)
+ return NULL;
+
+ if (nf_queue_entry_get_refs(entry))
+ return entry;
+
+ kfree(entry);
+ return NULL;
}
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
index 9d5947ab8d4e..e646e9ee4a98 100644
--- a/net/netfilter/nft_byteorder.c
+++ b/net/netfilter/nft_byteorder.c
@@ -167,12 +167,24 @@ nla_put_failure:
return -1;
}
+static bool nft_byteorder_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ struct nft_byteorder *priv = nft_expr_priv(expr);
+
+ track->regs[priv->dreg].selector = NULL;
+ track->regs[priv->dreg].bitwise = NULL;
+
+ return false;
+}
+
static const struct nft_expr_ops nft_byteorder_ops = {
.type = &nft_byteorder_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
.eval = nft_byteorder_eval,
.init = nft_byteorder_init,
.dump = nft_byteorder_dump,
+ .reduce = nft_byteorder_reduce,
};
struct nft_expr_type nft_byteorder_type __read_mostly = {
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 518d96c8c247..5adf8bb628a8 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -260,9 +260,12 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
ct = this_cpu_read(nft_ct_pcpu_template);
if (likely(refcount_read(&ct->ct_general.use) == 1)) {
+ refcount_inc(&ct->ct_general.use);
nf_ct_zone_add(ct, &zone);
} else {
- /* previous skb got queued to userspace */
+ /* previous skb got queued to userspace, allocate temporary
+ * one until percpu template can be reused.
+ */
ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
if (!ct) {
regs->verdict.code = NF_DROP;
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index bbf3fcba3df4..5b5c607fbf83 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -67,6 +67,11 @@ static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
}
+static bool nft_dup_netdev_offload_action(const struct nft_expr *expr)
+{
+ return true;
+}
+
static struct nft_expr_type nft_dup_netdev_type;
static const struct nft_expr_ops nft_dup_netdev_ops = {
.type = &nft_dup_netdev_type,
@@ -75,6 +80,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
.init = nft_dup_netdev_init,
.dump = nft_dup_netdev_dump,
.offload = nft_dup_netdev_offload,
+ .offload_action = nft_dup_netdev_offload_action,
};
static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index dbe1f2e7dd9e..9e927ab4df15 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -167,7 +167,7 @@ nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
{
struct tcphdr *tcph;
- if (pkt->tprot != IPPROTO_TCP)
+ if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)
return NULL;
tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index fa9301ca6033..619e394a91de 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -79,6 +79,11 @@ static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
}
+static bool nft_fwd_netdev_offload_action(const struct nft_expr *expr)
+{
+ return true;
+}
+
struct nft_fwd_neigh {
u8 sreg_dev;
u8 sreg_addr;
@@ -222,6 +227,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
.dump = nft_fwd_netdev_dump,
.validate = nft_fwd_validate,
.offload = nft_fwd_netdev_offload,
+ .offload_action = nft_fwd_netdev_offload_action,
};
static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 90c64d27ae53..d0f67d325bdf 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -213,6 +213,16 @@ static int nft_immediate_offload(struct nft_offload_ctx *ctx,
return 0;
}
+static bool nft_immediate_offload_action(const struct nft_expr *expr)
+{
+ const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+ if (priv->dreg == NFT_REG_VERDICT)
+ return true;
+
+ return false;
+}
+
static const struct nft_expr_ops nft_imm_ops = {
.type = &nft_imm_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
@@ -224,7 +234,7 @@ static const struct nft_expr_ops nft_imm_ops = {
.dump = nft_immediate_dump,
.validate = nft_immediate_validate,
.offload = nft_immediate_offload,
- .offload_flags = NFT_OFFLOAD_F_ACTION,
+ .offload_action = nft_immediate_offload_action,
};
struct nft_expr_type nft_imm_type __read_mostly = {
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index c4f308460dd1..a726b623963d 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -340,11 +340,20 @@ static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
}
+static void nft_limit_obj_pkts_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
+
+ nft_limit_destroy(ctx, &priv->limit);
+}
+
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_pkts_ops = {
.type = &nft_limit_obj_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
.init = nft_limit_obj_pkts_init,
+ .destroy = nft_limit_obj_pkts_destroy,
.eval = nft_limit_obj_pkts_eval,
.dump = nft_limit_obj_pkts_dump,
};
@@ -378,11 +387,20 @@ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
}
+static void nft_limit_obj_bytes_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_limit_priv *priv = nft_obj_data(obj);
+
+ nft_limit_destroy(ctx, priv);
+}
+
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_bytes_ops = {
.type = &nft_limit_obj_type,
.size = sizeof(struct nft_limit_priv),
.init = nft_limit_obj_bytes_init,
+ .destroy = nft_limit_obj_bytes_destroy,
.eval = nft_limit_obj_bytes_eval,
.dump = nft_limit_obj_bytes_dump,
};
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 940fed9a760b..5cc06aef4345 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -83,7 +83,7 @@ static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
unsigned int thoff = nft_thoff(pkt);
- if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
return -1;
switch (pkt->tprot) {
@@ -147,7 +147,7 @@ void nft_payload_eval(const struct nft_expr *expr,
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
- if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
@@ -688,7 +688,7 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
- if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
@@ -728,7 +728,8 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
pkt->tprot == IPPROTO_SCTP &&
skb->ip_summed != CHECKSUM_PARTIAL) {
- if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
+ if (pkt->fragoff == 0 &&
+ nft_payload_csum_sctp(skb, nft_thoff(pkt)))
goto err;
}
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index a0109fa1e92d..1133e06f3c40 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -191,8 +191,10 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
if (err)
goto nf_ct_failure;
err = nf_synproxy_ipv6_init(snet, ctx->net);
- if (err)
+ if (err) {
+ nf_synproxy_ipv4_fini(snet, ctx->net);
goto nf_ct_failure;
+ }
break;
}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 5e6459e11605..7013f55f05d1 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -220,8 +220,10 @@ static void socket_mt_destroy(const struct xt_mtdtor_param *par)
{
if (par->family == NFPROTO_IPV4)
nf_defrag_ipv4_disable(par->net);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
else if (par->family == NFPROTO_IPV6)
- nf_defrag_ipv4_disable(par->net);
+ nf_defrag_ipv6_disable(par->net);
+#endif
}
static struct xt_match socket_mt_reg[] __read_mostly = {
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 076774034bb9..780d9e2246f3 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -423,12 +423,43 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
memcpy(addr, new_addr, sizeof(__be32[4]));
}
-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
+static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
+ u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
+
+ ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
+ (__force __wsum)(ipv6_tclass << 12));
+
+ ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
+}
+
+static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
+{
+ u32 ofl;
+
+ ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
+ fl = OVS_MASKED(ofl, fl, mask);
+
/* Bits 21-24 are always unmasked, so this retains their values. */
- OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
- OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
- OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+ nh->flow_lbl[0] = (u8)(fl >> 16);
+ nh->flow_lbl[1] = (u8)(fl >> 8);
+ nh->flow_lbl[2] = (u8)fl;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
+}
+
+static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
+{
+ new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
+ (__force __wsum)(new_ttl << 8));
+ nh->hop_limit = new_ttl;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
@@ -546,18 +577,17 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
}
}
if (mask->ipv6_tclass) {
- ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+ set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
flow_key->ip.tos = ipv6_get_dsfield(nh);
}
if (mask->ipv6_label) {
- set_ipv6_fl(nh, ntohl(key->ipv6_label),
+ set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
ntohl(mask->ipv6_label));
flow_key->ipv6.label =
*(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
}
if (mask->ipv6_hlimit) {
- OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
- mask->ipv6_hlimit);
+ set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
flow_key->ip.ttl = nh->hop_limit;
}
return 0;
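The new set_ipv6_dsfield()/set_ipv6_ttl()/set_ipv6_fl() helpers keep skb->csum in sync by patching it with csum_replace() instead of recomputing it. A self-contained demonstration of the underlying incremental update (RFC 1624) on an ordinary 16-bit Internet checksum; the buffer contents are arbitrary example data.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t csum_full(const uint16_t *data, size_t words)
{
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < words; i++)
                sum += data[i];
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* RFC 1624 eqn. 3: HC' = ~(~HC + ~m + m') */
static uint16_t csum_replace16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~old_val;
        sum += new_val;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint16_t buf[4] = { 0x1234, 0xabcd, 0x00ff, 0x6001 };
        uint16_t check = csum_full(buf, 4);

        buf[3] = 0x6e01;        /* e.g. a traffic-class byte changed */
        printf("incremental %04x, recomputed %04x\n",
               csum_replace16(check, 0x6001, buf[3]), csum_full(buf, 4));
        return 0;
}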
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 85ea7ddb48db..ab87f22cc7ec 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1789,7 +1789,10 @@ static int fanout_add(struct sock *sk, struct fanout_args *args)
err = -ENOSPC;
if (refcount_read(&match->sk_ref) < match->max_num_members) {
__dev_remove_pack(&po->prot_hook);
- po->fanout = match;
+
+ /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
+ WRITE_ONCE(po->fanout, match);
+
po->rollover = rollover;
rollover = NULL;
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
@@ -3934,7 +3937,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
}
case PACKET_FANOUT_DATA:
{
- if (!po->fanout)
+ /* Paired with the WRITE_ONCE() in fanout_add() */
+ if (!READ_ONCE(po->fanout))
return -EINVAL;
return fanout_set_data(po, optval, optlen);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 32563cef85bf..ca03e7284254 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -274,7 +274,7 @@ static int tcf_action_offload_add_ex(struct tc_action *action,
err = tc_setup_action(&fl_action->action, actions);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "Failed to setup tc actions for offload\n");
+ "Failed to setup tc actions for offload");
goto fl_err;
}
@@ -1037,6 +1037,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
restart_act_graph:
for (i = 0; i < nr_actions; i++) {
const struct tc_action *a = actions[i];
+ int repeat_ttl;
if (jmp_prgcnt > 0) {
jmp_prgcnt -= 1;
@@ -1045,11 +1046,17 @@ restart_act_graph:
if (tc_act_skip_sw(a->tcfa_flags))
continue;
+
+ repeat_ttl = 32;
repeat:
ret = a->ops->act(skb, a, res);
- if (ret == TC_ACT_REPEAT)
- goto repeat; /* we need a ttl - JHS */
-
+ if (unlikely(ret == TC_ACT_REPEAT)) {
+ if (--repeat_ttl != 0)
+ goto repeat;
+ /* suspicious opcode, stop pipeline */
+ net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
+ return TC_ACT_OK;
+ }
if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
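The tcf_action_exec() change bounds how many times a TC_ACT_REPEAT result is honoured. A minimal sketch of the same bounded-retry shape; fake_action() is a stand-in, not a real tc action.

#include <stdio.h>

enum { ACT_OK, ACT_REPEAT };

static int fake_action(void)
{
        return ACT_REPEAT;      /* keeps asking to be re-run forever */
}

int main(void)
{
        int repeat_ttl = 32;
        int ret;

repeat:
        ret = fake_action();
        if (ret == ACT_REPEAT) {
                if (--repeat_ttl != 0)
                        goto repeat;
                fprintf(stderr, "repeat budget exhausted, stopping pipeline\n");
                ret = ACT_OK;
        }
        printf("final verdict: %d\n", ret);
        return 0;
}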
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index f99247fc6468..ec19f625863a 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -361,6 +361,13 @@ static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
}
}
+static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
+ struct nf_conn_act_ct_ext *act_ct_ext, u8 dir)
+{
+ entry->tuplehash[dir].tuple.xmit_type = FLOW_OFFLOAD_XMIT_TC;
+ entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
+}
+
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
struct nf_conn *ct,
bool tcp)
@@ -385,10 +392,8 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
act_ct_ext = nf_conn_act_ct_ext_find(ct);
if (act_ct_ext) {
- entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
- act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
- entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
- act_ct_ext->ifindex[IP_CT_DIR_REPLY];
+ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+ tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
}
err = flow_offload_add(&ct_ft->nf_ft, entry);
@@ -533,11 +538,6 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
struct nf_conn *ct;
u8 dir;
- /* Previously seen or loopback */
- ct = nf_ct_get(skb, &ctinfo);
- if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
- return false;
-
switch (family) {
case NFPROTO_IPV4:
if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d4e27c679123..5ce1208a6ea3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1044,7 +1044,7 @@ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
/* Find qdisc */
if (!*parent) {
- *q = dev->qdisc;
+ *q = rcu_dereference(dev->qdisc);
*parent = (*q)->handle;
} else {
*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
@@ -1945,9 +1945,9 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
bool prio_allocate;
u32 parent;
u32 chain_index;
- struct Qdisc *q = NULL;
+ struct Qdisc *q;
struct tcf_chain_info chain_info;
- struct tcf_chain *chain = NULL;
+ struct tcf_chain *chain;
struct tcf_block *block;
struct tcf_proto *tp;
unsigned long cl;
@@ -1976,6 +1976,8 @@ replay:
tp = NULL;
cl = 0;
block = NULL;
+ q = NULL;
+ chain = NULL;
flags = 0;
if (prio == 0) {
@@ -2585,7 +2587,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
parent = tcm->tcm_parent;
if (!parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
@@ -2798,8 +2800,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
struct tcmsg *t;
u32 parent;
u32 chain_index;
- struct Qdisc *q = NULL;
- struct tcf_chain *chain = NULL;
+ struct Qdisc *q;
+ struct tcf_chain *chain;
struct tcf_block *block;
unsigned long cl;
int err;
@@ -2809,6 +2811,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
return -EPERM;
replay:
+ q = NULL;
err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
rtm_tca_policy, extack);
if (err < 0)
@@ -2959,7 +2962,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
if (!tcm->tcm_parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 179825a3b2fd..e3c0e8ea2dbb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -301,7 +301,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
if (q)
goto out;
@@ -320,7 +320,7 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
if (q)
goto out;
@@ -1082,10 +1082,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
skip:
if (!ingress) {
notify_and_destroy(net, skb, n, classid,
- dev->qdisc, new);
+ rtnl_dereference(dev->qdisc), new);
if (new && !new->ops->attach)
qdisc_refcount_inc(new);
- dev->qdisc = new ? : &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
if (new && new->ops->attach)
new->ops->attach(new);
@@ -1451,7 +1451,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
if (!q) {
NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
@@ -1540,7 +1540,7 @@ replay:
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
/* It may be default qdisc, ignore it */
@@ -1762,7 +1762,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+ if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
+ skb, cb, &q_idx, s_q_idx,
true, tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
@@ -2033,7 +2034,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
} else if (qid1) {
qid = qid1;
} else if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
/* Now qid is genuine qdisc handle consistent
* both with parent and child.
@@ -2044,7 +2045,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
portid = TC_H_MAKE(qid, portid);
} else {
if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
}
/* OK. Locate qdisc */
@@ -2205,7 +2206,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
t = 0;
- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
+ if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
+ skb, tcm, cb, &t, s_t, true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f893d9a81b01..5bab9f8b8f45 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1164,30 +1164,33 @@ static void attach_default_qdiscs(struct net_device *dev)
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
if (qdisc) {
- dev->qdisc = qdisc;
+ rcu_assign_pointer(dev->qdisc, qdisc);
qdisc->ops->attach(qdisc);
}
}
+ qdisc = rtnl_dereference(dev->qdisc);
/* Detect default qdisc setup/init failed and fallback to "noqueue" */
- if (dev->qdisc == &noop_qdisc) {
+ if (qdisc == &noop_qdisc) {
netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
default_qdisc_ops->id, noqueue_qdisc_ops.id);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
dev->priv_flags ^= IFF_NO_QUEUE;
}
#ifdef CONFIG_NET_SCHED
- if (dev->qdisc != &noop_qdisc)
- qdisc_hash_add(dev->qdisc, false);
+ if (qdisc != &noop_qdisc)
+ qdisc_hash_add(qdisc, false);
#endif
}
@@ -1217,7 +1220,7 @@ void dev_activate(struct net_device *dev)
* and noqueue_qdisc for virtual interfaces
*/
- if (dev->qdisc == &noop_qdisc)
+ if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
@@ -1383,7 +1386,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev,
void dev_qdisc_change_real_num_tx(struct net_device *dev,
unsigned int new_real_tx)
{
- struct Qdisc *qdisc = dev->qdisc;
+ struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
if (qdisc->ops->change_real_num_tx)
qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
@@ -1447,7 +1450,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev)
{
- dev->qdisc = &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
@@ -1475,8 +1478,8 @@ void dev_shutdown(struct net_device *dev)
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
- qdisc_put(dev->qdisc);
- dev->qdisc = &noop_qdisc;
+ qdisc_put(rtnl_dereference(dev->qdisc));
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
WARN_ON(timer_pending(&dev->watchdog_timer));
}
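The hunks above convert every plain read and write of dev->qdisc into rcu_assign_pointer(), rtnl_dereference() or rcu_dereference(), so lockless readers always observe a fully initialised root qdisc. Below is a minimal userspace sketch of the publish/consume ordering behind that pattern, using C11 atomics instead of the kernel's RCU macros; every name in it is invented for illustration, and real RCU additionally provides grace periods for safe reclamation, which the sketch does not model.

/* Publish a fully initialised object with release ordering and read it
 * with acquire ordering -- a userspace stand-in for rcu_assign_pointer()
 * and rcu_dereference().  Hypothetical types and names throughout.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct qdisc_like {
    unsigned int handle;
    int refcnt;
};

static _Atomic(struct qdisc_like *) root_qdisc;

static void publish_qdisc(struct qdisc_like *q)
{
    /* All stores that initialised *q happen-before the pointer is visible. */
    atomic_store_explicit(&root_qdisc, q, memory_order_release);
}

static struct qdisc_like *read_qdisc(void)
{
    /* Pairs with the release store in publish_qdisc(). */
    return atomic_load_explicit(&root_qdisc, memory_order_acquire);
}

int main(void)
{
    struct qdisc_like *q = malloc(sizeof(*q));

    if (!q)
        return 1;
    q->handle = 0x8001;
    q->refcnt = 1;
    publish_qdisc(q);

    struct qdisc_like *seen = read_qdisc();

    printf("handle %#x refcnt %d\n", seen->handle, seen->refcnt);
    free(q);
    return 0;
}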
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index d5ea62b82bb8..284befa90967 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -183,7 +183,7 @@ static int smc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct smc_sock *smc;
- int rc = 0;
+ int old_state, rc = 0;
if (!sk)
goto out;
@@ -191,8 +191,10 @@ static int smc_release(struct socket *sock)
sock_hold(sk); /* sock_put below */
smc = smc_sk(sk);
+ old_state = sk->sk_state;
+
/* cleanup for a dangling non-blocking connect */
- if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
+ if (smc->connect_nonblock && old_state == SMC_INIT)
tcp_abort(smc->clcsock->sk, ECONNABORTED);
if (cancel_work_sync(&smc->connect_work))
@@ -206,6 +208,10 @@ static int smc_release(struct socket *sock)
else
lock_sock(sk);
+ if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
+ !smc->use_fallback)
+ smc_close_active_abort(smc);
+
rc = __smc_release(smc);
/* detach socket */
@@ -566,17 +572,118 @@ static void smc_stat_fallback(struct smc_sock *smc)
mutex_unlock(&net->smc.mutex_fback_rsn);
}
+/* must be called under rcu read lock */
+static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
+{
+ struct socket_wq *wq;
+ __poll_t flags;
+
+ wq = rcu_dereference(smc->sk.sk_wq);
+ if (!skwq_has_sleeper(wq))
+ return;
+
+ /* wake up smc sk->sk_wq */
+ if (!key) {
+ /* sk_state_change */
+ wake_up_interruptible_all(&wq->wait);
+ } else {
+ flags = key_to_poll(key);
+ if (flags & (EPOLLIN | EPOLLOUT))
+ /* sk_data_ready or sk_write_space */
+ wake_up_interruptible_sync_poll(&wq->wait, flags);
+ else if (flags & EPOLLERR)
+ /* sk_error_report */
+ wake_up_interruptible_poll(&wq->wait, flags);
+ }
+}
+
+static int smc_fback_mark_woken(wait_queue_entry_t *wait,
+ unsigned int mode, int sync, void *key)
+{
+ struct smc_mark_woken *mark =
+ container_of(wait, struct smc_mark_woken, wait_entry);
+
+ mark->woken = true;
+ mark->key = key;
+ return 0;
+}
+
+static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
+ void (*clcsock_callback)(struct sock *sk))
+{
+ struct smc_mark_woken mark = { .woken = false };
+ struct socket_wq *wq;
+
+ init_waitqueue_func_entry(&mark.wait_entry,
+ smc_fback_mark_woken);
+ rcu_read_lock();
+ wq = rcu_dereference(clcsk->sk_wq);
+ if (!wq)
+ goto out;
+ add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
+ clcsock_callback(clcsk);
+ remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
+
+ if (mark.woken)
+ smc_fback_wakeup_waitqueue(smc, mark.key);
+out:
+ rcu_read_unlock();
+}
+
+static void smc_fback_state_change(struct sock *clcsk)
+{
+ struct smc_sock *smc =
+ smc_clcsock_user_data(clcsk);
+
+ if (!smc)
+ return;
+ smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
+}
+
+static void smc_fback_data_ready(struct sock *clcsk)
+{
+ struct smc_sock *smc =
+ smc_clcsock_user_data(clcsk);
+
+ if (!smc)
+ return;
+ smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
+}
+
+static void smc_fback_write_space(struct sock *clcsk)
+{
+ struct smc_sock *smc =
+ smc_clcsock_user_data(clcsk);
+
+ if (!smc)
+ return;
+ smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
+}
+
+static void smc_fback_error_report(struct sock *clcsk)
+{
+ struct smc_sock *smc =
+ smc_clcsock_user_data(clcsk);
+
+ if (!smc)
+ return;
+ smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
+}
+
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
- wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
- wait_queue_head_t *clc_wait;
- unsigned long flags;
+ struct sock *clcsk;
+ int rc = 0;
mutex_lock(&smc->clcsock_release_lock);
if (!smc->clcsock) {
- mutex_unlock(&smc->clcsock_release_lock);
- return -EBADF;
+ rc = -EBADF;
+ goto out;
}
+ clcsk = smc->clcsock->sk;
+
+ if (smc->use_fallback)
+ goto out;
smc->use_fallback = true;
smc->fallback_rsn = reason_code;
smc_stat_fallback(smc);
@@ -587,19 +694,26 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
smc->clcsock->wq.fasync_list =
smc->sk.sk_socket->wq.fasync_list;
- /* There may be some entries remaining in
- * smc socket->wq, which should be removed
- * to clcsocket->wq during the fallback.
+ /* There might be some wait entries remaining
+ * in smc sk->sk_wq and they should be woken up
+ * as clcsock's wait queue is woken up.
*/
- clc_wait = sk_sleep(smc->clcsock->sk);
- spin_lock_irqsave(&smc_wait->lock, flags);
- spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
- list_splice_init(&smc_wait->head, &clc_wait->head);
- spin_unlock(&clc_wait->lock);
- spin_unlock_irqrestore(&smc_wait->lock, flags);
+ smc->clcsk_state_change = clcsk->sk_state_change;
+ smc->clcsk_data_ready = clcsk->sk_data_ready;
+ smc->clcsk_write_space = clcsk->sk_write_space;
+ smc->clcsk_error_report = clcsk->sk_error_report;
+
+ clcsk->sk_state_change = smc_fback_state_change;
+ clcsk->sk_data_ready = smc_fback_data_ready;
+ clcsk->sk_write_space = smc_fback_write_space;
+ clcsk->sk_error_report = smc_fback_error_report;
+
+ smc->clcsock->sk->sk_user_data =
+ (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
}
+out:
mutex_unlock(&smc->clcsock_release_lock);
- return 0;
+ return rc;
}
/* fall back during connect */
@@ -2115,10 +2229,9 @@ out:
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
- struct smc_sock *lsmc;
+ struct smc_sock *lsmc =
+ smc_clcsock_user_data(listen_clcsock);
- lsmc = (struct smc_sock *)
- ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
if (!lsmc)
return;
lsmc->clcsk_data_ready(listen_clcsock);
@@ -2974,12 +3087,14 @@ static int __init smc_init(void)
rc = tcp_register_ulp(&smc_ulp_ops);
if (rc) {
pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
- goto out_sock;
+ goto out_ib;
}
static_branch_enable(&tcp_have_smc);
return 0;
+out_ib:
+ smc_ib_unregister_client();
out_sock:
sock_unregister(PF_SMC);
out_proto6:
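The fallback rework above saves the clcsock's original sk_state_change, sk_data_ready, sk_write_space and sk_error_report handlers, then installs smc_fback_*() wrappers that call the saved handler and forward any wakeup to the SMC socket's own wait queue. The save-and-wrap shape of that change, reduced to a runnable userspace sketch with invented types (nothing below is SMC or kernel API):

/* Save-and-wrap callback pattern: stash the original handler, install a
 * wrapper that calls it and then forwards the event upward.
 */
#include <stdio.h>

struct fake_sock {
    void (*data_ready)(struct fake_sock *sk);
    void *user_data;
};

struct wrapper_ctx {
    void (*saved_data_ready)(struct fake_sock *sk);  /* original handler */
};

static void original_data_ready(struct fake_sock *sk)
{
    printf("lower socket: data ready\n");
}

static void forwarding_data_ready(struct fake_sock *sk)
{
    struct wrapper_ctx *ctx = sk->user_data;

    ctx->saved_data_ready(sk);                   /* keep original behaviour */
    printf("upper socket: forwarded wakeup\n");  /* e.g. wake own wait queue */
}

int main(void)
{
    struct fake_sock low = { .data_ready = original_data_ready };
    struct wrapper_ctx ctx = { .saved_data_ready = low.data_ready };

    low.user_data = &ctx;
    low.data_ready = forwarding_data_ready;      /* install the wrapper */
    low.data_ready(&low);                        /* an event arrives */
    return 0;
}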
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 3d0b8e300deb..37b2001a0255 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -139,6 +139,12 @@ enum smc_urg_state {
SMC_URG_READ = 3, /* data was already read */
};
+struct smc_mark_woken {
+ bool woken;
+ void *key;
+ wait_queue_entry_t wait_entry;
+};
+
struct smc_connection {
struct rb_node alert_node;
struct smc_link_group *lgr; /* link group of connection */
@@ -228,8 +234,14 @@ struct smc_connection {
struct smc_sock { /* smc sock container */
struct sock sk;
struct socket *clcsock; /* internal tcp socket */
+ void (*clcsk_state_change)(struct sock *sk);
+ /* original stat_change fct. */
void (*clcsk_data_ready)(struct sock *sk);
- /* original data_ready fct. **/
+ /* original data_ready fct. */
+ void (*clcsk_write_space)(struct sock *sk);
+ /* original write_space fct. */
+ void (*clcsk_error_report)(struct sock *sk);
+ /* original error_report fct. */
struct smc_connection conn; /* smc connection */
struct smc_sock *listen_smc; /* listen parent */
struct work_struct connect_work; /* handle non-blocking connect*/
@@ -264,6 +276,12 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
return (struct smc_sock *)sk;
}
+static inline struct smc_sock *smc_clcsock_user_data(struct sock *clcsk)
+{
+ return (struct smc_sock *)
+ ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
+}
+
extern struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
extern struct workqueue_struct *smc_close_wq; /* wq for close work */
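The new smc_clcsock_user_data() helper recovers the smc_sock pointer from sk_user_data by masking off the SK_USER_DATA_NOCOPY flag that was OR-ed into the low bits when the pointer was stored. The same pointer-tagging idiom in plain, runnable C; the flag value and the struct are made up for the example.

/* Pointer tagging: the low bit of a suitably aligned pointer doubles as
 * a flag and must be masked off before dereferencing.
 */
#include <stdint.h>
#include <stdio.h>

#define NOCOPY_FLAG 0x1UL

struct owner {
    int id;
};

static void *tag_pointer(struct owner *o)
{
    return (void *)((uintptr_t)o | NOCOPY_FLAG);
}

static struct owner *untag_pointer(void *tagged)
{
    return (struct owner *)((uintptr_t)tagged & ~NOCOPY_FLAG);
}

int main(void)
{
    static struct owner self = { .id = 42 };  /* static storage => aligned, bit 0 clear */
    void *user_data = tag_pointer(&self);

    printf("flag set: %lu, id: %d\n",
           (unsigned long)((uintptr_t)user_data & NOCOPY_FLAG),
           untag_pointer(user_data)->id);
    return 0;
}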
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 29525d03b253..be7d704976ff 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1161,8 +1161,8 @@ void smc_conn_free(struct smc_connection *conn)
cancel_work_sync(&conn->abort_work);
}
if (!list_empty(&lgr->list)) {
- smc_lgr_unregister_conn(conn);
smc_buf_unuse(conn, lgr); /* allow buffer reuse */
+ smc_lgr_unregister_conn(conn);
}
if (!lgr->conns_num)
@@ -1864,7 +1864,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
(ini->smcd_version == SMC_V2 ||
lgr->vlan_id == ini->vlan_id) &&
(role == SMC_CLNT || ini->is_smcd ||
- lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
+ (lgr->conns_num < SMC_RMBS_PER_LGR_MAX &&
+ !bitmap_full(lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX)))) {
/* link group found */
ini->first_contact_local = 0;
conn->lgr = lgr;
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index b8898c787d23..1fca2f90a9c7 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -146,13 +146,11 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
(req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
struct smc_link *link = smc->conn.lnk;
- struct net *net = read_pnet(&link->smcibdev->ibdev->coredev.rdma_net);
struct smc_diag_lgrinfo linfo = {
.role = smc->conn.lgr->role,
.lnk[0].ibport = link->ibport,
.lnk[0].link_id = link->link_id,
- .lnk[0].net_cookie = net->net_cookie,
};
memcpy(linfo.lnk[0].ibname,
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 291f1484a1b7..29f0a559d884 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -113,7 +113,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
pnettable = &sn->pnettable;
/* remove table entry */
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist,
list) {
if (!pnet_name ||
@@ -131,7 +131,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
rc = 0;
}
}
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
/* if this is not the initial namespace, stop here */
if (net != &init_net)
@@ -192,7 +192,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
sn = net_generic(net, smc_net_id);
pnettable = &sn->pnettable;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
!strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
@@ -206,7 +206,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
break;
}
}
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -224,7 +224,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
sn = net_generic(net, smc_net_id);
pnettable = &sn->pnettable;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
@@ -237,7 +237,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
break;
}
}
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -368,12 +368,9 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
new_pe->type = SMC_PNET_ETH;
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
- new_pe->ndev = ndev;
- if (ndev)
- netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL);
rc = -EEXIST;
new_netdev = true;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_ETH &&
!strncmp(tmp_pe->eth_name, eth_name, IFNAMSIZ)) {
@@ -382,10 +379,15 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
}
}
if (new_netdev) {
+ if (ndev) {
+ new_pe->ndev = ndev;
+ netdev_tracker_alloc(ndev, &new_pe->dev_tracker,
+ GFP_ATOMIC);
+ }
list_add_tail(&new_pe->list, &pnettable->pnetlist);
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
} else {
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
kfree(new_pe);
goto out_put;
}
@@ -446,7 +448,7 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
new_pe->ib_port = ib_port;
new_ibdev = true;
- write_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
!strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
@@ -456,9 +458,9 @@ static int smc_pnet_add_ib(struct smc_pnettable *pnettable, char *ib_name,
}
if (new_ibdev) {
list_add_tail(&new_pe->list, &pnettable->pnetlist);
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
} else {
- write_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
kfree(new_pe);
}
return (new_ibdev) ? 0 : -EEXIST;
@@ -603,7 +605,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
pnettable = &sn->pnettable;
/* dump pnettable entries */
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
if (pnetid && !smc_pnet_match(pnetelem->pnet_name, pnetid))
continue;
@@ -618,7 +620,7 @@ static int _smc_pnet_dump(struct net *net, struct sk_buff *skb, u32 portid,
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return idx;
}
@@ -862,7 +864,7 @@ int smc_pnet_net_init(struct net *net)
struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
INIT_LIST_HEAD(&pnettable->pnetlist);
- rwlock_init(&pnettable->lock);
+ mutex_init(&pnettable->lock);
INIT_LIST_HEAD(&pnetids_ndev->list);
rwlock_init(&pnetids_ndev->lock);
@@ -942,7 +944,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
sn = net_generic(net, smc_net_id);
pnettable = &sn->pnettable;
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(pnetelem, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && ndev == pnetelem->ndev) {
/* get pnetid of netdev device */
@@ -951,7 +953,7 @@ static int smc_pnet_find_ndev_pnetid_by_table(struct net_device *ndev,
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -1154,7 +1156,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
sn = net_generic(&init_net, smc_net_id);
pnettable = &sn->pnettable;
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
!strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX) &&
@@ -1164,7 +1166,7 @@ int smc_pnetid_by_table_ib(struct smc_ib_device *smcibdev, u8 ib_port)
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
@@ -1183,7 +1185,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
sn = net_generic(&init_net, smc_net_id);
pnettable = &sn->pnettable;
- read_lock(&pnettable->lock);
+ mutex_lock(&pnettable->lock);
list_for_each_entry(tmp_pe, &pnettable->pnetlist, list) {
if (tmp_pe->type == SMC_PNET_IB &&
!strncmp(tmp_pe->ib_name, ib_name, IB_DEVICE_NAME_MAX)) {
@@ -1192,7 +1194,7 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcddev)
break;
}
}
- read_unlock(&pnettable->lock);
+ mutex_unlock(&pnettable->lock);
return rc;
}
diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
index 14039272f7e4..80a88eea4949 100644
--- a/net/smc/smc_pnet.h
+++ b/net/smc/smc_pnet.h
@@ -29,7 +29,7 @@ struct smc_link_group;
* @pnetlist: List of PNETIDs
*/
struct smc_pnettable {
- rwlock_t lock;
+ struct mutex lock;
struct list_head pnetlist;
};
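The smc_pnet hunks replace the pnettable rwlock with a mutex, i.e. a sleepable lock held across the list walks and insertions. A toy userspace version of a mutex-protected table walk is shown below; the names are illustrative only and nothing here is SMC API.

/* Mutex-protected table walk: the whole lookup runs under a sleepable lock. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct pnet_entry {
    char name[16];
    struct pnet_entry *next;
};

static struct pnet_entry first_entry = { .name = "PNETA" };
static struct pnet_entry *table = &first_entry;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int table_contains(const char *name)
{
    struct pnet_entry *e;
    int found = 0;

    pthread_mutex_lock(&table_lock);   /* critical section may sleep under a mutex */
    for (e = table; e; e = e->next) {
        if (!strcmp(e->name, name)) {
            found = 1;
            break;
        }
    }
    pthread_mutex_unlock(&table_lock);
    return found;
}

int main(void)
{
    printf("PNETA present: %d\n", table_contains("PNETA"));
    printf("PNETB present: %d\n", table_contains("PNETB"));
    return 0;
}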
diff --git a/net/socket.c b/net/socket.c
index 50cf75730fd7..982eecad464c 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3448,7 +3448,7 @@ EXPORT_SYMBOL(kernel_connect);
* @addr: address holder
*
* Fills the @addr pointer with the address which the socket is bound.
- * Returns 0 or an error code.
+ * Returns the length of the address in bytes or an error code.
*/
int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
@@ -3463,7 +3463,7 @@ EXPORT_SYMBOL(kernel_getsockname);
* @addr: address holder
*
* Fills the @addr pointer with the address which the socket is connected.
- * Returns 0 or an error code.
+ * Returns the length of the address in bytes or an error code.
*/
int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index b64a0286b182..05c758da6a92 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -115,11 +115,14 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
}
sock = container_of(xprt, struct sock_xprt, xprt);
- if (kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
+ mutex_lock(&sock->recv_mutex);
+ if (sock->sock == NULL ||
+ kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
goto out;
ret = sprintf(buf, "%pISc\n", &saddr);
out:
+ mutex_unlock(&sock->recv_mutex);
xprt_put(xprt);
return ret + 1;
}
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index f172d1298013..7b5fce2faa10 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -413,6 +413,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
IB_POLL_WORKQUEUE);
if (IS_ERR(ep->re_attr.send_cq)) {
rc = PTR_ERR(ep->re_attr.send_cq);
+ ep->re_attr.send_cq = NULL;
goto out_destroy;
}
@@ -421,6 +422,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
IB_POLL_WORKQUEUE);
if (IS_ERR(ep->re_attr.recv_cq)) {
rc = PTR_ERR(ep->re_attr.recv_cq);
+ ep->re_attr.recv_cq = NULL;
goto out_destroy;
}
ep->re_receive_count = 0;
@@ -459,6 +461,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
ep->re_pd = ib_alloc_pd(device, 0);
if (IS_ERR(ep->re_pd)) {
rc = PTR_ERR(ep->re_pd);
+ ep->re_pd = NULL;
goto out_destroy;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 69b6ee5a5fd1..0f39e08ee580 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1641,7 +1641,12 @@ static int xs_get_srcport(struct sock_xprt *transport)
unsigned short get_srcport(struct rpc_xprt *xprt)
{
struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
- return xs_sock_getport(sock->sock);
+ unsigned short ret = 0;
+ mutex_lock(&sock->recv_mutex);
+ if (sock->sock)
+ ret = xs_sock_getport(sock->sock);
+ mutex_unlock(&sock->recv_mutex);
+ return ret;
}
EXPORT_SYMBOL(get_srcport);
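get_srcport() above now takes recv_mutex and re-checks sock->sock for NULL before using it, so a teardown that clears the socket concurrently can no longer be dereferenced. The same guarded-accessor shape as a small runnable sketch; the types and field names are invented.

/* Guarded accessor: take the lock, re-check the pointer, fall back to a
 * default value if the resource is already gone.
 */
#include <pthread.h>
#include <stdio.h>

struct conn {
    pthread_mutex_t lock;
    int *port;                  /* may be cleared by teardown */
};

static unsigned short conn_srcport(struct conn *c)
{
    unsigned short ret = 0;

    pthread_mutex_lock(&c->lock);
    if (c->port)                /* only dereference while holding the lock */
        ret = (unsigned short)*c->port;
    pthread_mutex_unlock(&c->lock);
    return ret;
}

int main(void)
{
    int port = 2049;
    struct conn c = { .lock = PTHREAD_MUTEX_INITIALIZER, .port = &port };

    printf("src port %u\n", conn_srcport(&c));
    c.port = NULL;              /* simulated teardown */
    printf("src port %u\n", conn_srcport(&c));
    return 0;
}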
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 9325479295b8..f09316a9035f 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -2276,7 +2276,7 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
struct tipc_aead_key *skey = NULL;
u16 key_gen = msg_key_gen(hdr);
- u16 size = msg_data_sz(hdr);
+ u32 size = msg_data_sz(hdr);
u8 *data = msg_data(hdr);
unsigned int keylen;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 8d9e09f48f4c..1e14d7f8f28f 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_gap_ack_blks *ga = NULL;
bool reply = msg_probe(hdr), retransmitted = false;
- u16 dlen = msg_data_sz(hdr), glen = 0;
+ u32 dlen = msg_data_sz(hdr), glen = 0;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
@@ -2214,6 +2214,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
void *data;
trace_tipc_proto_rcv(skb, false, l->name);
+
+ if (dlen > U16_MAX)
+ goto exit;
+
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
@@ -2309,7 +2313,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Receive Gap ACK blocks from peer if any */
glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
-
+ if (glen > dlen)
+ break;
tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
&l->mon_state, l->bearer_id);
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 407619697292..2f4d23238a7e 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -496,6 +496,8 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
state->probing = false;
/* Sanity check received domain record */
+ if (new_member_cnt > MAX_MON_DOMAIN)
+ return;
if (dlen < dom_rec_len(arrv_dom, 0))
return;
if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
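The tipc hunks above add explicit bounds checks on values taken from the packet itself: the payload length is rejected above U16_MAX, the Gap ACK block length must fit inside the payload, and the advertised domain member count is capped before the record is trusted. A generic userspace sketch of that style of validation follows; the record layout and limits are invented.

/* Never trust a count or length carried inside the packet: bound it by a
 * protocol maximum and by the number of bytes that actually arrived.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_MEMBERS 64U

static int parse_dom_rec(const uint8_t *data, size_t dlen)
{
    uint16_t member_cnt;

    if (dlen < sizeof(member_cnt))
        return -1;
    memcpy(&member_cnt, data, sizeof(member_cnt));

    if (member_cnt > MAX_MEMBERS)                     /* protocol bound */
        return -1;
    if (dlen < sizeof(member_cnt) +
               (size_t)member_cnt * sizeof(uint32_t)) /* bytes that really arrived */
        return -1;
    return member_cnt;
}

int main(void)
{
    uint8_t buf[2] = { 0xff, 0xff };  /* claims 65535 members but carries none */

    printf("parse result: %d\n", parse_dom_rec(buf, sizeof(buf)));
    return 0;
}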
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index bda902caa814..8267b751a526 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -313,7 +313,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
ua.sr.type, ua.sr.lower, node);
} else {
- pr_warn("Unrecognized name table message received\n");
+ pr_warn_ratelimited("Unknown name table message received\n");
}
return false;
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 01396dd1c899..1d8ba233d047 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -967,7 +967,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
list_for_each_entry(p, &sr->all_publ, all_publ)
if (p->key == *last_key)
break;
- if (p->key != *last_key)
+ if (list_entry_is_head(p, &sr->all_publ, all_publ))
return -EPIPE;
} else {
p = list_first_entry(&sr->all_publ,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 9947b7dfe1d2..6ef95ce565bd 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -403,7 +403,7 @@ static void tipc_node_write_unlock(struct tipc_node *n)
u32 flags = n->action_flags;
struct list_head *publ_list;
struct tipc_uaddr ua;
- u32 bearer_id;
+ u32 bearer_id, node;
if (likely(!flags)) {
write_unlock_bh(&n->lock);
@@ -413,7 +413,8 @@ static void tipc_node_write_unlock(struct tipc_node *n)
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
TIPC_LINK_STATE, n->addr, n->addr);
sk.ref = n->link_id;
- sk.node = n->addr;
+ sk.node = tipc_own_addr(net);
+ node = n->addr;
bearer_id = n->link_id & 0xffff;
publ_list = &n->publ_list;
@@ -423,17 +424,17 @@ static void tipc_node_write_unlock(struct tipc_node *n)
write_unlock_bh(&n->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
+ tipc_publ_notify(net, publ_list, node, n->capabilities);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, sk.node, n->capabilities);
+ tipc_named_node_up(net, node, n->capabilities);
if (flags & TIPC_NOTIFY_LINK_UP) {
- tipc_mon_peer_up(net, sk.node, bearer_id);
+ tipc_mon_peer_up(net, node, bearer_id);
tipc_nametbl_publish(net, &ua, &sk, sk.ref);
}
if (flags & TIPC_NOTIFY_LINK_DOWN) {
- tipc_mon_peer_down(net, sk.node, bearer_id);
+ tipc_mon_peer_down(net, node, bearer_id);
tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
}
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3e63c83e641c..7545321c3440 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -3749,7 +3749,7 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
if (p->key == *last_publ)
break;
}
- if (p->key != *last_publ) {
+ if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
/* We never set seq or call nl_dump_check_consistent()
* this means that setting prev_seq here will cause the
* consistence check to fail in the netlink callback
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 3235261f138d..38baeb189d4e 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1401,6 +1401,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
+ vsock_remove_connected(vsk);
goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 1e9be50469ce..527ae669f6f7 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -33,7 +33,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
) > $@
-$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDI) \
+$(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR) \
$(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR)/*.x509)
@$(kecho) " GEN $@"
$(Q)(set -e; \
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3a54c8e6b6c6..f08d4b3bb148 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -332,29 +332,20 @@ static void cfg80211_event_work(struct work_struct *work)
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev, *tmp;
- bool found = false;
ASSERT_RTNL();
- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
if (wdev->nl_owner_dead) {
if (wdev->netdev)
dev_close(wdev->netdev);
- found = true;
- }
- }
-
- if (!found)
- return;
- wiphy_lock(&rdev->wiphy);
- list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
- if (wdev->nl_owner_dead) {
+ wiphy_lock(&rdev->wiphy);
cfg80211_leave(rdev, wdev);
rdev_del_virtual_intf(rdev, wdev);
+ wiphy_unlock(&rdev->wiphy);
}
}
- wiphy_unlock(&rdev->wiphy);
}
static void cfg80211_destroy_iface_wk(struct work_struct *work)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 578bff9c378b..c01fbcc848e8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13411,6 +13411,9 @@ static int handle_nan_filter(struct nlattr *attr_filter,
i = 0;
nla_for_each_nested(attr, attr_filter, rem) {
filter[i].filter = nla_memdup(attr, GFP_KERNEL);
+ if (!filter[i].filter)
+ goto err;
+
filter[i].len = nla_len(attr);
i++;
}
@@ -13423,6 +13426,15 @@ static int handle_nan_filter(struct nlattr *attr_filter,
}
return 0;
+
+err:
+ i = 0;
+ nla_for_each_nested(attr, attr_filter, rem) {
+ kfree(filter[i].filter);
+ i++;
+ }
+ kfree(filter);
+ return -ENOMEM;
}
static int nl80211_nan_add_func(struct sk_buff *skb,
@@ -17816,7 +17828,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
wdev->chandef = *chandef;
wdev->preset_chandef = *chandef;
- if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ if ((wdev->iftype == NL80211_IFTYPE_STATION ||
+ wdev->iftype == NL80211_IFTYPE_P2P_CLIENT) &&
!WARN_ON(!wdev->current_bss))
cfg80211_update_assoc_bss_entry(wdev, chandef->chan);
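handle_nan_filter() above gains an error path that, when one nla_memdup() fails, frees the entries already copied and then the filter array itself. The same unwind-on-partial-failure pattern in plain C, with all names invented:

/* If one allocation in the loop fails, release everything allocated so
 * far plus the container before returning.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct filter {
    char *data;
    size_t len;
};

static struct filter *build_filters(const char **src, size_t n)
{
    struct filter *f = calloc(n, sizeof(*f));
    size_t i;

    if (!f)
        return NULL;

    for (i = 0; i < n; i++) {
        f[i].len = strlen(src[i]);
        f[i].data = malloc(f[i].len + 1);
        if (!f[i].data)
            goto err;
        memcpy(f[i].data, src[i], f[i].len + 1);
    }
    return f;

err:
    while (i--)                 /* free only what was actually allocated */
        free(f[i].data);
    free(f);
    return NULL;
}

int main(void)
{
    const char *src[] = { "aa", "bb" };
    struct filter *f = build_filters(src, 2);

    if (f) {
        printf("built filters: %s, %s\n", f[0].data, f[1].data);
        free(f[0].data);
        free(f[1].data);
        free(f);
    }
    return 0;
}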
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 3fa066419d37..39bce5d764de 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -223,6 +223,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
if (x->encap || x->tfcpad)
return -EINVAL;
+ if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
+ return -EINVAL;
+
dev = dev_get_by_index(net, xuo->ifindex);
if (!dev) {
if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
@@ -262,7 +265,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
xso->real_dev = dev;
xso->num_exthdrs = 1;
- xso->flags = xuo->flags;
+ /* Don't forward bit that is not implemented */
+ xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;
err = dev->xfrmdev_ops->xdo_dev_state_add(x);
if (err) {
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 57448fc519fc..4e3c62d1ad9e 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -673,12 +673,12 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
struct net *net = xi->net;
struct xfrm_if_parms p = {};
+ xfrmi_netlink_parms(data, &p);
if (!p.if_id) {
NL_SET_ERR_MSG(extack, "if_id must be non zero");
return -EINVAL;
}
- xfrmi_netlink_parms(data, &p);
xi = xfrmi_locate(net, &p);
if (!xi) {
xi = netdev_priv(dev);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 04d1ce9b510f..882526159d3a 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -4256,7 +4256,7 @@ static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
}
static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
- u8 dir, u8 type, struct net *net)
+ u8 dir, u8 type, struct net *net, u32 if_id)
{
struct xfrm_policy *pol, *ret = NULL;
struct hlist_head *chain;
@@ -4265,7 +4265,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
hlist_for_each_entry(pol, chain, bydst) {
- if (xfrm_migrate_selector_match(sel, &pol->selector) &&
+ if ((if_id == 0 || pol->if_id == if_id) &&
+ xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
priority = ret->priority;
@@ -4277,7 +4278,8 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
if ((pol->priority >= priority) && ret)
break;
- if (xfrm_migrate_selector_match(sel, &pol->selector) &&
+ if ((if_id == 0 || pol->if_id == if_id) &&
+ xfrm_migrate_selector_match(sel, &pol->selector) &&
pol->type == type) {
ret = pol;
break;
@@ -4393,7 +4395,7 @@ static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_migrate *m, int num_migrate,
struct xfrm_kmaddress *k, struct net *net,
- struct xfrm_encap_tmpl *encap)
+ struct xfrm_encap_tmpl *encap, u32 if_id)
{
int i, err, nx_cur = 0, nx_new = 0;
struct xfrm_policy *pol = NULL;
@@ -4412,14 +4414,14 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
}
/* Stage 1 - find policy */
- if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
+ if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
err = -ENOENT;
goto out;
}
/* Stage 2 - find and update state(s) */
for (i = 0, mp = m; i < num_migrate; i++, mp++) {
- if ((x = xfrm_migrate_state_find(mp, net))) {
+ if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
x_cur[nx_cur] = x;
nx_cur++;
xc = xfrm_state_migrate(x, mp, encap);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index ca6bee18346d..b749935152ba 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1579,9 +1579,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
memcpy(&x->mark, &orig->mark, sizeof(x->mark));
memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
- if (xfrm_init_state(x) < 0)
- goto error;
-
x->props.flags = orig->props.flags;
x->props.extra_flags = orig->props.extra_flags;
@@ -1606,7 +1603,8 @@ out:
return NULL;
}
-struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
+ u32 if_id)
{
unsigned int h;
struct xfrm_state *x = NULL;
@@ -1622,6 +1620,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
continue;
if (m->reqid && x->props.reqid != m->reqid)
continue;
+ if (if_id != 0 && x->if_id != if_id)
+ continue;
if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
m->old_family) ||
!xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
@@ -1637,6 +1637,8 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
if (x->props.mode != m->mode ||
x->id.proto != m->proto)
continue;
+ if (if_id != 0 && x->if_id != if_id)
+ continue;
if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
m->old_family) ||
!xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
@@ -1663,6 +1665,11 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
if (!xc)
return NULL;
+ xc->props.family = m->new_family;
+
+ if (xfrm_init_state(xc) < 0)
+ goto error;
+
memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
@@ -2572,7 +2579,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);
-u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
+u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
const struct xfrm_type *type = READ_ONCE(x->type);
struct crypto_aead *aead;
@@ -2603,17 +2610,7 @@ u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
net_adj) & ~(blksize - 1)) + net_adj - 2;
}
-EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
-
-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
-{
- mtu = __xfrm_state_mtu(x, mtu);
-
- if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
- return IPV6_MIN_MTU;
-
- return mtu;
-}
+EXPORT_SYMBOL_GPL(xfrm_state_mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8cd6c8129004..a4fb596e87af 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2608,6 +2608,7 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
int n = 0;
struct net *net = sock_net(skb->sk);
struct xfrm_encap_tmpl *encap = NULL;
+ u32 if_id = 0;
if (attrs[XFRMA_MIGRATE] == NULL)
return -EINVAL;
@@ -2632,7 +2633,10 @@ static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
return -ENOMEM;
}
- err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap);
+ if (attrs[XFRMA_IF_ID])
+ if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+
+ err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
kfree(encap);
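The xfrm changes thread a new if_id argument through xfrm_migrate(), xfrm_migrate_policy_find() and xfrm_migrate_state_find(), where zero keeps the old "match anything" behaviour and any other value restricts the lookup to that interface id. The optional-key comparison on its own, as a tiny runnable sketch with a made-up policy struct:

/* Optional key: zero means "no constraint", anything else must match exactly. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct policy {
    uint32_t if_id;
};

static bool policy_matches(const struct policy *pol, uint32_t if_id)
{
    return if_id == 0 || pol->if_id == if_id;
}

int main(void)
{
    struct policy pol = { .if_id = 7 };

    printf("wildcard: %d, exact: %d, mismatch: %d\n",
           policy_matches(&pol, 0),
           policy_matches(&pol, 7),
           policy_matches(&pol, 9));
    return 0;
}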
diff --git a/samples/seccomp/dropper.c b/samples/seccomp/dropper.c
index cc0648eb389e..4bca4b70f665 100644
--- a/samples/seccomp/dropper.c
+++ b/samples/seccomp/dropper.c
@@ -25,7 +25,7 @@
#include <sys/prctl.h>
#include <unistd.h>
-static int install_filter(int nr, int arch, int error)
+static int install_filter(int arch, int nr, int error)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
@@ -42,6 +42,10 @@ static int install_filter(int nr, int arch, int error)
.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
.filter = filter,
};
+ if (error == -1) {
+ struct sock_filter kill = BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL);
+ filter[4] = kill;
+ }
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("prctl(NO_NEW_PRIVS)");
return 1;
@@ -57,9 +61,10 @@ int main(int argc, char **argv)
{
if (argc < 5) {
fprintf(stderr, "Usage:\n"
- "dropper <syscall_nr> <arch> <errno> <prog> [<args>]\n"
+ "dropper <arch> <syscall_nr> <errno> <prog> [<args>]\n"
"Hint: AUDIT_ARCH_I386: 0x%X\n"
" AUDIT_ARCH_X86_64: 0x%X\n"
+ " errno == -1 means SECCOMP_RET_KILL\n"
"\n", AUDIT_ARCH_I386, AUDIT_ARCH_X86_64);
return 1;
}
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index d53825503874..8be892887d71 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -51,6 +51,7 @@ KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += -Wno-format-zero-length
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
endif
endif
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 59717be31210..d3c3a61308ad 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -979,10 +979,10 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
fprintf(out, "\n$(deps_config): ;\n");
- if (ferror(out)) /* error check for all fprintf() calls */
- return -1;
-
+ ret = ferror(out); /* error check for all fprintf() calls */
fclose(out);
+ if (ret)
+ return -1;
if (rename(tmp, name)) {
perror("rename");
@@ -994,14 +994,19 @@ static int conf_write_autoconf_cmd(const char *autoconf_name)
static int conf_touch_deps(void)
{
- const char *name;
+ const char *name, *tmp;
struct symbol *sym;
int res, i;
- strcpy(depfile_path, "include/config/");
- depfile_prefix_len = strlen(depfile_path);
-
name = conf_get_autoconfig_name();
+ tmp = strrchr(name, '/');
+ depfile_prefix_len = tmp ? tmp - name + 1 : 0;
+ if (depfile_prefix_len + 1 > sizeof(depfile_path))
+ return -1;
+
+ strncpy(depfile_path, name, depfile_prefix_len);
+ depfile_path[depfile_prefix_len] = 0;
+
conf_read_simple(name, S_DEF_AUTO);
sym_calc_value(modules_sym);
@@ -1093,10 +1098,10 @@ static int __conf_write_autoconf(const char *filename,
print_symbol(file, sym);
/* check possible errors in conf_write_heading() and print_symbol() */
- if (ferror(file))
- return -1;
-
+ ret = ferror(file);
fclose(file);
+ if (ret)
+ return -1;
if (rename(tmp, filename)) {
perror("rename");
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 0590f86df6e4..748da578b418 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -141,7 +141,7 @@ static char *do_lineno(int argc, char *argv[])
static char *do_shell(int argc, char *argv[])
{
FILE *p;
- char buf[256];
+ char buf[4096];
char *cmd;
size_t nread;
int i;
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index 23240d793b07..895f4b9ce8c6 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -109,22 +109,25 @@ int asymmetric_verify(struct key *keyring, const char *sig,
pk = asymmetric_key_public_key(key);
pks.pkey_algo = pk->pkey_algo;
- if (!strcmp(pk->pkey_algo, "rsa"))
+ if (!strcmp(pk->pkey_algo, "rsa")) {
pks.encoding = "pkcs1";
- else if (!strncmp(pk->pkey_algo, "ecdsa-", 6))
+ } else if (!strncmp(pk->pkey_algo, "ecdsa-", 6)) {
/* edcsa-nist-p192 etc. */
pks.encoding = "x962";
- else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
- !strcmp(pk->pkey_algo, "sm2"))
+ } else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
+ !strcmp(pk->pkey_algo, "sm2")) {
pks.encoding = "raw";
- else
- return -ENOPKG;
+ } else {
+ ret = -ENOPKG;
+ goto out;
+ }
pks.digest = (u8 *)data;
pks.digest_size = datalen;
pks.s = hdr->sig;
pks.s_size = siglen;
ret = verify_signature(key, &pks);
+out:
key_put(key);
pr_debug("%s() = %d\n", __func__, ret);
return ret;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 3d8e9d5db5aa..3ad8f7734208 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -496,12 +496,12 @@ int __init ima_fs_init(void)
return 0;
out:
+ securityfs_remove(ima_policy);
securityfs_remove(violations);
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
- securityfs_remove(ima_policy);
return -1;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 320ca80aacab..2a1f6418b10a 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -1967,6 +1967,14 @@ int ima_policy_show(struct seq_file *m, void *v)
rcu_read_lock();
+ /* Do not print rules with inactive LSM labels */
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].args_p && !entry->lsm[i].rule) {
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+
if (entry->action & MEASURE)
seq_puts(m, pt(Opt_measure));
if (entry->action & DONT_MEASURE)
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 694560396be0..db1ad6d7a57f 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -29,6 +29,7 @@ static struct ima_template_desc builtin_templates[] = {
static LIST_HEAD(defined_templates);
static DEFINE_SPINLOCK(template_list);
+static int template_setup_done;
static const struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
@@ -101,10 +102,11 @@ static int __init ima_template_setup(char *str)
struct ima_template_desc *template_desc;
int template_len = strlen(str);
- if (ima_template)
+ if (template_setup_done)
return 1;
- ima_init_template_list();
+ if (!ima_template)
+ ima_init_template_list();
/*
* Verify that a template with the supplied name exists.
@@ -128,6 +130,7 @@ static int __init ima_template_setup(char *str)
}
ima_template = template_desc;
+ template_setup_done = 1;
return 1;
}
__setup("ima_template=", ima_template_setup);
@@ -136,7 +139,7 @@ static int __init ima_template_fmt_setup(char *str)
{
int num_templates = ARRAY_SIZE(builtin_templates);
- if (ima_template)
+ if (template_setup_done)
return 1;
if (template_desc_init_fields(str, NULL, NULL) < 0) {
@@ -147,6 +150,7 @@ static int __init ima_template_fmt_setup(char *str)
builtin_templates[num_templates - 1].fmt = str;
ima_template = builtin_templates + num_templates - 1;
+ template_setup_done = 1;
return 1;
}
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
index 29220056207f..0ec5e4c22cb2 100644
--- a/security/integrity/integrity_audit.c
+++ b/security/integrity/integrity_audit.c
@@ -45,6 +45,8 @@ void integrity_audit_message(int audit_msgno, struct inode *inode,
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno);
+ if (!ab)
+ return;
audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
task_pid_nr(current),
from_kuid(&init_user_ns, current_uid()),
diff --git a/security/selinux/ima.c b/security/selinux/ima.c
index 727c4e43219d..ff7aea6b3774 100644
--- a/security/selinux/ima.c
+++ b/security/selinux/ima.c
@@ -77,7 +77,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
size_t policy_len;
int rc = 0;
- WARN_ON(!mutex_is_locked(&state->policy_mutex));
+ lockdep_assert_held(&state->policy_mutex);
state_str = selinux_ima_collect_state(state);
if (!state_str) {
@@ -117,7 +117,7 @@ void selinux_ima_measure_state_locked(struct selinux_state *state)
*/
void selinux_ima_measure_state(struct selinux_state *state)
{
- WARN_ON(mutex_is_locked(&state->policy_mutex));
+ lockdep_assert_not_held(&state->policy_mutex);
mutex_lock(&state->policy_mutex);
selinux_ima_measure_state_locked(state);
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 2ec6e5cd25d9..feb206f3acb4 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -152,6 +152,8 @@ static void cond_list_destroy(struct policydb *p)
for (i = 0; i < p->cond_list_len; i++)
cond_node_destroy(&p->cond_list[i]);
kfree(p->cond_list);
+ p->cond_list = NULL;
+ p->cond_list_len = 0;
}
void cond_policydb_destroy(struct policydb *p)
@@ -441,7 +443,6 @@ int cond_read_list(struct policydb *p, void *fp)
return 0;
err:
cond_list_destroy(p);
- p->cond_list = NULL;
return rc;
}
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index d1fcd1d5adae..6fd763d4d15b 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -511,7 +511,8 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
DEFAULT_GFP, 0);
if (!sgt)
return NULL;
- dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
+ sg_dma_address(sgt->sgl));
p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
if (p)
dmab->private_data = sgt;
@@ -540,9 +541,9 @@ static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
if (mode == SNDRV_DMA_SYNC_CPU) {
if (dmab->dev.dir == DMA_TO_DEVICE)
return;
+ invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
dmab->dev.dir);
- invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
} else {
if (dmab->dev.dir == DMA_FROM_DEVICE)
return;
@@ -671,9 +672,13 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
*/
static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
{
- dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
- return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
- dmab->dev.dir, DEFAULT_GFP);
+ void *p;
+
+ p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+ dmab->dev.dir, DEFAULT_GFP);
+ if (p)
+ dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
+ return p;
}
static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 621883e71194..a056b3ef3c84 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -172,6 +172,19 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream)
+{
+ unsigned long flags = 0;
+ if (substream->pcm->nonatomic)
+ mutex_lock_nested(&substream->self_group.mutex,
+ SINGLE_DEPTH_NESTING);
+ else
+ spin_lock_irqsave_nested(&substream->self_group.lock, flags,
+ SINGLE_DEPTH_NESTING);
+ return flags;
+}
+EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave_nested);
+
/**
* snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
* @substream: PCM substream
diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c
index b7758dbe2371..5cb92f7ccbca 100644
--- a/sound/hda/intel-sdw-acpi.c
+++ b/sound/hda/intel-sdw-acpi.c
@@ -50,11 +50,11 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, int i)
static int
sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
int ret, i;
u8 count;
- if (acpi_bus_get_device(info->handle, &adev))
+ if (!adev)
return -EINVAL;
/* Found controller, find links supported */
@@ -119,7 +119,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
void *cdata, void **return_value)
{
struct sdw_intel_acpi_info *info = cdata;
- struct acpi_device *adev;
acpi_status status;
u64 adr;
@@ -127,7 +126,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
if (ACPI_FAILURE(status))
return AE_OK; /* keep going */
- if (acpi_bus_get_device(handle, &adev)) {
+ if (!acpi_fetch_acpi_dev(handle)) {
pr_err("%s: Couldn't find ACPI handle\n", __func__);
return AE_NOT_FOUND;
}
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 82c492b05667..cd1db943b7e0 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -981,7 +981,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
int id = HDA_FIXUP_ID_NOT_SET;
const char *name = NULL;
const char *type = NULL;
- int vendor, device;
+ unsigned int vendor, device;
if (codec->fixup_id != HDA_FIXUP_ID_NOT_SET)
return;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 7016b48227bf..f552785d301e 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -3000,6 +3000,10 @@ void snd_hda_codec_shutdown(struct hda_codec *codec)
{
struct hda_pcm *cpcm;
+ /* Skip the shutdown if codec is not registered */
+ if (!codec->registered)
+ return;
+
list_for_each_entry(cpcm, &codec->pcm_list_head, list)
snd_pcm_suspend_all(cpcm->pcm);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 3bf5e3410703..fc114e522480 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -91,6 +91,12 @@ static void snd_hda_gen_spec_free(struct hda_gen_spec *spec)
free_kctls(spec);
snd_array_free(&spec->paths);
snd_array_free(&spec->loopback_list);
+#ifdef CONFIG_SND_HDA_GENERIC_LEDS
+ if (spec->led_cdevs[LED_AUDIO_MUTE])
+ led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MUTE]);
+ if (spec->led_cdevs[LED_AUDIO_MICMUTE])
+ led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MICMUTE]);
+#endif
}
/*
@@ -3922,7 +3928,10 @@ static int create_mute_led_cdev(struct hda_codec *codec,
enum led_brightness),
bool micmute)
{
+ struct hda_gen_spec *spec = codec->spec;
struct led_classdev *cdev;
+ int idx = micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE;
+ int err;
cdev = devm_kzalloc(&codec->core.dev, sizeof(*cdev), GFP_KERNEL);
if (!cdev)
@@ -3932,10 +3941,14 @@ static int create_mute_led_cdev(struct hda_codec *codec,
cdev->max_brightness = 1;
cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute";
cdev->brightness_set_blocking = callback;
- cdev->brightness = ledtrig_audio_get(micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE);
+ cdev->brightness = ledtrig_audio_get(idx);
cdev->flags = LED_CORE_SUSPENDRESUME;
- return devm_led_classdev_register(&codec->core.dev, cdev);
+ err = led_classdev_register(&codec->core.dev, cdev);
+ if (err < 0)
+ return err;
+ spec->led_cdevs[idx] = cdev;
+ return 0;
}
/**
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 8e1bc8ea74fc..34eba40cc6e6 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -294,6 +294,9 @@ struct hda_gen_spec {
struct hda_jack_callback *cb);
void (*mic_autoswitch_hook)(struct hda_codec *codec,
struct hda_jack_callback *cb);
+
+ /* leds */
+ struct led_classdev *led_cdevs[NUM_AUDIO_LEDS];
};
/* values for add_stereo_mix_input flag */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4b0338c4c543..572ff0d1fafe 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1615,6 +1615,7 @@ static const struct snd_pci_quirk probe_mask_list[] = {
/* forced codec slots */
SND_PCI_QUIRK(0x1043, 0x1262, "ASUS W5Fm", 0x103),
SND_PCI_QUIRK(0x1046, 0x1262, "ASUS W5F", 0x103),
+ SND_PCI_QUIRK(0x1558, 0x0351, "Schenker Dock 15", 0x105),
/* WinFast VP200 H (Teradici) user reported broken communication */
SND_PCI_QUIRK(0x3a21, 0x040d, "WinFast VP200 H", 0x101),
{}
@@ -1798,8 +1799,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
assign_position_fix(chip, check_position_fix(chip, position_fix[dev]));
- check_probe_mask(chip, dev);
-
if (single_cmd < 0) /* allow fallback to single_cmd at errors */
chip->fallback_to_single_cmd = 1;
else /* explicitly set to single_cmd or not */
@@ -1825,6 +1824,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
chip->bus.core.needs_damn_long_delay = 1;
}
+ check_probe_mask(chip, dev);
+
err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
if (err < 0) {
dev_err(card->dev, "Error creating device [card]!\n");
@@ -1940,6 +1941,7 @@ static int azx_first_init(struct azx *chip)
dma_bits = 32;
if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(dma_bits)))
dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(&pci->dev, UINT_MAX);
/* read number of streams from GCAP register instead of using
* hardcoded value
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 668274e52674..3a42457984e9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -98,6 +98,7 @@ struct alc_spec {
unsigned int gpio_mic_led_mask;
struct alc_coef_led mute_led_coef;
struct alc_coef_led mic_led_coef;
+ struct mutex coef_mutex;
hda_nid_t headset_mic_pin;
hda_nid_t headphone_mic_pin;
@@ -137,8 +138,24 @@ struct alc_spec {
* COEF access helper functions
*/
-static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
- unsigned int coef_idx)
+static void coef_mutex_lock(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ snd_hda_power_up_pm(codec);
+ mutex_lock(&spec->coef_mutex);
+}
+
+static void coef_mutex_unlock(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ mutex_unlock(&spec->coef_mutex);
+ snd_hda_power_down_pm(codec);
+}
+
+static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx)
{
unsigned int val;
@@ -147,28 +164,56 @@ static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
return val;
}
+static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx)
+{
+ unsigned int val;
+
+ coef_mutex_lock(codec);
+ val = __alc_read_coefex_idx(codec, nid, coef_idx);
+ coef_mutex_unlock(codec);
+ return val;
+}
+
#define alc_read_coef_idx(codec, coef_idx) \
alc_read_coefex_idx(codec, 0x20, coef_idx)
-static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
- unsigned int coef_idx, unsigned int coef_val)
+static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int coef_val)
{
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx);
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val);
}
+static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int coef_val)
+{
+ coef_mutex_lock(codec);
+ __alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
+ coef_mutex_unlock(codec);
+}
+
#define alc_write_coef_idx(codec, coef_idx, coef_val) \
alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val)
+static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int mask,
+ unsigned int bits_set)
+{
+ unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx);
+
+ if (val != -1)
+ __alc_write_coefex_idx(codec, nid, coef_idx,
+ (val & ~mask) | bits_set);
+}
+
static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
unsigned int coef_idx, unsigned int mask,
unsigned int bits_set)
{
- unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx);
-
- if (val != -1)
- alc_write_coefex_idx(codec, nid, coef_idx,
- (val & ~mask) | bits_set);
+ coef_mutex_lock(codec);
+ __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
+ coef_mutex_unlock(codec);
}
#define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
@@ -201,13 +246,15 @@ struct coef_fw {
static void alc_process_coef_fw(struct hda_codec *codec,
const struct coef_fw *fw)
{
+ coef_mutex_lock(codec);
for (; fw->nid; fw++) {
if (fw->mask == (unsigned short)-1)
- alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
+ __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
else
- alc_update_coefex_idx(codec, fw->nid, fw->idx,
- fw->mask, fw->val);
+ __alc_update_coefex_idx(codec, fw->nid, fw->idx,
+ fw->mask, fw->val);
}
+ coef_mutex_unlock(codec);
}
/*
@@ -1153,6 +1200,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid)
codec->spdif_status_reset = 1;
codec->forced_resume = 1;
codec->patch_ops = alc_patch_ops;
+ mutex_init(&spec->coef_mutex);
err = alc_codec_rename_from_preset(codec);
if (err < 0) {
@@ -2125,6 +2173,7 @@ static void alc1220_fixup_gb_x570(struct hda_codec *codec,
{
static const hda_nid_t conn1[] = { 0x0c };
static const struct coef_fw gb_x570_coefs[] = {
+ WRITE_COEF(0x07, 0x03c0),
WRITE_COEF(0x1a, 0x01c1),
WRITE_COEF(0x1b, 0x0202),
WRITE_COEF(0x43, 0x3005),
@@ -2551,7 +2600,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
- SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570),
+ SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570),
SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
@@ -2626,6 +2676,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = {
{.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
{.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"},
{.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
+ {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"},
{.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"},
{}
};
@@ -8969,6 +9020,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -9127,6 +9179,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x383d, "Legion Y9000X 2019", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
SND_PCI_QUIRK(0x17aa, 0x3847, "Legion 7 16ACHG6", ALC287_FIXUP_LEGION_16ACHG6),
SND_PCI_QUIRK(0x17aa, 0x384a, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
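The patch_realtek.c hunks above split every COEF accessor into an unlocked __-prefixed helper plus a public wrapper that takes spec->coef_mutex, so that alc_process_coef_fw() can hold the lock once across a whole write sequence instead of locking per register. Below is a minimal userspace sketch of that locked/unlocked accessor split, using a pthread mutex and a fake register array in place of the HDA verb interface; every name in it is illustrative, not kernel API.

#include <pthread.h>
#include <stdio.h>

static unsigned int regs[256];
static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked helpers: the caller must already hold reg_mutex. */
static unsigned int __reg_read(unsigned int idx)
{
	return regs[idx];
}

static void __reg_write(unsigned int idx, unsigned int val)
{
	regs[idx] = val;
}

static void __reg_update(unsigned int idx, unsigned int mask, unsigned int bits)
{
	__reg_write(idx, (__reg_read(idx) & ~mask) | bits);
}

/* Public accessor: takes the lock around a single access. */
static unsigned int reg_read(unsigned int idx)
{
	unsigned int val;

	pthread_mutex_lock(&reg_mutex);
	val = __reg_read(idx);
	pthread_mutex_unlock(&reg_mutex);
	return val;
}

struct reg_seq { unsigned int idx, mask, val; };

/* Batch path: hold the lock once for the whole sequence, the way
 * alc_process_coef_fw() calls the __ variants under coef_mutex. */
static void reg_process_seq(const struct reg_seq *seq, int n)
{
	pthread_mutex_lock(&reg_mutex);
	for (int i = 0; i < n; i++)
		__reg_update(seq[i].idx, seq[i].mask, seq[i].val);
	pthread_mutex_unlock(&reg_mutex);
}

int main(void)
{
	static const struct reg_seq init[] = {
		{ 0x07, 0xffff, 0x03c0 },
		{ 0x1a, 0xffff, 0x01c1 },
	};

	reg_process_seq(init, 2);
	printf("coef 0x07 = 0x%04x\n", reg_read(0x07));
	return 0;
}

The split keeps the lock non-recursive: single accesses lock around themselves, while batch paths take the lock once and call only the __ variants.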
diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
index c9caade5cb74..cd05ee2802c9 100644
--- a/sound/soc/amd/acp/acp-mach-common.c
+++ b/sound/soc/amd/acp/acp-mach-common.c
@@ -303,11 +303,11 @@ static const struct snd_soc_dapm_route rt1019_map_lr[] = {
static struct snd_soc_codec_conf rt1019_conf[] = {
{
- .dlc = COMP_CODEC_CONF("i2c-10EC1019:00"),
+ .dlc = COMP_CODEC_CONF("i2c-10EC1019:01"),
.name_prefix = "Left",
},
{
- .dlc = COMP_CODEC_CONF("i2c-10EC1019:01"),
+ .dlc = COMP_CODEC_CONF("i2c-10EC1019:00"),
.name_prefix = "Right",
},
};
diff --git a/sound/soc/amd/acp/acp-mach.h b/sound/soc/amd/acp/acp-mach.h
index fd6299844ebe..c855f50d6b34 100644
--- a/sound/soc/amd/acp/acp-mach.h
+++ b/sound/soc/amd/acp/acp-mach.h
@@ -21,7 +21,6 @@
#include <linux/gpio/consumer.h>
#define EN_SPKR_GPIO_GB 0x11F
-#define EN_SPKR_GPIO_NK 0x146
#define EN_SPKR_GPIO_NONE -EINVAL
enum be_id {
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
index 07de46142655..4cc431e54fe1 100644
--- a/sound/soc/amd/acp/acp-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sof-mach.c
@@ -37,7 +37,7 @@ static struct acp_card_drvdata sof_rt5682_max_data = {
.hs_codec_id = RT5682,
.amp_codec_id = MAX98360A,
.dmic_codec_id = DMIC,
- .gpio_spkr_en = EN_SPKR_GPIO_NK,
+ .gpio_spkr_en = EN_SPKR_GPIO_NONE,
};
static struct acp_card_drvdata sof_rt5682s_max_data = {
@@ -47,7 +47,7 @@ static struct acp_card_drvdata sof_rt5682s_max_data = {
.hs_codec_id = RT5682S,
.amp_codec_id = MAX98360A,
.dmic_codec_id = DMIC,
- .gpio_spkr_en = EN_SPKR_GPIO_NK,
+ .gpio_spkr_en = EN_SPKR_GPIO_NONE,
};
static const struct snd_kcontrol_new acp_controls[] = {
diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
index 598e09024e23..ffdf8b615efa 100644
--- a/sound/soc/codecs/cpcap.c
+++ b/sound/soc/codecs/cpcap.c
@@ -1667,6 +1667,8 @@ static int cpcap_codec_probe(struct platform_device *pdev)
{
struct device_node *codec_node =
of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec");
+ if (!codec_node)
+ return -ENODEV;
pdev->dev.of_node = codec_node;
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 4aaee1873a11..4415fb364d4d 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -150,7 +150,6 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1,
6, 1, 0),
SOC_ENUM("C Data Access", cam_mode_enum),
- SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1),
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
@@ -186,7 +185,7 @@ static const struct snd_soc_dapm_widget cs4265_dapm_widgets[] = {
SND_SOC_DAPM_SWITCH("Loopback", SND_SOC_NOPM, 0, 0,
&loopback_ctl),
- SND_SOC_DAPM_SWITCH("SPDIF", SND_SOC_NOPM, 0, 0,
+ SND_SOC_DAPM_SWITCH("SPDIF", CS4265_SPDIF_CTL2, 5, 1,
&spdif_switch),
SND_SOC_DAPM_SWITCH("DAC", CS4265_PWRCTL, 1, 1,
&dac_switch),
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index b61f980cabdc..b07607a9ecea 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -277,7 +277,7 @@ struct hdmi_codec_priv {
bool busy;
struct snd_soc_jack *jack;
unsigned int jack_status;
- u8 iec_status[5];
+ u8 iec_status[AES_IEC958_STATUS_SIZE];
};
static const struct snd_soc_dapm_widget hdmi_widgets[] = {
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index aec5127260fd..6ffe88345de5 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -2688,8 +2688,8 @@ static uint32_t get_iir_band_coeff(struct snd_soc_component *component,
int reg, b2_reg;
/* Address does not automatically update if reading */
- reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx;
- b2_reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx;
+ reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * iir_idx;
+ b2_reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 0x80 * iir_idx;
snd_soc_component_write(component, reg,
((band_idx * BAND_MAX + coeff_idx) *
@@ -2718,7 +2718,7 @@ static uint32_t get_iir_band_coeff(struct snd_soc_component *component,
static void set_iir_band_coeff(struct snd_soc_component *component,
int iir_idx, int band_idx, uint32_t value)
{
- int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx;
+ int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 0x80 * iir_idx;
snd_soc_component_write(component, reg, (value & 0xFF));
snd_soc_component_write(component, reg, (value >> 8) & 0xFF);
@@ -2739,7 +2739,7 @@ static int rx_macro_put_iir_band_audio_mixer(
int iir_idx = ctl->iir_idx;
int band_idx = ctl->band_idx;
u32 coeff[BAND_MAX];
- int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx;
+ int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * iir_idx;
memcpy(&coeff[0], ucontrol->value.bytes.data, params->max);
diff --git a/sound/soc/codecs/max9759.c b/sound/soc/codecs/max9759.c
index d75fd61b9032..bc57d7687f16 100644
--- a/sound/soc/codecs/max9759.c
+++ b/sound/soc/codecs/max9759.c
@@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol,
struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
struct max9759 *priv = snd_soc_component_get_drvdata(c);
- if (ucontrol->value.integer.value[0] > 3)
+ if (ucontrol->value.integer.value[0] < 0 ||
+ ucontrol->value.integer.value[0] > 3)
return -EINVAL;
priv->gain = ucontrol->value.integer.value[0];
diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c
index fb09715bf932..5b12cbf2ba21 100644
--- a/sound/soc/codecs/rt5668.c
+++ b/sound/soc/codecs/rt5668.c
@@ -1022,11 +1022,13 @@ static void rt5668_jack_detect_handler(struct work_struct *work)
container_of(work, struct rt5668_priv, jack_detect_work.work);
int val, btn_type;
- while (!rt5668->component)
- usleep_range(10000, 15000);
-
- while (!rt5668->component->card->instantiated)
- usleep_range(10000, 15000);
+ if (!rt5668->component || !rt5668->component->card ||
+ !rt5668->component->card->instantiated) {
+ /* card not yet ready, try later */
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5668->jack_detect_work, msecs_to_jiffies(15));
+ return;
+ }
mutex_lock(&rt5668->calibrate_mutex);
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
index 20e0f90ea498..20fc0f3766de 100644
--- a/sound/soc/codecs/rt5682-i2c.c
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -59,18 +59,12 @@ static void rt5682_jd_check_handler(struct work_struct *work)
struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
jd_check_work.work);
- if (snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
- & RT5682_JDH_RS_MASK) {
+ if (snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL) & RT5682_JDH_RS_MASK)
/* jack out */
- rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0);
-
- snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
- SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3);
- } else {
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, 0);
+ else
schedule_delayed_work(&rt5682->jd_check_work, 500);
- }
}
static irqreturn_t rt5682_irq(int irq, void *data)
@@ -198,7 +192,6 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
}
mutex_init(&rt5682->calibrate_mutex);
- mutex_init(&rt5682->jdet_mutex);
rt5682_calibrate(rt5682);
rt5682_apply_patch_list(rt5682, &i2c->dev);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 415ec564c82e..be68d573a490 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -922,15 +922,13 @@ static void rt5682_enable_push_button_irq(struct snd_soc_component *component,
*
* Returns detect status.
*/
-int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
+static int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
struct snd_soc_dapm_context *dapm = &component->dapm;
unsigned int val, count;
if (jack_insert) {
- snd_soc_dapm_mutex_lock(dapm);
-
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_MB,
RT5682_PWR_VREF2 | RT5682_PWR_MB);
@@ -981,8 +979,6 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
snd_soc_component_update_bits(component, RT5682_MICBIAS_2,
RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK,
RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU);
-
- snd_soc_dapm_mutex_unlock(dapm);
} else {
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -1011,7 +1007,6 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
dev_dbg(component->dev, "jack_type = %d\n", rt5682->jack_type);
return rt5682->jack_type;
}
-EXPORT_SYMBOL_GPL(rt5682_headset_detect);
static int rt5682_set_jack_detect(struct snd_soc_component *component,
struct snd_soc_jack *hs_jack, void *data)
@@ -1094,15 +1089,20 @@ void rt5682_jack_detect_handler(struct work_struct *work)
{
struct rt5682_priv *rt5682 =
container_of(work, struct rt5682_priv, jack_detect_work.work);
+ struct snd_soc_dapm_context *dapm;
int val, btn_type;
- while (!rt5682->component)
- usleep_range(10000, 15000);
+ if (!rt5682->component || !rt5682->component->card ||
+ !rt5682->component->card->instantiated) {
+ /* card not yet ready, try later */
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, msecs_to_jiffies(15));
+ return;
+ }
- while (!rt5682->component->card->instantiated)
- usleep_range(10000, 15000);
+ dapm = snd_soc_component_get_dapm(rt5682->component);
- mutex_lock(&rt5682->jdet_mutex);
+ snd_soc_dapm_mutex_lock(dapm);
mutex_lock(&rt5682->calibrate_mutex);
val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
@@ -1162,6 +1162,9 @@ void rt5682_jack_detect_handler(struct work_struct *work)
rt5682->irq_work_delay_time = 50;
}
+ mutex_unlock(&rt5682->calibrate_mutex);
+ snd_soc_dapm_mutex_unlock(dapm);
+
snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
SND_JACK_HEADSET |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
@@ -1174,9 +1177,6 @@ void rt5682_jack_detect_handler(struct work_struct *work)
else
cancel_delayed_work_sync(&rt5682->jd_check_work);
}
-
- mutex_unlock(&rt5682->calibrate_mutex);
- mutex_unlock(&rt5682->jdet_mutex);
}
EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler);
@@ -1526,7 +1526,6 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
- struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1538,17 +1537,12 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_DEPOP_1, 0x60, 0x60);
snd_soc_component_update_bits(component,
RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
-
- mutex_lock(&rt5682->jdet_mutex);
-
snd_soc_component_update_bits(component, RT5682_HP_CTRL_2,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN);
usleep_range(5000, 10000);
snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1,
RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L);
-
- mutex_unlock(&rt5682->jdet_mutex);
break;
case SND_SOC_DAPM_POST_PMD:
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index c917c76200ea..52ff0d9c36c5 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1463,7 +1463,6 @@ struct rt5682_priv {
int jack_type;
int irq_work_delay_time;
- struct mutex jdet_mutex;
};
extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES];
@@ -1473,7 +1472,6 @@ int rt5682_sel_asrc_clk_src(struct snd_soc_component *component,
void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev);
-int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert);
void rt5682_jack_detect_handler(struct work_struct *work);
bool rt5682_volatile_register(struct device *dev, unsigned int reg);
diff --git a/sound/soc/codecs/rt5682s.c b/sound/soc/codecs/rt5682s.c
index efa1016831dd..1e662d1be2b3 100644
--- a/sound/soc/codecs/rt5682s.c
+++ b/sound/soc/codecs/rt5682s.c
@@ -824,11 +824,13 @@ static void rt5682s_jack_detect_handler(struct work_struct *work)
container_of(work, struct rt5682s_priv, jack_detect_work.work);
int val, btn_type;
- while (!rt5682s->component)
- usleep_range(10000, 15000);
-
- while (!rt5682s->component->card->instantiated)
- usleep_range(10000, 15000);
+ if (!rt5682s->component || !rt5682s->component->card ||
+ !rt5682s->component->card->instantiated) {
+ /* card not yet ready, try later */
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682s->jack_detect_work, msecs_to_jiffies(15));
+ return;
+ }
mutex_lock(&rt5682s->jdet_mutex);
mutex_lock(&rt5682s->calibrate_mutex);
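The rt5668, rt5682 and rt5682s jack-detect handlers above stop busy-waiting in usleep_range() loops for the component and card to appear; they requeue themselves and return instead. A kernel-style sketch of that "not ready yet, retry later" shape for a delayed work handler follows; struct jack_ctx and the card_ready flag are illustrative stand-ins, and the snippet is not buildable on its own.

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct jack_ctx {
	bool card_ready;			/* set once the sound card is bound */
	struct delayed_work detect_work;
};

static void jack_detect_handler(struct work_struct *work)
{
	struct jack_ctx *ctx =
		container_of(work, struct jack_ctx, detect_work.work);

	if (!ctx->card_ready) {
		/* Not ready: requeue and return instead of sleeping in a
		 * loop, so the workqueue thread is never blocked. */
		mod_delayed_work(system_power_efficient_wq,
				 &ctx->detect_work, msecs_to_jiffies(15));
		return;
	}

	/* ... read the jack status and report it, the card is ready ... */
}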
diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
index 6549e7fef3e3..c5ea3b115966 100644
--- a/sound/soc/codecs/tas2770.c
+++ b/sound/soc/codecs/tas2770.c
@@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
msleep(20);
gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
+ usleep_range(1000, 2000);
}
snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
TAS2770_RST);
+ usleep_range(1000, 2000);
}
static int tas2770_set_bias_level(struct snd_soc_component *component,
@@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
if (tas2770->sdz_gpio) {
gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+ usleep_range(1000, 2000);
} else {
ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
TAS2770_PWR_CTRL_MASK,
@@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
tas2770->component = component;
- if (tas2770->sdz_gpio)
+ if (tas2770->sdz_gpio) {
gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
+ usleep_range(1000, 2000);
+ }
tas2770_reset(tas2770);
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index eff200a07d9f..36cbc66914f9 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -1432,14 +1432,10 @@ static int wcd938x_sdw_connect_port(struct wcd938x_sdw_ch_info *ch_info,
return 0;
}
-static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 ch_id, u8 enable)
+static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 port_num, u8 ch_id, u8 enable)
{
- u8 port_num;
-
- port_num = wcd->ch_info[ch_id].port_num;
-
return wcd938x_sdw_connect_port(&wcd->ch_info[ch_id],
- &wcd->port_config[port_num],
+ &wcd->port_config[port_num - 1],
enable);
}
@@ -2563,7 +2559,7 @@ static int wcd938x_ear_pa_put_gain(struct snd_kcontrol *kcontrol,
WCD938X_EAR_GAIN_MASK,
ucontrol->value.integer.value[0]);
- return 0;
+ return 1;
}
static int wcd938x_get_compander(struct snd_kcontrol *kcontrol,
@@ -2593,6 +2589,7 @@ static int wcd938x_set_compander(struct snd_kcontrol *kcontrol,
struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
struct wcd938x_sdw_priv *wcd;
int value = ucontrol->value.integer.value[0];
+ int portidx;
struct soc_mixer_control *mc;
bool hphr;
@@ -2606,12 +2603,14 @@ static int wcd938x_set_compander(struct snd_kcontrol *kcontrol,
else
wcd938x->comp1_enable = value;
+ portidx = wcd->ch_info[mc->reg].port_num;
+
if (value)
- wcd938x_connect_port(wcd, mc->reg, true);
+ wcd938x_connect_port(wcd, portidx, mc->reg, true);
else
- wcd938x_connect_port(wcd, mc->reg, false);
+ wcd938x_connect_port(wcd, portidx, mc->reg, false);
- return 0;
+ return 1;
}
static int wcd938x_ldoh_get(struct snd_kcontrol *kcontrol,
@@ -2882,9 +2881,11 @@ static int wcd938x_get_swr_port(struct snd_kcontrol *kcontrol,
struct wcd938x_sdw_priv *wcd;
struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
int dai_id = mixer->shift;
- int portidx = mixer->reg;
+ int portidx, ch_idx = mixer->reg;
+
wcd = wcd938x->sdw_priv[dai_id];
+ portidx = wcd->ch_info[ch_idx].port_num;
ucontrol->value.integer.value[0] = wcd->port_enable[portidx];
@@ -2899,12 +2900,14 @@ static int wcd938x_set_swr_port(struct snd_kcontrol *kcontrol,
struct wcd938x_sdw_priv *wcd;
struct soc_mixer_control *mixer =
(struct soc_mixer_control *)kcontrol->private_value;
- int portidx = mixer->reg;
+ int ch_idx = mixer->reg;
+ int portidx;
int dai_id = mixer->shift;
bool enable;
wcd = wcd938x->sdw_priv[dai_id];
+ portidx = wcd->ch_info[ch_idx].port_num;
if (ucontrol->value.integer.value[0])
enable = true;
else
@@ -2912,9 +2915,9 @@ static int wcd938x_set_swr_port(struct snd_kcontrol *kcontrol,
wcd->port_enable[portidx] = enable;
- wcd938x_connect_port(wcd, portidx, enable);
+ wcd938x_connect_port(wcd, portidx, ch_idx, enable);
- return 0;
+ return 1;
}
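In the wcd938x changes above, the control's mixer->reg now carries a channel id; the SoundWire port number is looked up through ch_info[], port_config[] is indexed with port_num - 1, and the put callbacks return 1 so a control-change event is emitted. A small standalone sketch of the 1-based port number versus 0-based array translation, with made-up types and values:

#include <stdio.h>

struct ch_info { int port_num; };		/* 1-based SoundWire port number */
struct port_cfg { int enabled; };

static const struct ch_info channels[] = {
	[0] = { .port_num = 1 },
	[1] = { .port_num = 1 },		/* two channels may share a port */
	[2] = { .port_num = 2 },
};
static struct port_cfg port_config[2];		/* indexed by port_num - 1 */

static void connect_port(int port_num, int ch_id, int enable)
{
	/* port numbers are 1-based, the config array is 0-based */
	port_config[port_num - 1].enabled = enable;
	printf("ch %d -> port %d %s\n", ch_id, port_num, enable ? "on" : "off");
}

int main(void)
{
	int ch_id = 2;
	int port_num = channels[ch_id].port_num;

	connect_port(port_num, ch_id, 1);
	return 0;
}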
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f3672e3d1703..0582585236a2 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1441,7 +1441,8 @@ static int wm_adsp_buffer_parse_coeff(struct cs_dsp_coeff_ctl *cs_ctl)
int ret, i;
for (i = 0; i < 5; ++i) {
- ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1, sizeof(coeff_v1));
+ ret = cs_dsp_coeff_read_ctrl(cs_ctl, 0, &coeff_v1,
+ min(cs_ctl->len, sizeof(coeff_v1)));
if (ret < 0)
return ret;
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index af3c3b90c0ac..83b4a22bf15a 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -93,16 +93,21 @@ static int pcm030_fabric_probe(struct platform_device *op)
dev_err(&op->dev, "platform_device_alloc() failed\n");
ret = platform_device_add(pdata->codec_device);
- if (ret)
+ if (ret) {
dev_err(&op->dev, "platform_device_add() failed: %d\n", ret);
+ platform_device_put(pdata->codec_device);
+ }
ret = snd_soc_register_card(card);
- if (ret)
+ if (ret) {
dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
+ platform_device_del(pdata->codec_device);
+ platform_device_put(pdata->codec_device);
+ }
platform_set_drvdata(op, pdata);
-
return ret;
+
}
static int pcm030_fabric_remove(struct platform_device *op)
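pcm030_fabric_probe() above gains the reference drops that platform_device_alloc() requires on its error paths: put the device if platform_device_add() fails, del plus put if a later step fails. A kernel-style sketch of that canonical pairing, written with early returns rather than the driver's log-and-continue flow; register_card_stub() is a hypothetical stand-in for snd_soc_register_card().

#include <linux/platform_device.h>
#include <linux/errno.h>

/* Hypothetical stand-in for snd_soc_register_card(). */
static int register_card_stub(void)
{
	return 0;
}

static int fabric_probe_sketch(void)
{
	struct platform_device *codec;
	int ret;

	codec = platform_device_alloc("sketch-codec", -1);
	if (!codec)
		return -ENOMEM;

	ret = platform_device_add(codec);
	if (ret) {
		platform_device_put(codec);	/* add failed: drop the only reference */
		return ret;
	}

	ret = register_card_stub();
	if (ret) {
		platform_device_del(codec);	/* undo the successful add ... */
		platform_device_put(codec);	/* ... then drop the reference */
		return ret;
	}

	return 0;
}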
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index a89d1cfdda32..78419e18717d 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -28,6 +28,30 @@ static const struct snd_soc_ops simple_ops = {
.hw_params = asoc_simple_hw_params,
};
+static int asoc_simple_parse_platform(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ if (!node)
+ return 0;
+
+ /*
+ * Get node via "sound-dai = <&phandle port>"
+ * it will be used as xxx_of_node on soc_bind_dai_link()
+ */
+ ret = of_parse_phandle_with_args(node, DAI, CELL, 0, &args);
+ if (ret)
+ return ret;
+
+ /* dai_name is not required and may not exist for plat component */
+
+ dlc->of_node = args.np;
+
+ return 0;
+}
+
static int asoc_simple_parse_dai(struct device_node *node,
struct snd_soc_dai_link_component *dlc,
int *is_single_link)
@@ -289,7 +313,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_parse_dai(plat, platforms, NULL);
+ ret = asoc_simple_parse_platform(plat, platforms);
if (ret < 0)
goto dai_link_of_err;
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 148ddf4cace0..aeca58246fc7 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -952,6 +952,7 @@ static int skl_first_init(struct hdac_bus *bus)
/* allow 64bit DMA address if supported by H/W */
if (dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(64)))
dma_set_mask_and_coherent(bus->dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(bus->dev, UINT_MAX);
/* initialize streams */
snd_hdac_ext_stream_init_all
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 9306b7ca2644..0d154350f180 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -216,7 +216,7 @@ config SND_SOC_MT8195_MT6359_RT1019_RT5682
config SND_SOC_MT8195_MT6359_RT1011_RT5682
tristate "ASoC Audio driver for MT8195 with MT6359 RT1011 RT5682 codec"
- depends on I2C
+ depends on I2C && GPIOLIB
depends on SND_SOC_MT8195 && MTK_PMIC_WRAP
select SND_SOC_MT6359
select SND_SOC_RT1011
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index a59e9d20cb46..4b1773c1fb95 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -524,7 +524,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
return -EINVAL;
}
- ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
+ ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
if (ret) {
dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
return ret;
@@ -665,7 +665,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
return -EINVAL;
}
if (interrupts & LPAIF_IRQ_PER(chan)) {
- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
+ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
@@ -676,7 +676,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
}
if (interrupts & LPAIF_IRQ_XRUN(chan)) {
- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
+ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
}
if (interrupts & LPAIF_IRQ_ERR(chan)) {
- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
+ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
if (rv) {
dev_err(soc_runtime->dev,
"error writing to irqclear reg: %d\n", rv);
diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
index eb1c3aec479b..19c4a90ec1ea 100644
--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
@@ -308,8 +308,11 @@ static int q6apm_dai_close(struct snd_soc_component *component,
struct snd_pcm_runtime *runtime = substream->runtime;
struct q6apm_dai_rtd *prtd = runtime->private_data;
- q6apm_graph_stop(prtd->graph);
- q6apm_unmap_memory_regions(prtd->graph, substream->stream);
+ if (prtd->state) { /* only stop graph that is started */
+ q6apm_graph_stop(prtd->graph);
+ q6apm_unmap_memory_regions(prtd->graph, substream->stream);
+ }
+
q6apm_graph_close(prtd->graph);
prtd->graph = NULL;
kfree(prtd);
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index cbd7ea48837b..142476f1396f 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -55,16 +55,13 @@ EXPORT_SYMBOL_GPL(snd_soc_acpi_find_machine);
static acpi_status snd_soc_acpi_find_package(acpi_handle handle, u32 level,
void *context, void **ret)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
acpi_status status;
struct snd_soc_acpi_package_context *pkg_ctx = context;
pkg_ctx->data_valid = false;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
-
- if (adev->status.present && adev->status.functional) {
+ if (adev && adev->status.present && adev->status.functional) {
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *myobj = NULL;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 08eaa9ddf191..a0ca58ba1627 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -308,7 +308,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
unsigned int sign_bit = mc->sign_bit;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- int err;
+ int err, ret;
bool type_2r = false;
unsigned int val2 = 0;
unsigned int val, val_mask;
@@ -316,13 +316,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
if (sign_bit)
mask = BIT(sign_bit + 1) - 1;
- val = ((ucontrol->value.integer.value[0] + min) & mask);
+ if (ucontrol->value.integer.value[0] < 0)
+ return -EINVAL;
+ val = ucontrol->value.integer.value[0];
+ if (mc->platform_max && ((int)val + min) > mc->platform_max)
+ return -EINVAL;
+ if (val > max - min)
+ return -EINVAL;
+ val = (val + min) & mask;
if (invert)
val = max - val;
val_mask = mask << shift;
val = val << shift;
if (snd_soc_volsw_is_stereo(mc)) {
- val2 = ((ucontrol->value.integer.value[1] + min) & mask);
+ if (ucontrol->value.integer.value[1] < 0)
+ return -EINVAL;
+ val2 = ucontrol->value.integer.value[1];
+ if (mc->platform_max && ((int)val2 + min) > mc->platform_max)
+ return -EINVAL;
+ if (val2 > max - min)
+ return -EINVAL;
+ val2 = (val2 + min) & mask;
if (invert)
val2 = max - val2;
if (reg == reg2) {
@@ -336,12 +350,18 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
err = snd_soc_component_update_bits(component, reg, val_mask, val);
if (err < 0)
return err;
+ ret = err;
- if (type_2r)
+ if (type_2r) {
err = snd_soc_component_update_bits(component, reg2, val_mask,
- val2);
+ val2);
+ /* Don't discard any error code or drop change flag */
+ if (ret == 0 || err < 0) {
+ ret = err;
+ }
+ }
- return err;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
@@ -407,15 +427,24 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
int min = mc->min;
unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
int err = 0;
+ int ret;
unsigned int val, val_mask;
+ if (ucontrol->value.integer.value[0] < 0)
+ return -EINVAL;
+ val = ucontrol->value.integer.value[0];
+ if (mc->platform_max && val > mc->platform_max)
+ return -EINVAL;
+ if (val > max - min)
+ return -EINVAL;
val_mask = mask << shift;
- val = (ucontrol->value.integer.value[0] + min) & mask;
+ val = (val + min) & mask;
val = val << shift;
err = snd_soc_component_update_bits(component, reg, val_mask, val);
if (err < 0)
return err;
+ ret = err;
if (snd_soc_volsw_is_stereo(mc)) {
unsigned int val2;
@@ -426,6 +455,11 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
err = snd_soc_component_update_bits(component, reg2, val_mask,
val2);
+
+ /* Don't discard any error code or drop change flag */
+ if (ret == 0 || err < 0) {
+ ret = err;
+ }
}
return err;
}
@@ -485,7 +519,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val, val_mask;
- int ret;
+ int err, ret;
if (invert)
val = (max - ucontrol->value.integer.value[0]) & mask;
@@ -494,9 +528,10 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val_mask = mask << shift;
val = val << shift;
- ret = snd_soc_component_update_bits(component, reg, val_mask, val);
- if (ret < 0)
- return ret;
+ err = snd_soc_component_update_bits(component, reg, val_mask, val);
+ if (err < 0)
+ return err;
+ ret = err;
if (snd_soc_volsw_is_stereo(mc)) {
if (invert)
@@ -506,8 +541,12 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
val_mask = mask << shift;
val = val << shift;
- ret = snd_soc_component_update_bits(component, rreg, val_mask,
+ err = snd_soc_component_update_bits(component, rreg, val_mask,
val);
+ /* Don't discard any error code or drop change flag */
+ if (ret == 0 || err < 0) {
+ ret = err;
+ }
}
return ret;
@@ -856,8 +895,11 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
unsigned long mask = (1UL<<mc->nbits)-1;
long max = mc->max;
long val = ucontrol->value.integer.value[0];
+ int ret = 0;
unsigned int i;
+ if (val < mc->min || val > mc->max)
+ return -EINVAL;
if (invert)
val = max - val;
val &= mask;
@@ -868,9 +910,11 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
regmask, regval);
if (err < 0)
return err;
+ if (err > 0)
+ ret = err;
}
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
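The soc-ops.c hunks above make the volume put callbacks reject out-of-range values with -EINVAL before touching hardware, and return 1 rather than 0 when a register actually changed, so userspace gets a notify event and errors from the second channel of a stereo control are not dropped. A standalone sketch of that put-callback contract (negative on error, 1 on change, 0 on no change), with a fake update_bits() standing in for snd_soc_component_update_bits():

#include <errno.h>
#include <stdio.h>

static unsigned int reg;

/* Returns 1 if the register changed, 0 if not, mirroring the return
 * convention of snd_soc_component_update_bits(). */
static int update_bits(unsigned int mask, unsigned int val)
{
	unsigned int old = reg;
	unsigned int new = (old & ~mask) | (val & mask);

	reg = new;
	return old != new;
}

/* put-style callback: validate first, then report changed vs. unchanged */
static int put_volume(long val, long min, long max)
{
	int err;

	if (val < 0 || val > max - min)
		return -EINVAL;		/* reject before touching "hardware" */

	err = update_bits(0xff, (unsigned int)(val + min));
	if (err < 0)
		return err;		/* kept to mirror the kernel pattern */

	return err;			/* 1 = changed, 0 = no change */
}

int main(void)
{
	printf("%d\n", put_volume(10, 0, 127));		/* 1: value changed */
	printf("%d\n", put_volume(10, 0, 127));		/* 0: same value */
	printf("%d\n", put_volume(300, 0, 127));	/* -EINVAL: out of range */
	return 0;
}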
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 7abfc48b26ca..9a954680d492 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -46,8 +46,8 @@ static inline void snd_soc_dpcm_stream_lock_irq(struct snd_soc_pcm_runtime *rtd,
snd_pcm_stream_lock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}
-#define snd_soc_dpcm_stream_lock_irqsave(rtd, stream, flags) \
- snd_pcm_stream_lock_irqsave(snd_soc_dpcm_get_substream(rtd, stream), flags)
+#define snd_soc_dpcm_stream_lock_irqsave_nested(rtd, stream, flags) \
+ snd_pcm_stream_lock_irqsave_nested(snd_soc_dpcm_get_substream(rtd, stream), flags)
static inline void snd_soc_dpcm_stream_unlock_irq(struct snd_soc_pcm_runtime *rtd,
int stream)
@@ -1268,6 +1268,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
+ LIST_HEAD(deleted_dpcms);
snd_soc_dpcm_mutex_assert_held(fe);
@@ -1287,13 +1288,18 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
/* BEs still alive need new FE */
dpcm_be_reparent(fe, dpcm->be, stream);
- dpcm_remove_debugfs_state(dpcm);
-
list_del(&dpcm->list_be);
+ list_move(&dpcm->list_fe, &deleted_dpcms);
+ }
+ snd_soc_dpcm_stream_unlock_irq(fe, stream);
+
+ while (!list_empty(&deleted_dpcms)) {
+ dpcm = list_first_entry(&deleted_dpcms, struct snd_soc_dpcm,
+ list_fe);
list_del(&dpcm->list_fe);
+ dpcm_remove_debugfs_state(dpcm);
kfree(dpcm);
}
- snd_soc_dpcm_stream_unlock_irq(fe, stream);
}
/* get BE for DAI widget and stream */
@@ -2094,7 +2100,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
be = dpcm->be;
be_substream = snd_soc_dpcm_get_substream(be, stream);
- snd_soc_dpcm_stream_lock_irqsave(be, stream, flags);
+ snd_soc_dpcm_stream_lock_irqsave_nested(be, stream, flags);
/* is this op for this BE ? */
if (!snd_soc_dpcm_be_can_update(fe, be, stream))
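dpcm_be_disconnect() above now unlinks the dpcm entries onto a local list while the stream spinlock is held, and only removes debugfs state and kfree()s them after unlocking, since debugfs removal can sleep. The general "unlink under the lock, free after unlock" shape looks roughly like this; struct item and drain_items() are illustrative, not part of the patch.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

static void drain_items(struct list_head *active, spinlock_t *lock)
{
	struct item *it, *tmp;
	LIST_HEAD(doomed);

	spin_lock_irq(lock);
	list_for_each_entry_safe(it, tmp, active, node)
		list_move(&it->node, &doomed);		/* unlink while locked */
	spin_unlock_irq(lock);

	/* Safe to sleep (debugfs removal) or free now that the lock is dropped. */
	while (!list_empty(&doomed)) {
		it = list_first_entry(&doomed, struct item, node);
		list_del(&it->node);
		kfree(it);
	}
}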
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index c8fb082209ce..1385695d7745 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -956,6 +956,7 @@ int hda_dsp_probe(struct snd_sof_dev *sdev)
dev_dbg(sdev->dev, "DMA mask is 32 bit\n");
dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
}
+ dma_set_max_seg_size(&pci->dev, UINT_MAX);
/* init streams */
ret = hda_dsp_stream_init(sdev);
diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c
index 91afea9d5de6..ce19a6058b27 100644
--- a/sound/soc/xilinx/xlnx_formatter_pcm.c
+++ b/sound/soc/xilinx/xlnx_formatter_pcm.c
@@ -37,6 +37,7 @@
#define XLNX_AUD_XFER_COUNT 0x28
#define XLNX_AUD_CH_STS_START 0x2C
#define XLNX_BYTES_PER_CH 0x44
+#define XLNX_AUD_ALIGN_BYTES 64
#define AUD_STS_IOC_IRQ_MASK BIT(31)
#define AUD_STS_CH_STS_MASK BIT(29)
@@ -368,12 +369,32 @@ static int xlnx_formatter_pcm_open(struct snd_soc_component *component,
snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware);
runtime->private_data = stream_data;
- /* Resize the period size divisible by 64 */
+ /* Resize the period bytes as divisible by 64 */
err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ XLNX_AUD_ALIGN_BYTES);
if (err) {
dev_err(component->dev,
- "unable to set constraint on period bytes\n");
+ "Unable to set constraint on period bytes\n");
+ return err;
+ }
+
+ /* Resize the buffer bytes as divisible by 64 */
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ XLNX_AUD_ALIGN_BYTES);
+ if (err) {
+ dev_err(component->dev,
+ "Unable to set constraint on buffer bytes\n");
+ return err;
+ }
+
+ /* Set periods as integer multiple */
+ err = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0) {
+ dev_err(component->dev,
+ "Unable to set constraint on periods to be integer\n");
return err;
}
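The Xilinx formatter open() callback above stacks three ALSA constraints: period bytes and buffer bytes must both be multiples of the 64-byte DMA alignment, and the buffer must hold a whole number of periods. The same trio as it might appear in any .open() callback; this is a kernel-style sketch and the alignment value is illustrative.

#include <sound/pcm.h>

#define SKETCH_ALIGN_BYTES 64

static int open_constraints_sketch(struct snd_pcm_runtime *runtime)
{
	int err;

	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 SKETCH_ALIGN_BYTES);
	if (err < 0)
		return err;

	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 SKETCH_ALIGN_BYTES);
	if (err < 0)
		return err;

	/* buffer size must be a whole number of periods */
	err = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0)
		return err;

	return 0;
}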
diff --git a/sound/usb/implicit.c b/sound/usb/implicit.c
index 70319c822c10..2d444ec74202 100644
--- a/sound/usb/implicit.c
+++ b/sound/usb/implicit.c
@@ -47,13 +47,13 @@ struct snd_usb_implicit_fb_match {
static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
/* Generic matching */
IMPLICIT_FB_GENERIC_DEV(0x0499, 0x1509), /* Steinberg UR22 */
- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2080), /* M-Audio FastTrack Ultra */
- IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2081), /* M-Audio FastTrack Ultra */
IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2030), /* M-Audio Fast Track C400 */
IMPLICIT_FB_GENERIC_DEV(0x0763, 0x2031), /* M-Audio Fast Track C600 */
/* Fixed EP */
/* FIXME: check the availability of generic matching */
+ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2080, 0x81, 2), /* M-Audio FastTrack Ultra */
+ IMPLICIT_FB_FIXED_DEV(0x0763, 0x2081, 0x81, 2), /* M-Audio FastTrack Ultra */
IMPLICIT_FB_FIXED_DEV(0x2466, 0x8010, 0x81, 2), /* Fractal Audio Axe-Fx III */
IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0001, 0x81, 2), /* Solid State Logic SSL2 */
IMPLICIT_FB_FIXED_DEV(0x31e9, 0x0002, 0x81, 2), /* Solid State Logic SSL2+ */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e8f3f8d622ec..a5641956ef10 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1527,6 +1527,10 @@ error:
usb_audio_err(chip,
"cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
UAC_GET_CUR, validx, idx, cval->val_type);
+
+ if (val)
+ *val = 0;
+
return filter_error(cval, ret);
}
@@ -3674,17 +3678,14 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
cval->cache_val[idx]);
if (err < 0)
- return err;
+ break;
}
idx++;
}
} else {
/* master */
- if (cval->cached) {
- err = snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
- if (err < 0)
- return err;
- }
+ if (cval->cached)
+ snd_usb_set_cur_mix_value(cval, 0, 0, *cval->cache_val);
}
return 0;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index b1522e43173e..0ea39565e623 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -84,7 +84,7 @@
* combination.
*/
{
- USB_DEVICE(0x041e, 0x4095),
+ USB_AUDIO_DEVICE(0x041e, 0x4095),
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
.ifnum = QUIRK_ANY_INTERFACE,
.type = QUIRK_COMPOSITE,
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 1c94eaff1931..4a3ff6468aa7 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1261,7 +1261,7 @@ static int had_pcm_mmap(struct snd_pcm_substream *substream,
{
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start,
- substream->dma_buffer.addr >> PAGE_SHIFT,
+ substream->runtime->dma_addr >> PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 18de5f76f198..6db4e2932b3d 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -299,7 +299,9 @@
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
+#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 3faf0f97edb1..a4a39c3e0f19 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -476,6 +476,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index 9ddeca947635..320a88ac28c9 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -9,7 +9,11 @@ ifeq ($(V),1)
msg =
else
Q = @
- msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+ ifeq ($(silent),1)
+ msg =
+ else
+ msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+ endif
MAKEFLAGS=--no-print-directory
endif
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
index 1600b17dbb8a..1d3a90d93fe2 100644
--- a/tools/cgroup/memcg_slabinfo.py
+++ b/tools/cgroup/memcg_slabinfo.py
@@ -11,7 +11,7 @@ from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
-from drgn import container_of, FaultError, Object
+from drgn import container_of, FaultError, Object, cast
DESC = """
@@ -69,15 +69,15 @@ def oo_objects(s):
def count_partial(n, fn):
- nr_pages = 0
- for page in list_for_each_entry('struct page', n.partial.address_of_(),
- 'lru'):
- nr_pages += fn(page)
- return nr_pages
+ nr_objs = 0
+ for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
+ 'slab_list'):
+ nr_objs += fn(slab)
+ return nr_objs
-def count_free(page):
- return page.objects - page.inuse
+def count_free(slab):
+ return slab.objects - slab.inuse
def slub_get_slabinfo(s, cfg):
@@ -145,14 +145,14 @@ def detect_kernel_config():
return cfg
-def for_each_slab_page(prog):
+def for_each_slab(prog):
PGSlab = 1 << prog.constant('PG_slab')
PGHead = 1 << prog.constant('PG_head')
for page in for_each_page(prog):
try:
if page.flags.value_() & PGSlab:
- yield page
+ yield cast('struct slab *', page)
except FaultError:
pass
@@ -190,13 +190,13 @@ def main():
'list'):
obj_cgroups.add(ptr.value_())
- # look over all slab pages, belonging to non-root memcgs
- # and look for objects belonging to the given memory cgroup
- for page in for_each_slab_page(prog):
- objcg_vec_raw = page.memcg_data.value_()
+ # look over all slab folios and look for objects belonging
+ # to the given memory cgroup
+ for slab in for_each_slab(prog):
+ objcg_vec_raw = slab.memcg_data.value_()
if objcg_vec_raw == 0:
continue
- cache = page.slab_cache
+ cache = slab.slab_cache
if not cache:
continue
addr = cache.value_()
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index b46bcdb0cab1..507ee1f2aa96 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1134,6 +1134,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_VM_GPA_BITS 207
#define KVM_CAP_XSAVE2 208
#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1624,9 +1625,6 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-/* Available with KVM_CAP_XSAVE2 */
-#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
-
struct kvm_s390_pv_sec_parm {
__u64 origin;
__u64 length;
@@ -2048,4 +2046,7 @@ struct kvm_stats_desc {
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
#endif /* __LINUX_KVM_H */
diff --git a/tools/include/uapi/linux/lirc.h b/tools/include/uapi/linux/lirc.h
deleted file mode 100644
index 45fcbf99d72e..000000000000
--- a/tools/include/uapi/linux/lirc.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * lirc.h - linux infrared remote control header file
- * last modified 2010/07/13 by Jarod Wilson
- */
-
-#ifndef _LINUX_LIRC_H
-#define _LINUX_LIRC_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define PULSE_BIT 0x01000000
-#define PULSE_MASK 0x00FFFFFF
-
-#define LIRC_MODE2_SPACE 0x00000000
-#define LIRC_MODE2_PULSE 0x01000000
-#define LIRC_MODE2_FREQUENCY 0x02000000
-#define LIRC_MODE2_TIMEOUT 0x03000000
-
-#define LIRC_VALUE_MASK 0x00FFFFFF
-#define LIRC_MODE2_MASK 0xFF000000
-
-#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
-#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
-#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY)
-#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)
-
-#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK)
-#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK)
-
-#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE)
-#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE)
-#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY)
-#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT)
-
-/* used heavily by lirc userspace */
-#define lirc_t int
-
-/*** lirc compatible hardware features ***/
-
-#define LIRC_MODE2SEND(x) (x)
-#define LIRC_SEND2MODE(x) (x)
-#define LIRC_MODE2REC(x) ((x) << 16)
-#define LIRC_REC2MODE(x) ((x) >> 16)
-
-#define LIRC_MODE_RAW 0x00000001
-#define LIRC_MODE_PULSE 0x00000002
-#define LIRC_MODE_MODE2 0x00000004
-#define LIRC_MODE_SCANCODE 0x00000008
-#define LIRC_MODE_LIRCCODE 0x00000010
-
-
-#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW)
-#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE)
-#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2)
-#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE)
-
-#define LIRC_CAN_SEND_MASK 0x0000003f
-
-#define LIRC_CAN_SET_SEND_CARRIER 0x00000100
-#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200
-#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400
-
-#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW)
-#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE)
-#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2)
-#define LIRC_CAN_REC_SCANCODE LIRC_MODE2REC(LIRC_MODE_SCANCODE)
-#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE)
-
-#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
-
-#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16)
-
-#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000
-#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000
-#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000
-#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000
-#define LIRC_CAN_SET_REC_FILTER 0x08000000
-
-#define LIRC_CAN_MEASURE_CARRIER 0x02000000
-#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000
-
-#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK)
-#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK)
-
-#define LIRC_CAN_NOTIFY_DECODE 0x01000000
-
-/*** IOCTL commands for lirc driver ***/
-
-#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32)
-
-#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32)
-#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32)
-#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32)
-
-#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32)
-#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32)
-
-/* code length in bits, currently only for LIRC_MODE_LIRCCODE */
-#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32)
-
-#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32)
-#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32)
-/* Note: these can reset the according pulse_width */
-#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32)
-#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32)
-#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32)
-#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32)
-
-/*
- * when a timeout != 0 is set the driver will send a
- * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is
- * never sent, timeout is disabled by default
- */
-#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32)
-
-/* 1 enables, 0 disables timeout reports in MODE2 */
-#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32)
-
-/*
- * if enabled from the next key press on the driver will send
- * LIRC_MODE2_FREQUENCY packets
- */
-#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32)
-
-/*
- * to set a range use LIRC_SET_REC_CARRIER_RANGE with the
- * lower bound first and later LIRC_SET_REC_CARRIER with the upper bound
- */
-#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32)
-
-#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
-
-/*
- * Return the recording timeout, which is either set by
- * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols.
- */
-#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
-
-/*
- * struct lirc_scancode - decoded scancode with protocol for use with
- * LIRC_MODE_SCANCODE
- *
- * @timestamp: Timestamp in nanoseconds using CLOCK_MONOTONIC when IR
- * was decoded.
- * @flags: should be 0 for transmit. When receiving scancodes,
- * LIRC_SCANCODE_FLAG_TOGGLE or LIRC_SCANCODE_FLAG_REPEAT can be set
- * depending on the protocol
- * @rc_proto: see enum rc_proto
- * @keycode: the translated keycode. Set to 0 for transmit.
- * @scancode: the scancode received or to be sent
- */
-struct lirc_scancode {
- __u64 timestamp;
- __u16 flags;
- __u16 rc_proto;
- __u32 keycode;
- __u64 scancode;
-};
-
-/* Set if the toggle bit of rc-5 or rc-6 is enabled */
-#define LIRC_SCANCODE_FLAG_TOGGLE 1
-/* Set if this is a nec or sanyo repeat */
-#define LIRC_SCANCODE_FLAG_REPEAT 2
-
-/**
- * enum rc_proto - the Remote Controller protocol
- *
- * @RC_PROTO_UNKNOWN: Protocol not known
- * @RC_PROTO_OTHER: Protocol known but proprietary
- * @RC_PROTO_RC5: Philips RC5 protocol
- * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol
- * @RC_PROTO_RC5_SZ: StreamZap variant of RC5
- * @RC_PROTO_JVC: JVC protocol
- * @RC_PROTO_SONY12: Sony 12 bit protocol
- * @RC_PROTO_SONY15: Sony 15 bit protocol
- * @RC_PROTO_SONY20: Sony 20 bit protocol
- * @RC_PROTO_NEC: NEC protocol
- * @RC_PROTO_NECX: Extended NEC protocol
- * @RC_PROTO_NEC32: NEC 32 bit protocol
- * @RC_PROTO_SANYO: Sanyo protocol
- * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard
- * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse
- * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol
- * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol
- * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol
- * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol
- * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
- * @RC_PROTO_SHARP: Sharp protocol
- * @RC_PROTO_XMP: XMP protocol
- * @RC_PROTO_CEC: CEC protocol
- * @RC_PROTO_IMON: iMon Pad protocol
- * @RC_PROTO_RCMM12: RC-MM protocol 12 bits
- * @RC_PROTO_RCMM24: RC-MM protocol 24 bits
- * @RC_PROTO_RCMM32: RC-MM protocol 32 bits
- */
-enum rc_proto {
- RC_PROTO_UNKNOWN = 0,
- RC_PROTO_OTHER = 1,
- RC_PROTO_RC5 = 2,
- RC_PROTO_RC5X_20 = 3,
- RC_PROTO_RC5_SZ = 4,
- RC_PROTO_JVC = 5,
- RC_PROTO_SONY12 = 6,
- RC_PROTO_SONY15 = 7,
- RC_PROTO_SONY20 = 8,
- RC_PROTO_NEC = 9,
- RC_PROTO_NECX = 10,
- RC_PROTO_NEC32 = 11,
- RC_PROTO_SANYO = 12,
- RC_PROTO_MCIR2_KBD = 13,
- RC_PROTO_MCIR2_MSE = 14,
- RC_PROTO_RC6_0 = 15,
- RC_PROTO_RC6_6A_20 = 16,
- RC_PROTO_RC6_6A_24 = 17,
- RC_PROTO_RC6_6A_32 = 18,
- RC_PROTO_RC6_MCE = 19,
- RC_PROTO_SHARP = 20,
- RC_PROTO_XMP = 21,
- RC_PROTO_CEC = 22,
- RC_PROTO_IMON = 23,
- RC_PROTO_RCMM12 = 24,
- RC_PROTO_RCMM24 = 25,
- RC_PROTO_RCMM32 = 26,
-};
-
-#endif
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 4cd39aaccbe7..82858b697c05 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -465,6 +465,8 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
* siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
*/
__u64 sig_data;
};
@@ -1332,9 +1334,9 @@ union perf_mem_data_src {
/* hop level */
#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
-#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
-#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
-#define PERF_MEM_HOPS_3 0x04 /* remote board */
+#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
+#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
+#define PERF_MEM_HOPS_3 0x04 /* remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index bb73e9a0b24f..e998764f0262 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -272,4 +272,7 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
#endif /* _LINUX_PRCTL_H */
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 5fbb79e30819..2d3e5df39a59 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -56,8 +56,10 @@
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
@@ -202,6 +204,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, 'msbits' hardware parameter is often used to deliver information about the
+ * available bit count in most significant bit. It's for the case of so-called 'left-justified' or
+ * `right-padding` sample which has less width than 32 bit.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
@@ -300,7 +307,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
-
+#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
index 581f9ffb4237..1973a18c096b 100644
--- a/tools/lib/perf/include/internal/cpumap.h
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -3,11 +3,7 @@
#define __LIBPERF_INTERNAL_CPUMAP_H
#include <linux/refcount.h>
-
-/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
-struct perf_cpu {
- int cpu;
-};
+#include <perf/cpumap.h>
/**
* A sized, reference counted, sorted array of integers representing CPU
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 15b8faafd615..4a2edbdb5e2b 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -7,6 +7,11 @@
#include <stdio.h>
#include <stdbool.h>
+/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
+struct perf_cpu {
+ int cpu;
+};
+
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 93696affda2e..6fa0d651576b 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -2,6 +2,7 @@ LIBPERF_0.0.1 {
global:
libperf_init;
perf_cpu_map__dummy_new;
+ perf_cpu_map__default_new;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index f7ee07cb5818..0d1634cedf44 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -13,6 +13,7 @@
#include <internal/lib.h>
#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/stringify.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
@@ -294,6 +295,103 @@ static u64 read_timestamp(void)
return low | ((u64)high) << 32;
}
+#elif defined(__aarch64__)
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+static u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+#define PMEVCNTR_READ(idx) \
+ static u64 read_pmevcntr_##idx(void) { \
+ return read_sysreg(pmevcntr##idx##_el0); \
+ }
+
+PMEVCNTR_READ(0);
+PMEVCNTR_READ(1);
+PMEVCNTR_READ(2);
+PMEVCNTR_READ(3);
+PMEVCNTR_READ(4);
+PMEVCNTR_READ(5);
+PMEVCNTR_READ(6);
+PMEVCNTR_READ(7);
+PMEVCNTR_READ(8);
+PMEVCNTR_READ(9);
+PMEVCNTR_READ(10);
+PMEVCNTR_READ(11);
+PMEVCNTR_READ(12);
+PMEVCNTR_READ(13);
+PMEVCNTR_READ(14);
+PMEVCNTR_READ(15);
+PMEVCNTR_READ(16);
+PMEVCNTR_READ(17);
+PMEVCNTR_READ(18);
+PMEVCNTR_READ(19);
+PMEVCNTR_READ(20);
+PMEVCNTR_READ(21);
+PMEVCNTR_READ(22);
+PMEVCNTR_READ(23);
+PMEVCNTR_READ(24);
+PMEVCNTR_READ(25);
+PMEVCNTR_READ(26);
+PMEVCNTR_READ(27);
+PMEVCNTR_READ(28);
+PMEVCNTR_READ(29);
+PMEVCNTR_READ(30);
+
+/*
+ * Read a value direct from PMEVCNTR<idx>
+ */
+static u64 read_perf_counter(unsigned int counter)
+{
+ static u64 (* const read_f[])(void) = {
+ read_pmevcntr_0,
+ read_pmevcntr_1,
+ read_pmevcntr_2,
+ read_pmevcntr_3,
+ read_pmevcntr_4,
+ read_pmevcntr_5,
+ read_pmevcntr_6,
+ read_pmevcntr_7,
+ read_pmevcntr_8,
+ read_pmevcntr_9,
+ read_pmevcntr_10,
+ read_pmevcntr_11,
+ read_pmevcntr_12,
+ read_pmevcntr_13,
+ read_pmevcntr_14,
+ read_pmevcntr_15,
+ read_pmevcntr_16,
+ read_pmevcntr_17,
+ read_pmevcntr_18,
+ read_pmevcntr_19,
+ read_pmevcntr_20,
+ read_pmevcntr_21,
+ read_pmevcntr_22,
+ read_pmevcntr_23,
+ read_pmevcntr_24,
+ read_pmevcntr_25,
+ read_pmevcntr_26,
+ read_pmevcntr_27,
+ read_pmevcntr_28,
+ read_pmevcntr_29,
+ read_pmevcntr_30,
+ read_pmccntr
+ };
+
+ if (counter < ARRAY_SIZE(read_f))
+ return (read_f[counter])();
+
+ return 0;
+}
+
+static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+
#else
static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
static u64 read_timestamp(void) { return 0; }
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index d39378eaf897..87b0510a556f 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -14,6 +14,8 @@ static int libperf_print(enum libperf_print_level level,
int test_cpumap(int argc, char **argv)
{
struct perf_cpu_map *cpus;
+ struct perf_cpu cpu;
+ int idx;
__T_START;
@@ -27,6 +29,15 @@ int test_cpumap(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
+ cpus = perf_cpu_map__default_new();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ __T("wrong cpu number", cpu.cpu != -1);
+
+ perf_cpu_map__put(cpus);
+
__T_END;
return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index b3479dfa9a1c..fa854c83b7e7 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
#include <sched.h>
#include <stdio.h>
#include <stdarg.h>
@@ -526,12 +527,12 @@ static int test_stat_multiplexing(void)
min = counts[0].val;
for (i = 0; i < EVENT_NUM; i++) {
- __T_VERBOSE("Event %2d -- Raw count = %lu, run = %lu, enable = %lu\n",
+ __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
i, counts[i].val, counts[i].run, counts[i].ena);
perf_counts_values__scale(&counts[i], true, &scaled);
if (scaled == 1) {
- __T_VERBOSE("\t Scaled count = %lu (%.2lf%%, %lu/%lu)\n",
+ __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
counts[i].val,
(double)counts[i].run / (double)counts[i].ena * 100.0,
counts[i].run, counts[i].ena);
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 33ae9334861a..89be89afb24d 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -130,6 +130,9 @@ static int test_stat_user_read(int event)
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
};
int err, i;
@@ -150,7 +153,7 @@ static int test_stat_user_read(int event)
pc = perf_evsel__mmap_base(evsel, 0, 0);
__T("failed to get mmapped address", pc);
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
__T("userspace counter access not supported", pc->cap_user_rdpmc);
__T("userspace counter access not enabled", pc->index);
__T("userspace counter width not set", pc->pmc_width >= 32);
diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
index 794a375dad36..b2aec04fce8f 100644
--- a/tools/lib/subcmd/subcmd-util.h
+++ b/tools/lib/subcmd/subcmd-util.h
@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
static inline void *xrealloc(void *ptr, size_t size)
{
void *ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret) {
- ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret)
- die("Out of memory, realloc failed");
- }
+ if (!ret)
+ die("Out of memory, realloc failed");
return ret;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index c2d2ab9a2861..7c33ec67c4a9 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2854,7 +2854,7 @@ static inline bool func_uaccess_safe(struct symbol *func)
static inline const char *call_dest_name(struct instruction *insn)
{
- static char pvname[16];
+ static char pvname[19];
struct reloc *rel;
int idx;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index dec24dc0e767..a8785dec5ca6 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -1115,6 +1115,7 @@ enum perf_ftrace_subcommand {
int cmd_ftrace(int argc, const char **argv)
{
int ret;
+ int (*cmd_func)(struct perf_ftrace *) = NULL;
struct perf_ftrace ftrace = {
.tracer = DEFAULT_TRACER,
.target = { .uid = UINT_MAX, },
@@ -1221,6 +1222,28 @@ int cmd_ftrace(int argc, const char **argv)
goto out_delete_filters;
}
+ switch (subcmd) {
+ case PERF_FTRACE_TRACE:
+ if (!argc && target__none(&ftrace.target))
+ ftrace.target.system_wide = true;
+ cmd_func = __cmd_ftrace;
+ break;
+ case PERF_FTRACE_LATENCY:
+ if (list_empty(&ftrace.filters)) {
+ pr_err("Should provide a function to measure\n");
+ parse_options_usage(ftrace_usage, options, "T", 1);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ cmd_func = __cmd_latency;
+ break;
+ case PERF_FTRACE_NONE:
+ default:
+ pr_err("Invalid subcommand\n");
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+
ret = target__validate(&ftrace.target);
if (ret) {
char errbuf[512];
@@ -1248,27 +1271,7 @@ int cmd_ftrace(int argc, const char **argv)
goto out_delete_evlist;
}
- switch (subcmd) {
- case PERF_FTRACE_TRACE:
- if (!argc && target__none(&ftrace.target))
- ftrace.target.system_wide = true;
- ret = __cmd_ftrace(&ftrace);
- break;
- case PERF_FTRACE_LATENCY:
- if (list_empty(&ftrace.filters)) {
- pr_err("Should provide a function to measure\n");
- parse_options_usage(ftrace_usage, options, "T", 1);
- ret = -EINVAL;
- goto out_delete_evlist;
- }
- ret = __cmd_latency(&ftrace);
- break;
- case PERF_FTRACE_NONE:
- default:
- pr_err("Invalid subcommand\n");
- ret = -EINVAL;
- break;
- }
+ ret = cmd_func(&ftrace);
out_delete_evlist:
evlist__delete(ftrace.evlist);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index abae8184e171..fa478ddcd18a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -463,7 +463,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
return -EINVAL;
if (PRINT_FIELD(WEIGHT) &&
- evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(SYM) &&
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 32844d8a0ea5..52b137a184a6 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1536,13 +1536,20 @@ static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
return fprintf(fp, " ? ");
}
+static pid_t workload_pid = -1;
static bool done = false;
static bool interrupted = false;
-static void sig_handler(int sig)
+static void sighandler_interrupt(int sig __maybe_unused)
{
- done = true;
- interrupted = sig == SIGINT;
+ done = interrupted = true;
+}
+
+static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
+ void *context __maybe_unused)
+{
+ if (info->si_pid == workload_pid)
+ done = true;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
@@ -3938,7 +3945,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
bool draining = false;
trace->live = true;
- signal(SIGCHLD, sig_handler);
if (!trace->raw_augmented_syscalls) {
if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
@@ -4018,6 +4024,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
fprintf(trace->output, "Couldn't run the workload!\n");
goto out_delete_evlist;
}
+ workload_pid = evlist->workload.pid;
}
err = evlist__open(evlist);
@@ -4887,10 +4894,16 @@ int cmd_trace(int argc, const char **argv)
const char * const trace_subcommands[] = { "record", NULL };
int err = -1;
char bf[BUFSIZ];
+ struct sigaction sigchld_act;
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
- signal(SIGINT, sig_handler);
+ signal(SIGINT, sighandler_interrupt);
+
+ memset(&sigchld_act, 0, sizeof(sigchld_act));
+ sigchld_act.sa_flags = SA_SIGINFO;
+ sigchld_act.sa_sigaction = sighandler_chld;
+ sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
trace.sctbl = syscalltbl__new();
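Note: the builtin-trace.c change above switches SIGCHLD handling to SA_SIGINFO so that only the exit of the traced workload (si_pid matching workload_pid) ends the session, instead of any child that happens to terminate. A small standalone sketch of the same pattern (all names here are illustrative, not perf code; the child just runs sleep as a stand-in workload):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile sig_atomic_t done;
static pid_t workload_pid = -1;

static void sigchld_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* React only to the child we actually launched. */
	if (info->si_pid == workload_pid)
		done = 1;
}

int main(void)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_flags = SA_SIGINFO;
	act.sa_sigaction = sigchld_handler;
	sigaction(SIGCHLD, &act, NULL);

	workload_pid = fork();
	if (workload_pid == 0) {
		execlp("sleep", "sleep", "1", (char *)NULL);
		_exit(127);
	}

	while (!done)
		usleep(10 * 1000);	/* toy poll loop; perf does real work here */

	waitpid(workload_pid, NULL, 0);
	printf("workload %d finished\n", (int)workload_pid);
	return 0;
}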
diff --git a/tools/perf/tests/attr/README b/tools/perf/tests/attr/README
index a36f49fb4dbe..1116fc6bf2ac 100644
--- a/tools/perf/tests/attr/README
+++ b/tools/perf/tests/attr/README
@@ -45,8 +45,10 @@ Following tests are defined (with perf commands):
perf record -d kill (test-record-data)
perf record -F 100 kill (test-record-freq)
perf record -g kill (test-record-graph-default)
+ perf record -g kill (test-record-graph-default-aarch64)
perf record --call-graph dwarf kill (test-record-graph-dwarf)
perf record --call-graph fp kill (test-record-graph-fp)
+ perf record --call-graph fp kill (test-record-graph-fp-aarch64)
perf record --group -e cycles,instructions kill (test-record-group)
perf record -e '{cycles,instructions}' kill (test-record-group1)
perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
index 5d8234d50845..f0a18b4ea4f5 100644
--- a/tools/perf/tests/attr/test-record-graph-default
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -2,6 +2,8 @@
command = record
args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
+# arm64 enables registers in the default mode (fp)
+arch = !aarch64
[event:base-record]
sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-default-aarch64 b/tools/perf/tests/attr/test-record-graph-default-aarch64
new file mode 100644
index 000000000000..e98d62efb6f7
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-default-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
index 5630521c0b0f..a6e60e839205 100644
--- a/tools/perf/tests/attr/test-record-graph-fp
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -2,6 +2,8 @@
command = record
args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
+# arm64 enables registers in fp mode
+arch = !aarch64
[event:base-record]
sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-fp-aarch64 b/tools/perf/tests/attr/test-record-graph-fp-aarch64
new file mode 100644
index 000000000000..cbeea9971285
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-fp-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
index 1f147fe6595f..e32ece90e164 100644
--- a/tools/perf/tests/sigtrap.c
+++ b/tools/perf/tests/sigtrap.c
@@ -22,19 +22,6 @@
#include "tests.h"
#include "../perf-sys.h"
-/*
- * PowerPC and S390 do not support creation of instruction breakpoints using the
- * perf_event interface.
- *
- * Just disable the test for these architectures until these issues are
- * resolved.
- */
-#if defined(__powerpc__) || defined(__s390x__)
-#define BP_ACCOUNT_IS_SUPPORTED 0
-#else
-#define BP_ACCOUNT_IS_SUPPORTED 1
-#endif
-
#define NUM_THREADS 5
static struct {
@@ -135,7 +122,7 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __m
char sbuf[STRERR_BUFSIZE];
int i, fd, ret = TEST_FAIL;
- if (!BP_ACCOUNT_IS_SUPPORTED) {
+ if (!BP_SIGNAL_IS_SUPPORTED) {
pr_debug("Test not supported on this architecture");
return TEST_SKIP;
}
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
index 3109d7b05e11..3d278785fe57 100755
--- a/tools/perf/trace/beauty/prctl_option.sh
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -4,7 +4,7 @@
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *prctl_options[] = {\n"
-regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*'
+regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*\/.*)?$'
egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
sed -r "s/$regex/\2 \1/g" | \
sort -n | xargs printf "\t[%s] = \"%s\",\n"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 01900689dc00..8190a124b99d 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -2036,6 +2036,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
memset(&objdump_process, 0, sizeof(objdump_process));
objdump_process.argv = objdump_argv;
objdump_process.out = -1;
+ objdump_process.err = -1;
if (start_command(&objdump_process)) {
pr_err("Failure starting to run %s\n", command);
err = -1;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 7ecfaac7536a..16ec605a9fe4 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -1220,9 +1220,10 @@ bpf__obj_config_map(struct bpf_object *obj,
pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
- free(map_name);
if (!err)
*key_scan_pos += strlen(map_opt);
+
+ free(map_name);
return err;
}
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 631e34a0b66f..ac60c08e8e2a 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -266,7 +266,7 @@ static int bperf_cgrp__read(struct evsel *evsel)
idx = evsel->core.idx;
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
if (err) {
- pr_err("bpf map lookup falied: idx=%u, event=%s, cgrp=%s\n",
+ pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
idx, evsel__name(evsel), evsel->cgrp->name);
goto out;
}
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 4f672f7d008c..8b95fb3c4d7b 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -50,8 +50,6 @@ struct cs_etm_auxtrace {
u8 timeless_decoding;
u8 snapshot_mode;
u8 data_queued;
- u8 sample_branches;
- u8 sample_instructions;
int num_cpu;
u64 latest_kernel_timestamp;
@@ -410,8 +408,8 @@ static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
{
struct cs_etm_packet *tmp;
- if (etm->sample_branches || etm->synth_opts.last_branch ||
- etm->sample_instructions) {
+ if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
+ etm->synth_opts.instructions) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
@@ -1365,7 +1363,6 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
- etm->sample_branches = true;
etm->branches_sample_type = attr.sample_type;
etm->branches_id = id;
id += 1;
@@ -1389,7 +1386,6 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
- etm->sample_instructions = true;
etm->instructions_sample_type = attr.sample_type;
etm->instructions_id = id;
id += 1;
@@ -1420,7 +1416,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
tidq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq, tidq);
- if (etm->sample_instructions &&
+ if (etm->synth_opts.instructions &&
tidq->period_instructions >= etm->instructions_sample_period) {
/*
* Emit instruction sample periodically
@@ -1503,7 +1499,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq,
}
}
- if (etm->sample_branches) {
+ if (etm->synth_opts.branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
@@ -1557,6 +1553,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
goto swap_packet;
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
@@ -1582,7 +1579,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq,
}
- if (etm->sample_branches &&
+ if (etm->synth_opts.branches &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
err = cs_etm__synth_branch_sample(etmq, tidq);
if (err)
@@ -1614,6 +1611,7 @@ static int cs_etm__end_block(struct cs_etm_queue *etmq,
* the trace.
*/
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index f5d260b1df4d..15a4547d608e 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_data *data, int nr)
if (!files)
return -ENOMEM;
- data->dir.version = PERF_DIR_VERSION;
- data->dir.files = files;
- data->dir.nr = nr;
-
for (i = 0; i < nr; i++) {
struct perf_data_file *file = &files[i];
@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_data *data, int nr)
file->fd = ret;
}
+ data->dir.version = PERF_DIR_VERSION;
+ data->dir.files = files;
+ data->dir.nr = nr;
return 0;
out_err:
diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
index 7f234215147d..57f02beef023 100644
--- a/tools/perf/util/evlist-hybrid.c
+++ b/tools/perf/util/evlist-hybrid.c
@@ -154,8 +154,8 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
perf_cpu_map__put(matched_cpus);
perf_cpu_map__put(unmatched_cpus);
}
-
- ret = (unmatched_count == events_nr) ? -1 : 0;
+ if (events_nr)
+ ret = (unmatched_count == events_nr) ? -1 : 0;
out:
perf_cpu_map__put(cpus);
return ret;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f70ba56912d4..394550003693 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2073,6 +2073,7 @@ static void ip__resolve_ams(struct thread *thread,
ams->addr = ip;
ams->al_addr = al.addr;
+ ams->al_level = al.level;
ams->ms.maps = al.maps;
ams->ms.sym = al.sym;
ams->ms.map = al.map;
@@ -2092,6 +2093,7 @@ static void ip__resolve_data(struct thread *thread,
ams->addr = addr;
ams->al_addr = al.addr;
+ ams->al_level = al.level;
ams->ms.maps = al.maps;
ams->ms.sym = al.sym;
ams->ms.map = al.map;
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
index 7d22ade082c8..e08817b0c30f 100644
--- a/tools/perf/util/map_symbol.h
+++ b/tools/perf/util/map_symbol.h
@@ -18,6 +18,7 @@ struct addr_map_symbol {
struct map_symbol ms;
u64 addr;
u64 al_addr;
+ char al_level;
u64 phys_addr;
u64 data_page_size;
};
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 47b7531f51da..98af3fa4ea35 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -52,7 +52,7 @@ static void __p_branch_sample_type(char *buf, size_t size, u64 value)
bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
- bit_name(HW_INDEX),
+ bit_name(TYPE_SAVE), bit_name(HW_INDEX),
{ .name = NULL, }
};
#undef bit_name
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 2c0d30f08e78..498b05708db5 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1503,11 +1503,12 @@ static int machines__deliver_event(struct machines *machines,
++evlist->stats.nr_unknown_id;
return 0;
}
- dump_sample(evsel, event, sample, perf_env__arch(machine->env));
if (machine == NULL) {
++evlist->stats.nr_unprocessable_samples;
+ dump_sample(evsel, event, sample, perf_env__arch(NULL));
return 0;
}
+ dump_sample(evsel, event, sample, perf_env__arch(machine->env));
return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index cfba8c337783..2da081ef532b 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -915,7 +915,7 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
- he->level, bf, size, width);
+ from->al_level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
@@ -928,7 +928,7 @@ static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
- he->level, bf, size, width);
+ to->al_level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 5db83e51ceef..9cbe351b141f 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -585,15 +585,16 @@ static void collect_all_aliases(struct perf_stat_config *config, struct evsel *c
alias = list_prepare_entry(counter, &(evlist->core.entries), core.node);
list_for_each_entry_continue (alias, &evlist->core.entries, core.node) {
- if (strcmp(evsel__name(alias), evsel__name(counter)) ||
- alias->scale != counter->scale ||
- alias->cgrp != counter->cgrp ||
- strcmp(alias->unit, counter->unit) ||
- evsel__is_clock(alias) != evsel__is_clock(counter) ||
- !strcmp(alias->pmu_name, counter->pmu_name))
- break;
- alias->merged_stat = true;
- cb(config, alias, data, false);
+ /* Merge events with the same name, etc. but on different PMUs. */
+ if (!strcmp(evsel__name(alias), evsel__name(counter)) &&
+ alias->scale == counter->scale &&
+ alias->cgrp == counter->cgrp &&
+ !strcmp(alias->unit, counter->unit) &&
+ evsel__is_clock(alias) == evsel__is_clock(counter) &&
+ strcmp(alias->pmu_name, counter->pmu_name)) {
+ alias->merged_stat = true;
+ cb(config, alias, data, false);
+ }
}
}
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 70f095624a0b..b654de0841f8 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1784,6 +1784,25 @@ int __machine__synthesize_threads(struct machine *machine, struct perf_tool *too
perf_event__handler_t process, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
{
+ /*
+ * When perf runs in non-root PID namespace, and the namespace's proc FS
+ * is not mounted, nsinfo__is_in_root_namespace() returns false.
+ * In this case, the proc FS is coming from the parent namespace, thus
+ * perf tool will wrongly gather process info from its parent PID
+ * namespace.
+ *
+ * To avoid the confusion that the perf tool runs in a child PID
+ * namespace but it synthesizes thread info from its parent PID
+ * namespace, return failure with a warning.
+ */
+ if (!nsinfo__is_in_root_namespace()) {
+ pr_err("Perf runs in non-root PID namespace but it tries to ");
+ pr_err("gather process info from its parent PID namespace.\n");
+ pr_err("Please mount the proc file system properly, e.g. ");
+ pr_err("add the option '--mount-proc' for unshare command.\n");
+ return -EPERM;
+ }
+
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine,
needs_mmap, data_mmap);
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index b0be5f40a3f1..79d102304470 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -90,7 +90,7 @@ EXTRA_WARNINGS += -Wstrict-aliasing=3
else ifneq ($(CROSS_COMPILE),)
CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc))
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null))
ifneq ($(GCC_TOOLCHAIN_DIR),)
CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
CLANG_CROSS_FLAGS += --sysroot=$(shell $(CROSS_COMPILE)gcc -print-sysroot)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 44bbe54f25f1..3c4196cef3ed 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -6,6 +6,7 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
+import importlib.abc
import importlib.util
import logging
import subprocess
diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py
index 4f32133ed77c..13d854afca9d 100755
--- a/tools/testing/kunit/run_checks.py
+++ b/tools/testing/kunit/run_checks.py
@@ -61,7 +61,7 @@ def main(argv: Sequence[str]) -> None:
elif isinstance(ex, subprocess.CalledProcessError):
print(f'{name}: FAILED')
else:
- print('{name}: unexpected exception: {ex}')
+ print(f'{name}: unexpected exception: {ex}')
continue
output = ex.output
diff --git a/tools/testing/selftests/bpf/prog_tests/timer_crash.c b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
new file mode 100644
index 000000000000..f74b82305da8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/timer_crash.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "timer_crash.skel.h"
+
+enum {
+ MODE_ARRAY,
+ MODE_HASH,
+};
+
+static void test_timer_crash_mode(int mode)
+{
+ struct timer_crash *skel;
+
+ skel = timer_crash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
+ return;
+ skel->bss->pid = getpid();
+ skel->bss->crash_map = mode;
+ if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
+ goto end;
+ usleep(1);
+end:
+ timer_crash__destroy(skel);
+}
+
+void test_timer_crash(void)
+{
+ if (test__start_subtest("array"))
+ test_timer_crash_mode(MODE_ARRAY);
+ if (test__start_subtest("hash"))
+ test_timer_crash_mode(MODE_HASH);
+}
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 2966564b8497..6c85b00f27b2 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -235,7 +235,7 @@ SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
- int *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -249,8 +249,11 @@ int bpf_prog4(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
@@ -263,6 +266,7 @@ int bpf_prog6(struct sk_msg_md *msg)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+ int err = 0;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -279,8 +283,11 @@ int bpf_prog6(struct sk_msg_md *msg)
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
@@ -338,7 +345,7 @@ SEC("sk_msg5")
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -352,8 +359,11 @@ int bpf_prog10(struct sk_msg_md *msg)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_PASS;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
diff --git a/tools/testing/selftests/bpf/progs/timer_crash.c b/tools/testing/selftests/bpf/progs/timer_crash.c
new file mode 100644
index 000000000000..f8f7944e70da
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/timer_crash.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_elem {
+ struct bpf_timer timer;
+ struct bpf_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} amap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} hmap SEC(".maps");
+
+int pid = 0;
+int crash_map = 0; /* 0 for amap, 1 for hmap */
+
+SEC("fentry/do_nanosleep")
+int sys_enter(void *ctx)
+{
+ struct map_elem *e, value = {};
+ void *map = crash_map ? (void *)&hmap : (void *)&amap;
+
+ if (bpf_get_current_task_btf()->tgid != pid)
+ return 0;
+
+ *(void **)&value = (void *)0xdeadcaf3;
+
+ bpf_map_update_elem(map, &(int){0}, &value, 0);
+ /* For array map, doing bpf_map_update_elem will do a
+ * check_and_free_timer_in_array, which will trigger the crash if the timer
+ * pointer was overwritten; for hmap we need to use bpf_timer_cancel.
+ */
+ if (crash_map == 1) {
+ e = bpf_map_lookup_elem(map, &(int){0});
+ if (!e)
+ return 0;
+ bpf_timer_cancel(&e->timer);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
index ebf68dce5504..2893e9f2f1e0 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c
+++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
@@ -28,7 +28,6 @@
// 5. We can read keycode from same /dev/lirc device
#include <linux/bpf.h>
-#include <linux/lirc.h>
#include <linux/input.h>
#include <errno.h>
#include <stdio.h>
diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c
index 076cf4325f78..cd4582129c7d 100644
--- a/tools/testing/selftests/clone3/clone3.c
+++ b/tools/testing/selftests/clone3/clone3.c
@@ -126,8 +126,6 @@ static void test_clone3(uint64_t flags, size_t size, int expected,
int main(int argc, char *argv[])
{
- pid_t pid;
-
uid_t uid = getuid();
ksft_print_header();
diff --git a/tools/testing/selftests/cpufreq/main.sh b/tools/testing/selftests/cpufreq/main.sh
index 31f8c9a76c5f..60ce18ed0666 100755
--- a/tools/testing/selftests/cpufreq/main.sh
+++ b/tools/testing/selftests/cpufreq/main.sh
@@ -194,5 +194,5 @@ prerequisite
# Run requested functions
clear_dumps $OUTFILE
-do_test >> $OUTFILE.txt
+do_test | tee -a $OUTFILE.txt
dmesg_dumps $OUTFILE
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index bcb110e830ce..dea33dc93790 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -50,8 +50,8 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
else
log_test "'$current_test' [$profile] overflow $target"
fi
+ RET_FIN=$(( RET_FIN || RET ))
done
- RET_FIN=$(( RET_FIN || RET ))
done
done
current_test=""
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
index 3e3e06ea5703..86e787895f78 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
@@ -60,7 +60,8 @@ __tc_police_test()
tc_police_rules_create $count $should_fail
- offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l)
+ offload_count=$(tc -j filter show dev $swp1 ingress |
+ jq "[.[] | select(.options.in_hw == true)] | length")
((offload_count == count))
check_err_fail $should_fail $? "tc police offload count"
}
diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile
index dd61118df66e..2d7fca446c7f 100644
--- a/tools/testing/selftests/exec/Makefile
+++ b/tools/testing/selftests/exec/Makefile
@@ -3,9 +3,9 @@ CFLAGS = -Wall
CFLAGS += -Wno-nonnull
CFLAGS += -D_GNU_SOURCE
-TEST_PROGS := binfmt_script non-regular
-TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216
-TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe
+TEST_PROGS := binfmt_script
+TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular
+TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir
# Makefile is a run-time dependency, since it's accessed by the execveat test
TEST_FILES := Makefile
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
index e96e279e0533..25432b8cd5bd 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
@@ -19,7 +19,7 @@ fail() { # mesg
FILTER=set_ftrace_filter
FUNC1="schedule"
-FUNC2="do_softirq"
+FUNC2="scheduler_tick"
ALL_FUNCS="#### all functions enabled ####"
diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile
index 12631f0076a1..11e157d7533b 100644
--- a/tools/testing/selftests/futex/Makefile
+++ b/tools/testing/selftests/futex/Makefile
@@ -11,7 +11,7 @@ all:
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
- make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
if [ -e $$DIR/$(TEST_PROGS) ]; then \
rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \
fi \
@@ -32,6 +32,6 @@ override define CLEAN
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
- make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
done
endef
diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
index 06256c96df12..f4a15cbdd5ea 100644
--- a/tools/testing/selftests/ir/ir_loopback.c
+++ b/tools/testing/selftests/ir/ir_loopback.c
@@ -29,6 +29,16 @@
#define SYSFS_PATH_MAX 256
#define DNAME_PATH_MAX 256
+/*
+ * Support ancient lirc.h which does not have these values. Can be removed
+ * once RHEL 8 is no longer a relevant testing platform.
+ */
+#if RC_PROTO_MAX < 26
+#define RC_PROTO_RCMM12 24
+#define RC_PROTO_RCMM24 25
+#define RC_PROTO_RCMM32 26
+#endif
+
static const struct {
enum rc_proto proto;
const char *name;
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 471eaa7b3a3f..11779405dc80 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -877,7 +877,8 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
}
t->timed_out = true;
- kill(t->pid, SIGKILL);
+ // signal process group
+ kill(-(t->pid), SIGKILL);
}
void __wait_for_test(struct __test_metadata *t)
@@ -987,6 +988,7 @@ void __run_test(struct __fixture_metadata *f,
ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
t->passed = 0;
} else if (t->pid == 0) {
+ setpgrp();
t->fn(t, variant);
if (t->skip)
_exit(255);
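Note: the harness change above has each test child call setpgrp() so the timeout handler can signal the whole process group with kill(-pid, SIGKILL), which also catches grandchildren the test may have forked. A minimal standalone sketch of that pattern (illustrative only; setpgid(0, 0) is used as the portable equivalent of the setpgrp() call added above):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		setpgid(0, 0);		/* like the setpgrp() added in __run_test() */
		if (fork() == 0) {	/* grandchild a plain kill(pid) would miss */
			pause();
			_exit(0);
		}
		pause();
		_exit(0);
	}

	sleep(1);			/* stand-in for the harness timeout firing */
	kill(-pid, SIGKILL);		/* negative pid: signal the whole group */
	waitpid(pid, NULL, 0);
	printf("killed test group %d\n", (int)pid);
	return 0;
}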
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 0e4926bc9a58..17c3f0749f05 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -82,7 +82,6 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index 9ad38bd360a4..b08d30bf71c5 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -366,6 +366,7 @@ static struct kvm_vm *test_vm_create(void)
{
struct kvm_vm *vm;
unsigned int i;
+ int ret;
int nr_vcpus = test_args.nr_vcpus;
vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -382,7 +383,11 @@ static struct kvm_vm *test_vm_create(void)
ucall_init(vm, NULL);
test_init_timer_irq(vm);
- vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+ ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+ if (ret < 0) {
+ print_skip("Failed to create vgic-v3");
+ exit(KSFT_SKIP);
+ }
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
index e6c7d7f8fbd1..7eca97799917 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
@@ -761,6 +761,10 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
+ if (gic_fd < 0) {
+ print_skip("Failed to create vgic-v3, skipping");
+ exit(KSFT_SKIP);
+ }
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
index b3a0fca0d780..f5cd0c536d85 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/vgic.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -52,7 +52,9 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
nr_vcpus, nr_vcpus_created);
/* Distributor setup */
- gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ if (_kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3,
+ false, &gic_fd) != 0)
+ return -1;
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0, &nr_irqs, true);
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 192a2899bae8..94df2692e6e4 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -455,6 +455,7 @@ static void mfd_fail_write(int fd)
printf("mmap()+mprotect() didn't fail as expected\n");
abort();
}
+ munmap(p, mfd_def_size);
}
/* verify PUNCH_HOLE fails */
diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c
index e54106643337..4c88238fc8f0 100644
--- a/tools/testing/selftests/mincore/mincore_selftest.c
+++ b/tools/testing/selftests/mincore/mincore_selftest.c
@@ -207,15 +207,21 @@ TEST(check_file_mmap)
errno = 0;
fd = open(".", O_TMPFILE | O_RDWR, 0600);
- ASSERT_NE(-1, fd) {
- TH_LOG("Can't create temporary file: %s",
- strerror(errno));
+ if (fd < 0) {
+ ASSERT_EQ(errno, EOPNOTSUPP) {
+ TH_LOG("Can't create temporary file: %s",
+ strerror(errno));
+ }
+ SKIP(goto out_free, "O_TMPFILE not supported by filesystem.");
}
errno = 0;
retval = fallocate(fd, 0, 0, FILE_SIZE);
- ASSERT_EQ(0, retval) {
- TH_LOG("Error allocating space for the temporary file: %s",
- strerror(errno));
+ if (retval) {
+ ASSERT_EQ(errno, EOPNOTSUPP) {
+ TH_LOG("Error allocating space for the temporary file: %s",
+ strerror(errno));
+ }
+ SKIP(goto out_close, "fallocate not supported by filesystem.");
}
/*
@@ -271,7 +277,9 @@ TEST(check_file_mmap)
}
munmap(addr, FILE_SIZE);
+out_close:
close(fd);
+out_free:
free(vec);
}
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index f31205f04ee0..8c5fea68ae67 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -1236,7 +1236,7 @@ static int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long
}
/**
- * Validate that an attached mount in our mount namespace can be idmapped.
+ * Validate that an attached mount in our mount namespace cannot be idmapped.
* (The kernel enforces that the mount's mount namespace and the caller's mount
* namespace match.)
*/
@@ -1259,7 +1259,7 @@ TEST_F(mount_setattr_idmapped, attached_mount_inside_current_mount_namespace)
attr.userns_fd = get_userns_fd(0, 10000, 10000);
ASSERT_GE(attr.userns_fd, 0);
- ASSERT_EQ(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
+ ASSERT_NE(sys_mount_setattr(open_tree_fd, "", AT_EMPTY_PATH, &attr, sizeof(attr)), 0);
ASSERT_EQ(close(attr.userns_fd), 0);
ASSERT_EQ(close(open_tree_fd), 0);
}
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 2674ba20d524..ff821025d309 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -71,6 +71,36 @@ chk_msk_remote_key_nr()
__chk_nr "grep -c remote_key" $*
}
+# $1: ns, $2: port
+wait_local_port_listen()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex i
+
+ port_hex="$(printf "%04X" "${port}")"
+ for i in $(seq 10); do
+ ip netns exec "${listener_ns}" cat /proc/net/tcp | \
+ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
+ break
+ sleep 0.1
+ done
+}
+
+wait_connected()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex i
+
+ port_hex="$(printf "%04X" "${port}")"
+ for i in $(seq 10); do
+ ip netns exec ${listener_ns} grep -q " 0100007F:${port_hex} " /proc/net/tcp && break
+ sleep 0.1
+ done
+}
trap cleanup EXIT
ip netns add $ns
@@ -81,15 +111,15 @@ echo "a" | \
ip netns exec $ns \
./mptcp_connect -p 10000 -l -t ${timeout_poll} \
0.0.0.0 >/dev/null &
-sleep 0.1
+wait_local_port_listen $ns 10000
chk_msk_nr 0 "no msk on netns creation"
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10000 -j -t ${timeout_poll} \
+ ./mptcp_connect -p 10000 -r 0 -t ${timeout_poll} \
127.0.0.1 >/dev/null &
-sleep 0.1
+wait_connected $ns 10000
chk_msk_nr 2 "after MPC handshake "
chk_msk_remote_key_nr 2 "....chk remote_key"
chk_msk_fallback_nr 0 "....chk no fallback"
@@ -101,13 +131,13 @@ echo "a" | \
ip netns exec $ns \
./mptcp_connect -p 10001 -l -s TCP -t ${timeout_poll} \
0.0.0.0 >/dev/null &
-sleep 0.1
+wait_local_port_listen $ns 10001
echo "b" | \
timeout ${timeout_test} \
ip netns exec $ns \
- ./mptcp_connect -p 10001 -j -t ${timeout_poll} \
+ ./mptcp_connect -p 10001 -r 0 -t ${timeout_poll} \
127.0.0.1 >/dev/null &
-sleep 0.1
+wait_connected $ns 10001
chk_msk_fallback_nr 1 "check fallback"
flush_pids
@@ -119,7 +149,7 @@ for I in `seq 1 $NR_CLIENTS`; do
./mptcp_connect -p $((I+10001)) -l -w 10 \
-t ${timeout_poll} 0.0.0.0 >/dev/null &
done
-sleep 0.1
+wait_local_port_listen $ns $((NR_CLIENTS + 10001))
for I in `seq 1 $NR_CLIENTS`; do
echo "b" | \
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index cb5809b89081..f0f4ab96b8f3 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -763,8 +763,8 @@ run_tests_disconnect()
run_tests_lo "$ns1" "$ns1" dead:beef:1::1 1 "-I 3 -i $old_cin"
# restore previous status
- cout=$old_cout
- cout_disconnect="$cout".disconnect
+ sin=$old_sin
+ sin_disconnect="$cout".disconnect
cin=$old_cin
cin_disconnect="$cin".disconnect
connect_per_transfer=1
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index b8bdbec0cf69..0c8a2a20b96c 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -660,6 +660,7 @@ chk_join_nr()
local ack_nr=$4
local count
local dump_stats
+ local with_cookie
printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn"
count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'`
@@ -673,12 +674,20 @@ chk_join_nr()
fi
echo -n " - synack"
+ with_cookie=`ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies`
count=`ip netns exec $ns2 nstat -as | grep MPTcpExtMPJoinSynAckRx | awk '{print $2}'`
[ -z "$count" ] && count=0
if [ "$count" != "$syn_ack_nr" ]; then
- echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
- ret=1
- dump_stats=1
+ # simultaneous connections exceeding the limit with syncookies enabled could go up to
+ # synack validation as the conn limit can be enforced reliably only after
+ # the subflow creation
+ if [ "$with_cookie" = 2 ] && [ "$count" -gt "$syn_ack_nr" ] && [ "$count" -le "$syn_nr" ]; then
+ echo -n "[ ok ]"
+ else
+ echo "[fail] got $count JOIN[s] synack expected $syn_ack_nr"
+ ret=1
+ dump_stats=1
+ fi
else
echo -n "[ ok ]"
fi
@@ -752,11 +761,17 @@ chk_add_nr()
local mis_ack_nr=${8:-0}
local count
local dump_stats
+ local timeout
+
+ timeout=`ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout`
printf "%-39s %s" " " "add"
- count=`ip netns exec $ns2 nstat -as | grep MPTcpExtAddAddr | awk '{print $2}'`
+ count=`ip netns exec $ns2 nstat -as MPTcpExtAddAddr | grep MPTcpExtAddAddr | awk '{print $2}'`
[ -z "$count" ] && count=0
- if [ "$count" != "$add_nr" ]; then
+
+ # if the test configured a short timeout, tolerate more ADD_ADDR options
+ # than expected, due to retransmissions
+ if [ "$count" != "$add_nr" ] && [ "$timeout" -gt 1 -o "$count" -lt "$add_nr" ]; then
echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
ret=1
dump_stats=1
@@ -961,7 +976,7 @@ wait_for_tw()
local ns=$1
while [ $time -lt $timeout_ms ]; do
- local cnt=$(ip netns exec $ns ss -t state time-wait |wc -l)
+ local cnt=$(ip netns exec $ns nstat -as TcpAttemptFails | grep TcpAttemptFails | awk '{print $2}')
[ "$cnt" = 1 ] && return 1
time=$((time + 100))
@@ -1158,7 +1173,11 @@ signal_address_tests()
ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags signal
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags signal
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags signal
- run_tests $ns1 $ns2 10.0.1.1
+
+ # the peer could possibly miss some addr notification, allow retransmission
+ ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr "signal addresses race test" 3 3 3
# the server will not signal the address terminating
# the MPC subflow
diff --git a/tools/testing/selftests/netfilter/.gitignore b/tools/testing/selftests/netfilter/.gitignore
index 8448f74adfec..4cb887b57413 100644
--- a/tools/testing/selftests/netfilter/.gitignore
+++ b/tools/testing/selftests/netfilter/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
nf-queue
+connect_close
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index ffca314897c4..7e81c9a7fff9 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -6,9 +6,9 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
- conntrack_vrf.sh
+ conntrack_vrf.sh nft_synproxy.sh
LDLIBS = -lmnl
-TEST_GEN_FILES = nf-queue
+TEST_GEN_FILES = nf-queue connect_close
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/connect_close.c b/tools/testing/selftests/netfilter/connect_close.c
new file mode 100644
index 000000000000..1c3b0add54c4
--- /dev/null
+++ b/tools/testing/selftests/netfilter/connect_close.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <arpa/inet.h>
+#include <sys/socket.h>
+
+#define PORT 12345
+#define RUNTIME 10
+
+static struct {
+ unsigned int timeout;
+ unsigned int port;
+} opts = {
+ .timeout = RUNTIME,
+ .port = PORT,
+};
+
+static void handler(int sig)
+{
+ _exit(sig == SIGALRM ? 0 : 1);
+}
+
+static void set_timeout(void)
+{
+ struct sigaction action = {
+ .sa_handler = handler,
+ };
+
+ sigaction(SIGALRM, &action, NULL);
+
+ alarm(opts.timeout);
+}
+
+static void do_connect(const struct sockaddr_in *dst)
+{
+ int s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+
+ if (s >= 0)
+ fcntl(s, F_SETFL, O_NONBLOCK);
+
+ connect(s, (struct sockaddr *)dst, sizeof(*dst));
+ close(s);
+}
+
+static void do_accept(const struct sockaddr_in *src)
+{
+ int c, one = 1, s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+
+ if (s < 0)
+ return;
+
+ setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
+ setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+
+ bind(s, (struct sockaddr *)src, sizeof(*src));
+
+ listen(s, 16);
+
+ c = accept(s, NULL, NULL);
+ if (c >= 0)
+ close(c);
+
+ close(s);
+}
+
+static int accept_loop(void)
+{
+ struct sockaddr_in src = {
+ .sin_family = AF_INET,
+ .sin_port = htons(opts.port),
+ };
+
+ inet_pton(AF_INET, "127.0.0.1", &src.sin_addr);
+
+ set_timeout();
+
+ for (;;)
+ do_accept(&src);
+
+ return 1;
+}
+
+static int connect_loop(void)
+{
+ struct sockaddr_in dst = {
+ .sin_family = AF_INET,
+ .sin_port = htons(opts.port),
+ };
+
+ inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
+
+ set_timeout();
+
+ for (;;)
+ do_connect(&dst);
+
+ return 1;
+}
+
+static void parse_opts(int argc, char **argv)
+{
+ int c;
+
+ while ((c = getopt(argc, argv, "t:p:")) != -1) {
+ switch (c) {
+ case 't':
+ opts.timeout = atoi(optarg);
+ break;
+ case 'p':
+ opts.port = atoi(optarg);
+ break;
+ }
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ pid_t p;
+
+ parse_opts(argc, argv);
+
+ p = fork();
+ if (p < 0)
+ return 111;
+
+ if (p > 0)
+ return accept_loop();
+
+ return connect_loop();
+}
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index ed61f6cab60f..b35010cc7f6a 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net6_port_net6_port net_port_mac_proto_net"
# Reported bugs, also described by TYPE_ variables below
-BUGS="flush_remove_add"
+BUGS="flush_remove_add reload"
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
@@ -354,6 +354,23 @@ TYPE_flush_remove_add="
display Add two elements, flush, re-add
"
+TYPE_reload="
+display net,mac with reload
+type_spec ipv4_addr . ether_addr
+chain_spec ip daddr . ether saddr
+dst addr4
+src mac
+start 1
+count 1
+src_delta 2000
+tools sendip nc bash
+proto udp
+
+race_repeat 0
+
+perf_duration 0
+"
+
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -1473,6 +1490,59 @@ test_bug_flush_remove_add() {
nft flush ruleset
}
+# - add ranged element, check that packets match it
+# - reload the set, check packets still match
+test_bug_reload() {
+ setup veth send_"${proto}" set || return ${KSELFTEST_SKIP}
+ rstart=${start}
+
+ range_size=1
+ for i in $(seq "${start}" $((start + count))); do
+ end=$((start + range_size))
+
+ # Avoid negative or zero-sized port ranges
+ if [ $((end / 65534)) -gt $((start / 65534)) ]; then
+ start=${end}
+ end=$((end + 1))
+ fi
+ srcstart=$((start + src_delta))
+ srcend=$((end + src_delta))
+
+ add "$(format)" || return 1
+ range_size=$((range_size + 1))
+ start=$((end + range_size))
+ done
+
+ # check that the kernel does allocate the pcpu scratch map
+ # for a reload with no element add/delete
+ ( echo flush set inet filter test ;
+ nft list set inet filter test ) | nft -f -
+
+ start=${rstart}
+ range_size=1
+
+ for i in $(seq "${start}" $((start + count))); do
+ end=$((start + range_size))
+
+ # Avoid negative or zero-sized port ranges
+ if [ $((end / 65534)) -gt $((start / 65534)) ]; then
+ start=${end}
+ end=$((end + 1))
+ fi
+ srcstart=$((start + src_delta))
+ srcend=$((end + src_delta))
+
+ for j in $(seq ${start} $((range_size / 2 + 1)) ${end}); do
+ send_match "${j}" $((j + src_delta)) || return 1
+ done
+
+ range_size=$((range_size + 1))
+ start=$((end + range_size))
+ done
+
+ nft flush ruleset
+}
+
test_reported_issues() {
eval test_bug_"${subtest}"
}
@@ -1531,4 +1601,4 @@ for name in ${TESTS}; do
done
done
-[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP}
+[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0
diff --git a/tools/testing/selftests/netfilter/nft_fib.sh b/tools/testing/selftests/netfilter/nft_fib.sh
index 6caf6ac8c285..695a1958723f 100755
--- a/tools/testing/selftests/netfilter/nft_fib.sh
+++ b/tools/testing/selftests/netfilter/nft_fib.sh
@@ -174,6 +174,7 @@ test_ping() {
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null
sleep 3
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index 349a319a9e51..79fe627b9e81 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -899,6 +899,144 @@ EOF
ip netns exec "$ns0" nft delete table $family nat
}
+test_stateless_nat_ip()
+{
+ local lret=0
+
+ ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping $ns1 from $ns2 before loading stateless rules"
+ return 1
+ fi
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table ip stateless {
+ map xlate_in {
+ typeof meta iifname . ip saddr . ip daddr : ip daddr
+ elements = {
+ "veth1" . 10.0.2.99 . 10.0.1.99 : 10.0.2.2,
+ }
+ }
+ map xlate_out {
+ typeof meta iifname . ip saddr . ip daddr : ip daddr
+ elements = {
+ "veth0" . 10.0.1.99 . 10.0.2.2 : 10.0.2.99
+ }
+ }
+
+ chain prerouting {
+ type filter hook prerouting priority -400; policy accept;
+ ip saddr set meta iifname . ip saddr . ip daddr map @xlate_in
+ ip daddr set meta iifname . ip saddr . ip daddr map @xlate_out
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add ip statless rules"
+ return $ksft_skip
+ fi
+
+ reset_counters
+
+ ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping $ns1 from $ns2 with stateless rules"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from .2.2, due to stateless rewrite.
+ expect="packets 1 bytes 84"
+ cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0insl | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter "$ns1" ns0insl "$expect" "test_stateless 1"
+ lret=1
+ fi
+
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter "$ns2" ns1$dir "$expect" "test_stateless 2"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets with ns2's original address, due to the stateless rewrite
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter "$ns1" ns0$dir "$expect" "test_stateless 3"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter "$ns0" ns1$dir "$expect" "test_stateless 4"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+ socat -h > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not run stateless nat frag test without socat tool"
+ if [ $lret -eq 0 ]; then
+ return $ksft_skip
+ fi
+
+ ip netns exec "$ns0" nft delete table ip stateless
+ return $lret
+ fi
+
+ local tmpfile=$(mktemp)
+ dd if=/dev/urandom of=$tmpfile bs=4096 count=1 2>/dev/null
+
+ local outfile=$(mktemp)
+ ip netns exec "$ns1" timeout 3 socat -u UDP4-RECV:4233 OPEN:$outfile < /dev/null &
+ sc_r=$!
+
+ sleep 1
+ # re-do with a large udp transfer -> ip fragmentation
+ ip netns exec "$ns2" timeout 3 socat - UDP4-SENDTO:"10.0.1.99:4233" < "$tmpfile" > /dev/null
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: failed to test udp $ns1 to $ns2 with stateless ip nat" 1>&2
+ lret=1
+ fi
+
+ wait
+
+ cmp "$tmpfile" "$outfile"
+ if [ $? -ne 0 ]; then
+ ls -l "$tmpfile" "$outfile"
+ echo "ERROR: in and output file mismatch when checking udp with stateless nat" 1>&2
+ lret=1
+ fi
+
+ rm -f "$tmpfile" "$outfile"
+
+ # ns1 should have seen packets from 2.2, due to stateless rewrite.
+ expect="packets 3 bytes 4164"
+ cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0insl | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter "$ns1" ns0insl "$expect" "test_stateless 5"
+ lret=1
+ fi
+
+ ip netns exec "$ns0" nft delete table ip stateless
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete table ip stateless" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP statless for $ns2"
+
+ return $lret
+}
+
# ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99
for i in 0 1 2; do
ip netns exec ns$i-$sfx nft -f /dev/stdin <<EOF
@@ -965,6 +1103,19 @@ table inet filter {
EOF
done
+# special case for the stateless nat check: the counter needs to
+# run before (input) ip defragmentation
+ip netns exec ns1-$sfx nft -f /dev/stdin <<EOF
+table inet filter {
+ counter ns0insl {}
+
+ chain pre {
+ type filter hook prerouting priority -400; policy accept;
+ ip saddr 10.0.2.2 counter name "ns0insl"
+ }
+}
+EOF
+
sleep 3
# test basic connectivity
for i in 1 2; do
@@ -1019,6 +1170,7 @@ $test_inet_nat && test_redirect inet
$test_inet_nat && test_redirect6 inet
test_port_shadowing
+test_stateless_nat_ip
if [ $ret -ne 0 ];then
echo -n "FAIL: "
diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh
index 7d27f1f3bc01..e12729753351 100755
--- a/tools/testing/selftests/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/netfilter/nft_queue.sh
@@ -113,6 +113,7 @@ table inet $name {
chain output {
type filter hook output priority $prio; policy accept;
tcp dport 12345 queue num 3
+ tcp sport 23456 queue num 3
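+		# also queue packets sent from port 23456 (the port used by the connect_close helper)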
jump nfq
}
chain post {
@@ -296,6 +297,23 @@ test_tcp_localhost()
wait 2>/dev/null
}
+test_tcp_localhost_connectclose()
+{
+ tmpfile=$(mktemp) || exit 1
+
+ ip netns exec ${nsrouter} ./connect_close -p 23456 -t $timeout &
+
+ ip netns exec ${nsrouter} ./nf-queue -q 3 -t $timeout &
+ local nfqpid=$!
+
+ sleep 1
+ rm -f "$tmpfile"
+
+ wait $rpid
+ [ $? -eq 0 ] && echo "PASS: tcp via loopback with connect/close"
+ wait 2>/dev/null
+}
+
test_tcp_localhost_requeue()
{
ip netns exec ${nsrouter} nft -f /dev/stdin <<EOF
@@ -424,6 +442,7 @@ test_queue 20
test_tcp_forward
test_tcp_localhost
+test_tcp_localhost_connectclose
test_tcp_localhost_requeue
test_icmp_vrf
diff --git a/tools/testing/selftests/netfilter/nft_synproxy.sh b/tools/testing/selftests/netfilter/nft_synproxy.sh
new file mode 100755
index 000000000000..b62933b680d6
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_synproxy.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+rnd=$(mktemp -u XXXXXXXX)
+nsr="nsr-$rnd" # synproxy machine
+ns1="ns1-$rnd" # iperf client
+ns2="ns2-$rnd" # iperf server
+
+checktool (){
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "iperf3 --version" "run test without iperf3"
+checktool "ip netns add $nsr" "create net namespace"
+
+modprobe -q nf_conntrack
+
+ip netns add $ns1
+ip netns add $ns2
+
+cleanup() {
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+ ip netns pids $ns2 | xargs kill 2>/dev/null
+ ip netns del $ns1
+ ip netns del $ns2
+
+ ip netns del $nsr
+}
+
+trap cleanup EXIT
+
+ip link add veth0 netns $nsr type veth peer name eth0 netns $ns1
+ip link add veth1 netns $nsr type veth peer name eth0 netns $ns2
+
+for dev in lo veth0 veth1; do
+ip -net $nsr link set $dev up
+done
+
+ip -net $nsr addr add 10.0.1.1/24 dev veth0
+ip -net $nsr addr add 10.0.2.1/24 dev veth1
+
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth0.forwarding=1
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth1.forwarding=1
+ip netns exec $nsr sysctl -q net.netfilter.nf_conntrack_tcp_loose=0
+
+for n in $ns1 $ns2; do
+ ip -net $n link set lo up
+ ip -net $n link set eth0 up
+done
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns2 addr add 10.0.2.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns2 route add default via 10.0.2.1
+
+# test basic connectivity
+if ! ip netns exec $ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
+ echo "ERROR: $ns1 cannot reach $ns2" 1>&2
+ exit 1
+fi
+
+if ! ip netns exec $ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
+ echo "ERROR: $ns2 cannot reach $ns1" 1>&2
+ exit 1
+fi
+
+ip netns exec $ns2 iperf3 -s > /dev/null 2>&1 &
+# ip netns exec $nsr tcpdump -vvv -n -i veth1 tcp | head -n 10 &
+
+sleep 1
+
+ip netns exec $nsr nft -f - <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority -300; policy accept;
+ meta iif veth0 tcp flags syn counter notrack
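+		# client SYNs are left untracked so the synproxy rule in the forward chain handles the handshake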
+ }
+
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+
+ ct state new,established counter accept
+
+ meta iif veth0 meta l4proto tcp ct state untracked,invalid synproxy mss 1460 sack-perm timestamp
+
+ ct state invalid counter drop
+
+		# make ns2 unreachable without tcp synproxy
+ tcp flags syn counter drop
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "SKIP: Cannot add nft synproxy"
+ exit $ksft_skip
+fi
+
+ip netns exec $ns1 timeout 5 iperf3 -c 10.0.2.99 -n $((1 * 1024 * 1024)) > /dev/null
+
+if [ $? -ne 0 ]; then
+ echo "FAIL: iperf3 returned an error" 1>&2
+ ret=$?
+ ip netns exec $nsr nft list ruleset
+else
+ echo "PASS: synproxy connection successful"
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
index 04633119b29a..5a8db0b48928 100755
--- a/tools/testing/selftests/netfilter/nft_zones_many.sh
+++ b/tools/testing/selftests/netfilter/nft_zones_many.sh
@@ -9,7 +9,7 @@ ns="ns-$sfx"
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
-zones=20000
+zones=2000
have_ct_tool=0
ret=0
@@ -75,10 +75,10 @@ EOF
while [ $i -lt $max_zones ]; do
local start=$(date +%s%3N)
- i=$((i + 10000))
+ i=$((i + 1000))
j=$((j + 1))
# nft rule in output places each packet in a different zone.
- dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
+ dd if=/dev/zero of=/dev/stdout bs=8k count=1000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345
if [ $? -ne 0 ] ;then
ret=1
break
@@ -86,7 +86,7 @@ EOF
stop=$(date +%s%3N)
local duration=$((stop-start))
- echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
+ echo "PASS: added 1000 entries in $duration ms (now $i total, loop $j)"
done
if [ $have_ct_tool -eq 1 ]; then
@@ -128,11 +128,11 @@ test_conntrack_tool() {
break
fi
- if [ $((i%10000)) -eq 0 ];then
+ if [ $((i%1000)) -eq 0 ];then
stop=$(date +%s%3N)
local duration=$((stop-start))
- echo "PASS: added 10000 entries in $duration ms (now $i total)"
+ echo "PASS: added 1000 entries in $duration ms (now $i total)"
start=$stop
fi
done
diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
index 4b93b1417b86..843ba56d8e49 100644
--- a/tools/testing/selftests/openat2/Makefile
+++ b/tools/testing/selftests/openat2/Makefile
@@ -5,4 +5,4 @@ TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
include ../lib.mk
-$(TEST_GEN_PROGS): helpers.c
+$(TEST_GEN_PROGS): helpers.c helpers.h
diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h
index a6ea27344db2..7056340b9339 100644
--- a/tools/testing/selftests/openat2/helpers.h
+++ b/tools/testing/selftests/openat2/helpers.h
@@ -9,6 +9,7 @@
#define _GNU_SOURCE
#include <stdint.h>
+#include <stdbool.h>
#include <errno.h>
#include <linux/types.h>
#include "../kselftest.h"
@@ -62,11 +63,12 @@ bool needs_openat2(const struct open_how *how);
(similar to chroot(2)). */
#endif /* RESOLVE_IN_ROOT */
-#define E_func(func, ...) \
- do { \
- if (func(__VA_ARGS__) < 0) \
- ksft_exit_fail_msg("%s:%d %s failed\n", \
- __FILE__, __LINE__, #func);\
+#define E_func(func, ...) \
+ do { \
+ errno = 0; \
+ if (func(__VA_ARGS__) < 0) \
+ ksft_exit_fail_msg("%s:%d %s failed - errno:%d\n", \
+ __FILE__, __LINE__, #func, errno); \
} while (0)
#define E_asprintf(...) E_func(asprintf, __VA_ARGS__)
diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c
index 1bddbe934204..7fb902099de4 100644
--- a/tools/testing/selftests/openat2/openat2_test.c
+++ b/tools/testing/selftests/openat2/openat2_test.c
@@ -259,6 +259,16 @@ void test_openat2_flags(void)
unlink(path);
fd = sys_openat2(AT_FDCWD, path, &test->how);
+ if (fd < 0 && fd == -EOPNOTSUPP) {
+ /*
+ * Skip the testcase if it failed because not supported
+ * by FS. (e.g. a valid O_TMPFILE combination on NFS)
+ */
+ ksft_test_result_skip("openat2 with %s fails with %d (%s)\n",
+ test->name, fd, strerror(-fd));
+ goto next;
+ }
+
if (test->err >= 0)
failed = (fd < 0);
else
@@ -303,7 +313,7 @@ skip:
else
resultfn("openat2 with %s fails with %d (%s)\n",
test->name, test->err, strerror(-test->err));
-
+next:
free(fdpath);
fflush(stdout);
}
diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c
index 8e83cf91513a..6d849dc2bee0 100644
--- a/tools/testing/selftests/perf_events/sigtrap_threads.c
+++ b/tools/testing/selftests/perf_events/sigtrap_threads.c
@@ -44,9 +44,10 @@ static struct {
} ctx;
/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */
-#define TEST_SIG_DATA(addr) (~(unsigned long)(addr))
+#define TEST_SIG_DATA(addr, id) (~(unsigned long)(addr) + id)
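+/* The id offset lets tests tell sig_data from the original and the modified attr apart. */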
-static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
+static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr,
+ unsigned long id)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_BREAKPOINT,
@@ -60,7 +61,7 @@ static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
.inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */
.remove_on_exec = 1, /* Required by sigtrap. */
.sigtrap = 1, /* Request synchronous SIGTRAP on event. */
- .sig_data = TEST_SIG_DATA(addr),
+ .sig_data = TEST_SIG_DATA(addr, id),
};
return attr;
}
@@ -110,7 +111,7 @@ FIXTURE(sigtrap_threads)
FIXTURE_SETUP(sigtrap_threads)
{
- struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on);
+ struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on, 0);
struct sigaction action = {};
int i;
@@ -165,7 +166,7 @@ TEST_F(sigtrap_threads, enable_event)
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
/* Check enabled for parent. */
ctx.iterate_on = 0;
@@ -175,7 +176,7 @@ TEST_F(sigtrap_threads, enable_event)
/* Test that modification propagates to all inherited events. */
TEST_F(sigtrap_threads, modify_and_enable_event)
{
- struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on);
+ struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on, 42);
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr), 0);
run_test_threads(_metadata, self);
@@ -184,7 +185,7 @@ TEST_F(sigtrap_threads, modify_and_enable_event)
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 42));
/* Check enabled for parent. */
ctx.iterate_on = 0;
@@ -204,7 +205,7 @@ TEST_F(sigtrap_threads, signal_stress)
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index 01f8d3c0cf2c..6922d6417e1c 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -68,7 +68,7 @@
#define PIDFD_SKIP 3
#define PIDFD_XFAIL 4
-int wait_for_pid(pid_t pid)
+static inline int wait_for_pid(pid_t pid)
{
int status, ret;
@@ -78,13 +78,20 @@ again:
if (errno == EINTR)
goto again;
+ ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
return -1;
}
- if (!WIFEXITED(status))
+ if (!WIFEXITED(status)) {
+ ksft_print_msg(
+ "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
+ WIFSIGNALED(status), WTERMSIG(status));
return -1;
+ }
- return WEXITSTATUS(status);
+ ret = WEXITSTATUS(status);
+ ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret);
+ return ret;
}
static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
index 22558524f71c..3fd8e903118f 100644
--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
@@ -12,6 +12,7 @@
#include <string.h>
#include <syscall.h>
#include <sys/wait.h>
+#include <sys/mman.h>
#include "pidfd.h"
#include "../kselftest.h"
@@ -80,7 +81,10 @@ static inline int error_check(struct error *err, const char *test_name)
return err->code;
}
+#define CHILD_STACK_SIZE 8192
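+/* Size of the mmap()ed stack handed to clone(); it is unmapped again in child_join(). */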
+
struct child {
+ char *stack;
pid_t pid;
int fd;
};
@@ -89,17 +93,22 @@ static struct child clone_newns(int (*fn)(void *), void *args,
struct error *err)
{
static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
- size_t stack_size = 1024;
- char *stack[1024] = { 0 };
struct child ret;
if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
flags |= CLONE_NEWUSER;
+ ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (ret.stack == MAP_FAILED) {
+ error_set(err, -1, "mmap of stack failed (errno %d)", errno);
+ return ret;
+ }
+
#ifdef __ia64__
- ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd);
+ ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd);
#else
- ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd);
+ ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd);
#endif
if (ret.pid < 0) {
@@ -129,6 +138,11 @@ static inline int child_join(struct child *child, struct error *err)
else if (r > 0)
error_set(err, r, "child %d reported: %d", child->pid, r);
+ if (munmap(child->stack, CHILD_STACK_SIZE)) {
+ error_set(err, -1, "munmap of child stack failed (errno %d)", errno);
+ r = -1;
+ }
+
return r;
}
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 529eb700ac26..9a2d64901d59 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -441,7 +441,6 @@ static void test_pidfd_poll_exec(int use_waitpid)
{
int pid, pidfd = 0;
int status, ret;
- pthread_t t1;
time_t prog_start = time(NULL);
const char *test_name = "pidfd_poll check for premature notification on child thread exec";
@@ -500,13 +499,14 @@ static int child_poll_leader_exit_test(void *args)
*/
*child_exit_secs = time(NULL);
syscall(SYS_exit, 0);
+	/* Never reached, but appeases the compiler, which thinks we should return. */
+ exit(0);
}
static void test_pidfd_poll_leader_exit(int use_waitpid)
{
int pid, pidfd = 0;
- int status, ret;
- time_t prog_start = time(NULL);
+ int status, ret = 0;
const char *test_name = "pidfd_poll check for premature notification on non-empty"
"group leader exit";
diff --git a/tools/testing/selftests/pidfd/pidfd_wait.c b/tools/testing/selftests/pidfd/pidfd_wait.c
index be2943f072f6..17999e082aa7 100644
--- a/tools/testing/selftests/pidfd/pidfd_wait.c
+++ b/tools/testing/selftests/pidfd/pidfd_wait.c
@@ -39,7 +39,7 @@ static int sys_waitid(int which, pid_t pid, siginfo_t *info, int options,
TEST(wait_simple)
{
- int pidfd = -1, status = 0;
+ int pidfd = -1;
pid_t parent_tid = -1;
struct clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
@@ -47,7 +47,6 @@ TEST(wait_simple)
.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
.exit_signal = SIGCHLD,
};
- int ret;
pid_t pid;
siginfo_t info = {
.si_signo = 0,
@@ -88,7 +87,7 @@ TEST(wait_simple)
TEST(wait_states)
{
- int pidfd = -1, status = 0;
+ int pidfd = -1;
pid_t parent_tid = -1;
struct clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings
index ba4d85f74cd6..a953c96aa16e 100644
--- a/tools/testing/selftests/rtc/settings
+++ b/tools/testing/selftests/rtc/settings
@@ -1 +1 @@
-timeout=90
+timeout=180
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index 0ebfe8b0e147..585f7a0c10cb 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -Wl,-no-as-needed -Wall
+CFLAGS += -Wl,-no-as-needed -Wall -isystem ../../../../usr/include/
LDFLAGS += -lpthread
TEST_GEN_PROGS := seccomp_bpf seccomp_benchmark
diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c
index 3d603f1394af..883ca85424bc 100644
--- a/tools/testing/selftests/vDSO/vdso_test_abi.c
+++ b/tools/testing/selftests/vDSO/vdso_test_abi.c
@@ -33,110 +33,114 @@ typedef long (*vdso_clock_gettime_t)(clockid_t clk_id, struct timespec *ts);
typedef long (*vdso_clock_getres_t)(clockid_t clk_id, struct timespec *ts);
typedef time_t (*vdso_time_t)(time_t *t);
-static int vdso_test_gettimeofday(void)
+#define VDSO_TEST_PASS_MSG() "\n%s(): PASS\n", __func__
+#define VDSO_TEST_FAIL_MSG(x) "\n%s(): %s FAIL\n", __func__, x
+#define VDSO_TEST_SKIP_MSG(x) "\n%s(): SKIP: Could not find %s\n", __func__, x
+
+static void vdso_test_gettimeofday(void)
{
/* Find gettimeofday. */
vdso_gettimeofday_t vdso_gettimeofday =
(vdso_gettimeofday_t)vdso_sym(version, name[0]);
if (!vdso_gettimeofday) {
- printf("Could not find %s\n", name[0]);
- return KSFT_SKIP;
+ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[0]));
+ return;
}
struct timeval tv;
long ret = vdso_gettimeofday(&tv, 0);
if (ret == 0) {
- printf("The time is %lld.%06lld\n",
- (long long)tv.tv_sec, (long long)tv.tv_usec);
+ ksft_print_msg("The time is %lld.%06lld\n",
+ (long long)tv.tv_sec, (long long)tv.tv_usec);
+ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
} else {
- printf("%s failed\n", name[0]);
- return KSFT_FAIL;
+ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[0]));
}
-
- return KSFT_PASS;
}
-static int vdso_test_clock_gettime(clockid_t clk_id)
+static void vdso_test_clock_gettime(clockid_t clk_id)
{
/* Find clock_gettime. */
vdso_clock_gettime_t vdso_clock_gettime =
(vdso_clock_gettime_t)vdso_sym(version, name[1]);
if (!vdso_clock_gettime) {
- printf("Could not find %s\n", name[1]);
- return KSFT_SKIP;
+ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[1]));
+ return;
}
struct timespec ts;
long ret = vdso_clock_gettime(clk_id, &ts);
if (ret == 0) {
- printf("The time is %lld.%06lld\n",
- (long long)ts.tv_sec, (long long)ts.tv_nsec);
+ ksft_print_msg("The time is %lld.%06lld\n",
+ (long long)ts.tv_sec, (long long)ts.tv_nsec);
+ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
} else {
- printf("%s failed\n", name[1]);
- return KSFT_FAIL;
+ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[1]));
}
-
- return KSFT_PASS;
}
-static int vdso_test_time(void)
+static void vdso_test_time(void)
{
/* Find time. */
vdso_time_t vdso_time =
(vdso_time_t)vdso_sym(version, name[2]);
if (!vdso_time) {
- printf("Could not find %s\n", name[2]);
- return KSFT_SKIP;
+ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[2]));
+ return;
}
long ret = vdso_time(NULL);
if (ret > 0) {
- printf("The time in hours since January 1, 1970 is %lld\n",
+ ksft_print_msg("The time in hours since January 1, 1970 is %lld\n",
(long long)(ret / 3600));
+ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
} else {
- printf("%s failed\n", name[2]);
- return KSFT_FAIL;
+ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[2]));
}
-
- return KSFT_PASS;
}
-static int vdso_test_clock_getres(clockid_t clk_id)
+static void vdso_test_clock_getres(clockid_t clk_id)
{
+ int clock_getres_fail = 0;
+
/* Find clock_getres. */
vdso_clock_getres_t vdso_clock_getres =
(vdso_clock_getres_t)vdso_sym(version, name[3]);
if (!vdso_clock_getres) {
- printf("Could not find %s\n", name[3]);
- return KSFT_SKIP;
+ ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[3]));
+ return;
}
struct timespec ts, sys_ts;
long ret = vdso_clock_getres(clk_id, &ts);
if (ret == 0) {
- printf("The resolution is %lld %lld\n",
- (long long)ts.tv_sec, (long long)ts.tv_nsec);
+ ksft_print_msg("The vdso resolution is %lld %lld\n",
+ (long long)ts.tv_sec, (long long)ts.tv_nsec);
} else {
- printf("%s failed\n", name[3]);
- return KSFT_FAIL;
+ clock_getres_fail++;
}
ret = syscall(SYS_clock_getres, clk_id, &sys_ts);
- if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec)) {
- printf("%s failed\n", name[3]);
- return KSFT_FAIL;
- }
+ ksft_print_msg("The syscall resolution is %lld %lld\n",
+ (long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec);
- return KSFT_PASS;
+ if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec))
+ clock_getres_fail++;
+
+ if (clock_getres_fail > 0) {
+ ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[3]));
+ } else {
+ ksft_test_result_pass(VDSO_TEST_PASS_MSG());
+ }
}
const char *vdso_clock_name[12] = {
@@ -158,36 +162,23 @@ const char *vdso_clock_name[12] = {
* This function calls vdso_test_clock_gettime and vdso_test_clock_getres
* with different values for clock_id.
*/
-static inline int vdso_test_clock(clockid_t clock_id)
+static inline void vdso_test_clock(clockid_t clock_id)
{
- int ret0, ret1;
-
- ret0 = vdso_test_clock_gettime(clock_id);
- /* A skipped test is considered passed */
- if (ret0 == KSFT_SKIP)
- ret0 = KSFT_PASS;
-
- ret1 = vdso_test_clock_getres(clock_id);
- /* A skipped test is considered passed */
- if (ret1 == KSFT_SKIP)
- ret1 = KSFT_PASS;
+ ksft_print_msg("\nclock_id: %s\n", vdso_clock_name[clock_id]);
- ret0 += ret1;
+ vdso_test_clock_gettime(clock_id);
- printf("clock_id: %s", vdso_clock_name[clock_id]);
-
- if (ret0 > 0)
- printf(" [FAIL]\n");
- else
- printf(" [PASS]\n");
-
- return ret0;
+ vdso_test_clock_getres(clock_id);
}
+#define VDSO_TEST_PLAN 16
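+/* 1 gettimeofday + 7 clock ids * (clock_gettime + clock_getres) + 1 time = 16 tests */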
+
int main(int argc, char **argv)
{
unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
- int ret;
+
+ ksft_print_header();
+ ksft_set_plan(VDSO_TEST_PLAN);
if (!sysinfo_ehdr) {
printf("AT_SYSINFO_EHDR is not present!\n");
@@ -201,44 +192,42 @@ int main(int argc, char **argv)
vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
- ret = vdso_test_gettimeofday();
+ vdso_test_gettimeofday();
#if _POSIX_TIMERS > 0
#ifdef CLOCK_REALTIME
- ret += vdso_test_clock(CLOCK_REALTIME);
+ vdso_test_clock(CLOCK_REALTIME);
#endif
#ifdef CLOCK_BOOTTIME
- ret += vdso_test_clock(CLOCK_BOOTTIME);
+ vdso_test_clock(CLOCK_BOOTTIME);
#endif
#ifdef CLOCK_TAI
- ret += vdso_test_clock(CLOCK_TAI);
+ vdso_test_clock(CLOCK_TAI);
#endif
#ifdef CLOCK_REALTIME_COARSE
- ret += vdso_test_clock(CLOCK_REALTIME_COARSE);
+ vdso_test_clock(CLOCK_REALTIME_COARSE);
#endif
#ifdef CLOCK_MONOTONIC
- ret += vdso_test_clock(CLOCK_MONOTONIC);
+ vdso_test_clock(CLOCK_MONOTONIC);
#endif
#ifdef CLOCK_MONOTONIC_RAW
- ret += vdso_test_clock(CLOCK_MONOTONIC_RAW);
+ vdso_test_clock(CLOCK_MONOTONIC_RAW);
#endif
#ifdef CLOCK_MONOTONIC_COARSE
- ret += vdso_test_clock(CLOCK_MONOTONIC_COARSE);
+ vdso_test_clock(CLOCK_MONOTONIC_COARSE);
#endif
#endif
- ret += vdso_test_time();
-
- if (ret > 0)
- return KSFT_FAIL;
+ vdso_test_time();
- return KSFT_PASS;
+ ksft_print_cnts();
+ return ksft_get_fail_cnt() == 0 ? KSFT_PASS : KSFT_FAIL;
}
diff --git a/tools/testing/selftests/vm/hugepage-mremap.c b/tools/testing/selftests/vm/hugepage-mremap.c
index 2a7c33631a29..1d689084a54b 100644
--- a/tools/testing/selftests/vm/hugepage-mremap.c
+++ b/tools/testing/selftests/vm/hugepage-mremap.c
@@ -3,9 +3,10 @@
* hugepage-mremap:
*
* Example of remapping huge page memory in a user application using the
- * mremap system call. Code assumes a hugetlbfs filesystem is mounted
- * at './huge'. The amount of memory used by this test is decided by a command
- * line argument in MBs. If missing, the default amount is 10MB.
+ * mremap system call. The path to a file in a hugetlbfs filesystem must
+ * be passed as the last argument to this test. The amount of memory used
+ * by this test in MBs can optionally be passed as an argument. If no memory
+ * amount is passed, the default amount is 10MB.
*
* To make sure the test triggers pmd sharing and goes through the 'unshare'
* path in the mremap code use 1GB (1024) or more.
@@ -25,7 +26,6 @@
#define DEFAULT_LENGTH_MB 10UL
#define MB_TO_BYTES(x) (x * 1024 * 1024)
-#define FILE_NAME "huge/hugepagefile"
#define PROTECTION (PROT_READ | PROT_WRITE | PROT_EXEC)
#define FLAGS (MAP_SHARED | MAP_ANONYMOUS)
@@ -107,17 +107,26 @@ static void register_region_with_uffd(char *addr, size_t len)
int main(int argc, char *argv[])
{
+	size_t length = 0;
+
+ if (argc != 2 && argc != 3) {
+ printf("Usage: %s [length_in_MB] <hugetlb_file>\n", argv[0]);
+ exit(1);
+ }
+
/* Read memory length as the first arg if valid, otherwise fallback to
- * the default length. Any additional args are ignored.
+ * the default length.
*/
- size_t length = argc > 1 ? (size_t)atoi(argv[1]) : 0UL;
+ if (argc == 3)
+ length = argc > 2 ? (size_t)atoi(argv[1]) : 0UL;
length = length > 0 ? length : DEFAULT_LENGTH_MB;
length = MB_TO_BYTES(length);
int ret = 0;
- int fd = open(FILE_NAME, O_CREAT | O_RDWR, 0755);
+ /* last arg is the hugetlb file name */
+ int fd = open(argv[argc-1], O_CREAT | O_RDWR, 0755);
if (fd < 0) {
perror("Open failed");
@@ -169,5 +178,8 @@ int main(int argc, char *argv[])
munmap(addr, length);
+ close(fd);
+ unlink(argv[argc-1]);
+
return ret;
}
diff --git a/tools/testing/selftests/vm/map_fixed_noreplace.c b/tools/testing/selftests/vm/map_fixed_noreplace.c
index d91bde511268..eed44322d1a6 100644
--- a/tools/testing/selftests/vm/map_fixed_noreplace.c
+++ b/tools/testing/selftests/vm/map_fixed_noreplace.c
@@ -17,9 +17,6 @@
#define MAP_FIXED_NOREPLACE 0x100000
#endif
-#define BASE_ADDRESS (256ul * 1024 * 1024)
-
-
static void dump_maps(void)
{
char cmd[32];
@@ -28,18 +25,46 @@ static void dump_maps(void)
system(cmd);
}
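+/*
+ * Reserve and immediately release an anonymous mapping so the tests below
+ * have a base address that is known to be free at this point.
+ */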
+static unsigned long find_base_addr(unsigned long size)
+{
+ void *addr;
+ unsigned long flags;
+
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ addr = mmap(NULL, size, PROT_NONE, flags, -1, 0);
+ if (addr == MAP_FAILED) {
+ printf("Error: couldn't map the space we need for the test\n");
+ return 0;
+ }
+
+ if (munmap(addr, size) != 0) {
+ printf("Error: couldn't map the space we need for the test\n");
+ return 0;
+ }
+ return (unsigned long)addr;
+}
+
int main(void)
{
+ unsigned long base_addr;
unsigned long flags, addr, size, page_size;
char *p;
page_size = sysconf(_SC_PAGE_SIZE);
+	// Let's find a base addr that is free before we start the tests
+ size = 5 * page_size;
+ base_addr = find_base_addr(size);
+ if (!base_addr) {
+ printf("Error: couldn't map the space we need for the test\n");
+ return 1;
+ }
+
flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;
// Check we can map all the areas we need below
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 5 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
@@ -60,7 +85,7 @@ int main(void)
printf("unmap() successful\n");
errno = 0;
- addr = BASE_ADDRESS + page_size;
+ addr = base_addr + page_size;
size = 3 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -80,7 +105,7 @@ int main(void)
* +4 | free | new
*/
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 5 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -101,7 +126,7 @@ int main(void)
* +4 | free |
*/
errno = 0;
- addr = BASE_ADDRESS + (2 * page_size);
+ addr = base_addr + (2 * page_size);
size = page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -121,7 +146,7 @@ int main(void)
* +4 | free | new
*/
errno = 0;
- addr = BASE_ADDRESS + (3 * page_size);
+ addr = base_addr + (3 * page_size);
size = 2 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -141,7 +166,7 @@ int main(void)
* +4 | free |
*/
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 2 * page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -161,7 +186,7 @@ int main(void)
* +4 | free |
*/
errno = 0;
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -181,7 +206,7 @@ int main(void)
* +4 | free | new
*/
errno = 0;
- addr = BASE_ADDRESS + (4 * page_size);
+ addr = base_addr + (4 * page_size);
size = page_size;
p = mmap((void *)addr, size, PROT_NONE, flags, -1, 0);
printf("mmap() @ 0x%lx-0x%lx p=%p result=%m\n", addr, addr + size, p);
@@ -192,7 +217,7 @@ int main(void)
return 1;
}
- addr = BASE_ADDRESS;
+ addr = base_addr;
size = 5 * page_size;
if (munmap((void *)addr, size) != 0) {
dump_maps();
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index 75d401741394..71d2dc198fc1 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -111,13 +111,14 @@ fi
echo "-----------------------"
echo "running hugepage-mremap"
echo "-----------------------"
-./hugepage-mremap 256
+./hugepage-mremap $mnt/huge_mremap
if [ $? -ne 0 ]; then
echo "[FAIL]"
exitcode=1
else
echo "[PASS]"
fi
+rm -f $mnt/huge_mremap
echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index d3fd24f9fae8..3fc1d2ee2948 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -46,6 +46,7 @@
#include <signal.h>
#include <poll.h>
#include <string.h>
+#include <linux/mman.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
@@ -1417,6 +1418,7 @@ static void userfaultfd_pagemap_test(unsigned int test_pgsize)
static int userfaultfd_stress(void)
{
void *area;
+ char *tmp_area;
unsigned long nr;
struct uffdio_register uffdio_register;
struct uffd_stats uffd_stats[nr_cpus];
@@ -1527,9 +1529,13 @@ static int userfaultfd_stress(void)
count_verify[nr], nr);
/* prepare next bounce */
- swap(area_src, area_dst);
+ tmp_area = area_src;
+ area_src = area_dst;
+ area_dst = tmp_area;
- swap(area_src_alias, area_dst_alias);
+ tmp_area = area_src_alias;
+ area_src_alias = area_dst_alias;
+ area_dst_alias = tmp_area;
uffd_stats_report(uffd_stats, nr_cpus);
}
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 232e958ec454..b0b91d9b0dc2 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -2,9 +2,6 @@
# SPDX-License-Identifier: GPL-2.0
TCID="zram.sh"
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
. ./zram_lib.sh
run_zram () {
@@ -18,14 +15,4 @@ echo ""
check_prereqs
-# check zram module exists
-MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
-if [ -f $MODULE_PATH ]; then
- run_zram
-elif [ -b /dev/zram0 ]; then
- run_zram
-else
- echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
- echo "$TCID : CONFIG_ZRAM is not set"
- exit $ksft_skip
-fi
+run_zram
diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh
index 114863d9fb87..8f4affe34f3e 100755
--- a/tools/testing/selftests/zram/zram01.sh
+++ b/tools/testing/selftests/zram/zram01.sh
@@ -33,9 +33,7 @@ zram_algs="lzo"
zram_fill_fs()
{
- local mem_free0=$(free -m | awk 'NR==2 {print $4}')
-
- for i in $(seq 0 $(($dev_num - 1))); do
+ for i in $(seq $dev_start $dev_end); do
echo "fill zram$i..."
local b=0
while [ true ]; do
@@ -45,29 +43,17 @@ zram_fill_fs()
b=$(($b + 1))
done
echo "zram$i can be filled with '$b' KB"
- done
- local mem_free1=$(free -m | awk 'NR==2 {print $4}')
- local used_mem=$(($mem_free0 - $mem_free1))
+ local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"`
+ local v=$((100 * 1024 * $b / $mem_used_total))
+ if [ "$v" -lt 100 ]; then
+ echo "FAIL compression ratio: 0.$v:1"
+ ERR_CODE=-1
+ return
+ fi
- local total_size=0
- for sm in $zram_sizes; do
- local s=$(echo $sm | sed 's/M//')
- total_size=$(($total_size + $s))
+ echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
done
-
- echo "zram used ${used_mem}M, zram disk sizes ${total_size}M"
-
- local v=$((100 * $total_size / $used_mem))
-
- if [ "$v" -lt 100 ]; then
- echo "FAIL compression ratio: 0.$v:1"
- ERR_CODE=-1
- zram_cleanup
- return
- fi
-
- echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK"
}
check_prereqs
@@ -81,7 +67,6 @@ zram_mount
zram_fill_fs
zram_cleanup
-zram_unload
if [ $ERR_CODE -ne 0 ]; then
echo "$TCID : [FAIL]"
diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh
index e83b404807c0..2418b0c4ed13 100755
--- a/tools/testing/selftests/zram/zram02.sh
+++ b/tools/testing/selftests/zram/zram02.sh
@@ -36,7 +36,6 @@ zram_set_memlimit
zram_makeswap
zram_swapoff
zram_cleanup
-zram_unload
if [ $ERR_CODE -ne 0 ]; then
echo "$TCID : [FAIL]"
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index 6f872f266fd1..21ec1966de76 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -5,12 +5,17 @@
# Author: Alexey Kodanev <alexey.kodanev@oracle.com>
# Modified: Naresh Kamboju <naresh.kamboju@linaro.org>
-MODULE=0
dev_makeswap=-1
dev_mounted=-1
-
+dev_start=0
+dev_end=-1
+module_load=-1
+sys_control=-1
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
+kernel_version=`uname -r | cut -d'.' -f1,2`
+kernel_major=${kernel_version%.*}
+kernel_minor=${kernel_version#*.}
trap INT
@@ -25,68 +30,104 @@ check_prereqs()
fi
}
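+# return 0 if the running kernel version is >= the given "major.minor"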
+kernel_gte()
+{
+ major=${1%.*}
+ minor=${1#*.}
+
+ if [ $kernel_major -gt $major ]; then
+ return 0
+ elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then
+ return 0
+ fi
+
+ return 1
+}
+
zram_cleanup()
{
echo "zram cleanup"
local i=
- for i in $(seq 0 $dev_makeswap); do
+ for i in $(seq $dev_start $dev_makeswap); do
swapoff /dev/zram$i
done
- for i in $(seq 0 $dev_mounted); do
+ for i in $(seq $dev_start $dev_mounted); do
umount /dev/zram$i
done
- for i in $(seq 0 $(($dev_num - 1))); do
+ for i in $(seq $dev_start $dev_end); do
echo 1 > /sys/block/zram${i}/reset
rm -rf zram$i
done
-}
+ if [ $sys_control -eq 1 ]; then
+ for i in $(seq $dev_start $dev_end); do
+ echo $i > /sys/class/zram-control/hot_remove
+ done
+ fi
-zram_unload()
-{
- if [ $MODULE -ne 0 ] ; then
- echo "zram rmmod zram"
+ if [ $module_load -eq 1 ]; then
rmmod zram > /dev/null 2>&1
fi
}
zram_load()
{
- # check zram module exists
- MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko
- if [ -f $MODULE_PATH ]; then
- MODULE=1
- echo "create '$dev_num' zram device(s)"
- modprobe zram num_devices=$dev_num
- if [ $? -ne 0 ]; then
- echo "failed to insert zram module"
- exit 1
- fi
-
- dev_num_created=$(ls /dev/zram* | wc -w)
+ echo "create '$dev_num' zram device(s)"
+
+ # zram module loaded, new kernel
+ if [ -d "/sys/class/zram-control" ]; then
+ echo "zram modules already loaded, kernel supports" \
+ "zram-control interface"
+ dev_start=$(ls /dev/zram* | wc -w)
+ dev_end=$(($dev_start + $dev_num - 1))
+ sys_control=1
+
+ for i in $(seq $dev_start $dev_end); do
+ cat /sys/class/zram-control/hot_add > /dev/null
+ done
+
+ echo "all zram devices (/dev/zram$dev_start~$dev_end" \
+ "successfully created"
+ return 0
+ fi
- if [ "$dev_num_created" -ne "$dev_num" ]; then
- echo "unexpected num of devices: $dev_num_created"
- ERR_CODE=-1
+ # detect old kernel or built-in
+ modprobe zram num_devices=$dev_num
+ if [ ! -d "/sys/class/zram-control" ]; then
+ if grep -q '^zram' /proc/modules; then
+ rmmod zram > /dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ echo "zram module is being used on old kernel" \
+ "without zram-control interface"
+ exit $ksft_skip
+ fi
else
- echo "zram load module successful"
+ echo "test needs CONFIG_ZRAM=m on old kernel without" \
+ "zram-control interface"
+ exit $ksft_skip
fi
- elif [ -b /dev/zram0 ]; then
- echo "/dev/zram0 device file found: OK"
- else
- echo "ERROR: No zram.ko module or no /dev/zram0 device found"
- echo "$TCID : CONFIG_ZRAM is not set"
- exit 1
+ modprobe zram num_devices=$dev_num
fi
+
+ module_load=1
+ dev_end=$(($dev_num - 1))
+ echo "all zram devices (/dev/zram0~$dev_end) successfully created"
}
zram_max_streams()
{
echo "set max_comp_streams to zram device(s)"
- local i=0
+ kernel_gte 4.7
+ if [ $? -eq 0 ]; then
+ echo "The device attribute max_comp_streams was"\
+ "deprecated in 4.7"
+ return 0
+ fi
+
+ local i=$dev_start
for max_s in $zram_max_streams; do
local sys_path="/sys/block/zram${i}/max_comp_streams"
echo $max_s > $sys_path || \
@@ -98,7 +139,7 @@ zram_max_streams()
echo "FAIL can't set max_streams '$max_s', get $max_stream"
i=$(($i + 1))
- echo "$sys_path = '$max_streams' ($i/$dev_num)"
+ echo "$sys_path = '$max_streams'"
done
echo "zram max streams: OK"
@@ -108,15 +149,16 @@ zram_compress_alg()
{
echo "test that we can set compression algorithm"
- local algs=$(cat /sys/block/zram0/comp_algorithm)
+ local i=$dev_start
+ local algs=$(cat /sys/block/zram${i}/comp_algorithm)
echo "supported algs: $algs"
- local i=0
+
for alg in $zram_algs; do
local sys_path="/sys/block/zram${i}/comp_algorithm"
echo "$alg" > $sys_path || \
echo "FAIL can't set '$alg' to $sys_path"
i=$(($i + 1))
- echo "$sys_path = '$alg' ($i/$dev_num)"
+ echo "$sys_path = '$alg'"
done
echo "zram set compression algorithm: OK"
@@ -125,14 +167,14 @@ zram_compress_alg()
zram_set_disksizes()
{
echo "set disk size to zram device(s)"
- local i=0
+ local i=$dev_start
for ds in $zram_sizes; do
local sys_path="/sys/block/zram${i}/disksize"
echo "$ds" > $sys_path || \
echo "FAIL can't set '$ds' to $sys_path"
i=$(($i + 1))
- echo "$sys_path = '$ds' ($i/$dev_num)"
+ echo "$sys_path = '$ds'"
done
echo "zram set disksizes: OK"
@@ -142,14 +184,14 @@ zram_set_memlimit()
{
echo "set memory limit to zram device(s)"
- local i=0
+ local i=$dev_start
for ds in $zram_mem_limits; do
local sys_path="/sys/block/zram${i}/mem_limit"
echo "$ds" > $sys_path || \
echo "FAIL can't set '$ds' to $sys_path"
i=$(($i + 1))
- echo "$sys_path = '$ds' ($i/$dev_num)"
+ echo "$sys_path = '$ds'"
done
echo "zram set memory limit: OK"
@@ -158,8 +200,8 @@ zram_set_memlimit()
zram_makeswap()
{
echo "make swap with zram device(s)"
- local i=0
- for i in $(seq 0 $(($dev_num - 1))); do
+ local i=$dev_start
+ for i in $(seq $dev_start $dev_end); do
mkswap /dev/zram$i > err.log 2>&1
if [ $? -ne 0 ]; then
cat err.log
@@ -182,7 +224,7 @@ zram_makeswap()
zram_swapoff()
{
local i=
- for i in $(seq 0 $dev_makeswap); do
+ for i in $(seq $dev_start $dev_end); do
swapoff /dev/zram$i > err.log 2>&1
if [ $? -ne 0 ]; then
cat err.log
@@ -196,7 +238,7 @@ zram_swapoff()
zram_makefs()
{
- local i=0
+ local i=$dev_start
for fs in $zram_filesystems; do
# if requested fs not supported default it to ext2
which mkfs.$fs > /dev/null 2>&1 || fs=ext2
@@ -215,7 +257,7 @@ zram_makefs()
zram_mount()
{
local i=0
- for i in $(seq 0 $(($dev_num - 1))); do
+ for i in $(seq $dev_start $dev_end); do
echo "mount /dev/zram$i"
mkdir zram$i
mount /dev/zram$i zram$i > /dev/null || \
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 7c39728d08de..5a1eda617992 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -1,5 +1,6 @@
NAME := rtla
-VERSION := 0.5
+# Follow the kernel version
+VERSION := $(shell cat VERSION 2> /dev/null || make -sC ../../.. kernelversion)
# From libtracefs:
# Makefiles suck: This macro sets a default value of $(2) for the
@@ -85,6 +86,7 @@ clean: doc_clean
tarball: clean
rm -rf $(NAME)-$(VERSION) && mkdir $(NAME)-$(VERSION)
+ echo $(VERSION) > $(NAME)-$(VERSION)/VERSION
cp -r $(DIRS) $(FILES) $(NAME)-$(VERSION)
mkdir $(NAME)-$(VERSION)/Documentation/
cp -rp $(SRCTREE)/../../../Documentation/tools/rtla/* $(NAME)-$(VERSION)/Documentation/
diff --git a/tools/tracing/rtla/src/osnoise.c b/tools/tracing/rtla/src/osnoise.c
index 7b73d1eccd0e..e60f1862bad0 100644
--- a/tools/tracing/rtla/src/osnoise.c
+++ b/tools/tracing/rtla/src/osnoise.c
@@ -750,6 +750,9 @@ void osnoise_put_context(struct osnoise_context *context)
*/
void osnoise_destroy_tool(struct osnoise_tool *top)
{
+ if (!top)
+ return;
+
trace_instance_destroy(&top->trace);
if (top->context)
@@ -807,7 +810,7 @@ struct osnoise_tool *osnoise_init_trace_tool(char *tracer)
retval = enable_tracer_by_name(trace->trace.inst, tracer);
if (retval) {
- err_msg("Could not enable osnoiser tracer for tracing\n");
+ err_msg("Could not enable %s tracer for tracing\n", tracer);
goto out_err;
}
diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c
index 180fcbe423cd..52c053cc1789 100644
--- a/tools/tracing/rtla/src/osnoise_hist.c
+++ b/tools/tracing/rtla/src/osnoise_hist.c
@@ -426,7 +426,7 @@ static void osnoise_hist_usage(char *usage)
static const char * const msg[] = {
"",
" usage: rtla osnoise hist [-h] [-D] [-d s] [-p us] [-r us] [-s us] [-S us] [-t[=file]] \\",
- " [-c cpu-list] [-P priority] [-b N] [-e N] [--no-header] [--no-summary] \\",
+ " [-c cpu-list] [-P priority] [-b N] [-E N] [--no-header] [--no-summary] \\",
" [--no-index] [--with-zeros]",
"",
" -h/--help: print this menu",
@@ -439,7 +439,7 @@ static void osnoise_hist_usage(char *usage)
" -D/--debug: print debug info",
" -t/--trace[=file]: save the stopped trace to [file|osnoise_trace.txt]",
" -b/--bucket-size N: set the histogram bucket size (default 1)",
- " -e/--entries N: set the number of entries of the histogram (default 256)",
+ " -E/--entries N: set the number of entries of the histogram (default 256)",
" --no-header: do not print header",
" --no-summary: do not print summary",
" --no-index: do not print index",
@@ -486,7 +486,7 @@ static struct osnoise_hist_params
while (1) {
static struct option long_options[] = {
{"bucket-size", required_argument, 0, 'b'},
- {"entries", required_argument, 0, 'e'},
+ {"entries", required_argument, 0, 'E'},
{"cpus", required_argument, 0, 'c'},
{"debug", no_argument, 0, 'D'},
{"duration", required_argument, 0, 'd'},
@@ -507,7 +507,7 @@ static struct osnoise_hist_params
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "c:b:d:e:Dhp:P:r:s:S:t::0123",
+ c = getopt_long(argc, argv, "c:b:d:E:Dhp:P:r:s:S:t::0123",
long_options, &option_index);
/* detect the end of the options. */
@@ -534,7 +534,7 @@ static struct osnoise_hist_params
if (!params->duration)
osnoise_hist_usage("Invalid -D duration\n");
break;
- case 'e':
+ case 'E':
params->entries = get_llong_from_str(optarg);
if ((params->entries < 10) || (params->entries > 9999999))
osnoise_hist_usage("Entries must be > 10 and < 9999999\n");
@@ -701,9 +701,9 @@ osnoise_hist_set_signals(struct osnoise_hist_params *params)
int osnoise_hist_main(int argc, char *argv[])
{
struct osnoise_hist_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *tool = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *tool;
int return_value = 1;
int retval;
@@ -792,9 +792,8 @@ int osnoise_hist_main(int argc, char *argv[])
out_hist:
osnoise_free_histogram(tool->data);
out_destroy:
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
- if (params->trace_output)
- osnoise_destroy_tool(record);
free(params);
out_exit:
exit(return_value);
diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c
index 332b2ac205fc..7af769b9c0de 100644
--- a/tools/tracing/rtla/src/osnoise_top.c
+++ b/tools/tracing/rtla/src/osnoise_top.c
@@ -483,9 +483,9 @@ static void osnoise_top_set_signals(struct osnoise_top_params *params)
int osnoise_top_main(int argc, char **argv)
{
struct osnoise_top_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *tool = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *tool;
int return_value = 1;
int retval;
@@ -571,9 +571,9 @@ int osnoise_top_main(int argc, char **argv)
out_top:
osnoise_free_top(tool->data);
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
- if (params->trace_output)
- osnoise_destroy_tool(record);
+ free(params);
out_exit:
exit(return_value);
}
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 235f9620ef3d..237e1735afa7 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -429,7 +429,7 @@ static void timerlat_hist_usage(char *usage)
char *msg[] = {
"",
" usage: [rtla] timerlat hist [-h] [-q] [-d s] [-D] [-n] [-p us] [-i us] [-T us] [-s us] [-t[=file]] \\",
- " [-c cpu-list] [-P priority] [-e N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
+ " [-c cpu-list] [-P priority] [-E N] [-b N] [--no-irq] [--no-thread] [--no-header] [--no-summary] \\",
" [--no-index] [--with-zeros]",
"",
" -h/--help: print this menu",
@@ -443,7 +443,7 @@ static void timerlat_hist_usage(char *usage)
" -T/--trace[=file]: save the stopped trace to [file|timerlat_trace.txt]",
" -n/--nano: display data in nanoseconds",
" -b/--bucket-size N: set the histogram bucket size (default 1)",
- " -e/--entries N: set the number of entries of the histogram (default 256)",
+ " -E/--entries N: set the number of entries of the histogram (default 256)",
" --no-irq: ignore IRQ latencies",
" --no-thread: ignore thread latencies",
" --no-header: do not print header",
@@ -494,7 +494,7 @@ static struct timerlat_hist_params
{"cpus", required_argument, 0, 'c'},
{"bucket-size", required_argument, 0, 'b'},
{"debug", no_argument, 0, 'D'},
- {"entries", required_argument, 0, 'e'},
+ {"entries", required_argument, 0, 'E'},
{"duration", required_argument, 0, 'd'},
{"help", no_argument, 0, 'h'},
{"irq", required_argument, 0, 'i'},
@@ -516,7 +516,7 @@ static struct timerlat_hist_params
/* getopt_long stores the option index here. */
int option_index = 0;
- c = getopt_long(argc, argv, "c:b:d:e:Dhi:np:P:s:t::T:012345",
+ c = getopt_long(argc, argv, "c:b:d:E:Dhi:np:P:s:t::T:012345",
long_options, &option_index);
/* detect the end of the options. */
@@ -543,7 +543,7 @@ static struct timerlat_hist_params
if (!params->duration)
timerlat_hist_usage("Invalid -D duration\n");
break;
- case 'e':
+ case 'E':
params->entries = get_llong_from_str(optarg);
if ((params->entries < 10) || (params->entries > 9999999))
timerlat_hist_usage("Entries must be > 10 and < 9999999\n");
@@ -729,9 +729,9 @@ timerlat_hist_set_signals(struct timerlat_hist_params *params)
int timerlat_hist_main(int argc, char *argv[])
{
struct timerlat_hist_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *tool = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *tool;
int return_value = 1;
int retval;
@@ -813,9 +813,8 @@ int timerlat_hist_main(int argc, char *argv[])
out_hist:
timerlat_free_histogram(tool->data);
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
- if (params->trace_output)
- osnoise_destroy_tool(record);
free(params);
out_exit:
exit(return_value);
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 1ebd5291539c..d4187f6534ed 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -521,9 +521,9 @@ timerlat_top_set_signals(struct timerlat_top_params *params)
int timerlat_top_main(int argc, char *argv[])
{
struct timerlat_top_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *top = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *top;
int return_value = 1;
int retval;
@@ -609,9 +609,8 @@ int timerlat_top_main(int argc, char *argv[])
out_top:
timerlat_free_top(top->data);
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(top);
- if (params->trace_output)
- osnoise_destroy_tool(record);
free(params);
out_exit:
exit(return_value);
diff --git a/tools/tracing/rtla/src/trace.c b/tools/tracing/rtla/src/trace.c
index 107a0c6387f7..83de259abcc1 100644
--- a/tools/tracing/rtla/src/trace.c
+++ b/tools/tracing/rtla/src/trace.c
@@ -20,14 +20,14 @@ int enable_tracer_by_name(struct tracefs_instance *inst, const char *tracer_name
tracer = TRACEFS_TRACER_CUSTOM;
- debug_msg("enabling %s tracer\n", tracer_name);
+ debug_msg("Enabling %s tracer\n", tracer_name);
retval = tracefs_tracer_set(inst, tracer, tracer_name);
if (retval < 0) {
if (errno == ENODEV)
- err_msg("tracer %s not found!\n", tracer_name);
+ err_msg("Tracer %s not found!\n", tracer_name);
- err_msg("failed to enable the tracer %s\n", tracer_name);
+ err_msg("Failed to enable the %s tracer\n", tracer_name);
return -1;
}
@@ -44,7 +44,7 @@ void disable_tracer(struct tracefs_instance *inst)
retval = tracefs_tracer_set(inst, t);
if (retval < 0)
- err_msg("oops, error disabling tracer\n");
+ err_msg("Oops, error disabling tracer\n");
}
/*
diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c
index 1c9f0eea6166..ffaf8ec84001 100644
--- a/tools/tracing/rtla/src/utils.c
+++ b/tools/tracing/rtla/src/utils.c
@@ -77,11 +77,11 @@ void get_duration(time_t start_time, char *output, int output_size)
time_t duration;
duration = difftime(now, start_time);
- tm_info = localtime(&duration);
+ tm_info = gmtime(&duration);
snprintf(output, output_size, "%3d %02d:%02d:%02d",
tm_info->tm_yday,
- tm_info->tm_hour - 1,
+ tm_info->tm_hour,
tm_info->tm_min,
tm_info->tm_sec);
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 58d31da8a2f7..0afc016cc54d 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5528,9 +5528,7 @@ static int kvm_suspend(void)
static void kvm_resume(void)
{
if (kvm_usage_count) {
-#ifdef CONFIG_LOCKDEP
- WARN_ON(lockdep_is_held(&kvm_count_lock));
-#endif
+ lockdep_assert_not_held(&kvm_count_lock);
hardware_enable_nolock(NULL);
}
}